cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

milbeaut-hdmac.c (14324B)


// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Linaro Ltd.
// Copyright (C) 2019 Socionext Inc.

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/bitfield.h>

#include "virt-dma.h"

#define MLB_HDMAC_DMACR		0x0	/* global */
#define MLB_HDMAC_DE		BIT(31)
#define MLB_HDMAC_DS		BIT(30)
#define MLB_HDMAC_PR		BIT(28)
#define MLB_HDMAC_DH		GENMASK(27, 24)

#define MLB_HDMAC_CH_STRIDE	0x10

#define MLB_HDMAC_DMACA		0x0	/* channel */
#define MLB_HDMAC_EB		BIT(31)
#define MLB_HDMAC_PB		BIT(30)
#define MLB_HDMAC_ST		BIT(29)
#define MLB_HDMAC_IS		GENMASK(28, 24)
#define MLB_HDMAC_BT		GENMASK(23, 20)
#define MLB_HDMAC_BC		GENMASK(19, 16)
#define MLB_HDMAC_TC		GENMASK(15, 0)
#define MLB_HDMAC_DMACB		0x4
#define MLB_HDMAC_TT		GENMASK(31, 30)
#define MLB_HDMAC_MS		GENMASK(29, 28)
#define MLB_HDMAC_TW		GENMASK(27, 26)
#define MLB_HDMAC_FS		BIT(25)
#define MLB_HDMAC_FD		BIT(24)
#define MLB_HDMAC_RC		BIT(23)
#define MLB_HDMAC_RS		BIT(22)
#define MLB_HDMAC_RD		BIT(21)
#define MLB_HDMAC_EI		BIT(20)
#define MLB_HDMAC_CI		BIT(19)
#define HDMAC_PAUSE		0x7
#define MLB_HDMAC_SS		GENMASK(18, 16)
#define MLB_HDMAC_SP		GENMASK(15, 12)
#define MLB_HDMAC_DP		GENMASK(11, 8)
#define MLB_HDMAC_DMACSA	0x8
#define MLB_HDMAC_DMACDA	0xc

#define MLB_HDMAC_BUSWIDTHS		(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
					BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

struct milbeaut_hdmac_desc {
	struct virt_dma_desc vd;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int sg_cur;
	enum dma_transfer_direction dir;
};

struct milbeaut_hdmac_chan {
	struct virt_dma_chan vc;
	struct milbeaut_hdmac_device *mdev;
	struct milbeaut_hdmac_desc *md;
	void __iomem *reg_ch_base;
	unsigned int slave_id;
	struct dma_slave_config	cfg;
};

struct milbeaut_hdmac_device {
	struct dma_device ddev;
	struct clk *clk;
	void __iomem *reg_base;
	struct milbeaut_hdmac_chan channels[];
};

static struct milbeaut_hdmac_chan *
to_milbeaut_hdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct milbeaut_hdmac_chan, vc);
}

static struct milbeaut_hdmac_desc *
to_milbeaut_hdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct milbeaut_hdmac_desc, vd);
}

/* mc->vc.lock must be held by caller */
static struct milbeaut_hdmac_desc *
milbeaut_hdmac_next_desc(struct milbeaut_hdmac_chan *mc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&mc->vc);
	if (!vd) {
		mc->md = NULL;
		return NULL;
	}

	list_del(&vd->node);

	mc->md = to_milbeaut_hdmac_desc(vd);

	return mc->md;
}

/* mc->vc.lock must be held by caller */
static void milbeaut_chan_start(struct milbeaut_hdmac_chan *mc,
				struct milbeaut_hdmac_desc *md)
{
	struct scatterlist *sg;
	u32 cb, ca, src_addr, dest_addr, len;
	u32 width, burst;

	sg = &md->sgl[md->sg_cur];
	len = sg_dma_len(sg);

	cb = MLB_HDMAC_CI | MLB_HDMAC_EI;
	if (md->dir == DMA_MEM_TO_DEV) {
		cb |= MLB_HDMAC_FD;
		width = mc->cfg.dst_addr_width;
		burst = mc->cfg.dst_maxburst;
		src_addr = sg_dma_address(sg);
		dest_addr = mc->cfg.dst_addr;
	} else {
		cb |= MLB_HDMAC_FS;
		width = mc->cfg.src_addr_width;
		burst = mc->cfg.src_maxburst;
		src_addr = mc->cfg.src_addr;
		dest_addr = sg_dma_address(sg);
	}
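	/* width >> 1 maps the 1-, 2- and 4-byte bus widths to TW values 0, 1 and 2 */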
	cb |= FIELD_PREP(MLB_HDMAC_TW, (width >> 1));
	cb |= FIELD_PREP(MLB_HDMAC_MS, 2);

	writel_relaxed(MLB_HDMAC_DE, mc->mdev->reg_base + MLB_HDMAC_DMACR);
	writel_relaxed(src_addr, mc->reg_ch_base + MLB_HDMAC_DMACSA);
	writel_relaxed(dest_addr, mc->reg_ch_base + MLB_HDMAC_DMACDA);
	writel_relaxed(cb, mc->reg_ch_base + MLB_HDMAC_DMACB);

	ca = FIELD_PREP(MLB_HDMAC_IS, mc->slave_id);
	if (burst == 16)
		ca |= FIELD_PREP(MLB_HDMAC_BT, 0xf);
	else if (burst == 8)
		ca |= FIELD_PREP(MLB_HDMAC_BT, 0xd);
	else if (burst == 4)
		ca |= FIELD_PREP(MLB_HDMAC_BT, 0xb);
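	/* scale burst to bytes; TC then holds the number of bursts in this sg minus one */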
	burst *= width;
	ca |= FIELD_PREP(MLB_HDMAC_TC, (len / burst - 1));
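	/* program DMACA first, then set EB in a second write to start the transfer */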
	writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
	ca |= MLB_HDMAC_EB;
	writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
}

/* mc->vc.lock must be held by caller */
static void milbeaut_hdmac_start(struct milbeaut_hdmac_chan *mc)
{
	struct milbeaut_hdmac_desc *md;

	md = milbeaut_hdmac_next_desc(mc);
	if (md)
		milbeaut_chan_start(mc, md);
}

static irqreturn_t milbeaut_hdmac_interrupt(int irq, void *dev_id)
{
	struct milbeaut_hdmac_chan *mc = dev_id;
	struct milbeaut_hdmac_desc *md;
	u32 val;

	spin_lock(&mc->vc.lock);

	/* Ack and Disable irqs */
	val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACB);
	val &= ~(FIELD_PREP(MLB_HDMAC_SS, HDMAC_PAUSE));
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
	val &= ~MLB_HDMAC_EI;
	val &= ~MLB_HDMAC_CI;
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);

	md = mc->md;
	if (!md)
		goto out;

	md->sg_cur++;

	if (md->sg_cur >= md->sg_len) {
		vchan_cookie_complete(&md->vd);
		md = milbeaut_hdmac_next_desc(mc);
		if (!md)
			goto out;
	}

	milbeaut_chan_start(mc, md);

out:
	spin_unlock(&mc->vc.lock);
	return IRQ_HANDLED;
}

static void milbeaut_hdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static int
milbeaut_hdmac_chan_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);

	spin_lock(&mc->vc.lock);
	mc->cfg = *cfg;
	spin_unlock(&mc->vc.lock);

	return 0;
}

static int milbeaut_hdmac_chan_pause(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
	u32 val;

	spin_lock(&mc->vc.lock);
	val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
	val |= MLB_HDMAC_PB;
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
	spin_unlock(&mc->vc.lock);

	return 0;
}

static int milbeaut_hdmac_chan_resume(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
	u32 val;

	spin_lock(&mc->vc.lock);
	val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
	val &= ~MLB_HDMAC_PB;
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
	spin_unlock(&mc->vc.lock);

	return 0;
}

static struct dma_async_tx_descriptor *
milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_desc *md;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	md = kzalloc(sizeof(*md), GFP_NOWAIT);
	if (!md)
		return NULL;

	md->sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!md->sgl) {
		kfree(md);
		return NULL;
	}

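	/* keep a private copy of the scatterlist; the interrupt handler walks it one entry at a time */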
	for (i = 0; i < sg_len; i++)
		md->sgl[i] = sgl[i];

	md->sg_len = sg_len;
	md->dir = direction;

	return vchan_tx_prep(vc, &md->vd, flags);
}

static int milbeaut_hdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
	unsigned long flags;
	u32 val;

	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
	val &= ~MLB_HDMAC_EB; /* disable the channel */
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);

	if (mc->md) {
		vchan_terminate_vdesc(&mc->md->vd);
		mc->md = NULL;
	}

	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return 0;
}

static void milbeaut_hdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}

static enum dma_status milbeaut_hdmac_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct virt_dma_chan *vc;
	struct virt_dma_desc *vd;
	struct milbeaut_hdmac_chan *mc;
	struct milbeaut_hdmac_desc *md = NULL;
	enum dma_status stat;
	unsigned long flags;
	int i;

	stat = dma_cookie_status(chan, cookie, txstate);
	/* Return immediately if we do not need to compute the residue. */
	if (stat == DMA_COMPLETE || !txstate)
		return stat;

	vc = to_virt_chan(chan);

	spin_lock_irqsave(&vc->lock, flags);

	mc = to_milbeaut_hdmac_chan(vc);

	/* residue from the on-flight chunk */
	if (mc->md && mc->md->vd.tx.cookie == cookie) {
		struct scatterlist *sg;
		u32 done;

		md = mc->md;
		sg = &md->sgl[md->sg_cur];

		if (md->dir == DMA_DEV_TO_MEM)
			done = readl_relaxed(mc->reg_ch_base
					     + MLB_HDMAC_DMACDA);
		else
			done = readl_relaxed(mc->reg_ch_base
					     + MLB_HDMAC_DMACSA);
		done -= sg_dma_address(sg);

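		/*
		 * Start from minus the bytes already transferred: the loop
		 * below adds back the full length of this sg, so the sum is
		 * the untransferred remainder of the in-flight chunk.
		 */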
		txstate->residue = -done;
	}

	if (!md) {
		vd = vchan_find_desc(vc, cookie);
		if (vd)
			md = to_milbeaut_hdmac_desc(vd);
	}

	if (md) {
		/* residue from the queued chunks */
		for (i = md->sg_cur; i < md->sg_len; i++)
			txstate->residue += sg_dma_len(&md->sgl[i]);
	}

	spin_unlock_irqrestore(&vc->lock, flags);

	return stat;
}

static void milbeaut_hdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !mc->md)
		milbeaut_hdmac_start(mc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void milbeaut_hdmac_desc_free(struct virt_dma_desc *vd)
{
	struct milbeaut_hdmac_desc *md = to_milbeaut_hdmac_desc(vd);

	kfree(md->sgl);
	kfree(md);
}

static struct dma_chan *
milbeaut_hdmac_xlate(struct of_phandle_args *dma_spec, struct of_dma *of_dma)
{
	struct milbeaut_hdmac_device *mdev = of_dma->of_dma_data;
	struct milbeaut_hdmac_chan *mc;
	struct virt_dma_chan *vc;
	struct dma_chan *chan;

	if (dma_spec->args_count != 1)
		return NULL;

	chan = dma_get_any_slave_channel(&mdev->ddev);
	if (!chan)
		return NULL;

	vc = to_virt_chan(chan);
	mc = to_milbeaut_hdmac_chan(vc);
	mc->slave_id = dma_spec->args[0];

	return chan;
}

static int milbeaut_hdmac_chan_init(struct platform_device *pdev,
				    struct milbeaut_hdmac_device *mdev,
				    int chan_id)
{
	struct device *dev = &pdev->dev;
	struct milbeaut_hdmac_chan *mc = &mdev->channels[chan_id];
	char *irq_name;
	int irq, ret;

	irq = platform_get_irq(pdev, chan_id);
	if (irq < 0)
		return irq;

	irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-hdmac-%d",
				  chan_id);
	if (!irq_name)
		return -ENOMEM;

	ret = devm_request_irq(dev, irq, milbeaut_hdmac_interrupt,
			       IRQF_SHARED, irq_name, mc);
	if (ret)
		return ret;

	mc->mdev = mdev;
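	/* per-channel register windows start one MLB_HDMAC_CH_STRIDE past the global block, hence chan_id + 1 */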
	mc->reg_ch_base = mdev->reg_base + MLB_HDMAC_CH_STRIDE * (chan_id + 1);
	mc->vc.desc_free = milbeaut_hdmac_desc_free;
	vchan_init(&mc->vc, &mdev->ddev);

	return 0;
}

static int milbeaut_hdmac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct milbeaut_hdmac_device *mdev;
	struct dma_device *ddev;
	int nr_chans, ret, i;

	nr_chans = platform_irq_count(pdev);
	if (nr_chans < 0)
		return nr_chans;

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdev->reg_base))
		return PTR_ERR(mdev->reg_base);

	mdev->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(mdev->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(mdev->clk);
	}

	ret = clk_prepare_enable(mdev->clk);
	if (ret)
		return ret;

	ddev = &mdev->ddev;
	ddev->dev = dev;
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	ddev->src_addr_widths = MLB_HDMAC_BUSWIDTHS;
	ddev->dst_addr_widths = MLB_HDMAC_BUSWIDTHS;
	ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	ddev->device_free_chan_resources = milbeaut_hdmac_free_chan_resources;
	ddev->device_config = milbeaut_hdmac_chan_config;
	ddev->device_pause = milbeaut_hdmac_chan_pause;
	ddev->device_resume = milbeaut_hdmac_chan_resume;
	ddev->device_prep_slave_sg = milbeaut_hdmac_prep_slave_sg;
	ddev->device_terminate_all = milbeaut_hdmac_terminate_all;
	ddev->device_synchronize = milbeaut_hdmac_synchronize;
	ddev->device_tx_status = milbeaut_hdmac_tx_status;
	ddev->device_issue_pending = milbeaut_hdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++) {
		ret = milbeaut_hdmac_chan_init(pdev, mdev, i);
		if (ret)
			goto disable_clk;
	}

	ret = dma_async_device_register(ddev);
	if (ret)
		goto disable_clk;

	ret = of_dma_controller_register(dev->of_node,
					 milbeaut_hdmac_xlate, mdev);
	if (ret)
		goto unregister_dmac;

	platform_set_drvdata(pdev, mdev);

	return 0;

unregister_dmac:
	dma_async_device_unregister(ddev);
disable_clk:
	clk_disable_unprepare(mdev->clk);

	return ret;
}

static int milbeaut_hdmac_remove(struct platform_device *pdev)
{
	struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * be still holding one descriptor that was on-flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid memory leak.
	 */
	list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		milbeaut_hdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->ddev);
	clk_disable_unprepare(mdev->clk);

	return 0;
}

static const struct of_device_id milbeaut_hdmac_match[] = {
	{ .compatible = "socionext,milbeaut-m10v-hdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);

static struct platform_driver milbeaut_hdmac_driver = {
	.probe = milbeaut_hdmac_probe,
	.remove = milbeaut_hdmac_remove,
	.driver = {
		.name = "milbeaut-m10v-hdmac",
		.of_match_table = milbeaut_hdmac_match,
	},
};
module_platform_driver(milbeaut_hdmac_driver);

MODULE_DESCRIPTION("Milbeaut HDMAC DmaEngine driver");
MODULE_LICENSE("GPL v2");