cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mpc512x_lpbfifo.c (14318B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * The driver for the Freescale MPC512x LocalPlus Bus FIFO
 * (called SCLPC in the Reference Manual).
 *
 * Copyright (C) 2013-2015 Alexander Popov <alex.popov@linux.com>.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/mpc5121.h>
#include <asm/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>

#define DRV_NAME "mpc512x_lpbfifo"

struct cs_range {
	u32 csnum;
	u32 base; /* must be zero */
	u32 addr;
	u32 size;
};

static struct lpbfifo_data {
	spinlock_t lock; /* for protecting lpbfifo_data */
	phys_addr_t regs_phys;
	resource_size_t regs_size;
	struct mpc512x_lpbfifo __iomem *regs;
	int irq;
	struct cs_range *cs_ranges;
	size_t cs_n;
	struct dma_chan *chan;
	struct mpc512x_lpbfifo_request *req;
	dma_addr_t ram_bus_addr;
	bool wait_lpbfifo_irq;
	bool wait_lpbfifo_callback;
} lpbfifo;

/*
 * A data transfer from RAM to some device on the LPB is finished
 * when both mpc512x_lpbfifo_irq() and mpc512x_lpbfifo_callback()
 * have been called. We execute the callback registered in
 * mpc512x_lpbfifo_request just after that.
 * But for a data transfer from some device on the LPB to RAM we don't enable
 * the LPBFIFO interrupt, because clearing the MPC512X_SCLPC_SUCCESS interrupt
 * flag automatically disables the LPBFIFO read request to the DMA controller
 * and the data transfer hangs. So the callback registered in
 * mpc512x_lpbfifo_request is executed at the end of mpc512x_lpbfifo_callback().
 */
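
/*
 * Summary of the two completion paths in terms of the wait_* flags above
 * (as implemented by mpc512x_lpbfifo_kick() and the two handlers below):
 *
 *   WRITE (RAM -> dev): wait_lpbfifo_irq and wait_lpbfifo_callback both
 *   start as true; the FIFO IRQ clears the first, the DMA callback clears
 *   the second, and whichever finishes last runs req->callback().
 *
 *   READ (dev -> RAM): wait_lpbfifo_irq is cleared up front in
 *   mpc512x_lpbfifo_kick(), so the DMA callback alone completes the
 *   request and runs req->callback().
 */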

/*
 * mpc512x_lpbfifo_irq - IRQ handler for LPB FIFO
 */
static irqreturn_t mpc512x_lpbfifo_irq(int irq, void *param)
{
	struct device *dev = (struct device *)param;
	struct mpc512x_lpbfifo_request *req = NULL;
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	if (!lpbfifo.regs)
		goto end;

	req = lpbfifo.req;
	if (!req || req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) {
		dev_err(dev, "bogus LPBFIFO IRQ\n");
		goto end;
	}

	status = in_be32(&lpbfifo.regs->status);
	if (status != MPC512X_SCLPC_SUCCESS) {
		dev_err(dev, "DMA transfer from RAM to peripheral failed\n");
		out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
		goto end;
	}
	/* Clear the interrupt flag */
	out_be32(&lpbfifo.regs->status, MPC512X_SCLPC_SUCCESS);

	lpbfifo.wait_lpbfifo_irq = false;

	if (lpbfifo.wait_lpbfifo_callback)
		goto end;

	/* Transfer is finished, set the FIFO as idle */
	lpbfifo.req = NULL;

	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	if (req->callback)
		req->callback(req);

	return IRQ_HANDLED;

 end:
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return IRQ_HANDLED;
}

/*
 * mpc512x_lpbfifo_callback is called by the DMA driver when
 * the DMA transaction is finished.
 */
static void mpc512x_lpbfifo_callback(void *param)
{
	unsigned long flags;
	struct mpc512x_lpbfifo_request *req = NULL;
	enum dma_data_direction dir;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	if (!lpbfifo.regs) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	req = lpbfifo.req;
	if (!req) {
		pr_err("bogus LPBFIFO callback\n");
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	/* Release the mapping */
	if (req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;
	dma_unmap_single(lpbfifo.chan->device->dev,
			lpbfifo.ram_bus_addr, req->size, dir);

	lpbfifo.wait_lpbfifo_callback = false;

	if (!lpbfifo.wait_lpbfifo_irq) {
		/* Transfer is finished, set the FIFO as idle */
		lpbfifo.req = NULL;

		spin_unlock_irqrestore(&lpbfifo.lock, flags);

		if (req->callback)
			req->callback(req);
	} else {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
	}
}

static int mpc512x_lpbfifo_kick(void)
{
	u32 bits;
	bool no_incr = false;
	u32 bpt = 32; /* max bytes per LPBFIFO transaction involving DMA */
	u32 cs = 0;
	size_t i;
	struct dma_device *dma_dev = NULL;
	struct scatterlist sg;
	enum dma_data_direction dir;
	struct dma_slave_config dma_conf = {};
	struct dma_async_tx_descriptor *dma_tx = NULL;
	dma_cookie_t cookie;
	int ret;

	/*
	 * 1. Fit the requirements:
	 * - the packet size must be a multiple of 4, since the FIFO Data Word
	 *    Register allows only full-word access according to the Reference
	 *    Manual;
	 * - the physical address of the device on the LPB and the packet size
	 *    must be aligned on a BPT (bytes per transaction) or 8-byte
	 *    boundary according to the Reference Manual;
	 * - but we choose the DMA maxburst equal (or very close) to BPT to
	 *    prevent the DMA controller from overtaking the FIFO and causing a
	 *    FIFO underflow error. So we force the packet size to be aligned
	 *    on a BPT boundary so as not to confuse the DMA driver, which
	 *    requires the packet size to be aligned on a maxburst boundary;
	 * - BPT should be set to the LPB device port size for operation with
	 *    disabled auto-incrementing, according to the Reference Manual.
	 */
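	/*
	 * Worked example (illustrative, not from the Reference Manual):
	 * for a 24-byte write to a device address aligned on 8 bytes with
	 * an undefined port size, the loop below halves BPT from 32 until
	 * both conditions hold. 24 is not a multiple of 32 or 16, but is a
	 * multiple of 8, so BPT = 8 and dst/src maxburst = max(8, 4) / 4 = 2
	 * words. With a defined port size (no_incr), BPT is not reduced and
	 * a misaligned request fails with -EINVAL instead.
	 */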
	if (lpbfifo.req->size == 0 || !IS_ALIGNED(lpbfifo.req->size, 4))
		return -EINVAL;

	if (lpbfifo.req->portsize != LPB_DEV_PORTSIZE_UNDEFINED) {
		bpt = lpbfifo.req->portsize;
		no_incr = true;
	}

	while (bpt > 1) {
		if (IS_ALIGNED(lpbfifo.req->dev_phys_addr, min(bpt, 0x8u)) &&
					IS_ALIGNED(lpbfifo.req->size, bpt)) {
			break;
		}

		if (no_incr)
			return -EINVAL;

		bpt >>= 1;
	}
	dma_conf.dst_maxburst = max(bpt, 0x4u) / 4;
	dma_conf.src_maxburst = max(bpt, 0x4u) / 4;

	for (i = 0; i < lpbfifo.cs_n; i++) {
		phys_addr_t cs_start = lpbfifo.cs_ranges[i].addr;
		phys_addr_t cs_end = cs_start + lpbfifo.cs_ranges[i].size;
		phys_addr_t access_start = lpbfifo.req->dev_phys_addr;
		phys_addr_t access_end = access_start + lpbfifo.req->size;

		if (access_start >= cs_start && access_end <= cs_end) {
			cs = lpbfifo.cs_ranges[i].csnum;
			break;
		}
	}
	if (i == lpbfifo.cs_n)
		return -EFAULT;

	/* 2. Prepare DMA */
	dma_dev = lpbfifo.chan->device;

	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) {
		dir = DMA_TO_DEVICE;
		dma_conf.direction = DMA_MEM_TO_DEV;
		dma_conf.dst_addr = lpbfifo.regs_phys +
				offsetof(struct mpc512x_lpbfifo, data_word);
	} else {
		dir = DMA_FROM_DEVICE;
		dma_conf.direction = DMA_DEV_TO_MEM;
		dma_conf.src_addr = lpbfifo.regs_phys +
				offsetof(struct mpc512x_lpbfifo, data_word);
	}
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Make the DMA channel work with the LPB FIFO data register */
	if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) {
		ret = -EINVAL;
		goto err_dma_prep;
	}

	sg_init_table(&sg, 1);

	sg_dma_address(&sg) = dma_map_single(dma_dev->dev,
			lpbfifo.req->ram_virt_addr, lpbfifo.req->size, dir);
	if (dma_mapping_error(dma_dev->dev, sg_dma_address(&sg)))
		return -EFAULT;

	lpbfifo.ram_bus_addr = sg_dma_address(&sg); /* For freeing later */

	sg_dma_len(&sg) = lpbfifo.req->size;

	dma_tx = dmaengine_prep_slave_sg(lpbfifo.chan, &sg,
						1, dma_conf.direction, 0);
	if (!dma_tx) {
		ret = -ENOSPC;
		goto err_dma_prep;
	}
	dma_tx->callback = mpc512x_lpbfifo_callback;
	dma_tx->callback_param = NULL;

	/* 3. Prepare FIFO */
	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
	out_be32(&lpbfifo.regs->enable, 0x0);

	/*
	 * Configure the watermarks for the write operation
	 * (RAM->DMA->FIFO->dev):
	 * - high watermark 7 words according to the Reference Manual,
	 * - low watermark 512 bytes (half of the FIFO).
	 * These watermarks don't work for the read operation since the
	 * MPC512X_SCLPC_FLUSH bit is set (according to the Reference Manual).
	 */
	out_be32(&lpbfifo.regs->fifo_ctrl, MPC512X_SCLPC_FIFO_CTRL(0x7));
	out_be32(&lpbfifo.regs->fifo_alarm, MPC512X_SCLPC_FIFO_ALARM(0x200));
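	/*
	 * The 0x200-byte alarm level corresponds to 128 32-bit words,
	 * i.e. half of the 1 KiB FIFO implied by the comment above.
	 */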

	/*
	 * Start address is a physical address of the region which belongs
	 * to the device on the LocalPlus Bus
	 */
	out_be32(&lpbfifo.regs->start_addr, lpbfifo.req->dev_phys_addr);

	/*
	 * Configure chip select, transfer direction, address increment option
	 * and bytes per transaction option
	 */
	bits = MPC512X_SCLPC_CS(cs);
	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_READ)
		bits |= MPC512X_SCLPC_READ | MPC512X_SCLPC_FLUSH;
	if (no_incr)
		bits |= MPC512X_SCLPC_DAI;
	bits |= MPC512X_SCLPC_BPT(bpt);
	out_be32(&lpbfifo.regs->ctrl, bits);

	/* Unmask irqs */
	bits = MPC512X_SCLPC_ENABLE | MPC512X_SCLPC_ABORT_INT_ENABLE;
	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
		bits |= MPC512X_SCLPC_NORM_INT_ENABLE;
	else
		lpbfifo.wait_lpbfifo_irq = false;

	out_be32(&lpbfifo.regs->enable, bits);

	/* 4. Set packet size and kick FIFO off */
	bits = lpbfifo.req->size | MPC512X_SCLPC_START;
	out_be32(&lpbfifo.regs->pkt_size, bits);

	/* 5. Finally kick DMA off */
	cookie = dma_tx->tx_submit(dma_tx);
	if (dma_submit_error(cookie)) {
		ret = -ENOSPC;
		goto err_dma_submit;
	}

	return 0;

 err_dma_submit:
	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
 err_dma_prep:
	dma_unmap_single(dma_dev->dev, sg_dma_address(&sg),
						lpbfifo.req->size, dir);
	return ret;
}

static int mpc512x_lpbfifo_submit_locked(struct mpc512x_lpbfifo_request *req)
{
	int ret = 0;

	if (!lpbfifo.regs)
		return -ENODEV;

	/* Check whether a transfer is in progress */
	if (lpbfifo.req)
		return -EBUSY;

	lpbfifo.wait_lpbfifo_irq = true;
	lpbfifo.wait_lpbfifo_callback = true;
	lpbfifo.req = req;

	ret = mpc512x_lpbfifo_kick();
	if (ret != 0)
		lpbfifo.req = NULL; /* Set the FIFO as idle */

	return ret;
}

int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ret = mpc512x_lpbfifo_submit_locked(req);
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	return ret;
}
EXPORT_SYMBOL(mpc512x_lpbfifo_submit);
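
/*
 * Illustrative sketch of how a client might call mpc512x_lpbfifo_submit().
 * This is a hypothetical example, not an in-tree user: the helper names,
 * device address and buffer handling are made up, and only the request
 * fields used elsewhere in this file are assumed.  Note that the request
 * must stay valid until its callback has run, since the driver keeps a
 * pointer to it for the duration of the transfer.
 */
#if 0	/* example only, not built */
static struct mpc512x_lpbfifo_request example_req;

static void example_write_done(struct mpc512x_lpbfifo_request *req)
{
	/* For a write this runs after both the FIFO IRQ and the DMA callback */
	pr_info("LPBFIFO: wrote %u bytes to %pa\n",
			req->size, &req->dev_phys_addr);
}

static int example_write(void *buf, u32 size, phys_addr_t dev_addr)
{
	/* buf must be DMA-able memory and size a multiple of 4 bytes */
	example_req.dir = MPC512X_LPBFIFO_REQ_DIR_WRITE;
	example_req.dev_phys_addr = dev_addr;
	example_req.ram_virt_addr = buf;
	example_req.size = size;
	example_req.portsize = LPB_DEV_PORTSIZE_UNDEFINED;
	example_req.callback = example_write_done;

	/* Returns -EBUSY if another transfer is already in flight */
	return mpc512x_lpbfifo_submit(&example_req);
}
#endif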

/*
 * The LPBFIFO driver uses the "ranges" property of the "localbus" device
 * tree node to determine the chip select number of a client device
 * requesting a DMA transfer.
 */
static int get_cs_ranges(struct device *dev)
{
	int ret = -ENODEV;
	struct device_node *lb_node;
	const u32 *addr_cells_p;
	const u32 *size_cells_p;
	int proplen;
	size_t i;

	lb_node = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-localbus");
	if (!lb_node)
		return ret;

	/*
	 * The node defined as compatible with 'fsl,mpc5121-localbus'
	 * should have two address cells and one size cell.
	 * Every item of its ranges property should consist of:
	 * - the first address cell, which is the chip select number;
	 * - the second address cell, which is the offset in the chip select
	 *    and must be zero;
	 * - the CPU address of the beginning of an access window;
	 * - the only size cell, which is the size of an access window.
	 */
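	/*
	 * A hypothetical layout (illustrative only, not taken from any real
	 * board DTS):
	 *
	 *	localbus {
	 *		compatible = "fsl,mpc5121-localbus";
	 *		#address-cells = <2>;
	 *		#size-cells = <1>;
	 *		ranges = <0x0 0x0 0xfc000000 0x04000000
	 *			  0x3 0x0 0x80000000 0x00010000>;
	 *	};
	 *
	 * would produce two cs_range entries: chip select 0 covering a
	 * 64 MiB window at 0xfc000000 and chip select 3 covering a 64 KiB
	 * window at 0x80000000.
	 */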
	addr_cells_p = of_get_property(lb_node, "#address-cells", NULL);
	size_cells_p = of_get_property(lb_node, "#size-cells", NULL);
	if (addr_cells_p == NULL || *addr_cells_p != 2 ||
				size_cells_p == NULL || *size_cells_p != 1) {
		goto end;
	}

	proplen = of_property_count_u32_elems(lb_node, "ranges");
	if (proplen <= 0 || proplen % 4 != 0)
		goto end;

	lpbfifo.cs_n = proplen / 4;
	lpbfifo.cs_ranges = devm_kcalloc(dev, lpbfifo.cs_n,
					sizeof(struct cs_range), GFP_KERNEL);
	if (!lpbfifo.cs_ranges)
		goto end;

	if (of_property_read_u32_array(lb_node, "ranges",
				(u32 *)lpbfifo.cs_ranges, proplen) != 0) {
		goto end;
	}

	for (i = 0; i < lpbfifo.cs_n; i++) {
		if (lpbfifo.cs_ranges[i].base != 0)
			goto end;
	}

	ret = 0;

 end:
	of_node_put(lb_node);
	return ret;
}

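/*
 * The probe routine below expects an "sclpc" device tree node providing
 * the register window, the interrupt and a DMA channel named "rx-tx".
 * A hypothetical node (illustrative only; addresses and specifiers are
 * made up) might look like:
 *
 *	sclpc@10100 {
 *		compatible = "fsl,mpc512x-lpbfifo";
 *		reg = <0x10100 0x50>;
 *		interrupts = <7 0x8>;
 *		dmas = <&dma0 26>;
 *		dma-names = "rx-tx";
 *	};
 */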
static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
{
	struct resource r;
	int ret = 0;

	memset(&lpbfifo, 0, sizeof(struct lpbfifo_data));
	spin_lock_init(&lpbfifo.lock);

	lpbfifo.chan = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(lpbfifo.chan))
		return PTR_ERR(lpbfifo.chan);

	if (of_address_to_resource(pdev->dev.of_node, 0, &r) != 0) {
		dev_err(&pdev->dev, "bad 'reg' in 'sclpc' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.regs_phys = r.start;
	lpbfifo.regs_size = resource_size(&r);

	if (!devm_request_mem_region(&pdev->dev, lpbfifo.regs_phys,
					lpbfifo.regs_size, DRV_NAME)) {
		dev_err(&pdev->dev, "unable to request region\n");
		ret = -EBUSY;
		goto err0;
	}

	lpbfifo.regs = devm_ioremap(&pdev->dev,
					lpbfifo.regs_phys, lpbfifo.regs_size);
	if (!lpbfifo.regs) {
		dev_err(&pdev->dev, "mapping registers failed\n");
		ret = -ENOMEM;
		goto err0;
	}

	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	if (get_cs_ranges(&pdev->dev) != 0) {
		dev_err(&pdev->dev, "bad '/localbus' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!lpbfifo.irq) {
		dev_err(&pdev->dev, "mapping irq failed\n");
		ret = -ENODEV;
		goto err0;
	}

	if (request_irq(lpbfifo.irq, mpc512x_lpbfifo_irq, 0,
						DRV_NAME, &pdev->dev) != 0) {
		dev_err(&pdev->dev, "requesting irq failed\n");
		ret = -ENODEV;
		goto err1;
	}

	dev_info(&pdev->dev, "probe succeeded\n");
	return 0;

 err1:
	irq_dispose_mapping(lpbfifo.irq);
 err0:
	dma_release_channel(lpbfifo.chan);
	return ret;
}

static int mpc512x_lpbfifo_remove(struct platform_device *pdev)
{
	unsigned long flags;
	struct dma_device *dma_dev = lpbfifo.chan->device;
	struct mpc512x_lpbfifo __iomem *regs = NULL;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	regs = lpbfifo.regs;
	lpbfifo.regs = NULL;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	dma_dev->device_terminate_all(lpbfifo.chan);
	out_be32(&regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	free_irq(lpbfifo.irq, &pdev->dev);
	irq_dispose_mapping(lpbfifo.irq);
	dma_release_channel(lpbfifo.chan);

	return 0;
}

static const struct of_device_id mpc512x_lpbfifo_match[] = {
	{ .compatible = "fsl,mpc512x-lpbfifo", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);

static struct platform_driver mpc512x_lpbfifo_driver = {
	.probe = mpc512x_lpbfifo_probe,
	.remove = mpc512x_lpbfifo_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mpc512x_lpbfifo_match,
	},
};

module_platform_driver(mpc512x_lpbfifo_driver);

MODULE_AUTHOR("Alexander Popov <alex.popov@linux.com>");
MODULE_DESCRIPTION("MPC512x LocalPlus Bus FIFO device driver");
MODULE_LICENSE("GPL v2");