cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uio_dmem_genirq.c (9371B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/uio/uio_dmem_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2012 Damian Hobson-Garcia
 *
 * Based on uio_pdrv_genirq.c by Magnus Damm
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_data/uio_dmem_genirq.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_dmem_genirq"
#define DMEM_MAP_ERROR (~0)
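/*
 * DMEM_MAP_ERROR marks a dynamic region that currently has no backing
 * memory: it is stored in the region's addr field at probe time and again
 * whenever dma_alloc_coherent() fails in open(). Since the UIO core exposes
 * each map's address through sysfs, userspace can in principle check
 * /sys/class/uio/uioX/maps/mapN/addr for this all-ones value before
 * mmap()ing a dynamic region.
 */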

struct uio_dmem_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;
	unsigned long flags;
	struct platform_device *pdev;
	unsigned int dmem_region_start;
	unsigned int num_dmem_regions;
	void *dmem_region_vaddr[MAX_UIO_MAPS];
	struct mutex alloc_lock;
	unsigned int refcnt;
};

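/*
 * open() backs each dynamic region with coherent DMA memory the first time
 * the device node is opened (refcnt == 0) and wakes the device via Runtime
 * PM. Userspace then reaches map N of a UIO device by mmap()ing the device
 * file at an offset of N * page_size; a minimal sketch (device path and
 * region size are illustrative assumptions):
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	void *buf = mmap(NULL, region_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, N * getpagesize());
 */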
static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int dmem_region = priv->dmem_region_start;

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		void *addr;
		if (!uiomem->size)
			break;

		addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
				(dma_addr_t *)&uiomem->addr, GFP_KERNEL);
		if (!addr) {
			uiomem->addr = DMEM_MAP_ERROR;
		}
		priv->dmem_region_vaddr[dmem_region++] = addr;
		++uiomem;
	}
	priv->refcnt++;

	mutex_unlock(&priv->alloc_lock);
	/* Wait until the Runtime PM code has woken up the device */
	pm_runtime_get_sync(&priv->pdev->dev);
	return 0;
}

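/*
 * release() is the counterpart of open(): it drops the Runtime PM reference
 * and, once the last user closes the device (refcnt reaches zero), frees
 * every dynamic region and resets its addr field to DMEM_MAP_ERROR.
 */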
static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int dmem_region = priv->dmem_region_start;

	/* Tell the Runtime PM code that the device has become idle */
	pm_runtime_put_sync(&priv->pdev->dev);

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);

	priv->refcnt--;
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		if (!uiomem->size)
			break;
		if (priv->dmem_region_vaddr[dmem_region]) {
			dma_free_coherent(&priv->pdev->dev, uiomem->size,
					priv->dmem_region_vaddr[dmem_region],
					uiomem->addr);
		}
		uiomem->addr = DMEM_MAP_ERROR;
		++dmem_region;
		++uiomem;
	}

	mutex_unlock(&priv->alloc_lock);
	return 0;
}

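/*
 * The handler below only masks the interrupt and reports IRQ_HANDLED; the
 * UIO core then increments the event counter, so a blocking read() of the
 * 4-byte count on /dev/uioX returns and userspace knows an interrupt fired.
 */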
static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
	 */

	if (!test_and_set_bit(0, &priv->flags))
		disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

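/*
 * irqcontrol() is called by the UIO core when userspace write()s a 4-byte
 * signed integer to the device file: a non-zero value re-enables the
 * interrupt, zero disables it.
 */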
static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;
	unsigned long flags;

	/* Allow user space to enable and disable the interrupt
	 * in the interrupt controller, but keep track of the
	 * state to prevent per-irq depth damage.
	 *
	 * Serialize this operation to support multiple tasks.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	if (irq_on) {
		if (test_and_clear_bit(0, &priv->flags))
			enable_irq(dev_info->irq);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		if (!test_and_set_bit(0, &priv->flags)) {
			spin_unlock_irqrestore(&priv->lock, flags);
			disable_irq(dev_info->irq);
		} else {
			/* Already disabled: just drop the lock. Without this
			 * branch the spinlock would be left held, deadlocking
			 * the next irqcontrol call or interrupt on this CPU.
			 */
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	return 0;
}

static void uio_dmem_genirq_pm_disable(void *data)
{
	struct device *dev = data;

	pm_runtime_disable(dev);
}

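/*
 * probe() accepts configuration either as platform_data (a struct
 * uio_dmem_genirq_pdata) or from a device tree node. A minimal
 * platform_data sketch, assuming a board file that already has its MMIO
 * and IRQ resources in "res" (all names and sizes below are illustrative):
 *
 *	static unsigned int sizes[] = { SZ_1M };
 *	static struct uio_dmem_genirq_pdata pdata = {
 *		.uioinfo = { .name = "mydev", .version = "1" },
 *		.dynamic_region_sizes = sizes,
 *		.num_dynamic_regions = ARRAY_SIZE(sizes),
 *	};
 *	platform_device_register_resndata(NULL, "uio_dmem_genirq", -1,
 *					  res, ARRAY_SIZE(res),
 *					  &pdata, sizeof(pdata));
 */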
static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
	struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct uio_info *uioinfo = &pdata->uioinfo;
	struct uio_dmem_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (pdev->dev.of_node) {
		/* alloc uioinfo for one device */
		uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), GFP_KERNEL);
		if (!uioinfo) {
			dev_err(&pdev->dev, "unable to kmalloc\n");
			return -ENOMEM;
		}
		uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
					       pdev->dev.of_node);
		uioinfo->version = "devicetree";
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return -EINVAL;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "unable to kmalloc\n");
		return -ENOMEM;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return ret;
	}

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;
	mutex_init(&priv->alloc_lock);

	if (!uioinfo->irq) {
		/* Multiple IRQs are not supported */
		ret = platform_get_irq(pdev, 0);
		if (ret == -ENXIO && pdev->dev.of_node)
			ret = UIO_IRQ_NONE;
		else if (ret < 0)
			return ret;
		uioinfo->irq = ret;
	}

	if (uioinfo->irq) {
		struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);

		/*
		 * If this is a level interrupt, don't do lazy disable.
		 * Otherwise the irq will fire again, since clearing of the
		 * actual cause, at the device level, is done in userspace.
		 * irqd_is_level_type() isn't used since it isn't valid until
		 * the irq is configured.
		 */
		if (irq_data &&
		    irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
			dev_dbg(&pdev->dev, "disable lazy unmask\n");
			irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
		}
	}

	uiomem = &uioinfo->mem[0];

	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start;
		uiomem->size = resource_size(r);
		++uiomem;
	}

	priv->dmem_region_start = uiomem - &uioinfo->mem[0];
	priv->num_dmem_regions = pdata->num_dynamic_regions;

	for (i = 0; i < pdata->num_dynamic_regions; ++i) {
		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" dynamic and fixed memory regions.\n");
			break;
		}
		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = DMEM_MAP_ERROR;
		uiomem->size = pdata->dynamic_region_sizes[i];
		++uiomem;
	}

	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */
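	/* A typical userspace loop built on this mechanism might look like
	 * the following sketch (the device-specific acknowledge step is a
	 * placeholder, not something this driver defines):
	 *
	 *	uint32_t count, one = 1;
	 *	read(fd, &count, sizeof(count));  wait for the next interrupt
	 *	...acknowledge the interrupt in the device's own registers...
	 *	write(fd, &one, sizeof(one));     unmask it in the controller
	 */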

	uioinfo->handler = uio_dmem_genirq_handler;
	uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
	uioinfo->open = uio_dmem_genirq_open;
	uioinfo->release = uio_dmem_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	pm_runtime_enable(&pdev->dev);

	ret = devm_add_action_or_reset(&pdev->dev, uio_dmem_genirq_pm_disable, &pdev->dev);
	if (ret)
		return ret;

	return devm_uio_register_device(&pdev->dev, priv->uioinfo);
}

static int uio_dmem_genirq_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
	 * are used at open() and release() time. This allows the
	 * Runtime PM code to turn off power to the device while the
	 * device is unused, ie before open() and after release().
	 *
	 * This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsible for hardware
	 * register reinitialization after open().
	 */
	return 0;
}

static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
	.runtime_suspend = uio_dmem_genirq_runtime_nop,
	.runtime_resume = uio_dmem_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static const struct of_device_id uio_of_genirq_match[] = {
	{ /* empty for now */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
#endif

static struct platform_driver uio_dmem_genirq = {
	.probe = uio_dmem_genirq_probe,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &uio_dmem_genirq_dev_pm_ops,
		.of_match_table = of_match_ptr(uio_of_genirq_match),
	},
};

module_platform_driver(uio_dmem_genirq);

MODULE_AUTHOR("Damian Hobson-Garcia");
MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);