cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

rpmsg_char.c (12216B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 2022, STMicroelectronics
      4 * Copyright (c) 2016, Linaro Ltd.
      5 * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
      6 * Copyright (c) 2012, PetaLogix
      7 * Copyright (c) 2011, Texas Instruments, Inc.
      8 * Copyright (c) 2011, Google, Inc.
      9 *
     10 * Based on rpmsg performance statistics driver by Michal Simek, which in turn
     11 * was based on TI & Google OMX rpmsg driver.
     12 */
     13
     14#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
     15
     16#include <linux/cdev.h>
     17#include <linux/device.h>
     18#include <linux/fs.h>
     19#include <linux/idr.h>
     20#include <linux/kernel.h>
     21#include <linux/module.h>
     22#include <linux/poll.h>
     23#include <linux/rpmsg.h>
     24#include <linux/skbuff.h>
     25#include <linux/slab.h>
     26#include <linux/uaccess.h>
     27#include <uapi/linux/rpmsg.h>
     28
     29#include "rpmsg_char.h"
     30#include "rpmsg_internal.h"
     31
/* Number of char device minors available to rpmsg endpoint devices. */
#define RPMSG_DEV_MAX	(MINORMASK + 1)

/* Major number allocated once in rpmsg_chrdev_init(). */
static dev_t rpmsg_major;

/* ID allocators: "rpmsgN" device index and char device minor, respectively. */
static DEFINE_IDA(rpmsg_ept_ida);
static DEFINE_IDA(rpmsg_minor_ida);

/* Map the embedded struct device / struct cdev back to their rpmsg_eptdev. */
#define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev)
#define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev)
     41
/**
 * struct rpmsg_eptdev - endpoint device context
 * @dev:	endpoint device
 * @cdev:	cdev for the endpoint device
 * @rpdev:	underlying rpmsg device
 * @chinfo:	info used to open the endpoint
 * @ept_lock:	synchronization of @ept modifications
 * @ept:	rpmsg endpoint reference, when open
 * @queue_lock:	synchronization of @queue operations
 * @queue:	incoming message queue
 * @readq:	wait object for incoming queue
 * @default_ept: set to channel default endpoint if the default endpoint should be re-used
 *              on device open to prevent endpoint address update.
 */
struct rpmsg_eptdev {
	struct device dev;
	struct cdev cdev;

	struct rpmsg_device *rpdev;
	struct rpmsg_channel_info chinfo;

	struct mutex ept_lock;
	struct rpmsg_endpoint *ept;
	struct rpmsg_endpoint *default_ept;

	spinlock_t queue_lock;
	struct sk_buff_head queue;
	wait_queue_head_t readq;

};
     72
/*
 * Tear down one endpoint chardev: destroy its rpmsg endpoint (if open),
 * wake blocked readers so they observe the vanished endpoint, and
 * unregister the cdev/device pair.  Usable as a device_for_each_child()
 * callback (see rpmsg_chrdev_remove()); @data is unused.
 */
int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	/* Clear @ept under ept_lock so open/release/write can't race us. */
	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);

	/* wake up any blocked readers */
	wake_up_interruptible(&eptdev->readq);

	cdev_device_del(&eptdev->cdev, &eptdev->dev);
	/* Drop the initial reference; final put triggers the release callback. */
	put_device(&eptdev->dev);

	return 0;
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_destroy);
     93
     94static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
     95			void *priv, u32 addr)
     96{
     97	struct rpmsg_eptdev *eptdev = priv;
     98	struct sk_buff *skb;
     99
    100	skb = alloc_skb(len, GFP_ATOMIC);
    101	if (!skb)
    102		return -ENOMEM;
    103
    104	skb_put_data(skb, buf, len);
    105
    106	spin_lock(&eptdev->queue_lock);
    107	skb_queue_tail(&eptdev->queue, skb);
    108	spin_unlock(&eptdev->queue_lock);
    109
    110	/* wake up any blocking processes, waiting for new data */
    111	wake_up_interruptible(&eptdev->readq);
    112
    113	return 0;
    114}
    115
    116static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
    117{
    118	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
    119	struct rpmsg_endpoint *ept;
    120	struct rpmsg_device *rpdev = eptdev->rpdev;
    121	struct device *dev = &eptdev->dev;
    122
    123	if (eptdev->ept)
    124		return -EBUSY;
    125
    126	get_device(dev);
    127
    128	/*
    129	 * If the default_ept is set, the rpmsg device default endpoint is used.
    130	 * Else a new endpoint is created on open that will be destroyed on release.
    131	 */
    132	if (eptdev->default_ept)
    133		ept = eptdev->default_ept;
    134	else
    135		ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);
    136
    137	if (!ept) {
    138		dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
    139		put_device(dev);
    140		return -EINVAL;
    141	}
    142
    143	eptdev->ept = ept;
    144	filp->private_data = eptdev;
    145
    146	return 0;
    147}
    148
    149static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
    150{
    151	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
    152	struct device *dev = &eptdev->dev;
    153
    154	/* Close the endpoint, if it's not already destroyed by the parent */
    155	mutex_lock(&eptdev->ept_lock);
    156	if (eptdev->ept) {
    157		if (!eptdev->default_ept)
    158			rpmsg_destroy_ept(eptdev->ept);
    159		eptdev->ept = NULL;
    160	}
    161	mutex_unlock(&eptdev->ept_lock);
    162
    163	/* Discard all SKBs */
    164	skb_queue_purge(&eptdev->queue);
    165
    166	put_device(dev);
    167
    168	return 0;
    169}
    170
/*
 * Read one queued message into the user's iov.
 *
 * Blocks (unless O_NONBLOCK) until a message arrives or the endpoint goes
 * away.  A message larger than the supplied buffer is truncated to
 * iov_iter_count(to); the rest of the skb is dropped.
 *
 * Returns the number of bytes copied, -EAGAIN (O_NONBLOCK, empty queue),
 * -ERESTARTSYS (interrupted wait), -EPIPE (endpoint gone), or -EFAULT.
 */
static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	unsigned long flags;
	struct sk_buff *skb;
	int use;

	if (!eptdev->ept)
		return -EPIPE;

	/* irqsave: rpmsg_ept_cb queues skbs from atomic context. */
	spin_lock_irqsave(&eptdev->queue_lock, flags);

	/* Wait for data in the queue */
	if (skb_queue_empty(&eptdev->queue)) {
		spin_unlock_irqrestore(&eptdev->queue_lock, flags);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Wait until we get data or the endpoint goes away */
		if (wait_event_interruptible(eptdev->readq,
					     !skb_queue_empty(&eptdev->queue) ||
					     !eptdev->ept))
			return -ERESTARTSYS;

		/* We lost the endpoint while waiting */
		if (!eptdev->ept)
			return -EPIPE;

		spin_lock_irqsave(&eptdev->queue_lock, flags);
	}

	/* Queue may have been drained by a concurrent reader; re-check. */
	skb = skb_dequeue(&eptdev->queue);
	spin_unlock_irqrestore(&eptdev->queue_lock, flags);
	if (!skb)
		return -EFAULT;

	use = min_t(size_t, iov_iter_count(to), skb->len);
	if (copy_to_iter(skb->data, use, to) != use)
		use = -EFAULT;

	kfree_skb(skb);

	return use;
}
    217
/*
 * Send the user's buffer over the endpoint.
 *
 * The payload is staged in a kernel bounce buffer and then sent with
 * rpmsg_trysendto() for O_NONBLOCK files (mapping -ENOMEM, i.e. no tx
 * buffer available, to -EAGAIN) or the blocking rpmsg_sendto() otherwise.
 *
 * Returns the full requested length on success, or a negative errno
 * (-ENOMEM, -EFAULT, -ERESTARTSYS, -EPIPE, or the send error).
 */
static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
				       struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	size_t len = iov_iter_count(from);
	void *kbuf;
	int ret;

	kbuf = kzalloc(len, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (!copy_from_iter_full(kbuf, len, from)) {
		ret = -EFAULT;
		goto free_kbuf;
	}

	/* The endpoint may be torn down concurrently; send under ept_lock. */
	if (mutex_lock_interruptible(&eptdev->ept_lock)) {
		ret = -ERESTARTSYS;
		goto free_kbuf;
	}

	if (!eptdev->ept) {
		ret = -EPIPE;
		goto unlock_eptdev;
	}

	if (filp->f_flags & O_NONBLOCK) {
		ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
		if (ret == -ENOMEM)
			ret = -EAGAIN;
	} else {
		ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
	}

unlock_eptdev:
	mutex_unlock(&eptdev->ept_lock);

free_kbuf:
	kfree(kbuf);
	return ret < 0 ? ret : len;
}
    261
    262static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
    263{
    264	struct rpmsg_eptdev *eptdev = filp->private_data;
    265	__poll_t mask = 0;
    266
    267	if (!eptdev->ept)
    268		return EPOLLERR;
    269
    270	poll_wait(filp, &eptdev->readq, wait);
    271
    272	if (!skb_queue_empty(&eptdev->queue))
    273		mask |= EPOLLIN | EPOLLRDNORM;
    274
    275	mask |= rpmsg_poll(eptdev->ept, filp, wait);
    276
    277	return mask;
    278}
    279
    280static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
    281			       unsigned long arg)
    282{
    283	struct rpmsg_eptdev *eptdev = fp->private_data;
    284
    285	if (cmd != RPMSG_DESTROY_EPT_IOCTL)
    286		return -EINVAL;
    287
    288	/* Don't allow to destroy a default endpoint. */
    289	if (eptdev->default_ept)
    290		return -EINVAL;
    291
    292	return rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
    293}
    294
/* File operations backing each /dev/rpmsgN endpoint device node. */
static const struct file_operations rpmsg_eptdev_fops = {
	.owner = THIS_MODULE,
	.open = rpmsg_eptdev_open,
	.release = rpmsg_eptdev_release,
	.read_iter = rpmsg_eptdev_read_iter,
	.write_iter = rpmsg_eptdev_write_iter,
	.poll = rpmsg_eptdev_poll,
	.unlocked_ioctl = rpmsg_eptdev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
    305
    306static ssize_t name_show(struct device *dev, struct device_attribute *attr,
    307			 char *buf)
    308{
    309	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);
    310
    311	return sprintf(buf, "%s\n", eptdev->chinfo.name);
    312}
    313static DEVICE_ATTR_RO(name);
    314
    315static ssize_t src_show(struct device *dev, struct device_attribute *attr,
    316			 char *buf)
    317{
    318	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);
    319
    320	return sprintf(buf, "%d\n", eptdev->chinfo.src);
    321}
    322static DEVICE_ATTR_RO(src);
    323
    324static ssize_t dst_show(struct device *dev, struct device_attribute *attr,
    325			 char *buf)
    326{
    327	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);
    328
    329	return sprintf(buf, "%d\n", eptdev->chinfo.dst);
    330}
    331static DEVICE_ATTR_RO(dst);
    332
/* sysfs attributes attached to every endpoint device (name/src/dst). */
static struct attribute *rpmsg_eptdev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_src.attr,
	&dev_attr_dst.attr,
	NULL
};
ATTRIBUTE_GROUPS(rpmsg_eptdev);
    340
/*
 * Device release callback, run on the final put_device(): return the ID
 * and minor to their allocators and free the rpmsg_eptdev.  Installed
 * only after cdev_device_add() succeeds (see rpmsg_chrdev_eptdev_add()).
 */
static void rpmsg_eptdev_release_device(struct device *dev)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	ida_simple_remove(&rpmsg_ept_ida, dev->id);
	ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
	kfree(eptdev);
}
    349
/*
 * Allocate and initialize an endpoint device: locks, receive queue,
 * waitqueue, the embedded struct device and its cdev.  The device is
 * device_initialize()d but not yet registered; until
 * rpmsg_chrdev_eptdev_add() installs the release callback, cleanup is the
 * caller's responsibility.  Returns the new eptdev or ERR_PTR(-ENOMEM).
 */
static struct rpmsg_eptdev *rpmsg_chrdev_eptdev_alloc(struct rpmsg_device *rpdev,
						      struct device *parent)
{
	struct rpmsg_eptdev *eptdev;
	struct device *dev;

	eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL);
	if (!eptdev)
		return ERR_PTR(-ENOMEM);

	dev = &eptdev->dev;
	eptdev->rpdev = rpdev;

	mutex_init(&eptdev->ept_lock);
	spin_lock_init(&eptdev->queue_lock);
	skb_queue_head_init(&eptdev->queue);
	init_waitqueue_head(&eptdev->readq);

	device_initialize(dev);
	dev->class = rpmsg_class;
	dev->parent = parent;
	dev->groups = rpmsg_eptdev_groups;
	dev_set_drvdata(dev, eptdev);

	cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops);
	eptdev->cdev.owner = THIS_MODULE;

	return eptdev;
}
    379
/*
 * Register an allocated eptdev: pick a char device minor and an "rpmsg%d"
 * index, then add the cdev/device pair.  On success the release callback
 * takes over all cleanup; on failure the IDs are returned and the eptdev
 * freed here.
 *
 * NOTE(review): the free_eptdev error path calls put_device() before
 * dev->release is installed and then kfree()s the object directly —
 * confirm this matches driver-core expectations for a device_initialize()d
 * device (put_device() on a release-less device warns; upstream later
 * reworked this cleanup).
 */
static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_channel_info chinfo)
{
	struct device *dev = &eptdev->dev;
	int ret;

	eptdev->chinfo = chinfo;

	ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
	if (ret < 0)
		goto free_eptdev;
	dev->devt = MKDEV(MAJOR(rpmsg_major), ret);

	ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto free_minor_ida;
	dev->id = ret;
	dev_set_name(dev, "rpmsg%d", ret);

	ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
	if (ret)
		goto free_ept_ida;

	/* We can now rely on the release function for cleanup */
	dev->release = rpmsg_eptdev_release_device;

	return ret;

free_ept_ida:
	ida_simple_remove(&rpmsg_ept_ida, dev->id);
free_minor_ida:
	ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
free_eptdev:
	put_device(dev);
	kfree(eptdev);

	return ret;
}
    417
    418int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
    419			       struct rpmsg_channel_info chinfo)
    420{
    421	struct rpmsg_eptdev *eptdev;
    422	int ret;
    423
    424	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
    425	if (IS_ERR(eptdev))
    426		return PTR_ERR(eptdev);
    427
    428	ret = rpmsg_chrdev_eptdev_add(eptdev, chinfo);
    429
    430	return ret;
    431}
    432EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);
    433
/*
 * Probe for "rpmsg-raw" channels: expose the channel itself through a
 * chardev that re-uses the channel's default endpoint instead of creating
 * a fresh endpoint on each open.
 */
static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
{
	struct rpmsg_channel_info chinfo;
	struct rpmsg_eptdev *eptdev;
	struct device *dev = &rpdev->dev;

	memcpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
	chinfo.src = rpdev->src;
	chinfo.dst = rpdev->dst;

	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, dev);
	if (IS_ERR(eptdev))
		return PTR_ERR(eptdev);

	/* Set the default_ept to the rpmsg device endpoint */
	eptdev->default_ept = rpdev->ept;

	/*
	 * The rpmsg_ept_cb uses the *priv parameter to get its rpmsg_eptdev
	 * context.  Store it in the default_ept *priv field.
	 */
	eptdev->default_ept->priv = eptdev;

	return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}
    459
    460static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
    461{
    462	int ret;
    463
    464	ret = device_for_each_child(&rpdev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
    465	if (ret)
    466		dev_warn(&rpdev->dev, "failed to destroy endpoints: %d\n", ret);
    467}
    468
/* Channels this driver binds to on the rpmsg bus. */
static struct rpmsg_device_id rpmsg_chrdev_id_table[] = {
	{ .name	= "rpmsg-raw" },
	{ },
};

/* Driver exposing matched rpmsg channels as character devices. */
static struct rpmsg_driver rpmsg_chrdev_driver = {
	.probe = rpmsg_chrdev_probe,
	.remove = rpmsg_chrdev_remove,
	.callback = rpmsg_ept_cb,
	.id_table = rpmsg_chrdev_id_table,
	.drv.name = "rpmsg_chrdev",
};
    481
    482static int rpmsg_chrdev_init(void)
    483{
    484	int ret;
    485
    486	ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_char");
    487	if (ret < 0) {
    488		pr_err("failed to allocate char dev region\n");
    489		return ret;
    490	}
    491
    492	ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
    493	if (ret < 0) {
    494		pr_err("rpmsg: failed to register rpmsg raw driver\n");
    495		goto free_region;
    496	}
    497
    498	return 0;
    499
    500free_region:
    501	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
    502
    503	return ret;
    504}
    505postcore_initcall(rpmsg_chrdev_init);
    506
/* Module exit: unregister the driver first, then release the dev region. */
static void rpmsg_chrdev_exit(void)
{
	unregister_rpmsg_driver(&rpmsg_chrdev_driver);
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
module_exit(rpmsg_chrdev_exit);
    513
/* Allow auto-loading when the rpmsg bus requests the "rpmsg_chrdev" alias. */
MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_LICENSE("GPL v2");