cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

api.c (11891B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <linux/irqdomain.h>

#include "cxl.h"

/*
 * Since we want to track memory mappings to be able to force-unmap
 * when the AFU is no longer reachable, we need an inode. For devices
 * opened through the cxl user API, this is not a problem, but a
 * userland process can also get a cxl fd through the cxl_get_fd()
 * API, which is used by the cxlflash driver.
 *
 * Therefore we implement our own simple pseudo-filesystem and inode
 * allocator. We don't use the anonymous inode, as we need the
 * meta-data associated with it (address_space) and it is shared by
 * other drivers/processes, so it could lead to cxl unmapping VMAs
 * from random processes.
 */

#define CXL_PSEUDO_FS_MAGIC	0x1697697f

static int cxl_fs_cnt;
static struct vfsmount *cxl_vfs_mount;

static int cxl_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, CXL_PSEUDO_FS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type cxl_fs_type = {
	.name		= "cxl",
	.owner		= THIS_MODULE,
	.init_fs_context = cxl_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};


void cxl_release_mapping(struct cxl_context *ctx)
{
	if (ctx->kernelapi && ctx->mapping)
		simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
}

static struct file *cxl_getfile(const char *name,
				const struct file_operations *fops,
				void *priv, int flags)
{
	struct file *file;
	struct inode *inode;
	int rc;

	/* strongly inspired by anon_inode_getfile() */

	if (fops->owner && !try_module_get(fops->owner))
		return ERR_PTR(-ENOENT);

	rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
		file = ERR_PTR(rc);
		goto err_module;
	}

	inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		file = ERR_CAST(inode);
		goto err_fs;
	}

	file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file))
		goto err_inode;

	file->private_data = priv;

	return file;

err_inode:
	iput(inode);
err_fs:
	simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
err_module:
	module_put(fops->owner);
	return file;
}

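/*
 * Entry point for in-kernel users of the cxl API (e.g. the cxlflash
 * driver): create a context on the AFU that backs the given PCI
 * function. The context starts out as a slave; cxl_set_master() below
 * can mark it as a master context.
 */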
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
	struct cxl_afu *afu;
	struct cxl_context  *ctx;
	int rc;

	afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return ERR_CAST(afu);

	ctx = cxl_context_alloc();
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->kernelapi = true;

	/* Make it a slave context.  We can promote it later? */
	rc = cxl_context_init(ctx, afu, false);
	if (rc)
		goto err_ctx;

	return ctx;

err_ctx:
	kfree(ctx);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);

struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

int cxl_release_context(struct cxl_context *ctx)
{
	if (ctx->status >= STARTED)
		return -EBUSY;

	cxl_context_free(ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);

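/*
 * Map an AFU interrupt number onto its hardware irq number by walking
 * the context's irq ranges. For example, with ranges of size 1 and 4,
 * AFU irq 2 lands in the second range and resolves to
 * ctx->irqs.offset[1] + 1. Returns 0 if num is out of range.
 */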
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range) {
			return ctx->irqs.offset[r] + num;
		}
		num -= range;
	}
	return 0;
}


int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
	if (!ctx)
		return -EINVAL;

	ctx->priv = priv;

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_priv);

void *cxl_get_priv(struct cxl_context *ctx)
{
	if (!ctx)
		return ERR_PTR(-EINVAL);

	return ctx->priv;
}
EXPORT_SYMBOL_GPL(cxl_get_priv);

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	int res;
	irq_hw_number_t hwirq;

	if (num == 0)
		num = ctx->afu->pp_irqs;
	res = afu_allocate_irqs(ctx, num);
	if (res)
		return res;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		/* In a guest, the PSL interrupt is not multiplexed. It was
		 * allocated above, and we need to set its handler
		 */
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq)
			cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
	}

	if (ctx->status == STARTED) {
		if (cxl_ops->update_ivtes)
			cxl_ops->update_ivtes(ctx);
		else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
	}

	return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}
	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/*
	 * Find interrupt we are to register.
	 */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	virq = irq_find_mapping(NULL, hwirq);
	if (virq)
		cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);

/*
 * Start a context
 * Code here similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	/*
	 * Increment the mapped context count for adapter. This also checks
	 * if adapter_context_lock is taken.
	 */
	rc = cxl_adapter_context_get(ctx->afu->adapter);
	if (rc)
		goto out;

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		kernel = false;

		/* acquire a reference to the task's mm */
		ctx->mm = get_task_mm(current);

		/* ensure this mm_struct can't be freed */
		cxl_context_mm_count_get(ctx);

		if (ctx->mm) {
			/* decrement the use count from above */
			mmput(ctx->mm);
			/* make TLBIs for this context global */
			mm_context_add_copro(ctx->mm);
		}
	}

	/*
	 * Increment driver use count. Enables global TLBIs for hash
	 * and callbacks to handle the segment table
	 */
	cxl_ctx_get();

	/* See the comment in afu_ioctl_start_work() */
	smp_mb();

	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
		put_pid(ctx->pid);
		ctx->pid = NULL;
		cxl_adapter_context_put(ctx->afu->adapter);
		cxl_ctx_put();
		if (task) {
			cxl_context_mm_count_put(ctx);
			if (ctx->mm)
				mm_context_remove_copro(ctx->mm);
		}
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);

int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context.  Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
	return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);

void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);

/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

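/*
 * PATCH_FOPS() fills in any file operation the caller left NULL with the
 * default afu_fops implementation, so cxl_get_fd() callers only need to
 * override the hooks they care about.
 */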
#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;
	char *name = NULL;

	/* only allow one per context */
	if (ctx->mapping)
		return ERR_PTR(-EEXIST);

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;

	/*
	 * Patch the file ops.  Care is needed to keep this reentrant safe.
	 */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else /* use default ops */
		fops = (struct file_operations *)&afu_fops;

	name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
	file = cxl_getfile(name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file))
		goto err_fd;

	cxl_context_set_mapping(ctx, file->f_mapping);
	*fd = fdtmp;
	return file;

err_fd:
	put_unused_fd(fdtmp);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);

struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

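/*
 * AFU driver event interface: a driver registers fetch_event() and
 * event_delivered() callbacks here, then calls cxl_context_events_pending()
 * to account for new events and wake up readers waiting on ctx->wq.
 */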
void cxl_set_driver_ops(struct cxl_context *ctx,
			struct cxl_afu_driver_ops *ops)
{
	WARN_ON(!ops->fetch_event || !ops->event_delivered);
	atomic_set(&ctx->afu_driver_events, 0);
	ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);

void cxl_context_events_pending(struct cxl_context *ctx,
				unsigned int new_events)
{
	atomic_add(new_events, &ctx->afu_driver_events);
	wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);

int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* code taken from afu_ioctl_start_work */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);

void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
	if (ctx->status != STARTED)
		return NULL;

	pr_devel("%s: psn_phys:%llx size:%llx\n",
		__func__, ctx->psn_phys, ctx->psn_size);
	return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);

int cxl_afu_reset(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = cxl_ops->afu_reset(afu);
	if (rc)
		return rc;

	return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image)
{
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);

ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return -ENODEV;

	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
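
The exports above form the in-kernel ("AFU driver") side of the cxl API; the header comment notes it is used by the cxlflash driver. The fragment below is a rough sketch, not code from this repository, of how a hypothetical caller might string these exports together: create a slave context for a PCI function, allocate and map AFU interrupts, start the context, and unwind on error. The names my_afu_irq and my_driver_attach, the choice of interrupt number 1, and the zero work element descriptor are all illustrative assumptions.

/* Illustrative only: a hypothetical in-kernel caller of the cxl API above. */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <misc/cxl.h>

/* hypothetical AFU interrupt handler */
static irqreturn_t my_afu_irq(int irq, void *cookie)
{
	return IRQ_HANDLED;
}

static int my_driver_attach(struct pci_dev *pdev)
{
	struct cxl_context *ctx;
	u64 wed = 0;	/* work element descriptor; AFU-specific in practice */
	int rc;

	ctx = cxl_dev_context_init(pdev);	/* slave context on this function's AFU */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	rc = cxl_allocate_afu_irqs(ctx, 0);	/* 0: use the AFU's pp_irqs default */
	if (rc)
		goto out_release;

	/* AFU interrupts are usually mapped from number 1; 0 carries the PSL interrupt above */
	rc = cxl_map_afu_irq(ctx, 1, my_afu_irq, ctx, "my_afu");
	if (rc <= 0) {
		rc = rc ? rc : -ENOMEM;
		goto out_free_irqs;
	}

	rc = cxl_start_context(ctx, wed, NULL);	/* NULL task: kernel context */
	if (rc)
		goto out_unmap;

	return 0;

out_unmap:
	cxl_unmap_afu_irq(ctx, 1, ctx);
out_free_irqs:
	cxl_free_afu_irqs(ctx);
out_release:
	cxl_release_context(ctx);
	return rc;
}

A driver that wants to hand the context to userspace would additionally call cxl_get_fd() to obtain a struct file and fd backed by the pseudo-filesystem set up at the top of this file.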