cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

privcmd.c (22946B)


// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);

static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}
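
/*
 * Usage sketch (illustrative commentary, not part of the upstream file):
 * a hypothetical userspace caller would open /dev/xen/privcmd and issue
 * this ioctl roughly as follows; struct privcmd_hypercall and the ioctl
 * number come from the privcmd uapi header, the op and args below are
 * placeholders.
 *
 *	struct privcmd_hypercall call = {
 *		.op = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version, 0, 0, 0, 0 },
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	long ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *
 * The fd must not have been restricted with IOCTL_PRIVCMD_RESTRICT,
 * otherwise this handler returns -EPERM before issuing the call.
 */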

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
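
/*
 * Worked example (illustrative, assuming 4 KiB pages and 64-bit x86):
 * gather_array() with nelem = 1000 and size = sizeof(xen_pfn_t) = 8
 * packs PAGE_SIZE / size = 512 elements per page, so it allocates
 * DIV_ROUND_UP(1000, 512) = 2 pages. A fresh page is allocated
 * whenever the next element would cross the PAGE_SIZE boundary
 * (pageidx > PAGE_SIZE - size), so elements never straddle pages.
 */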

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;
		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}
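
/*
 * Illustration (commentary, not upstream text): with the numbers from
 * the gather_array() example above, traverse_pages() would invoke fn
 * 1000 times (once per element), while traverse_pages_block() invokes
 * it only twice (nr = 512, then nr = 488), letting callbacks such as
 * mmap_batch_fn() below hand a whole batch to one remap call.
 */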

struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto-translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);


out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}
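
/*
 * Usage sketch (illustrative, not part of this file): the legacy mmap
 * ioctl expects the caller to mmap() a region of /dev/xen/privcmd first
 * and then ask for foreign frames to be mapped into it. Names below are
 * from the privcmd uapi header; the values are placeholders.
 *
 *	struct privcmd_mmap_entry ent = {
 *		.va = (unsigned long)addr,	// start of the mmap()ed VMA
 *		.mfn = some_foreign_mfn,
 *		.npages = 1,
 *	};
 *	struct privcmd_mmap cmd = { .num = 1, .dom = domid, .entry = &ent };
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd);
 *
 * The first entry must start exactly at vma->vm_start and the chunks
 * must be contiguous, as enforced by mmap_gfn_range() above.
 */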

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto-translated dom0 note: if the domU being created is PV, then the gfn
 * is an mfn (address on the bus). If it is auto-translated, then the gfn
 * is a pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the top nibble of
			 * the 32-bit gfn (with its known limitations
			 * vis-a-vis 64-bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
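
/*
 * Worked example (illustrative): PRIVCMD_MMAPBATCH_PAGED_ERROR is
 * 0x80000000U and PRIVCMD_MMAPBATCH_MFN_ERROR is 0xf0000000U, so a V1
 * caller that passed gfn 0x1234 and hit -ENOENT reads back 0x80001234:
 * the low bits still hold the gfn while the top nibble flags the
 * failure. V2 instead writes plain errno values into m.err[].
 */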

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kvfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}
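
/*
 * Usage sketch (illustrative, not part of this file): the V2 batch
 * interface takes an array of gfns and a parallel error array; the
 * sketch assumes a VMA of the right size was already mmap()ed from
 * /dev/xen/privcmd and that gfns[] holds num foreign frame numbers.
 *
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num  = num,
 *		.dom  = domid,
 *		.addr = (unsigned long)addr,
 *		.arr  = gfns,
 *		.err  = errs,
 *	};
 *	int rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 *	// rc == 0: all frames mapped. On failure with errno == ENOENT,
 *	// inspect errs[i] and retry the paged-out entries later; the
 *	// retry path is what the "holes" case above exists for.
 */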

static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long) kbufs[i].uptr,
			requested, FOLL_WRITE, pages);
		if (page_count < 0)
			return page_count;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;
	}

	return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0) {
		nr_pages = pinned;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, nr_pages);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}
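
/*
 * Note on the pinning arithmetic above (commentary, not upstream text):
 * the user buffers are pinned with FOLL_WRITE so the hypervisor can
 * write results back, and DIV_ROUND_UP(offset_in_page(uptr) + size,
 * PAGE_SIZE) counts pages per buffer. For example, a 100-byte buffer
 * that starts 4000 bytes into a 4 KiB page spans two pages
 * (DIV_ROUND_UP(4100, 4096) = 2), and both get pinned.
 */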

static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}
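
/*
 * Usage sketch (illustrative): a device model such as QEMU can drop
 * privilege on its privcmd fd so it may only target one guest; once
 * set, the restriction holds for the lifetime of the fd.
 *
 *	domid_t domid = 5;	// placeholder guest domid
 *	ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid);
 *	// From here on, the hypercall ioctl fails with -EPERM and the
 *	// mmap/dm_op ioctls only accept .dom == 5.
 */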

static long privcmd_ioctl_mmap_resource(struct file *file,
				struct privcmd_mmap_resource __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata = { };
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	/* Both fields must be set or unset */
	if (!!kdata.addr != !!kdata.num)
		return -EINVAL;

	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;

	if (!kdata.addr && !kdata.num) {
		/* Query the size of the resource. */
		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
		if (rc)
			return rc;
		return __put_user(xdata.nr_frames, &udata->num);
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num, *errs = (int *)pfns;

		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, errs,
						 vma->vm_page_prot,
						 domid);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = errs[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}
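
/*
 * Usage sketch (illustrative): passing addr == 0 and num == 0 turns the
 * ioctl into a size query, so a caller can discover how many frames a
 * resource has before mmap()ing space for the real mapping call.
 *
 *	struct privcmd_mmap_resource r = {
 *		.dom  = domid,
 *		.type = XENMEM_resource_ioreq_server,	// example type
 *		.id   = server_id,
 *		.idx  = 0, .num = 0, .addr = 0,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &r);
 *	// r.num now holds xdata.nr_frames for the second, mapping call.
 */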

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kvfree(pages);
}

static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until they succeed.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	           struct vm_area_struct *vma,
	           unsigned long addr,
	           unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		misc_deregister(&privcmd_dev);
		return err;
	}

	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);