cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xlate_mmu.c (7954B)


/*
 * MMU operations common to all auto-translated physmap guests.
 *
 * Copyright (C) 2015 Citrix Systems R&D Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>

typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);

/* Break the pages down into 4KB chunks and call fn for each gfn */
static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
			     xen_gfn_fn_t fn, void *data)
{
	unsigned long xen_pfn = 0;
	struct page *page;
	int i;

	for (i = 0; i < nr_gfn; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			page = pages[i / XEN_PFN_PER_PAGE];
			xen_pfn = page_to_xen_pfn(page);
		}
		fn(pfn_to_gfn(xen_pfn++), data);
	}
}
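
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * a trivial xen_gfn_fn_t callback that just counts the GFNs visited by
 * xen_for_each_gfn(). With 64KB kernel pages and 4KB Xen pages,
 * XEN_PFN_PER_PAGE is 16, so each struct page yields 16 consecutive
 * GFNs; with 4KB kernel pages the mapping is 1:1. The names count_gfn
 * and count_gfns are hypothetical.
 */
static void count_gfn(unsigned long gfn, void *data)
{
	unsigned long *count = data;

	(*count)++;
}

static unsigned long __maybe_unused count_gfns(struct page **pages,
					       unsigned int nr_gfn)
{
	unsigned long count = 0;

	xen_for_each_gfn(pages, nr_gfn, count_gfn, &count);

	/* Always equals nr_gfn: one callback invocation per Xen GFN. */
	return count;
}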

struct remap_data {
	xen_pfn_t *fgfn; /* foreign domain's gfn */
	int nr_fgfn; /* Number of foreign GFNs left to map */
	pgprot_t prot;
	domid_t  domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_gfn_info *info;
	int *err_ptr;
	int mapped;

	/* Hypercall parameters */
	int h_errs[XEN_PFN_PER_PAGE];
	xen_ulong_t h_idxs[XEN_PFN_PER_PAGE];
	xen_pfn_t h_gpfns[XEN_PFN_PER_PAGE];

	int h_iter;	/* Iterator */
};

static void setup_hparams(unsigned long gfn, void *data)
{
	struct remap_data *info = data;

	info->h_idxs[info->h_iter] = *info->fgfn;
	info->h_gpfns[info->h_iter] = gfn;
	info->h_errs[info->h_iter] = 0;

	info->h_iter++;
	info->fgfn++;
}

static int remap_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));
	int rc, nr_gfn;
	uint32_t i;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = info->domid,
		.space = XENMAPSPACE_gmfn_foreign,
	};

	nr_gfn = min_t(typeof(info->nr_fgfn), XEN_PFN_PER_PAGE, info->nr_fgfn);
	info->nr_fgfn -= nr_gfn;

	info->h_iter = 0;
	xen_for_each_gfn(&page, nr_gfn, setup_hparams, info);
	BUG_ON(info->h_iter != nr_gfn);

	set_xen_guest_handle(xatp.idxs, info->h_idxs);
	set_xen_guest_handle(xatp.gpfns, info->h_gpfns);
	set_xen_guest_handle(xatp.errs, info->h_errs);
	xatp.size = nr_gfn;

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);

	/* info->err_ptr is expected to hold one error status per Xen PFN */
	for (i = 0; i < nr_gfn; i++) {
		int err = (rc < 0) ? rc : info->h_errs[i];

		*(info->err_ptr++) = err;
		if (!err)
			info->mapped++;
	}

	/*
	 * Note: the hypercall will usually return 0 even if not all of
	 * the fgfns could be mapped. We still have to update the PTE,
	 * as userspace may decide to continue.
	 */
	if (!rc)
		set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}
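
/*
 * Worked example (editor's annotation): with 64KB kernel pages,
 * XEN_PFN_PER_PAGE is 16, so remapping nr = 20 foreign GFNs takes two
 * remap_pte_fn() calls: the first batches 16 GFNs into a single
 * XENMEM_add_to_physmap_range hypercall, the second batches the
 * remaining 4. With 4KB kernel pages each call handles exactly one GFN.
 */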

int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      unsigned long addr,
			      xen_pfn_t *gfn, int nr,
			      int *err_ptr, pgprot_t prot,
			      unsigned domid,
			      struct page **pages)
{
	int err;
	struct remap_data data;
	unsigned long range = DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) << PAGE_SHIFT;

	/*
	 * Kept here to make sure the code doesn't break x86 PVOPS.
	 */
	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	data.fgfn = gfn;
	data.nr_fgfn = nr;
	data.prot  = prot;
	data.domid = domid;
	data.vma   = vma;
	data.pages = pages;
	data.index = 0;
	data.err_ptr = err_ptr;
	data.mapped = 0;

	err = apply_to_page_range(vma->vm_mm, addr, range,
				  remap_pte_fn, &data);
	return err < 0 ? err : data.mapped;
}
EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
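
/*
 * Example (editor's hedged sketch): how a caller such as the privcmd
 * driver might use xen_xlate_remap_gfn_array() from an mmap path. The
 * VMA is assumed to already be VM_IO | VM_PFNMAP and backed by enough
 * ballooned pages in @pages; every name here except the exported helper
 * itself is hypothetical.
 */
static int __maybe_unused example_map_foreign(struct vm_area_struct *vma,
					      xen_pfn_t *fgfns, int nr,
					      int *errs, domid_t domid,
					      struct page **pages)
{
	int mapped;

	/* Returns the number of GFNs mapped successfully, or a -errno. */
	mapped = xen_xlate_remap_gfn_array(vma, vma->vm_start, fgfns, nr,
					   errs, vma->vm_page_prot, domid,
					   pages);
	if (mapped < 0)
		return mapped;

	/* Per-GFN failures are reported through @errs, not the return. */
	return mapped == nr ? 0 : -EFAULT;
}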

static void unmap_gfn(unsigned long gfn, void *data)
{
	struct xen_remove_from_physmap xrp;

	xrp.domid = DOMID_SELF;
	xrp.gpfn = gfn;
	(void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
}

int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages)
{
	xen_for_each_gfn(pages, nr, unmap_gfn, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);

struct map_balloon_pages {
	xen_pfn_t *pfns;
	unsigned int idx;
};

static void setup_balloon_gfn(unsigned long gfn, void *data)
{
	struct map_balloon_pages *info = data;

	info->pfns[info->idx++] = gfn;
}

/**
 * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
 * @gfns: returns the array of corresponding GFNs
 * @virt: returns the virtual address of the mapped region
 * @nr_grant_frames: number of GFNs
 * @return 0 on success, error otherwise
 *
 * This allocates a set of ballooned pages and maps them into the
 * kernel's address space.
 */
int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
					 unsigned long nr_grant_frames)
{
	struct page **pages;
	xen_pfn_t *pfns;
	void *vaddr;
	struct map_balloon_pages data;
	int rc;
	unsigned long nr_pages;

	BUG_ON(nr_grant_frames == 0);
	nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE);
	pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
	if (!pfns) {
		kfree(pages);
		return -ENOMEM;
	}
	rc = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (rc) {
		pr_warn("%s: Couldn't balloon alloc %ld pages rc:%d\n",
			__func__, nr_pages, rc);
		kfree(pages);
		kfree(pfns);
		return rc;
	}

	data.pfns = pfns;
	data.idx = 0;
	xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data);

	vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
	if (!vaddr) {
		/* rc is 0 here (allocation succeeded), so don't log it. */
		pr_warn("%s: Couldn't map %ld pages\n", __func__, nr_pages);
		xen_free_unpopulated_pages(nr_pages, pages);
		kfree(pages);
		kfree(pfns);
		return -ENOMEM;
	}
	kfree(pages);

	*gfns = pfns;
	*virt = vaddr;

	return 0;
}
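
/*
 * Example (editor's illustrative sketch): typical use of the helper
 * above, in the spirit of the Arm grant-table setup. The frame count
 * and all names except xen_xlate_map_ballooned_pages() are
 * hypothetical.
 */
static int __init __maybe_unused example_map_balloon_frames(void)
{
	xen_pfn_t *gfns;
	void *virt;
	int rc;

	/* Allocate four ballooned Xen frames and map them contiguously. */
	rc = xen_xlate_map_ballooned_pages(&gfns, &virt, 4);
	if (rc)
		return rc;

	/* gfns[0..3] now name the frames backing the mapping at virt. */
	pr_info("balloon frames mapped at %p, first gfn %llu\n",
		virt, (unsigned long long)gfns[0]);
	return 0;
}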

struct remap_pfn {
	struct mm_struct *mm;
	struct page **pages;
	pgprot_t prot;
	unsigned long i;
};

static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;
	struct page *page = r->pages[r->i];
	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));

	set_pte_at(r->mm, addr, ptep, pte);
	r->i++;

	return 0;
}

/* Used by the privcmd module, but has to be built-in on ARM */
int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.pages = vma->vm_private_data,
		.prot = vma->vm_page_prot,
	};

	return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
}
EXPORT_SYMBOL_GPL(xen_remap_vma_range);
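
/*
 * Example (editor's hedged sketch): how a driver's mmap handler might
 * finish a mapping with xen_remap_vma_range() once vm_private_data
 * points at the backing pages, roughly as the privcmd module does. The
 * function name is hypothetical.
 */
static int __maybe_unused example_mmap_finish(struct vm_area_struct *vma,
					      struct page **pages)
{
	vma->vm_private_data = pages;

	/* Install one special PTE per kernel page across the whole VMA. */
	return xen_remap_vma_range(vma, vma->vm_start,
				   vma->vm_end - vma->vm_start);
}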