cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mm.c (8552B)


// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN: Memory mapping management
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Fei Li <lei1.li@intel.com>
 *	Shuo Liu <shuo.a.liu@intel.com>
 */

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "acrn_drv.h"

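/*
 * Wrap a single vm_memory_region_op in a one-entry batch and issue the
 * ACRN set-memory-regions hypercall; the hypervisor reads the batch by
 * physical address, hence the virt_to_phys() conversions.
 */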
static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
{
	struct vm_memory_region_batch *regions;
	int ret;

	regions = kzalloc(sizeof(*regions), GFP_KERNEL);
	if (!regions)
		return -ENOMEM;

	regions->vmid = vm->vmid;
	regions->regions_num = 1;
	regions->regions_gpa = virt_to_phys(region);

	ret = hcall_set_memory_regions(virt_to_phys(regions));
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Failed to set memory region for VM[%u]!\n", vm->vmid);

	kfree(regions);
	return ret;
}

/**
 * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
 * @vm:			User VM.
 * @user_gpa:		A GPA of the User VM.
 * @service_gpa:	A GPA of the Service VM.
 * @size:		Size of the region.
 * @mem_type:		Combination of ACRN_MEM_TYPE_*.
 * @mem_access_right:	Combination of ACRN_MEM_ACCESS_*.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
		       u64 size, u32 mem_type, u32 mem_access_right)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_ADD;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = service_gpa;
	region->size = size;
	region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
			(mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device,
		"%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, (void *)service_gpa, size);
	kfree(region);
	return ret;
}

/**
 * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
 * @vm:		User VM.
 * @user_gpa:	A GPA of the User VM.
 * @size:	Size of the region.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_DEL;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = 0UL;
	region->size = size;
	region->attr = 0U;

	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, size);
	kfree(region);
	return ret;
}

int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type == ACRN_MEMMAP_RAM)
		return acrn_vm_ram_map(vm, memmap);

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
				 memmap->service_vm_pa, memmap->len,
				 ACRN_MEM_TYPE_UC, memmap->attr);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Add memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Del memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

/**
 * acrn_vm_ram_map() - Create a RAM EPT mapping of a User VM.
 * @vm:		The User VM pointer
 * @memmap:	Info of the EPT mapping
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	struct vm_memory_region_batch *regions_info;
	int nr_pages, i = 0, order, nr_regions = 0;
	struct vm_memory_mapping *region_mapping;
	struct vm_memory_region_op *vm_region;
	struct page **pages = NULL, *page;
	void *remap_vaddr;
	int ret, pinned;
	u64 user_vm_pa;
	unsigned long pfn;
	struct vm_area_struct *vma;

	if (!vm || !memmap)
		return -EINVAL;

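	/*
	 * If the backing VMA is VM_PFNMAP (a raw PFN mapping with no
	 * struct pages, e.g. device memory remapped into the user VMM),
	 * there is nothing to pin: resolve the PFN directly and map the
	 * whole range as a single contiguous region.
	 */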
	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, memmap->vma_base);
	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
			mmap_read_unlock(current->mm);
			return -EINVAL;
		}

		ret = follow_pfn(vma, memmap->vma_base, &pfn);
		mmap_read_unlock(current->mm);
		if (ret < 0) {
			dev_dbg(acrn_dev.this_device,
				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
			return ret;
		}

		return acrn_mm_region_add(vm, memmap->user_vm_pa,
			 PFN_PHYS(pfn), memmap->len,
			 ACRN_MEM_TYPE_WB, memmap->attr);
	}
	mmap_read_unlock(current->mm);

	/* Get the number of pages in the map region */
	nr_pages = memmap->len >> PAGE_SHIFT;
	pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
	if (!pages)
		return -ENOMEM;

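	/*
	 * FOLL_LONGTERM: the pages back guest RAM for the lifetime of the
	 * VM, so they are pinned indefinitely and must first be migrated
	 * out of movable zones by the pinning code.
	 */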
	/* Pin the pages of the user memory map region */
	pinned = pin_user_pages_fast(memmap->vma_base,
				     nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				     pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != nr_pages) {
		ret = -EFAULT;
		goto put_pages;
	}

	/* Create a kernel map for the map region */
	remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!remap_vaddr) {
		ret = -ENOMEM;
		goto put_pages;
	}

	/* Record Service VM va <-> User VM pa mapping */
	mutex_lock(&vm->regions_mapping_lock);
	region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
	if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
		region_mapping->pages = pages;
		region_mapping->npages = nr_pages;
		region_mapping->size = memmap->len;
		region_mapping->service_vm_va = remap_vaddr;
		region_mapping->user_vm_pa = memmap->user_vm_pa;
		vm->regions_mapping_count++;
	} else {
		dev_warn(acrn_dev.this_device,
			"Run out of memory mapping slots!\n");
		ret = -ENOMEM;
		mutex_unlock(&vm->regions_mapping_lock);
		goto unmap_no_count;
	}
	mutex_unlock(&vm->regions_mapping_lock);

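	/*
	 * Compound (huge) pages are physically contiguous, so a single
	 * region entry can cover all 1 << order subpages at once; the
	 * VM_BUG_ON_PAGE() asserts that each step of the walk lands on a
	 * head page rather than partway into a compound page.
	 */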
	/* Calculate count of vm_memory_region_op */
	while (i < nr_pages) {
		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		nr_regions++;
		i += 1 << order;
	}

	/* Prepare the vm_memory_region_batch */
	regions_info = kzalloc(struct_size(regions_info, regions_op,
					   nr_regions), GFP_KERNEL);
	if (!regions_info) {
		ret = -ENOMEM;
		goto unmap_kernel_map;
	}

	/* Fill each vm_memory_region_op */
	vm_region = regions_info->regions_op;
	regions_info->vmid = vm->vmid;
	regions_info->regions_num = nr_regions;
	regions_info->regions_gpa = virt_to_phys(vm_region);
	user_vm_pa = memmap->user_vm_pa;
	i = 0;
	while (i < nr_pages) {
		u32 region_size;

		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		region_size = PAGE_SIZE << order;
		vm_region->type = ACRN_MEM_REGION_ADD;
		vm_region->user_vm_pa = user_vm_pa;
		vm_region->service_vm_pa = page_to_phys(page);
		vm_region->size = region_size;
		vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
				  (memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);

		vm_region++;
		user_vm_pa += region_size;
		i += 1 << order;
	}

	/* Inform the ACRN Hypervisor to set up EPT mappings */
	ret = hcall_set_memory_regions(virt_to_phys(regions_info));
	if (ret < 0) {
		dev_dbg(acrn_dev.this_device,
			"Failed to set regions, VM[%u]!\n", vm->vmid);
		goto unset_region;
	}
	kfree(regions_info);

	dev_dbg(acrn_dev.this_device,
		"%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
		__func__, vm->vmid,
		remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
	return ret;

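	/* Error unwind: undo the setup steps in reverse order. */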
unset_region:
	kfree(regions_info);
unmap_kernel_map:
	mutex_lock(&vm->regions_mapping_lock);
	vm->regions_mapping_count--;
	mutex_unlock(&vm->regions_mapping_lock);
unmap_no_count:
	vunmap(remap_vaddr);
put_pages:
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
free_pages:
	vfree(pages);
	return ret;
}

/**
 * acrn_vm_all_ram_unmap() - Destroy all RAM EPT mappings of a User VM.
 * @vm:	The User VM
 */
void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
{
	struct vm_memory_mapping *region_mapping;
	int i, j;

	mutex_lock(&vm->regions_mapping_lock);
	for (i = 0; i < vm->regions_mapping_count; i++) {
		region_mapping = &vm->regions_mapping[i];
		vunmap(region_mapping->service_vm_va);
		for (j = 0; j < region_mapping->npages; j++)
			unpin_user_page(region_mapping->pages[j]);
		vfree(region_mapping->pages);
	}
	mutex_unlock(&vm->regions_mapping_lock);
}