cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

setup.c (8725B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long vstart, vend, nr_pages;

	hyp_early_alloc_init(virt, size);

	hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
	nr_pages = (vend - vstart) >> PAGE_SHIFT;
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	return 0;
}

static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
		unsigned long hyp_addr;

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		/*
		 * Allocate a contiguous HYP private VA range for the stack
		 * and guard page. The allocation is also aligned based on
		 * the order of its size.
		 */
		ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
		if (ret)
			return ret;

		/*
		 * Since the stack grows downwards, map the stack to the page
		 * at the higher address and leave the lower guard page
		 * unbacked.
		 *
		 * Any valid stack address now has the PAGE_SHIFT bit as 1
		 * and addresses corresponding to the guard page have the
		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
		 */
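		/*
		 * As a sketch (assuming the hyp exception entry performs the
		 * equivalent test): because hyp_addr is aligned to
		 * 2 * PAGE_SIZE, a fault handler only needs that one bit of
		 * the stack pointer to tell the two pages apart:
		 *
		 *	if (!(sp & (1UL << PAGE_SHIFT)))
		 *		... sp fell into the unbacked guard page ...
		 */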
		hyp_spin_lock(&pkvm_pgd_lock);
		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
					PAGE_SIZE, params->stack_pa, PAGE_HYP);
		hyp_spin_unlock(&pkvm_pgd_lock);
		if (ret)
			return ret;

		/* Update stack_hyp_va to end of the stack's private VA range */
		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
	}

	/*
	 * Map the host's .bss and .rodata sections RO in the hypervisor, but
	 * transfer the ownership from the host to the hypervisor itself to
	 * make sure it can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see finalize_host_mappings()) once
	 * the hyp_vmemmap is addressable.
	 */
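	/*
	 * pkvm_mkstate() folds the ownership state into the software bits of
	 * the prot value; assuming the definition in nvhe/mem_protect.h, its
	 * effect is:
	 *
	 *	prot = (prot & ~PKVM_PAGE_STATE_PROT_MASK) | state;
	 *
	 * and pkvm_getstate(), used further down, masks those bits back out.
	 */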
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(__start_rodata, __end_rodata, prot);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, prot);
	if (ret)
		return ret;

	return 0;
}

static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				    (unsigned long)params + sizeof(*params));
	}
}

static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
					 kvm_pte_t *ptep,
					 enum kvm_pgtable_walk_flags flag,
					 void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	kvm_pte_t pte = *ptep;
	phys_addr_t phys;

	if (!kvm_pte_valid(pte))
		return 0;

	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
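	/*
	 * In effect, each valid entry visited bumps the refcount of the
	 * table page containing it, so after the walk a table page holds
	 * one reference per valid entry on top of the allocator's initial
	 * one; a later put_page() per unmapped entry can then let an
	 * emptied table page be freed (a sketch of the intended
	 * accounting, not a statement of the exact scheme).
	 */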
	mm_ops->get_page(ptep);
	if (flag != KVM_PGTABLE_WALK_LEAF)
		return 0;

	if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	phys = kvm_pte_to_phys(pte);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
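	/*
	 * The resulting mirroring, in tabular form:
	 *
	 *	hyp stage-1 state            host stage-2 state
	 *	PKVM_PAGE_OWNED           -> page pulled from the host entirely
	 *	PKVM_PAGE_SHARED_OWNED    -> PKVM_PAGE_SHARED_BORROWED
	 *	PKVM_PAGE_SHARED_BORROWED -> PKVM_PAGE_SHARED_OWNED
	 */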
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

static int finalize_host_mappings(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= finalize_host_mappings_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
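	/*
	 * From here on, page-table pages come from the buddy allocator and
	 * are refcounted through the now-backed hyp_vmemmap; for example, a
	 * fresh table page is obtained with:
	 *
	 *	void *page = hyp_alloc_pages(&hpool, 0);
	 *
	 * instead of from the early bump allocator used until now.
	 */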
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = finalize_host_mappings();
	if (ret)
		goto out;

out:
	/*
	 * We tail-called here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump into the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(__hyp_pa(params), __pkvm_init_finalise);
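	/*
	 * __pkvm_init_switch_pgd runs out of the idmap, so calling it via
	 * its physical address keeps the PC valid while the page-tables are
	 * swapped; it installs the new PGD and stack and then branches to
	 * __pkvm_init_finalise, which is why the call above never returns.
	 */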

	unreachable();
}