cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

processor.c (10701B)


// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V processor support for KVM selftests
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN	0xac0000

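/* Round v up to a page boundary; an already-aligned v advances to the next page. */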
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size) & ~(vm->page_size - 1);
}

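/* Extract the physical page number from a PTE and shift it into a physical address. */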
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) <<
		PGTBL_PAGE_SIZE_SHIFT;
}

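/* Number of 8-byte PTEs that fit in one page table page. */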
static uint64_t ptrs_per_pte(struct kvm_vm *vm)
{
	return PGTBL_PAGE_SIZE / sizeof(uint64_t);
}

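/*
 * Per-level virtual address index masks and shifts, ordered from the
 * leaf level (L0) up to the root.
 */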
static uint64_t pte_index_mask[] = {
	PGTBL_L0_INDEX_MASK,
	PGTBL_L1_INDEX_MASK,
	PGTBL_L2_INDEX_MASK,
	PGTBL_L3_INDEX_MASK,
};

static uint32_t pte_index_shift[] = {
	PGTBL_L0_INDEX_SHIFT,
	PGTBL_L1_INDEX_SHIFT,
	PGTBL_L2_INDEX_SHIFT,
	PGTBL_L3_INDEX_SHIFT,
};

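/* Index of the PTE for gva within the page table at the given level. */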
static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
	TEST_ASSERT(level > -1,
		"Negative page table level (%d) not possible", level);
	TEST_ASSERT(level < vm->pgtable_levels,
		"Invalid page table level (%d)", level);

	return (gva & pte_index_mask[level]) >> pte_index_shift[level];
}

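/* Allocate guest physical pages for the root page table, once per VM. */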
void virt_pgd_alloc(struct kvm_vm *vm)
{
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
			page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}

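/*
 * Map a single page: walk from the root table toward the leaf,
 * allocating intermediate page tables as needed, then write the leaf
 * PTE with the target physical page number and the permission bits.
 */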
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t *ptep, next_ppn;
	int level = vm->pgtable_levels - 1;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	/* Root level: allocate the next-level table if the entry is empty. */
	ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8;
	if (!*ptep) {
		next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
		*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
			PGTBL_PTE_VALID_MASK;
	}
	level--;

	/* Intermediate levels: same, but never allocate below level 1. */
	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, vaddr, level) * 8;
		if (!*ptep && level > 0) {
			next_ppn = vm_alloc_page_table(vm) >>
				   PGTBL_PAGE_SIZE_SHIFT;
			*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
				PGTBL_PTE_VALID_MASK;
		}
		level--;
	}

	/* Leaf level: install the mapping itself. */
	paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
	*ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
		PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
}

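/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest page tables from the host side.
 */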
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;
	int level = vm->pgtable_levels - 1;

	if (!vm->pgd_created)
		goto unmapped_gva;

	/* A zero PTE means the entry is not valid at this level. */
	ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8;
	if (!*ptep)
		goto unmapped_gva;
	level--;

	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, gva, level) * 8;
		if (!*ptep)
			goto unmapped_gva;
		level--;
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
		  gva, level);
	exit(1);
}

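/* Recursively dump one page table page; compiled in only with -DDEBUG. */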
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
		     uint64_t page, int level)
{
#ifdef DEBUG
	static const char *const type[] = { "pte", "pmd", "pud", "p4d" };
	uint64_t pte, *ptep;

	if (level < 0)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "",
			type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
#endif
}

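/* Dump every populated root page table entry and recurse into lower levels. */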
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = vm->pgtable_levels - 1;
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "",
			pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
}

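/* Point the vcpu's satp CSR at the root page table and enable Sv48 translation. */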
void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
{
	unsigned long satp;

	/*
	 * The RISC-V Sv48 MMU mode supports 56-bit physical addresses
	 * for 48-bit virtual addresses with a 4KB last-level page size.
	 */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
	satp |= SATP_MODE_48;

	set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp);
}

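/* Read back and print the vcpu's privilege mode and general-purpose registers. */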
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	struct kvm_riscv_core core;

	get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6);

	fprintf(stream,
		" MODE:  0x%lx\n", core.mode);
	fprintf(stream,
		" PC: 0x%016lx   RA: 0x%016lx SP: 0x%016lx GP: 0x%016lx\n",
		core.regs.pc, core.regs.ra, core.regs.sp, core.regs.gp);
	fprintf(stream,
		" TP: 0x%016lx   T0: 0x%016lx T1: 0x%016lx T2: 0x%016lx\n",
		core.regs.tp, core.regs.t0, core.regs.t1, core.regs.t2);
	fprintf(stream,
		" S0: 0x%016lx   S1: 0x%016lx A0: 0x%016lx A1: 0x%016lx\n",
		core.regs.s0, core.regs.s1, core.regs.a0, core.regs.a1);
	fprintf(stream,
		" A2: 0x%016lx   A3: 0x%016lx A4: 0x%016lx A5: 0x%016lx\n",
		core.regs.a2, core.regs.a3, core.regs.a4, core.regs.a5);
	fprintf(stream,
		" A6: 0x%016lx   A7: 0x%016lx S2: 0x%016lx S3: 0x%016lx\n",
		core.regs.a6, core.regs.a7, core.regs.s2, core.regs.s3);
	fprintf(stream,
		" S4: 0x%016lx   S5: 0x%016lx S6: 0x%016lx S7: 0x%016lx\n",
		core.regs.s4, core.regs.s5, core.regs.s6, core.regs.s7);
	fprintf(stream,
		" S8: 0x%016lx   S9: 0x%016lx S10: 0x%016lx S11: 0x%016lx\n",
		core.regs.s8, core.regs.s9, core.regs.s10, core.regs.s11);
	fprintf(stream,
		" T3: 0x%016lx   T4: 0x%016lx T5: 0x%016lx T6: 0x%016lx\n",
		core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
}

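/*
 * Default guest trap handler: reports an unexpected trap to the host
 * via the selftests SBI extension. Aligned so its address is a valid
 * stvec base (stvec encodes the trap mode in its low bits).
 */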
static void __aligned(16) guest_unexp_trap(void)
{
	sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
		  KVM_RISCV_SELFTESTS_SBI_UNEXP,
		  0, 0, 0, 0, 0, 0);
}

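/*
 * Add a vcpu with sensible defaults: MMU configured, a guest stack
 * allocated, gp mirroring the host (guest code is linked into the
 * test binary), pc at guest_code, and stvec pointing at the
 * unexpected-trap stub.
 */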
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	int r;
	size_t stack_size = vm->page_size == 4096 ?
					DEFAULT_STACK_PGS * vm->page_size :
					vm->page_size;
	unsigned long stack_vaddr = vm_vaddr_alloc(vm, stack_size,
					DEFAULT_RISCV_GUEST_STACK_VADDR_MIN);
	unsigned long current_gp = 0;
	struct kvm_mp_state mps;

	vm_vcpu_add(vm, vcpuid);
	riscv_vcpu_mmu_setup(vm, vcpuid);

	/*
	 * With SBI HSM support in KVM RISC-V, all secondary VCPUs are
	 * powered off by default, so ensure that all secondary VCPUs
	 * are powered on using the KVM_SET_MP_STATE ioctl().
	 */
	mps.mp_state = KVM_MP_STATE_RUNNABLE;
	r = _vcpu_ioctl(vm, vcpuid, KVM_SET_MP_STATE, &mps);
	TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r);

	/* Set the guest's global pointer to match the host's. */
	asm volatile (
		"add %0, gp, zero" : "=r" (current_gp) : : "memory");
	set_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), current_gp);

	/* Set the guest's stack pointer and program counter. */
	set_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp),
		stack_vaddr + stack_size);
	set_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc),
		(unsigned long)guest_code);

	/* Install the default exception vector for the guest. */
	set_reg(vm, vcpuid, RISCV_CSR_REG(stvec),
		(unsigned long)guest_unexp_trap);
}

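/* Pass up to eight guest arguments in a0-a7, per the RISC-V calling convention. */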
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	uint64_t id = RISCV_CORE_REG(regs.a0);
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		switch (i) {
		case 0:
			id = RISCV_CORE_REG(regs.a0);
			break;
		case 1:
			id = RISCV_CORE_REG(regs.a1);
			break;
		case 2:
			id = RISCV_CORE_REG(regs.a2);
			break;
		case 3:
			id = RISCV_CORE_REG(regs.a3);
			break;
		case 4:
			id = RISCV_CORE_REG(regs.a4);
			break;
		case 5:
			id = RISCV_CORE_REG(regs.a5);
			break;
		case 6:
			id = RISCV_CORE_REG(regs.a6);
			break;
		case 7:
			id = RISCV_CORE_REG(regs.a7);
			break;
		}
		set_reg(vm, vcpuid, id, va_arg(ap, uint64_t));
	}

	va_end(ap);
}

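/*
 * No-op on RISC-V: unexpected guest traps are reported to the host
 * through guest_unexp_trap() and the selftests SBI extension rather
 * than via this hook.
 */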
void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
}
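
For orientation, a typical selftest consumes these helpers roughly as follows. This is a minimal sketch, not code from this repository: it assumes the kvm_util.h entry points of the same tree (vm_create_default(), vcpu_run(), kvm_vm_free(), GUEST_SYNC()/GUEST_DONE()), and guest_main here is a hypothetical guest entry point.

#include "kvm_util.h"

#define VCPU_ID 0

/* Hypothetical guest entry point; runs inside the VM. */
static void guest_main(uint64_t token)
{
	GUEST_SYNC(token);	/* ucall back to the host */
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vm *vm;

	/* vm_create_default() calls vm_vcpu_add_default() shown above. */
	vm = vm_create_default(VCPU_ID, 0, guest_main);

	/* Lands in the guest's a0 via vcpu_args_set(). */
	vcpu_args_set(vm, VCPU_ID, 1, (uint64_t)42);

	vcpu_run(vm, VCPU_ID);	/* a real test would inspect the ucall exit */
	kvm_vm_free(vm);
	return 0;
}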