cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

processor.c (6068B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include "processor.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"

#define PAGES_PER_REGION 4

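/*
 * Allocate the guest's top-level (region-first) table. On s390x a
 * region/segment table has 2048 eight-byte entries and thus spans four
 * 4 KiB pages; filling it with 0xff marks every entry invalid.
 */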
void virt_pgd_alloc(struct kvm_vm *vm)
{
	vm_paddr_t paddr;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	if (vm->pgd_created)
		return;

	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

	vm->pgd = paddr;
	vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages: the table origin combined
 * with the table-type bits ((4 - ri) << 2) and, for region/segment
 * tables, a table length of 3, marking all four pages valid.
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
{
	uint64_t taddr;

	taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
	/* Only clear as many pages as were actually allocated */
	memset(addr_gpa2hva(vm, taddr), 0xff,
	       (ri < 4 ? PAGES_PER_REGION : 1) * vm->page_size);

	return (taddr & REGION_ENTRY_ORIGIN)
		| (((4 - ri) << 2) & REGION_ENTRY_TYPE)
		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

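/*
 * Map one 4 KiB page at guest virtual address gva to guest physical
 * address gpa, allocating any missing DAT tables on the way. The walk
 * uses an 11-bit index at each of the four region/segment levels and an
 * 8-bit index into the final page table (11 * 4 + 8 + 12 = 64 bits).
 */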
void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(gva >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gpa, vm->max_gfn, vm->page_size);

	/* Walk through region and segment tables */
	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		if (entry[idx] & REGION_ENTRY_INVALID)
			entry[idx] = virt_alloc_region(vm, ri);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	/* Fill in page table entry */
	idx = (gva >> 12) & 0x0ffu;		/* page index */
	if (!(entry[idx] & PAGE_INVALID))
		fprintf(stderr,
			"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
	entry[idx] = gpa;
}

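/*
 * Translate a guest virtual address to a guest physical address by
 * walking the DAT tables, asserting that every level of the walk is
 * already mapped.
 */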
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
			    "No region mapping for vm virtual address 0x%lx",
			    gva);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	idx = (gva >> 12) & 0x0ffu;		/* page index */

	TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
		    "No page mapping for vm virtual address 0x%lx", gva);

	return (entry[idx] & ~0xffful) + (gva & 0xffful);
}

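/* Dump the 256 entries of the page table at ptea_start, skipping invalid ones */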
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			   uint64_t ptea_start)
{
	uint64_t *pte, ptea;

	for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
		pte = addr_gpa2hva(vm, ptea);
		if (*pte & PAGE_INVALID)
			continue;
		fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
			indent, "", ptea, *pte);
	}
}

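/*
 * Recursively dump a region/segment table: entries that designate a
 * lower-level region/segment table are followed, while segment entries
 * have their page tables dumped directly.
 */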
static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			     uint64_t reg_tab_addr)
{
	uint64_t addr, *entry;

	/* A region/segment table spans 4 pages, i.e. 0x800 entries */
	for (addr = reg_tab_addr; addr < reg_tab_addr + 0x800 * 8; addr += 8) {
		entry = addr_gpa2hva(vm, addr);
		if (*entry & REGION_ENTRY_INVALID)
			continue;
		fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
			indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
			addr, *entry);
		if (*entry & REGION_ENTRY_TYPE) {
			virt_dump_region(stream, vm, indent + 2,
					 *entry & REGION_ENTRY_ORIGIN);
		} else {
			virt_dump_ptes(stream, vm, indent + 2,
				       *entry & REGION_ENTRY_ORIGIN);
		}
	}
}

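/* Dump all guest mappings, starting from the top-level region table */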
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	if (!vm->pgd_created)
		return;

	virt_dump_region(stream, vm, indent, vm->pgd);
}

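/*
 * Add a vCPU that starts executing guest_code with DAT enabled. The
 * stack pointer in r15 is set up per the s390x ELF ABI, which reserves
 * a 160-byte register save area below the stack pointer; CR0 enables
 * the additional floating point registers and CR1 holds the primary
 * address space control element (region-first table designation).
 */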
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_run *run;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	stack_vaddr = vm_vaddr_alloc(vm, stack_size,
				     DEFAULT_GUEST_STACK_VADDR_MIN);

	vm_vcpu_add(vm, vcpuid);

	/* Setup guest registers */
	vcpu_regs_get(vm, vcpuid, &regs);
	/* Leave room for the 160-byte ABI register save area */
	regs.gprs[15] = stack_vaddr + stack_size - 160;
	vcpu_regs_set(vm, vcpuid, &regs);

	vcpu_sregs_get(vm, vcpuid, &sregs);
	sregs.crs[0] |= 0x00040000;		/* Enable floating point regs */
	sregs.crs[1] = vm->pgd | 0xf;		/* Primary region table */
	vcpu_sregs_set(vm, vcpuid, &sregs);

	run = vcpu_state(vm, vcpuid);
	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
	run->psw_addr = (uintptr_t)guest_code;
}

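/*
 * Pass up to 5 arguments to the guest by loading them into gprs 2-6,
 * the argument registers of the s390x calling convention.
 */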
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;
	int i;

	TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
		    "  num: %u\n",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vm, vcpuid, &regs);

	for (i = 0; i < num; i++)
		regs.gprs[i + 2] = va_arg(ap, uint64_t);

	vcpu_regs_set(vm, vcpuid, &regs);
	va_end(ap);
}
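
/*
 * Example (a sketch, not part of the original file): a test whose guest
 * entry point is "static void guest_code(uint64_t a, uint64_t b)" could
 * hand over its two arguments like this, after which the guest reads
 * them from r2 and r3 (VCPU_ID is a hypothetical test-local constant):
 *
 *	vcpu_args_set(vm, VCPU_ID, 2, a, b);
 */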

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	if (!vcpu)
		return;

	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
		indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
}

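/* No unhandled-exception detection is implemented for s390x; this is a stub */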
void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
}