cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

emulator_error_test.c (5801B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM_CAP_EXIT_ON_EMULATION_FAILURE capability.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */

#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"

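/*
 * The guest's MAXPHYADDR is reduced to 36 bits (see main()) so that
 * physical-address bit 36 is reserved from the guest's point of view.
 */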
#define VCPU_ID	   1
#define MAXPHYADDR 36

#define MEM_REGION_GVA	0x0000123456789000
#define MEM_REGION_GPA	0x0000000700000000
#define MEM_REGION_SLOT	10
#define MEM_REGION_SIZE PAGE_SIZE

static void guest_code(void)
{
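	/*
	 * The test relies on flds not being handled by KVM's instruction
	 * emulator: the access below faults (reserved PA bit, see main()),
	 * KVM tries to emulate it, emulation fails, and with
	 * KVM_CAP_EXIT_ON_EMULATION_FAILURE the failure is reported to
	 * userspace instead of killing the guest.
	 */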
	__asm__ __volatile__("flds (%[addr])"
			     :: [addr]"r"(MEM_REGION_GVA));

	GUEST_DONE();
}

static void run_guest(struct kvm_vm *vm)
{
	int rc;

	rc = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
}

/*
 * Accessors to get R/M, REG, and Mod bits described in the SDM vol 2,
 * figure 2-2 "Table Interpretation of ModR/M Byte (C8H)".
 */
#define GET_RM(insn_byte) (insn_byte & 0x7)
#define GET_REG(insn_byte) ((insn_byte & 0x38) >> 3)
#define GET_MOD(insn_byte) ((insn_byte & 0xc0) >> 6)

/* Ensure we are dealing with a simple 2-byte flds instruction. */
static bool is_flds(uint8_t *insn_bytes, uint8_t insn_size)
{
	return insn_size >= 2 &&
	       insn_bytes[0] == 0xd9 &&
	       GET_REG(insn_bytes[1]) == 0x0 &&
	       GET_MOD(insn_bytes[1]) == 0x0 &&
	       /* Ensure there is no SIB byte. */
	       GET_RM(insn_bytes[1]) != 0x4 &&
	       /* Ensure there is no displacement byte. */
	       GET_RM(insn_bytes[1]) != 0x5;
}

static void process_exit_on_emulation_error(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct kvm_regs regs;
	uint8_t *insn_bytes;
	uint8_t insn_size;
	uint64_t flags;

	TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
		    "Unexpected suberror: %u",
		    run->emulation_failure.suberror);

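	/*
	 * KVM only provides the faulting instruction's bytes when it sets
	 * KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES, hence the
	 * ndata/flags checks before the bytes are inspected.
	 */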
	if (run->emulation_failure.ndata >= 1) {
		flags = run->emulation_failure.flags;
		if ((flags & KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES) &&
		    run->emulation_failure.ndata >= 3) {
			insn_size = run->emulation_failure.insn_size;
			insn_bytes = run->emulation_failure.insn_bytes;

			TEST_ASSERT(insn_size <= 15 && insn_size > 0,
				    "Unexpected instruction size: %u",
				    insn_size);

			TEST_ASSERT(is_flds(insn_bytes, insn_size),
				    "Unexpected instruction.  Expected 'flds' (0xd9 /0)");

			/*
			 * If is_flds() succeeded then the instruction bytes
			 * contained an flds instruction that is 2-bytes in
			 * length (ie: no prefix, no SIB, no displacement).
			 */
			vcpu_regs_get(vm, VCPU_ID, &regs);
			regs.rip += 2;
			vcpu_regs_set(vm, VCPU_ID, &regs);
		}
	}
}

static void do_guest_assert(struct kvm_vm *vm, struct ucall *uc)
{
	TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0], __FILE__,
		  uc->args[1]);
}

static void check_for_guest_assert(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct ucall uc;

	if (run->exit_reason == KVM_EXIT_IO &&
	    get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
		do_guest_assert(vm, &uc);
	}
}

static void process_ucall_done(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct ucall uc;

	check_for_guest_assert(vm);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
		    uc.cmd, UCALL_DONE);
}

static uint64_t process_ucall(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct ucall uc;

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	switch (get_ucall(vm, VCPU_ID, &uc)) {
	case UCALL_SYNC:
		break;
	case UCALL_ABORT:
		do_guest_assert(vm, &uc);
		break;
	case UCALL_DONE:
		process_ucall_done(vm);
		break;
	default:
		TEST_ASSERT(false, "Unexpected ucall");
	}

	return uc.cmd;
}

int main(int argc, char *argv[])
{
	struct kvm_enable_cap emul_failure_cap = {
		.cap = KVM_CAP_EXIT_ON_EMULATION_FAILURE,
		.args[0] = 1,
	};
	struct kvm_cpuid_entry2 *entry;
	struct kvm_cpuid2 *cpuid;
	struct kvm_vm *vm;
	uint64_t gpa, pte;
	uint64_t *hva;
	int rc;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	vm = vm_create_default(VCPU_ID, 0, guest_code);

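	/*
	 * KVM only advertises KVM_CAP_SMALLER_MAXPHYADDR when the
	 * allow_smaller_maxphyaddr module parameter is set; without it KVM
	 * will not intercept and emulate accesses that hit physical-address
	 * bits the guest considers reserved.
	 */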
	if (!kvm_check_cap(KVM_CAP_SMALLER_MAXPHYADDR)) {
		printf("module parameter 'allow_smaller_maxphyaddr' is not set.  Skipping test.\n");
		return 0;
	}

	cpuid = kvm_get_supported_cpuid();

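	/*
	 * CPUID leaf 0x80000008 EAX[7:0] reports the guest's MAXPHYADDR;
	 * override it with the reduced value (36) defined above.
	 */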
	entry = kvm_get_supported_cpuid_index(0x80000008, 0);
	entry->eax = (entry->eax & 0xffffff00) | MAXPHYADDR;
	set_cpuid(cpuid, entry);

	vcpu_set_cpuid(vm, VCPU_ID, cpuid);

	rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
	TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
	vm_enable_cap(vm, &emul_failure_cap);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    MEM_REGION_GPA, MEM_REGION_SLOT,
				    MEM_REGION_SIZE / PAGE_SIZE, 0);
	gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
				 MEM_REGION_GPA, MEM_REGION_SLOT);
	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
	virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
	memset(hva, 0, PAGE_SIZE);
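	/*
	 * Set physical-address bit 36 in the guest PTE mapping MEM_REGION_GVA.
	 * With the guest's MAXPHYADDR reduced to 36, that bit is reserved from
	 * the guest's perspective, so the flds access triggers a reserved-bit
	 * page fault that KVM must emulate.
	 */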
	pte = vm_get_page_table_entry(vm, VCPU_ID, MEM_REGION_GVA);
	vm_set_page_table_entry(vm, VCPU_ID, MEM_REGION_GVA, pte | (1ull << 36));

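	/*
	 * First run: expect an emulation-failure exit and skip the flds.
	 * Second run: the guest resumes past the flds and reaches
	 * GUEST_DONE().
	 */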
	run_guest(vm);
	process_exit_on_emulation_error(vm);
	run_guest(vm);

	TEST_ASSERT(process_ucall(vm) == UCALL_DONE, "Expected UCALL_DONE");

	kvm_vm_free(vm);

	return 0;
}