cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

vmx_apic_access_test.c (4108B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * vmx_apic_access_test
      4 *
      5 * Copyright (C) 2020, Google LLC.
      6 *
      7 * This work is licensed under the terms of the GNU GPL, version 2.
      8 *
      9 * The first subtest simply checks to see that an L2 guest can be
     10 * launched with a valid APIC-access address that is backed by a
     11 * page of L1 physical memory.
     12 *
     13 * The second subtest sets the APIC-access address to a (valid) L1
     14 * physical address that is not backed by memory. KVM can't handle
     15 * this situation, so resuming L2 should result in a KVM exit for
     16 * internal error (emulation). This is not an architectural
     17 * requirement. It is just a shortcoming of KVM. The internal error
     18 * is unfortunate, but it's better than what used to happen!
     19 */
     20
     21#include "test_util.h"
     22#include "kvm_util.h"
     23#include "processor.h"
     24#include "vmx.h"
     25
     26#include <string.h>
     27#include <sys/ioctl.h>
     28
     29#include "kselftest.h"
     30
/* The single vCPU the test creates and drives. */
#define VCPU_ID		0

/* The virtual machine object. */
static struct kvm_vm *vm;
     35
     36static void l2_guest_code(void)
     37{
     38	/* Exit to L1 */
     39	__asm__ __volatile__("vmcall");
     40}
     41
/*
 * L1 guest: enters VMX operation, configures a VMCS for l2_guest_code with
 * the "virtualize APIC accesses" control enabled, and launches L2 twice —
 * first with the memory-backed APIC-access page provided in vmx_pages, then
 * with high_gpa, a valid but unbacked L1 physical address.  Before each
 * attempt the current APIC_ACCESS_ADDR is reported to userspace via
 * GUEST_SYNC so main() knows which phase the guest is in.
 */
static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	/*
	 * Secondary controls must be activated in the primary controls
	 * before SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES takes effect.
	 */
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
	control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
	vmwrite(APIC_ACCESS_ADDR, vmx_pages->apic_access_gpa);

	/* Try to launch L2 with the memory-backed APIC-access address. */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Swap in the unbacked address for the second subtest. */
	vmwrite(APIC_ACCESS_ADDR, high_gpa);

	/*
	 * Try to resume L2 with the unbacked APIC-access address.  Per the
	 * file header, KVM is expected to bail to userspace with an internal
	 * error (emulation) here, so the asserts below should never actually
	 * execute in the failing phase — they guard against KVM unexpectedly
	 * completing the resume.
	 */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_DONE();
}
     76
     77int main(int argc, char *argv[])
     78{
     79	unsigned long apic_access_addr = ~0ul;
     80	unsigned int paddr_width;
     81	unsigned int vaddr_width;
     82	vm_vaddr_t vmx_pages_gva;
     83	unsigned long high_gpa;
     84	struct vmx_pages *vmx;
     85	bool done = false;
     86
     87	nested_vmx_check_supported();
     88
     89	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
     90
     91	kvm_get_cpu_address_width(&paddr_width, &vaddr_width);
     92	high_gpa = (1ul << paddr_width) - getpagesize();
     93	if ((unsigned long)DEFAULT_GUEST_PHY_PAGES * getpagesize() > high_gpa) {
     94		print_skip("No unbacked physical page available");
     95		exit(KSFT_SKIP);
     96	}
     97
     98	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
     99	prepare_virtualize_apic_accesses(vmx, vm);
    100	vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa);
    101
    102	while (!done) {
    103		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
    104		struct ucall uc;
    105
    106		vcpu_run(vm, VCPU_ID);
    107		if (apic_access_addr == high_gpa) {
    108			TEST_ASSERT(run->exit_reason ==
    109				    KVM_EXIT_INTERNAL_ERROR,
    110				    "Got exit reason other than KVM_EXIT_INTERNAL_ERROR: %u (%s)\n",
    111				    run->exit_reason,
    112				    exit_reason_str(run->exit_reason));
    113			TEST_ASSERT(run->internal.suberror ==
    114				    KVM_INTERNAL_ERROR_EMULATION,
    115				    "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n",
    116				    run->internal.suberror);
    117			break;
    118		}
    119		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
    120			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
    121			    run->exit_reason,
    122			    exit_reason_str(run->exit_reason));
    123
    124		switch (get_ucall(vm, VCPU_ID, &uc)) {
    125		case UCALL_ABORT:
    126			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
    127				  __FILE__, uc.args[1]);
    128			/* NOT REACHED */
    129		case UCALL_SYNC:
    130			apic_access_addr = uc.args[1];
    131			break;
    132		case UCALL_DONE:
    133			done = true;
    134			break;
    135		default:
    136			TEST_ASSERT(false, "Unknown ucall %lu", uc.cmd);
    137		}
    138	}
    139	kvm_vm_free(vm);
    140	return 0;
    141}