cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

evmcs_test.c (7517B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for Enlightened VMCS, including nested guest state.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bitmap.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"

#define VCPU_ID		5
#define NMI_VECTOR	2

static int ud_count;

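/*
 * #UD handler: the test expects exactly one #UD, from the final VMLAUNCH
 * issued after an enlightened vmptrld with a bogus GPA.  Count it and skip
 * the 3-byte VMLAUNCH encoding (0f 01 c2) so the guest can continue.
 */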
static void guest_ud_handler(struct ex_regs *regs)
{
	ud_count++;
	regs->rip += 3; /* VMLAUNCH */
}

static void guest_nmi_handler(struct ex_regs *regs)
{
}

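/*
 * 0xc0000100 is MSR_FS_BASE, 0xc0000101 is MSR_GS_BASE.  An intercepted
 * RDMSR exits to L1, which does not preserve L2's GPRs, so every GPR is
 * listed as clobbered.
 */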
/* Exits to L1 destroy GPRs! */
static inline void rdmsr_fs_base(void)
{
	__asm__ __volatile__ ("mov $0xc0000100, %%rcx; rdmsr" : : :
			      "rax", "rbx", "rcx", "rdx",
			      "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
			      "r13", "r14", "r15");
}
static inline void rdmsr_gs_base(void)
{
	__asm__ __volatile__ ("mov $0xc0000101, %%rcx; rdmsr" : : :
			      "rax", "rbx", "rcx", "rdx",
			      "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
			      "r13", "r14", "r15");
}
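/*
 * L2: sync stages 7-9 with the host (each GUEST_SYNC is a port I/O exit
 * all the way out to L0), then issue the RDMSRs whose interception the
 * L1 code below toggles via the (enlightened) MSR bitmap.
 */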
void l2_guest_code(void)
{
	GUEST_SYNC(7);

	GUEST_SYNC(8);

	/* Forced exit to L1 upon restore */
	GUEST_SYNC(9);

	vmcall();

	/* MSR-Bitmap tests */
	rdmsr_fs_base(); /* intercepted */
	rdmsr_fs_base(); /* intercepted */
	rdmsr_gs_base(); /* not intercepted */
	vmcall();
	rdmsr_gs_base(); /* intercepted */

	/* Done, exit to L1 and never come back.  */
	vmcall();
}

void guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	x2apic_enable();

	GUEST_SYNC(1);
	GUEST_SYNC(2);

	enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
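	/*
	 * An eVMCS with a bogus revision id must be rejected: vmlaunch()
	 * is expected to fail, after which the proper EVMCS_VERSION is
	 * restored.
	 */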
	current_evmcs->revision_id = -1u;
	GUEST_ASSERT(vmlaunch());
	current_evmcs->revision_id = EVMCS_VERSION;
	GUEST_SYNC(6);

	vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
		PIN_BASED_NMI_EXITING);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);

	/*
	 * NMI forces an L2->L1 exit; resume L2 and check that the eVMCS is
	 * up-to-date (RIP points where it should, not at the beginning of
	 * l2_guest_code()).  GUEST_SYNC(9) checks that.
	 */
	GUEST_ASSERT(!vmresume());

	GUEST_SYNC(10);

	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	current_evmcs->guest_rip += 3; /* vmcall */

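	/*
	 * The 4K MSR bitmap is split into four 1K regions; the read bitmap
	 * for the high MSRs (0xc0000000-0xc0001fff) starts at offset 0x400,
	 * hence the "& 0x1fff" index into msr + 0x400 below.
	 */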
	/* Intercept RDMSR 0xc0000100 */
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmreadz(CPU_BASED_VM_EXEC_CONTROL) |
		CPU_BASED_USE_MSR_BITMAPS);
	set_bit(MSR_FS_BASE & 0x1fff, vmx_pages->msr + 0x400);
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
	current_evmcs->guest_rip += 2; /* rdmsr */

	/* Enable enlightened MSR bitmap */
	current_evmcs->hv_enlightenments_control.msr_bitmap = 1;
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
	current_evmcs->guest_rip += 2; /* rdmsr */

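	/*
	 * eVMCS clean fields: a set bit tells the hypervisor that the
	 * corresponding part of the eVMCS is unchanged and may be served
	 * from its cache.  While CLEAN_FIELD_MSR_BITMAP is set, KVM keeps
	 * using its cached bitmap, so the new GS_BASE intercept must not
	 * take effect until the bit is cleared below.
	 */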
	/* Intercept RDMSR 0xc0000101 without telling KVM about it */
	set_bit(MSR_GS_BASE & 0x1fff, vmx_pages->msr + 0x400);
	/* Make sure HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP is set */
	current_evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
	GUEST_ASSERT(!vmresume());
	/* Make sure we don't see EXIT_REASON_MSR_READ here so eMSR bitmap works */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	current_evmcs->guest_rip += 3; /* vmcall */

	/* Now tell KVM we've changed MSR-Bitmap */
	current_evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
	current_evmcs->guest_rip += 2; /* rdmsr */

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_SYNC(11);

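	/*
	 * A bogus eVMCS GPA makes the enlightened vmptrld fail, so the
	 * following VMLAUNCH is expected to raise #UD exactly once;
	 * guest_ud_handler() counts it and skips past the instruction.
	 */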
	/* Try enlightened vmptrld with an incorrect GPA */
	evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(ud_count == 1);
	GUEST_DONE();
}

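/*
 * Make an NMI pending via KVM_SET_VCPU_EVENTS; with PIN_BASED_NMI_EXITING
 * enabled by L1, the NMI forces an immediate L2->L1 exit on the next
 * KVM_RUN.
 */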
void inject_nmi(struct kvm_vm *vm)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vm, VCPU_ID, &events);

	events.nmi.pending = 1;
	events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;

	vcpu_events_set(vm, VCPU_ID, &events);
}

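/*
 * Round-trip the vCPU through the state save/restore ioctls: save
 * everything, release the VM, restart it and restore the state, then
 * verify the GPRs match their pre-save values.
 */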
static void save_restore_vm(struct kvm_vm *vm)
{
	struct kvm_regs regs1, regs2;
	struct kvm_x86_state *state;

	state = vcpu_save_state(vm, VCPU_ID);
	memset(&regs1, 0, sizeof(regs1));
	vcpu_regs_get(vm, VCPU_ID, &regs1);

	kvm_vm_release(vm);

	/* Restore state in a new VM.  */
	kvm_vm_restart(vm, O_RDWR);
	vm_vcpu_add(vm, VCPU_ID);
	vcpu_set_hv_cpuid(vm, VCPU_ID);
	vcpu_enable_evmcs(vm, VCPU_ID);
	vcpu_load_state(vm, VCPU_ID, state);
	kvm_x86_state_cleanup(state);

	memset(&regs2, 0, sizeof(regs2));
	vcpu_regs_get(vm, VCPU_ID, &regs2);
	TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
		    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
		    (ulong) regs2.rdi, (ulong) regs2.rsi);
}


int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_vm *vm;
	struct kvm_run *run;
	struct ucall uc;
	int stage;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	if (!nested_vmx_supported() ||
	    !kvm_check_cap(KVM_CAP_NESTED_STATE) ||
	    !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
		print_skip("Enlightened VMCS is unsupported");
		exit(KSFT_SKIP);
	}

	vcpu_set_hv_cpuid(vm, VCPU_ID);
	vcpu_enable_evmcs(vm, VCPU_ID);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);
	vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
	vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);

	pr_info("Running L1 which uses EVMCS to run L2\n");

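	/*
	 * Each iteration runs the vCPU to the next GUEST_SYNC, checks the
	 * stage number, then exercises save/restore; stages 8 and 9 add
	 * the NMI-injection and extra nested-state round-trip twists.
	 */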
	for (stage = 1;; stage++) {
		run = vcpu_state(vm, VCPU_ID);
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: unexpected GUEST_SYNC value, got %lx",
			    stage, (ulong)uc.args[1]);

		save_restore_vm(vm);

		/* Force immediate L2->L1 exit before resuming */
		if (stage == 8) {
			pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
			inject_nmi(vm);
		}

		/*
		 * Do KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE for a freshly
		 * restored VM (before the first KVM_RUN) to check that
		 * KVM_STATE_NESTED_EVMCS is not lost.
		 */
		if (stage == 9) {
			pr_info("Trying extra KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE cycle\n");
			save_restore_vm(vm);
		}
	}

done:
	kvm_vm_free(vm);
}