cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmx_tsc_adjust_test.c (3898B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_tsc_adjust_test
 *
 * Copyright (C) 2018, Google LLC.
 *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR".
 *
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */
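
/*
 * Worked example of the SDM rule quoted above, using this file's
 * constants (derived from the code below, not part of the original
 * file):
 *
 *   - IA32_TSC_ADJUST starts at 0.
 *   - L1 executes WRMSR(IA32_TSC, rdtsc() - TSC_ADJUST_VALUE), i.e. it
 *     subtracts 2^32 from the TSC, so IA32_TSC_ADJUST becomes -2^32.
 *   - L2 executes WRMSR(IA32_TSC, l1_tsc - TSC_ADJUST_VALUE). Because
 *     L1 does not intercept the write, it subtracts another 2^32 from
 *     L1's TSC, so IA32_TSC_ADJUST becomes -2 * 2^32.
 *
 * check_ia32_tsc_adjust() asserts these upper bounds at each step.
 */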

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define VCPU_ID		5

#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)
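
/*
 * TSC_ADJUST_VALUE (2^32) above is the delta subtracted from the TSC
 * via WRMSR(IA32_TSC); TSC_OFFSET_VALUE (-2^48) is written into the
 * VMCS TSC_OFFSET field below, so RDTSC in L2 returns L1's TSC plus
 * TSC_OFFSET_VALUE.
 */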

enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};

struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;

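/*
 * Read IA32_TSC_ADJUST in the guest, report its value to the host via
 * GUEST_SYNC, and assert that it has not risen above the expected
 * upper bound.
 */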
static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust;

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	GUEST_SYNC(adjust);
	GUEST_ASSERT(adjust <= max);
}

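/*
 * L2 reads its own TSC, which (with TSC offsetting enabled) is L1's
 * TSC plus TSC_OFFSET_VALUE, and recovers L1's TSC by subtracting the
 * offset. The WRMSR then targets L1's TSC: since L1 does not intercept
 * IA32_TSC writes, the write lands on L1's TSC and drags
 * IA32_TSC_ADJUST down by another TSC_ADJUST_VALUE.
 */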
static void l2_guest_code(void)
{
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/*
	 * Prepare the VMCS for L2 execution: enable MSR bitmaps (the
	 * bitmap is left zeroed, so L1 does not intercept WRMSR(IA32_TSC)
	 * from L2, as the header comment notes) and TSC offsetting (so
	 * RDTSC in L2 returns L1's TSC plus TSC_OFFSET_VALUE).
	 */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/*
	 * Jump into L2. First, deliberately break the guest CR3 so that
	 * VM-entry fails, and verify that the failed entry leaves
	 * IA32_TSC_ADJUST untouched.
	 */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	GUEST_DONE();
}

static void report(int64_t val)
{
	pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
		val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva;

	nested_vmx_check_supported();

	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);

	/* Allocate VMX pages and shared descriptors (vmx_pages). */
	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

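	/*
	 * Run the vCPU until the guest signals completion. Each
	 * UCALL_SYNC carries the guest's latest IA32_TSC_ADJUST reading
	 * in uc.args[1]; the host only reports it, the assertions
	 * themselves run in the guest.
	 */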
	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct ucall uc;

		vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s", (const char *)uc.args[0]);
			/* NOT REACHED */
		case UCALL_SYNC:
			report(uc.args[1]);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}