cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tsc_msrs_test.c (4830B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Tests for MSR_IA32_TSC and MSR_IA32_TSC_ADJUST.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#include <stdio.h>
#include <string.h>
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 0

#define UNITY                  (1ull << 30)
#define HOST_ADJUST            (UNITY * 64)
#define GUEST_STEP             (UNITY * 4)
#define ROUND(x)               ((x + UNITY / 2) & -UNITY)
#define rounded_rdmsr(x)       ROUND(rdmsr(x))
#define rounded_host_rdmsr(x)  ROUND(vcpu_get_msr(vm, 0, x))
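
/*
 * UNITY is 2^30 ticks; ROUND() rounds to the nearest multiple of it.
 * The TSC keeps counting between a wrmsr and the rdmsr that checks it,
 * so exact equality would be flaky; rounding absorbs those in-between
 * ticks while GUEST_STEP (4 * UNITY) and HOST_ADJUST (64 * UNITY)
 * remain clearly distinguishable.
 */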

static void guest_code(void)
{
	u64 val = 0;

	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs.  */
	val = 1ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs.  */
	GUEST_SYNC(2);
	val = 2ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Host: setting the TSC offset.  */
	GUEST_SYNC(3);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	GUEST_SYNC(4);
	val = 3ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	GUEST_SYNC(5);
	val = 4ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	GUEST_DONE();
}

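/*
 * Run the vCPU until its next ucall exit.  Each GUEST_SYNC(n) in
 * guest_code() returns control here, so main() can interleave its
 * host-side checks with the guest-side stages above.
 */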
static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
{
	struct ucall uc;

	vcpu_args_set(vm, vcpuid, 1, vcpuid);

	vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);

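	/*
	 * GUEST_SYNC(n) arrives as UCALL_SYNC with args[0] == "hello" and
	 * args[1] == n; the stages run strictly in order, so the guest must
	 * report stage + 1 here.
	 */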
	switch (get_ucall(vm, vcpuid, &uc)) {
	case UCALL_SYNC:
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage + 1, (ulong)uc.args[1]);
		return;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		TEST_ASSERT(false, "%s at %s:%ld\n" \
			    "\tvalues: %#lx, %#lx", (const char *)uc.args[0],
			    __FILE__, uc.args[1], uc.args[2], uc.args[3]);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
	}
}

int main(void)
{
	struct kvm_vm *vm;
	uint64_t val;

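	/* Create a one-vCPU VM (no extra memory pages) entering guest_code. */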
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	val = 0;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs.  */
	run_vcpu(vm, VCPU_ID, 1);
	val = 1ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs.  */
	run_vcpu(vm, VCPU_ID, 2);
	val = 2ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Host: writes to MSR_IA32_TSC set the host-side offset
	 * and therefore do not change MSR_IA32_TSC_ADJUST.
	 */
	vcpu_set_msr(vm, 0, MSR_IA32_TSC, HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
	run_vcpu(vm, VCPU_ID, 3);

	/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC.  */
	vcpu_set_msr(vm, 0, MSR_IA32_TSC_ADJUST, UNITY * 123456);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(vcpu_get_msr(vm, 0, MSR_IA32_TSC_ADJUST), UNITY * 123456);

	/* Restore previous value.  */
	vcpu_set_msr(vm, 0, MSR_IA32_TSC_ADJUST, val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	run_vcpu(vm, VCPU_ID, 4);
	val = 3ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	run_vcpu(vm, VCPU_ID, 5);
	val = 4ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	kvm_vm_free(vm);

	return 0;
}
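
For reference, a minimal standalone sketch (not part of the test; UNITY and ROUND are copied from the macros above) of what the rounding arithmetic does: adding UNITY / 2 and then masking with -UNITY rounds a value to the nearest multiple of 2^30.

#include <inttypes.h>
#include <stdio.h>

#define UNITY    (1ull << 30)
#define ROUND(x) ((x + UNITY / 2) & -UNITY)

int main(void)
{
	/* Values a few ticks below or above a multiple of UNITY round to it. */
	printf("%" PRIu64 "\n", (uint64_t)ROUND(3 * UNITY - 5)); /* 3 * UNITY */
	printf("%" PRIu64 "\n", (uint64_t)ROUND(3 * UNITY + 5)); /* 3 * UNITY */
	/* The midpoint rounds up. */
	printf("%" PRIu64 "\n", (uint64_t)ROUND(3 * UNITY + UNITY / 2)); /* 4 * UNITY */
	return 0;
}

Because GUEST_STEP and HOST_ADJUST are whole multiples of UNITY, every expected value in the test survives this rounding intact.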