cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xen_vmcall_test.c (4172B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * xen_vmcall_test
 *
 * Copyright © 2020 Amazon.com, Inc. or its affiliates.
 *
 * Userspace hypercall testing
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID		5

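/* GPA and memslot of the two-page region backing the Xen and Hyper-V hypercall pages */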
#define HCALL_REGION_GPA	0xc0000000ULL
#define HCALL_REGION_SLOT	10

static struct kvm_vm *vm;

#define INPUTVALUE 17
#define ARGVALUE(x) (0xdeadbeef5a5a0000UL + x)
#define RETVALUE 0xcafef00dfbfbffffUL

#define XEN_HYPERCALL_MSR	0x40000200
#define HV_GUEST_OS_ID_MSR	0x40000000
#define HV_HYPERCALL_MSR	0x40000001

#define HVCALL_SIGNAL_EVENT		0x005d
#define HV_STATUS_INVALID_ALIGNMENT	4

static void guest_code(void)
{
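	/* Xen 64-bit hypercall ABI: call number in rax, arguments in rdi, rsi, rdx, r10, r8, r9 */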
	unsigned long rax = INPUTVALUE;
	unsigned long rdi = ARGVALUE(1);
	unsigned long rsi = ARGVALUE(2);
	unsigned long rdx = ARGVALUE(3);
	unsigned long rcx;
	register unsigned long r10 __asm__("r10") = ARGVALUE(4);
	register unsigned long r8 __asm__("r8") = ARGVALUE(5);
	register unsigned long r9 __asm__("r9") = ARGVALUE(6);

	/* First a direct invocation of 'vmcall' */
	__asm__ __volatile__("vmcall" :
			     "=a"(rax) :
			     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
			     "r"(r10), "r"(r8), "r"(r9));
	GUEST_ASSERT(rax == RETVALUE);

	/* Fill in the Xen hypercall page */
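	/* Writing the page's GPA to the configured MSR makes KVM fill it with hypercall stubs */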
	__asm__ __volatile__("wrmsr" : : "c" (XEN_HYPERCALL_MSR),
			     "a" (HCALL_REGION_GPA & 0xffffffff),
			     "d" (HCALL_REGION_GPA >> 32));

	/* Set Hyper-V Guest OS ID */
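	/* The Hyper-V hypercall page cannot be enabled until a non-zero guest OS ID is set */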
	__asm__ __volatile__("wrmsr" : : "c" (HV_GUEST_OS_ID_MSR),
			     "a" (0x5a), "d" (0));

	/* Hyper-V hypercall page */
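	/* Bit 0 of the MSR value is the enable bit; the page itself is the second page of the region */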
	u64 msrval = HCALL_REGION_GPA + PAGE_SIZE + 1;
	__asm__ __volatile__("wrmsr" : : "c" (HV_HYPERCALL_MSR),
			     "a" (msrval & 0xffffffff),
			     "d" (msrval >> 32));

	/* Invoke a Xen hypercall */
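	/* Each hypercall has a 32-byte stub in the page, so call number INPUTVALUE sits at offset INPUTVALUE * 32 */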
	__asm__ __volatile__("call *%1" : "=a"(rax) :
			     "r"(HCALL_REGION_GPA + INPUTVALUE * 32),
			     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
			     "r"(r10), "r"(r8), "r"(r9));
	GUEST_ASSERT(rax == RETVALUE);

	/* Invoke a Hyper-V hypercall */
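	/* Hyper-V 64-bit ABI: call code in rcx, input GPA in rdx; a misaligned GPA must fail the alignment check */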
	rax = 0;
	rcx = HVCALL_SIGNAL_EVENT;	/* code */
	rdx = 0x5a5a5a5a;		/* ingpa (badly aligned) */
	__asm__ __volatile__("call *%1" : "=a"(rax) :
			     "r"(HCALL_REGION_GPA + PAGE_SIZE),
			     "a"(rax), "c"(rcx), "d"(rdx),
			     "r"(r8));
	GUEST_ASSERT(rax == HV_STATUS_INVALID_ALIGNMENT);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	if (!(kvm_check_cap(KVM_CAP_XEN_HVM) &
	      KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)) {
		print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available");
		exit(KSFT_SKIP);
	}

	vm = vm_create_default(VCPU_ID, 0, (void *) guest_code);
	vcpu_set_hv_cpuid(vm, VCPU_ID);

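	/* Ask KVM to intercept Xen hypercalls and forward them to userspace */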
	struct kvm_xen_hvm_config hvmc = {
		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
		.msr = XEN_HYPERCALL_MSR,
	};
	vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);

	/* Map a region for the hypercall pages */
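	/* Two pages: the Xen hypercall page, followed by the Hyper-V one */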
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    HCALL_REGION_GPA, HCALL_REGION_SLOT, 2, 0);
	virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2);

	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct ucall uc;

		vcpu_run(vm, VCPU_ID);

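		/* Intercepted hypercalls exit with KVM_EXIT_XEN; userspace supplies the value the guest sees in rax */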
		if (run->exit_reason == KVM_EXIT_XEN) {
			ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
			ASSERT_EQ(run->xen.u.hcall.cpl, 0);
			ASSERT_EQ(run->xen.u.hcall.longmode, 1);
			ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
			ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
			ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
			ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
			ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
			ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
			ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
			run->xen.u.hcall.result = RETVALUE;
			continue;
		}

		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s", (const char *)uc.args[0]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
		}
	}
done:
	kvm_vm_free(vm);
	return 0;
}