cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmx_dirty_log_test.c (4312B)


// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define VCPU_ID				1

/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX		1
#define TEST_MEM_PAGES			3

/* L1 guest test virtual memory offset */
#define GUEST_TEST_MEM			0xc0000000

/* L2 guest test virtual memory offset */
#define NESTED_TEST_MEM1		0xc0001000
#define NESTED_TEST_MEM2		0xc0002000

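/*
 * L2 touches the test pages and synchronizes with the host after each step.
 * The GUEST_SYNC() argument tells the host whether it should expect page 0
 * of the test memslot to be reported dirty (true) or clean (false) when it
 * fetches the dirty log.
 */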
static void l2_guest_code(void)
{
	*(volatile uint64_t *)NESTED_TEST_MEM1;
	*(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
	GUEST_SYNC(true);
	GUEST_SYNC(false);

	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
	GUEST_SYNC(true);
	*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
	GUEST_SYNC(true);
	GUEST_SYNC(false);

	/* Exit to L1 and never come back.  */
	vmcall();
}

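/*
 * L1 sets up VMX, launches L2 on a small stack, and reports completion once
 * L2 exits back with VMCALL.
 */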
void l1_guest_code(struct vmx_pages *vmx)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));

	prepare_vmcs(vmx, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(false);
	GUEST_ASSERT(!vmlaunch());
	GUEST_SYNC(false);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;
	struct vmx_pages *vmx;
	unsigned long *bmap;
	uint64_t *host_test_mem;

	struct kvm_vm *vm;
	struct kvm_run *run;
	struct ucall uc;
	bool done = false;

	nested_vmx_check_supported();

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
	run = vcpu_state(vm, VCPU_ID);

	/* Add an extra memory slot for testing dirty logging */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    GUEST_TEST_MEM,
				    TEST_MEM_SLOT_INDEX,
				    TEST_MEM_PAGES,
				    KVM_MEM_LOG_DIRTY_PAGES);

	/*
	 * Add an identity map for GVA range [0xc0000000, 0xc0002000).  This
	 * affects both L1 and L2.  However...
	 */
	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);

	/*
	 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
	 * 0xc0000000.
	 *
	 * Note that prepare_eptp should be called only after L1's GPA map is
	 * done, meaning after the last call to virt_map.
	 */
	prepare_eptp(vmx, vm, 0);
	nested_map_memslot(vmx, vm, 0);
	nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
	nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
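	/*
	 * After the EPT setup above, both L2 GPAs 0xc0001000 and 0xc0002000
	 * alias L1 GPA 0xc0000000, so every L2 write lands in page 0 of the
	 * test memslot and must be reported there in the dirty log.
	 */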

	bmap = bitmap_zalloc(TEST_MEM_PAGES);
	host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);

	while (!done) {
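		/*
		 * Repaint the test pages with a known pattern so that any
		 * guest write (or the absence of one) can be verified below.
		 */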
		memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Unexpected exit reason: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			/*
			 * The nested guest wrote at offset 0x1000 in the memslot, but the
			 * dirty bitmap must be filled in according to L1 GPA, not L2.
			 */
			kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
			if (uc.args[1]) {
				TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
				TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
			} else {
				TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
				TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
			}

			TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
			TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
			TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
			TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}
}