cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

set_memory_region_test.c (11071B)
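
This is the KVM selftest that exercises KVM_SET_USER_MEMORY_REGION while a vCPU is running: memslots are moved, deleted and recreated under load, and the test checks what the guest and KVM report back. The library wrappers used throughout (vm_userspace_mem_region_add(), vm_mem_region_move(), vm_mem_region_delete(), vm_set_user_memory_region()) ultimately drive that one VM ioctl. Below is a minimal sketch of the underlying call; the add_slot() helper and fd name are hypothetical, while the GPA/size/slot values are the test's own defines.

	/* Sketch only: add one 2MB slot backed by an existing host mapping. */
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int add_slot(int vm_fd, void *host_mem)
	{
		struct kvm_userspace_memory_region region = {
			.slot            = 10,          /* MEM_REGION_SLOT */
			.flags           = 0,
			.guest_phys_addr = 0xc0000000,  /* MEM_REGION_GPA */
			.memory_size     = 0x200000,    /* MEM_REGION_SIZE */
			.userspace_addr  = (unsigned long)host_mem,
		};

		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}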


      1// SPDX-License-Identifier: GPL-2.0
      2#define _GNU_SOURCE /* for program_invocation_short_name */
      3#include <fcntl.h>
      4#include <pthread.h>
      5#include <sched.h>
      6#include <semaphore.h>
      7#include <signal.h>
      8#include <stdio.h>
      9#include <stdlib.h>
     10#include <string.h>
     11#include <sys/ioctl.h>
     12#include <sys/mman.h>
     13
     14#include <linux/compiler.h>
     15
     16#include <test_util.h>
     17#include <kvm_util.h>
     18#include <processor.h>
     19
     20#define VCPU_ID 0
     21
     22/*
     23 * s390x needs at least 1MB alignment, and the x86_64 MOVE/DELETE tests need a
     24 * 2MB sized and aligned region so that the initial region corresponds to
     25 * exactly one large page.
     26 */
     27#define MEM_REGION_SIZE		0x200000
     28
     29#ifdef __x86_64__
     30/*
     31 * Somewhat arbitrary location and slot, intended to not overlap anything.
     32 */
     33#define MEM_REGION_GPA		0xc0000000
     34#define MEM_REGION_SLOT		10
     35
     36static const uint64_t MMIO_VAL = 0xbeefull;
     37
     38extern const uint64_t final_rip_start;
     39extern const uint64_t final_rip_end;
     40
     41static sem_t vcpu_ready;
     42
     43static inline uint64_t guest_spin_on_val(uint64_t spin_val)
     44{
     45	uint64_t val;
     46
     47	do {
     48		val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
     49	} while (val == spin_val);
     50
     51	GUEST_SYNC(0);
     52	return val;
     53}
     54
     55static void *vcpu_worker(void *data)
     56{
     57	struct kvm_vm *vm = data;
     58	struct kvm_run *run;
     59	struct ucall uc;
     60	uint64_t cmd;
     61
     62	/*
     63	 * Loop until the guest is done.  Re-enter the guest on all MMIO exits,
     64	 * which will occur if the guest attempts to access a memslot after it
      65	 * has been deleted or while it is being moved.
     66	 */
     67	run = vcpu_state(vm, VCPU_ID);
     68
     69	while (1) {
     70		vcpu_run(vm, VCPU_ID);
     71
     72		if (run->exit_reason == KVM_EXIT_IO) {
     73			cmd = get_ucall(vm, VCPU_ID, &uc);
     74			if (cmd != UCALL_SYNC)
     75				break;
     76
     77			sem_post(&vcpu_ready);
     78			continue;
     79		}
     80
     81		if (run->exit_reason != KVM_EXIT_MMIO)
     82			break;
     83
     84		TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
     85		TEST_ASSERT(run->mmio.len == 8,
     86			    "Unexpected exit mmio size = %u", run->mmio.len);
     87
     88		TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
     89			    "Unexpected exit mmio address = 0x%llx",
     90			    run->mmio.phys_addr);
     91		memcpy(run->mmio.data, &MMIO_VAL, 8);
     92	}
     93
     94	if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
     95		TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
     96			  __FILE__, uc.args[1], uc.args[2]);
     97
     98	return NULL;
     99}
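
The UCALL_SYNC/UCALL_ABORT commands handled above come from the selftest ucall mechanism: on x86 the guest-side GUEST_SYNC()/GUEST_ASSERT() macros report to the host via port I/O, which is why vcpu_worker() treats KVM_EXIT_IO as a notification from the guest. Purely as an illustration (the real ucall() lives in the selftest library and uses its own port and argument passing), a guest can force such an exit with a single port write that the VMM does not emulate:

	/* Illustrative only; port 0x3f0 is an arbitrary, hypothetical choice. */
	static inline void guest_notify_host(uint8_t val)
	{
		asm volatile("outb %0, %1" : : "a"(val), "Nd"((uint16_t)0x3f0));
	}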
    100
    101static void wait_for_vcpu(void)
    102{
    103	struct timespec ts;
    104
    105	TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
    106		    "clock_gettime() failed: %d\n", errno);
    107
    108	ts.tv_sec += 2;
    109	TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
    110		    "sem_timedwait() failed: %d\n", errno);
    111
    112	/* Wait for the vCPU thread to reenter the guest. */
    113	usleep(100000);
    114}
    115
    116static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
    117{
    118	struct kvm_vm *vm;
    119	uint64_t *hva;
    120	uint64_t gpa;
    121
    122	vm = vm_create_default(VCPU_ID, 0, guest_code);
    123
    124	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
    125				    MEM_REGION_GPA, MEM_REGION_SLOT,
    126				    MEM_REGION_SIZE / getpagesize(), 0);
    127
    128	/*
    129	 * Allocate and map two pages so that the GPA accessed by guest_code()
    130	 * stays valid across the memslot move.
    131	 */
    132	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
    133	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
    134
    135	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
    136
    137	/* Ditto for the host mapping so that both pages can be zeroed. */
    138	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
    139	memset(hva, 0, 2 * 4096);
    140
    141	pthread_create(vcpu_thread, NULL, vcpu_worker, vm);
    142
    143	/* Ensure the guest thread is spun up. */
    144	wait_for_vcpu();
    145
    146	return vm;
    147}
    148
    149
    150static void guest_code_move_memory_region(void)
    151{
    152	uint64_t val;
    153
    154	GUEST_SYNC(0);
    155
    156	/*
    157	 * Spin until the memory region starts getting moved to a
    158	 * misaligned address.
    159	 * Every region move may or may not trigger MMIO, as the
    160	 * window where the memslot is invalid is usually quite small.
    161	 */
    162	val = guest_spin_on_val(0);
    163	GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
    164
    165	/* Spin until the misaligning memory region move completes. */
    166	val = guest_spin_on_val(MMIO_VAL);
    167	GUEST_ASSERT_1(val == 1 || val == 0, val);
    168
    169	/* Spin until the memory region starts to get re-aligned. */
    170	val = guest_spin_on_val(0);
    171	GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
    172
    173	/* Spin until the re-aligning memory region move completes. */
    174	val = guest_spin_on_val(MMIO_VAL);
    175	GUEST_ASSERT_1(val == 1, val);
    176
    177	GUEST_DONE();
    178}
    179
    180static void test_move_memory_region(void)
    181{
    182	pthread_t vcpu_thread;
    183	struct kvm_vm *vm;
    184	uint64_t *hva;
    185
    186	vm = spawn_vm(&vcpu_thread, guest_code_move_memory_region);
    187
    188	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
    189
    190	/*
    191	 * Shift the region's base GPA.  The guest should not see "2" as the
    192	 * hva->gpa translation is misaligned, i.e. the guest is accessing a
    193	 * different host pfn.
    194	 */
    195	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
    196	WRITE_ONCE(*hva, 2);
    197
    198	/*
    199	 * The guest _might_ see an invalid memslot and trigger MMIO, but it's
    200	 * a tiny window.  Spin and defer the sync until the memslot is
    201	 * restored and guest behavior is once again deterministic.
    202	 */
    203	usleep(100000);
    204
    205	/*
    206	 * Note, value in memory needs to be changed *before* restoring the
    207	 * memslot, else the guest could race the update and see "2".
    208	 */
    209	WRITE_ONCE(*hva, 1);
    210
    211	/* Restore the original base, the guest should see "1". */
    212	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
    213	wait_for_vcpu();
     214	/* Deferred sync from when the memslot was misaligned (above). */
    215	wait_for_vcpu();
    216
    217	pthread_join(vcpu_thread, NULL);
    218
    219	kvm_vm_free(vm);
    220}
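
vm_mem_region_move() is a thin wrapper: a slot is moved by re-issuing KVM_SET_USER_MEMORY_REGION for the same slot with a new guest_phys_addr, which KVM services by briefly invalidating the old slot before installing the new one; that short invalid window is exactly what the MMIO handling above tolerates. A rough sketch, reusing the hypothetical region struct from the top of this page:

	/* Sketch: shift the slot's base GPA down by one page (the "misalign" step). */
	region.guest_phys_addr = 0xc0000000 - 4096;
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);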
    221
    222static void guest_code_delete_memory_region(void)
    223{
    224	uint64_t val;
    225
    226	GUEST_SYNC(0);
    227
    228	/* Spin until the memory region is deleted. */
    229	val = guest_spin_on_val(0);
    230	GUEST_ASSERT_1(val == MMIO_VAL, val);
    231
    232	/* Spin until the memory region is recreated. */
    233	val = guest_spin_on_val(MMIO_VAL);
    234	GUEST_ASSERT_1(val == 0, val);
    235
    236	/* Spin until the memory region is deleted. */
    237	val = guest_spin_on_val(0);
    238	GUEST_ASSERT_1(val == MMIO_VAL, val);
    239
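	/*
	 * The two .rodata quads below record the guest RIP range around the
	 * final spin so the host can check where the vCPU stopped once the
	 * code memslot is gone.
	 */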
    240	asm("1:\n\t"
    241	    ".pushsection .rodata\n\t"
    242	    ".global final_rip_start\n\t"
    243	    "final_rip_start: .quad 1b\n\t"
    244	    ".popsection");
    245
    246	/* Spin indefinitely (until the code memslot is deleted). */
    247	guest_spin_on_val(MMIO_VAL);
    248
    249	asm("1:\n\t"
    250	    ".pushsection .rodata\n\t"
    251	    ".global final_rip_end\n\t"
    252	    "final_rip_end: .quad 1b\n\t"
    253	    ".popsection");
    254
    255	GUEST_ASSERT_1(0, 0);
    256}
    257
    258static void test_delete_memory_region(void)
    259{
    260	pthread_t vcpu_thread;
    261	struct kvm_regs regs;
    262	struct kvm_run *run;
    263	struct kvm_vm *vm;
    264
    265	vm = spawn_vm(&vcpu_thread, guest_code_delete_memory_region);
    266
    267	/* Delete the memory region, the guest should not die. */
    268	vm_mem_region_delete(vm, MEM_REGION_SLOT);
    269	wait_for_vcpu();
    270
    271	/* Recreate the memory region.  The guest should see "0". */
    272	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
    273				    MEM_REGION_GPA, MEM_REGION_SLOT,
    274				    MEM_REGION_SIZE / getpagesize(), 0);
    275	wait_for_vcpu();
    276
    277	/* Delete the region again so that there's only one memslot left. */
    278	vm_mem_region_delete(vm, MEM_REGION_SLOT);
    279	wait_for_vcpu();
    280
    281	/*
    282	 * Delete the primary memslot.  This should cause an emulation error or
    283	 * shutdown due to the page tables getting nuked.
    284	 */
    285	vm_mem_region_delete(vm, 0);
    286
    287	pthread_join(vcpu_thread, NULL);
    288
    289	run = vcpu_state(vm, VCPU_ID);
    290
    291	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
    292		    run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
    293		    "Unexpected exit reason = %d", run->exit_reason);
    294
    295	vcpu_regs_get(vm, VCPU_ID, &regs);
    296
    297	/*
     298	 * On AMD, after KVM_EXIT_SHUTDOWN the VMCB has already been reinitialized,
     299	 * so the instruction pointer points at the reset vector.
    300	 */
    301	if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
    302		TEST_ASSERT(regs.rip >= final_rip_start &&
    303			    regs.rip < final_rip_end,
    304			    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
    305			    final_rip_start, final_rip_end, regs.rip);
    306
    307	kvm_vm_free(vm);
    308}
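
Deleting a slot (vm_mem_region_delete() above) boils down to the same ioctl with memory_size set to zero, e.g. continuing the hypothetical sketch:

	/* Sketch: a zero-sized region deletes the slot. */
	region.memory_size = 0;
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);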
    309
    310static void test_zero_memory_regions(void)
    311{
    312	struct kvm_run *run;
    313	struct kvm_vm *vm;
    314
    315	pr_info("Testing KVM_RUN with zero added memory regions\n");
    316
    317	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
    318	vm_vcpu_add(vm, VCPU_ID);
    319
    320	TEST_ASSERT(!ioctl(vm_get_fd(vm), KVM_SET_NR_MMU_PAGES, 64),
    321		    "KVM_SET_NR_MMU_PAGES failed, errno = %d\n", errno);
    322	vcpu_run(vm, VCPU_ID);
    323
    324	run = vcpu_state(vm, VCPU_ID);
    325	TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
    326		    "Unexpected exit_reason = %u\n", run->exit_reason);
    327
    328	kvm_vm_free(vm);
    329}
    330#endif /* __x86_64__ */
    331
    332/*
     333 * Test that memory slots can be added up to KVM_CAP_NR_MEMSLOTS, and that
     334 * any attempt to add further slots fails.
    335 */
    336static void test_add_max_memory_regions(void)
    337{
    338	int ret;
    339	struct kvm_vm *vm;
    340	uint32_t max_mem_slots;
    341	uint32_t slot;
    342	void *mem, *mem_aligned, *mem_extra;
    343	size_t alignment;
    344
    345#ifdef __s390x__
    346	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
    347	alignment = 0x100000;
    348#else
    349	alignment = 1;
    350#endif
    351
    352	max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
    353	TEST_ASSERT(max_mem_slots > 0,
    354		    "KVM_CAP_NR_MEMSLOTS should be greater than 0");
    355	pr_info("Allowed number of memory slots: %i\n", max_mem_slots);
    356
    357	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
    358
     359	/* Check that memory slots can be added up to the maximum allowed */
    360	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
    361		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
    362
    363	mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
    364		   PROT_READ | PROT_WRITE,
    365		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    366	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
    367	mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
    368
    369	for (slot = 0; slot < max_mem_slots; slot++)
    370		vm_set_user_memory_region(vm, slot, 0,
    371					  ((uint64_t)slot * MEM_REGION_SIZE),
    372					  MEM_REGION_SIZE,
    373					  mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);
    374
     375	/* Check that memory slots cannot be added beyond the limit */
    376	mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
    377			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    378	TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");
    379
    380	ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
    381					  (uint64_t)max_mem_slots * MEM_REGION_SIZE,
    382					  MEM_REGION_SIZE, mem_extra);
    383	TEST_ASSERT(ret == -1 && errno == EINVAL,
    384		    "Adding one more memory slot should fail with EINVAL");
    385
    386	munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
    387	munmap(mem_extra, MEM_REGION_SIZE);
    388	kvm_vm_free(vm);
    389}
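
kvm_check_cap() wraps the system-level KVM_CHECK_EXTENSION query on /dev/kvm. A self-contained sketch of the same lookup, with a hypothetical query_nr_memslots() helper:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: ask the kernel how many memslots it supports (<= 0: unsupported). */
	static int query_nr_memslots(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);
		int nr;

		if (kvm_fd < 0)
			return -1;
		nr = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
		close(kvm_fd);
		return nr;
	}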
    390
    391int main(int argc, char *argv[])
    392{
    393#ifdef __x86_64__
    394	int i, loops;
    395#endif
    396
    397	/* Tell stdout not to buffer its content */
    398	setbuf(stdout, NULL);
    399
    400#ifdef __x86_64__
    401	/*
    402	 * FIXME: the zero-memslot test fails on aarch64 and s390x because
    403	 * KVM_RUN fails with ENOEXEC or EFAULT.
    404	 */
    405	test_zero_memory_regions();
    406#endif
    407
    408	test_add_max_memory_regions();
    409
    410#ifdef __x86_64__
    411	if (argc > 1)
    412		loops = atoi(argv[1]);
    413	else
    414		loops = 10;
    415
    416	pr_info("Testing MOVE of in-use region, %d loops\n", loops);
    417	for (i = 0; i < loops; i++)
    418		test_move_memory_region();
    419
    420	pr_info("Testing DELETE of in-use region, %d loops\n", loops);
    421	for (i = 0; i < loops; i++)
    422		test_delete_memory_region();
    423#endif
    424
    425	return 0;
    426}
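
main() accepts an optional loop count for the x86-only MOVE and DELETE stress loops (default 10). Assuming the usual kernel selftests build under tools/testing/selftests/kvm, a longer run would look like ./set_memory_region_test 1000.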