cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

access_tracking_perf_test.c (11179B)


// SPDX-License-Identifier: GPL-2.0
/*
 * access_tracking_perf_test
 *
 * Copyright (C) 2021, Google, Inc.
 *
 * This test measures the performance effects of KVM's access tracking.
 * Access tracking is driven by the MMU notifiers test_young, clear_young, and
 * clear_flush_young. These notifiers do not have a direct userspace API,
 * however the clear_young notifier can be triggered by marking a page as idle
 * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
 * enable access tracking on guest memory.
 *
 * To measure performance this test runs a VM with a configurable number of
 * vCPUs that each touch every page in disjoint regions of memory. Performance
 * is measured in the time it takes all vCPUs to finish touching their
 * predefined region.
 *
 * Note that a deterministic correctness test of access tracking is not
 * possible using page_idle as it exists today. This is for a few reasons:
 *
 * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
 *    means subsequent guest accesses are not guaranteed to see page table
 *    updates made by KVM until some time in the future.
 *
 * 2. page_idle only operates on LRU pages. Newly allocated pages are not
 *    immediately added to LRU lists. Instead they are held in a "pagevec",
 *    which is drained to LRU lists some time in the future. There is no
 *    userspace API to force this drain to occur.
 *
 * These limitations are worked around in this test by using a large enough
 * region of memory for each vCPU such that the number of translations cached
 * in the TLB and the number of pages held in pagevecs are a small fraction of
 * the overall workload. If either of those conditions does not hold, this test
 * will fail rather than silently passing.
 */
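/*
 * For reference, the page_idle flow this test builds on boils down to the
 * following userspace sketch (mirroring mark_page_idle() and is_page_idle()
 * below; error handling omitted):
 *
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
 *	uint64_t bits = 1ULL << (pfn % 64);
 *
 *	pwrite(fd, &bits, 8, 8 * (pfn / 64));	// mark the page idle
 *	// ... let the guest access the page ...
 *	pread(fd, &bits, 8, 8 * (pfn / 64));	// re-read the idle bit
 *	// bit clear => the access was observed via access tracking
 */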
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"

/* Global variable used to synchronize all of the vCPU threads. */
static int iteration;

/* Defines what vCPU threads should do during a given iteration. */
static enum {
	/* Run the vCPU to access all its memory. */
	ITERATION_ACCESS_MEMORY,
	/* Mark the vCPU's memory idle in page_idle. */
	ITERATION_MARK_IDLE,
} iteration_work;

/* Set to true when vCPU threads should exit. */
static bool done;

/* The iteration that was last completed by each vCPU. */
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

/* Whether to overlap the regions of memory vCPUs access. */
static bool overlap_memory_access;

struct test_params {
	/* The backing source for the region of memory. */
	enum vm_mem_backing_src_type backing_src;

	/* The amount of memory to allocate for each vCPU. */
	uint64_t vcpu_memory_bytes;

	/* The number of vCPUs to create in the VM. */
	int vcpus;
};

static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
{
	uint64_t value;
	off_t offset = index * sizeof(value);

	TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
		    "pread from %s offset 0x%" PRIx64 " failed!",
		    filename, offset);

	return value;
}

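/*
 * Each 64-bit /proc/self/pagemap entry encodes (see
 * Documentation/admin-guide/mm/pagemap.rst): bit 63 = page present and, for
 * present pages, bits 0-54 = PFN. Note the kernel reports the PFN as 0 unless
 * the reader has CAP_SYS_ADMIN, which lookup_pfn() below turns into a skip.
 */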
#define PAGEMAP_PRESENT (1ULL << 63)
#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)

static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
{
	uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
	uint64_t entry;
	uint64_t pfn;

	entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
	if (!(entry & PAGEMAP_PRESENT))
		return 0;

	pfn = entry & PAGEMAP_PFN_MASK;
	if (!pfn) {
		print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
		exit(KSFT_SKIP);
	}

	return pfn;
}

static bool is_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);

	return !!((bits >> (pfn % 64)) & 1);
}

static void mark_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = 1ULL << (pfn % 64);

	TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
		    "Set page_idle bits for PFN 0x%" PRIx64, pfn);
}

static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
{
	uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;
	uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages;
	uint64_t page;
	uint64_t still_idle = 0;
	uint64_t no_pfn = 0;
	int page_idle_fd;
	int pagemap_fd;

	/* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
	if (overlap_memory_access && vcpu_id)
		return;

	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	TEST_ASSERT(page_idle_fd > 0, "Failed to open page_idle.");

	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");

	for (page = 0; page < pages; page++) {
		uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
		uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);

		if (!pfn) {
			no_pfn++;
			continue;
		}

		if (is_page_idle(page_idle_fd, pfn)) {
			still_idle++;
			continue;
		}

		mark_page_idle(page_idle_fd, pfn);
	}

	/*
	 * Assumption: Less than 1% of pages are going to be swapped out from
	 * under us during this test.
	 */
	TEST_ASSERT(no_pfn < pages / 100,
		    "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
		    vcpu_id, no_pfn, pages);

	/*
	 * Test that at least 90% of memory has been marked idle (the rest might
	 * not be marked idle because the pages have not yet made it to an LRU
	 * list or the translations are still cached in the TLB). 90% is
	 * arbitrary; high enough that we ensure most memory accesses went
	 * through access tracking but low enough as to not make the test too
	 * brittle over time and across architectures.
	 */
	TEST_ASSERT(still_idle < pages / 10,
		    "vCPU%d: Too many pages still idle (%" PRIu64 " out of %"
		    PRIu64 ").\n",
		    vcpu_id, still_idle, pages);

	close(page_idle_fd);
	close(pagemap_fd);
}

static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id,
			 uint64_t expected_ucall)
{
	struct ucall uc;
	uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc);

	TEST_ASSERT(expected_ucall == actual_ucall,
		    "Guest exited unexpectedly (expected ucall %" PRIu64
		    ", got %" PRIu64 ")",
		    expected_ucall, actual_ucall);
}

static bool spin_wait_for_next_iteration(int *current_iteration)
{
	int last_iteration = *current_iteration;

	do {
		if (READ_ONCE(done))
			return false;

		*current_iteration = READ_ONCE(iteration);
	} while (last_iteration == *current_iteration);

	return true;
}

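/*
 * Per-vCPU thread body. The main thread drives the test: run_iteration()
 * publishes the work type in iteration_work and then bumps the global
 * iteration counter; each vCPU thread spins until it observes the new
 * iteration, performs the work, and reports completion through
 * vcpu_last_completed_iteration[].
 */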
static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
{
	struct kvm_vm *vm = perf_test_args.vm;
	int vcpu_id = vcpu_args->vcpu_id;
	int current_iteration = 0;

	while (spin_wait_for_next_iteration(&current_iteration)) {
		switch (READ_ONCE(iteration_work)) {
		case ITERATION_ACCESS_MEMORY:
			vcpu_run(vm, vcpu_id);
			assert_ucall(vm, vcpu_id, UCALL_SYNC);
			break;
		case ITERATION_MARK_IDLE:
			mark_vcpu_memory_idle(vm, vcpu_id);
			break;
		}

		vcpu_last_completed_iteration[vcpu_id] = current_iteration;
	}
}

static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
{
	while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
	       target_iteration) {
		continue;
	}
}

/* The type of memory accesses to perform in the VM. */
enum access_type {
	ACCESS_READ,
	ACCESS_WRITE,
};

static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
{
	struct timespec ts_start;
	struct timespec ts_elapsed;
	int next_iteration;
	int vcpu_id;

	/* Kick off the vCPUs by incrementing iteration. */
	next_iteration = ++iteration;

	clock_gettime(CLOCK_MONOTONIC, &ts_start);

	/* Wait for all vCPUs to finish the iteration. */
	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
		spin_wait_for_vcpu(vcpu_id, next_iteration);

	ts_elapsed = timespec_elapsed(ts_start);
	pr_info("%-30s: %ld.%09lds\n",
		description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
}

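/*
 * Note: perf_test_set_wr_fract() controls the guest's write fraction; the
 * guest writes one out of every wr_fract pages and reads the rest, so 1 makes
 * every access a write while INT_MAX makes effectively every access a read.
 */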
static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
			  const char *description)
{
	perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
	iteration_work = ITERATION_ACCESS_MEMORY;
	run_iteration(vm, vcpus, description);
}

static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
{
	/*
	 * Even though this parallelizes the work across vCPUs, this is still a
	 * very slow operation because page_idle forces the test to mark one pfn
	 * at a time and the clear_young notifier serializes on the KVM MMU
	 * lock.
	 */
	pr_debug("Marking VM memory idle (slow)...\n");
	iteration_work = ITERATION_MARK_IDLE;
	run_iteration(vm, vcpus, "Mark memory idle");
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *params = arg;
	struct kvm_vm *vm;
	int vcpus = params->vcpus;

	vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, 1,
				 params->backing_src, !overlap_memory_access);

	perf_test_start_vcpu_threads(vcpus, vcpu_thread_main);

	pr_info("\n");
	access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");

	/* As a control, read and write to the populated memory first. */
	access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory");
	access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory");

	/* Repeat on memory that has been marked as idle. */
	mark_memory_idle(vm, vcpus);
	access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory");
	mark_memory_idle(vm, vcpus);
	access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");

	/* Set done to signal the vCPU threads to exit. */
	done = true;

	perf_test_join_vcpu_threads(vcpus);
	perf_test_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n",
	       name);
	puts("");
	printf(" -h: Display this help message.\n");
	guest_modes_help();
	printf(" -b: Specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU, e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -v: Specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	backing_src_help("-s");
	puts("");
	exit(0);
}

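/*
 * Example invocation (the argument values are illustrative, not defaults):
 *
 *	./access_tracking_perf_test -v 4 -b 1G -s anonymous_thp
 *
 * runs four vCPUs against 1G of THP-backed memory each and prints the elapsed
 * time of each access phase.
 */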
int main(int argc, char *argv[])
{
	struct test_params params = {
		.backing_src = DEFAULT_VM_MEM_SRC,
		.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
		.vcpus = 1,
	};
	int page_idle_fd;
	int opt;

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
		switch (opt) {
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'b':
			params.vcpu_memory_bytes = parse_size(optarg);
			break;
		case 'v':
			params.vcpus = atoi(optarg);
			break;
		case 'o':
			overlap_memory_access = true;
			break;
		case 's':
			params.backing_src = parse_backing_src_type(optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	if (page_idle_fd < 0) {
		print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
		exit(KSFT_SKIP);
	}
	close(page_idle_fd);

	for_each_guest_mode(run_test, &params);

	return 0;
}
    391}