cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc
Log | Files | Refs | Submodules | README | sfeed.txt

commit 252b11a01e061fd17821e53a41c8451a1d2c27bd
parent 864f5fa9d539734d823b3d0613dbf1a43beec334
Author: Louis Burda <quent.burda@gmail.com>
Date:   Tue, 10 Jan 2023 01:37:23 +0100

Begin ioctl and test-case overhaul

Diffstat:
M.gitignore | 1+
MMakefile | 22++++++++++++----------
AREADME | 45+++++++++++++++++++++++++++++++++++++++++++++
Mcachepc/cachepc.c | 86+++++++++----------------------------------------------------------------------
Mcachepc/cachepc.h | 55+++++--------------------------------------------------
Mcachepc/kvm.c | 309++++++++++++++++++++++++++-----------------------------------------------------
Mcachepc/uapi.h | 53+++++++++++++++++++++++------------------------------
Mtest/.gitignore | 18++++++------------
Dtest/access-detect_guest.c | 26--------------------------
Dtest/access.c | 30------------------------------
Dtest/aes-detect.c | 85-------------------------------------------------------------------------------
Dtest/aes-detect.h | 3---
Dtest/aes-detect_guest.c | 78------------------------------------------------------------------------------
Mtest/eviction.c | 20++++++++++----------
Dtest/fullstep.c | 605-------------------------------------------------------------------------------
Dtest/fulltrack | 0
Atest/kvm-eviction.c | 613+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Rtest/execstep.c -> test/kvm-pagestep.c | 0
Rtest/execstep.c -> test/kvm-step.c | 0
Atest/qemu-aes_guest.c | 74++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Rtest/aes-detect_host.c -> test/qemu-aes_host.c | 0
Atest/qemu-eviction_guest.c | 30++++++++++++++++++++++++++++++
Rtest/access-detect_host.c -> test/qemu-eviction_host.c | 0
Dtest/readsvme.c | 26--------------------------
Mutil/.gitignore | 1+
Mutil/debug.c | 9++++-----
Mutil/reset.c | 13++-----------
Autil/svme.c | 25+++++++++++++++++++++++++
28 files changed, 962 insertions(+), 1265 deletions(-)

diff --git a/.gitignore b/.gitignore @@ -6,4 +6,5 @@ push.sh *.o.d *.out *.swp +.cache compile_commands.json diff --git a/Makefile b/Makefile @@ -1,14 +1,15 @@ LINUX ?= linux -LOAD ?= $(shell ls /dev/cpu | wc -l) +CORES ?= $(shell ls /dev/cpu | wc -l) +LOAD ?= $(CORES) +JOBS ?= $(CORES) PWD := $(shell pwd) -BINS = test/eviction test/access test/kvm test/sev test/sev-es -BINS += test/fullstep test/execstep -BINS += test/aes-detect_guest test/aes-detect_host -BINS += test/access-detect_guest test/access-detect_host -BINS += test/readsvme util/debug util/reset +BINS = test/eviction test/kvm-eviction # test/kvm-execstep +# BINS += test/qemu-eviction_guest test/qemu-eviction_host +# BINS += test/qemu-aes_guest test/qemu-aes_host +BINS += util/svme util/debug util/reset -CFLAGS = -I . -I test -Wunused-variable -Wunknown-pragmas +CFLAGS = -I . -I linux/usr/include -I test -Wunused-variable -Wunknown-pragmas all: build $(BINS) @@ -28,13 +29,14 @@ host: git -C $(LINUX) checkout 0aaa1e5 $(MAKE) -C $(LINUX) oldconfig $(MAKE) -C $(LINUX) prepare - $(MAKE) -C $(LINUX) -l $(LOAD) + $(MAKE) -C $(LINUX) -j $(JOBS) -l $(LOAD) + $(MAKE) -C $(LINUX) -j $(JOBS) -l $(LOAD) headers git -C $(LINUX) checkout master git -C $(LINUX) stash pop build: $(LINUX)/arch/x86/kvm/cachepc - $(MAKE) -C $(LINUX) -l $(LOAD) M=arch/x86/kvm modules - $(MAKE) -C $(LINUX) -l $(LOAD) M=crypto modules + $(MAKE) -C $(LINUX) -j $(JOBS) -l $(LOAD) M=arch/x86/kvm modules + $(MAKE) -C $(LINUX) -j $(JOBS) -l $(LOAD) M=crypto modules load: sudo rmmod kvm_amd || true diff --git a/README b/README @@ -0,0 +1,45 @@ +CachePC +======= + +This repository contains proof-of-concept code for a novel cache side-channel +attack dubbed PRIME+COUNT that we demonstrate can be used to circumvent +AMD's latest secure virtualization solution SEV-SNP to access sensitive +guest information. 
+ +Several test-cases were used to verify parts of the exploit chain separately: + +test/eviction: + Demonstrate that performance counters & our setup are accurate enough + to detect a single eviction in L1 cache and infer its cache set + through PRIME+COUNT + +test/kvm-eviction: + Demonstrate that the cache set of a memory access instruction can be + inferred in non-SEV / SEV / SEV-ES / SEV-SNP -enabled vms respectively. + +test/kvm-step: + Demonstrate that SEV-SNP enabled vms can be single-stepped using local + APIC timers to interrupt the guest and increment the interrupt interval + while observing the RIP+RFLAGS ciphertext in the VMSA for changes to + detect that a single instruction has been executed. + +test/kvm-pagestep: + Demonstrate that a SEV-SNP enabled vm can be quickly single-stepped + and analyzed by tracking a single page at a time. This type + of tracking creates a page-wise profile of the guest's execution, + which can be used to infer what the guest is doing and to begin + fine-grained single-stepping. + +test/qemu-eviction: + Replicate result from kvm-eviction on a qemu-based vm running debian + using a specially crafted guest program to signal when measurement + should take place to infer the accessed set. + +test/qemu-aes: + Demonstrate that AES encryption keys can be leaked from a + modified qemu-based linux guest. + +test/qemu-poc: + Demonstrate that AES encryption keys can be leaked from an + unmodified qemu-based linux guest. + diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c @@ -7,15 +7,13 @@ #include <linux/delay.h> #include <linux/ioctl.h> -#define SET_MASK(SETS) (((((uintptr_t) SETS) * CACHELINE_SIZE) - 1) ^ (CACHELINE_SIZE - 1)) +#define SET_MASK(SETS) (((((uintptr_t) SETS) * L1_LINESIZE) - 1) ^ (L1_LINESIZE - 1)) #define REMOVE_PAGE_OFFSET(ptr) ((void *) (((uintptr_t) ptr) & PAGE_MASK)) #define MIN(a, b) ((a) < (b) ? 
(a) : (b)) -static void cl_insert(cacheline *last_cl, cacheline *new_cl); static void *remove_cache_set(cache_ctx *ctx, void *ptr); -static void *remove_cache_group_set(void *ptr); static cacheline *prepare_cache_set_ds(cache_ctx *ctx, uint32_t *set, uint32_t sets_len); static cacheline *build_cache_ds(cache_ctx *ctx, cacheline **cacheline_ptr_arr); @@ -167,22 +165,11 @@ cachepc_get_ctx(int cache_level) ctx = kzalloc(sizeof(cache_ctx), GFP_KERNEL); BUG_ON(ctx == NULL); - BUG_ON(cache_level != L1_CACHE); - if (cache_level == L1_CACHE) { - ctx->addressing = L1_ADDRESSING; - ctx->sets = L1_SETS; - ctx->associativity = L1_ASSOC; - } else if (cache_level == L2_CACHE) { - ctx->addressing = L2_ADDRESSING; - ctx->sets = L2_SETS; - ctx->associativity = L2_ASSOC; - } else { - return NULL; - } - + ctx->sets = L1_SETS; + ctx->associativity = L1_ASSOC; ctx->cache_level = cache_level; ctx->nr_of_cachelines = ctx->sets * ctx->associativity; - ctx->set_size = CACHELINE_SIZE * ctx->associativity; + ctx->set_size = L1_LINESIZE * ctx->associativity; ctx->cache_size = ctx->sets * ctx->set_size; return ctx; @@ -194,7 +181,6 @@ cachepc_release_ctx(cache_ctx *ctx) kfree(ctx); } - /* * Initialises the complete cache data structure for the given context */ @@ -221,23 +207,10 @@ cacheline * cachepc_prepare_victim(cache_ctx *ctx, uint32_t set) { cacheline *victim_set, *victim_cl; - cacheline *curr_cl, *next_cl; victim_set = prepare_cache_set_ds(ctx, &set, 1); victim_cl = victim_set; - // Free the other lines in the same set that are not used. - if (ctx->addressing == PHYSICAL_ADDRESSING) { - curr_cl = victim_cl->next; - do { - next_cl = curr_cl->next; - // Here, it is ok to free them directly, as every line in the same - // set is from a different page anyway. 
- kfree(remove_cache_group_set(curr_cl)); - curr_cl = next_cl; - } while(curr_cl != victim_cl); - } - return victim_cl; } @@ -255,7 +228,7 @@ cachepc_aligned_alloc(size_t alignment, size_t size) if (size % alignment != 0) size = size - (size % alignment) + alignment; p = kzalloc(size, GFP_KERNEL); - BUG_ON(((uintptr_t) p) % alignment != 0); + BUG_ON(!p || ((uintptr_t) p) % alignment != 0); return p; } @@ -269,7 +242,7 @@ cachepc_save_msrmts(cacheline *head) curr_cl = head; do { if (CL_IS_FIRST(curr_cl->flags)) { - BUG_ON(curr_cl->cache_set >= cachepc_msrmts_count); + BUG_ON(curr_cl->cache_set >= L1_SETS); cachepc_msrmts[curr_cl->cache_set] = curr_cl->count; } @@ -277,7 +250,7 @@ cachepc_save_msrmts(cacheline *head) } while (curr_cl != head); if (cachepc_baseline_active) { - for (i = 0; i < cachepc_msrmts_count; i++) { + for (i = 0; i < L1_SETS; i++) { if (!cachepc_baseline_active) WARN_ON(cachepc_msrmts[i] < cachepc_baseline[i]); cachepc_msrmts[i] -= cachepc_baseline[i]; @@ -306,7 +279,7 @@ cachepc_update_baseline(void) { size_t i; - for (i = 0; i < cachepc_msrmts_count; i++) { + for (i = 0; i < L1_SETS; i++) { cachepc_baseline[i] = MIN(cachepc_baseline[i], cachepc_msrmts[i]); } @@ -372,15 +345,6 @@ prepare_cache_set_ds(cache_ctx *ctx, uint32_t *sets, uint32_t sets_len) last_cl_in_sets[curr_cl->cache_set] = curr_cl; } - if (ctx->addressing == PHYSICAL_ADDRESSING && !is_in_arr( - curr_cl->cache_set / CACHE_GROUP_SIZE, - cache_groups, cache_groups_len)) - { - // Already free all unused blocks of the cache ds for physical - // addressing, because we loose their refs - cl_insert(to_del_cls, curr_cl); - to_del_cls = curr_cl; - } curr_cl = next_cl; } while (curr_cl != cache_ds); @@ -392,11 +356,6 @@ prepare_cache_set_ds(cache_ctx *ctx, uint32_t *sets, uint32_t sets_len) } cache_set_ds = first_cl_in_sets[sets[0]]; - // Free unused cache lines - if (ctx->addressing == PHYSICAL_ADDRESSING) { - cachepc_release_ds(ctx, to_del_cls); - } - kfree(first_cl_in_sets); 
kfree(last_cl_in_sets); kfree(cache_groups); @@ -404,34 +363,12 @@ prepare_cache_set_ds(cache_ctx *ctx, uint32_t *sets, uint32_t sets_len) return cache_set_ds; } -void -cl_insert(cacheline *last_cl, cacheline *new_cl) -{ - if (last_cl == NULL) { - // Adding the first entry is a special case - new_cl->next = new_cl; - new_cl->prev = new_cl; - } else { - new_cl->next = last_cl->next; - new_cl->prev = last_cl; - last_cl->next->prev = new_cl; - last_cl->next = new_cl; - } -} - void * remove_cache_set(cache_ctx *ctx, void *ptr) { return (void *) (((uintptr_t) ptr) & ~SET_MASK(ctx->sets)); } -void * -remove_cache_group_set(void *ptr) -{ - return (void *) (((uintptr_t) ptr) & ~SET_MASK(CACHE_GROUP_SIZE)); -} - - /* * Create a randomized doubly linked list with the following structure: * set A <--> set B <--> ... <--> set X <--> set A @@ -555,12 +492,7 @@ allocate_cache_ds(cache_ctx *ctx) cl_ptr_arr = kzalloc(ctx->nr_of_cachelines * sizeof(cacheline *), GFP_KERNEL); BUG_ON(cl_ptr_arr == NULL); - BUG_ON(ctx->addressing != VIRTUAL_ADDRESSING); - - // For virtual addressing, allocating a consecutive chunk of memory is enough cl_arr = cachepc_aligned_alloc(PAGE_SIZE, ctx->cache_size); - BUG_ON(cl_arr == NULL); - for (i = 0; i < ctx->nr_of_cachelines; ++i) { cl_ptr_arr[i] = cl_arr + i; cl_ptr_arr[i]->cache_set = get_virt_cache_set(ctx, cl_ptr_arr[i]); @@ -574,7 +506,7 @@ allocate_cache_ds(cache_ctx *ctx) uint16_t get_virt_cache_set(cache_ctx *ctx, void *ptr) { - return (uint16_t) ((((uintptr_t) ptr) & SET_MASK(ctx->sets)) / CACHELINE_SIZE); + return (uint16_t) ((((uintptr_t) ptr) & SET_MASK(ctx->sets)) / L1_LINESIZE); } void diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h @@ -9,14 +9,7 @@ #define L1_CACHE 0 #define L2_CACHE 1 -#define VIRTUAL_ADDRESSING 0 -#define PHYSICAL_ADDRESSING 1 - -#define L1_ADDRESSING VIRTUAL_ADDRESSING -#define L2_ADDRESSING PHYSICAL_ADDRESSING - -#define CACHELINE_SIZE L1_LINESIZE -#define CACHE_GROUP_SIZE (PAGE_SIZE / CACHELINE_SIZE) +#define 
CACHE_GROUP_SIZE (PAGE_SIZE / L1_LINESIZE) #define CACHEPC_GET_BIT(b, i) (((b) >> (i)) & 1) #define CACHEPC_SET_BIT(b, i) ((b) | (1 << (i))) @@ -54,7 +47,6 @@ typedef struct cache_ctx cache_ctx; struct cache_ctx { int cache_level; - int addressing; uint32_t sets; uint32_t associativity; @@ -85,7 +77,8 @@ struct cpc_fault { struct list_head list; }; -static_assert(sizeof(struct cacheline) == CACHELINE_SIZE, "Bad cache line struct size"); +static_assert(sizeof(struct cacheline) == L1_LINESIZE, + "Bad cache line struct size"); static_assert(CL_NEXT_OFFSET == 0 && CL_PREV_OFFSET == 8); bool cachepc_verify_topology(void); @@ -116,9 +109,6 @@ __attribute__((always_inline)) static inline cacheline *cachepc_prime(cacheline *head); __attribute__((always_inline)) -static inline cacheline *cachepc_prime_rev(cacheline *head); - -__attribute__((always_inline)) static inline cacheline *cachepc_probe(cacheline *head); __attribute__((always_inline)) @@ -132,10 +122,8 @@ static inline void cachepc_apic_oneshot(uint32_t interval); extern bool cachepc_debug; -extern cpc_msrmt_t *cachepc_msrmts; -extern size_t cachepc_msrmts_count; - -extern cpc_msrmt_t *cachepc_baseline; +extern uint8_t *cachepc_msrmts; +extern uint8_t *cachepc_baseline; extern bool cachepc_baseline_measure; extern bool cachepc_baseline_active; @@ -188,39 +176,6 @@ cachepc_prime(cacheline *head) return prev_cl; } -/* - * Same as prime, but in the reverse direction, i.e. the same direction that probe - * uses. This is beneficial for the following scenarios: - * - L1: - * - Trigger collision chain-reaction to amplify an evicted set (but this has - * the downside of more noisy measurements). - * - L2: - * - Always use this for L2, otherwise the first cache sets will still reside - * in L1 unless the victim filled L1 completely. 
In this case, an eviction - * has randomly (depending on where the cache set is placed in the randomised - * data structure) the following effect: - * A) An evicted set is L2_ACCESS_TIME - L1_ACCESS_TIME slower - * B) An evicted set is L3_ACCESS_TIME - L2_ACCESS_TIME slower - */ -cacheline * -cachepc_prime_rev(cacheline *head) -{ - cacheline *curr_cl; - - cachepc_mfence(); - cachepc_cpuid(); - - curr_cl = head; - do { - curr_cl = curr_cl->prev; - } while(curr_cl != head); - - cachepc_mfence(); - cachepc_cpuid(); - - return curr_cl->prev; -} - cacheline * cachepc_probe(cacheline *start_cl) { diff --git a/cachepc/kvm.c b/cachepc/kvm.c @@ -1,4 +1,5 @@ #include "kvm.h" +#include "asm-generic/errno.h" #include "uapi.h" #include "cachepc.h" #include "event.h" @@ -12,17 +13,16 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/sev.h> +#include <linux/types.h> #include <asm/uaccess.h> bool cachepc_debug = false; EXPORT_SYMBOL(cachepc_debug); -cpc_msrmt_t *cachepc_msrmts = NULL; -size_t cachepc_msrmts_count = 0; +uint8_t *cachepc_msrmts = NULL; EXPORT_SYMBOL(cachepc_msrmts); -EXPORT_SYMBOL(cachepc_msrmts_count); -cpc_msrmt_t *cachepc_baseline = NULL; +uint8_t *cachepc_baseline = NULL; bool cachepc_baseline_measure = false; bool cachepc_baseline_active = false; EXPORT_SYMBOL(cachepc_baseline); @@ -88,33 +88,39 @@ EXPORT_SYMBOL(cachepc_events_init); static void cachepc_kvm_prime_probe_test(void *p); static void cachepc_kvm_stream_hwpf_test(void *p); -static void cachepc_kvm_single_access_test(void *p); static void cachepc_kvm_single_eviction_test(void *p); static void cachepc_kvm_system_setup(void); -static int cachepc_kvm_test_access_ioctl(void __user *arg_user); +static int cachepc_kvm_reset_ioctl(void __user *arg_user); +static int cachepc_kvm_debug_ioctl(void __user *arg_user); + static int cachepc_kvm_test_eviction_ioctl(void __user *arg_user); -static int cachepc_kvm_init_pmc_ioctl(void __user *arg_user); -static int cachepc_kvm_read_pmc_ioctl(void 
__user *arg_user); + static int cachepc_kvm_read_counts_ioctl(void __user *arg_user); -static int cachepc_kvm_setup_pmc_ioctl(void __user *arg_user); -static int cachepc_kvm_measure_baseline_ioctl(void __user *arg_user); + +static int cachepc_kvm_reset_baseline_ioctl(void __user *arg_user); +static int cachepc_kvm_calc_baseline_ioctl(void __user *arg_user); static int cachepc_kvm_read_baseline_ioctl(void __user *arg_user); -static int cachepc_kvm_sub_baseline_ioctl(void __user *arg_user); +static int cachepc_kvm_apply_baseline_ioctl(void __user *arg_user); + static int cachepc_kvm_single_step_ioctl(void __user *arg_user); -static int cachepc_kvm_track_mode_ioctl(void __user *arg_user); +static int cachepc_kvm_vmsa_read_ioctl(void __user *arg_user); +static int cachepc_kvm_svme_read_ioctl(void __user *arg_user); + +static int cachepc_kvm_track_mode_ioctl(void __user *arg_user); static int cachepc_kvm_track_page_ioctl(void __user *arg_user); static int cachepc_kvm_track_all_ioctl(void __user *arg_user); static int cachepc_kvm_untrack_all_ioctl(void __user *arg_user); static int cachepc_kvm_reset_tracking_ioctl(void __user *arg_user); -static int cachepc_kvm_poll_event_ioctl(void __user *arg_user); -static int cachepc_kvm_ack_event_ioctl(void __user *arg_user); static int cachepc_kvm_track_range_start_ioctl(void __user *arg_user); static int cachepc_kvm_track_range_end_ioctl(void __user *arg_user); static int cachepc_kvm_track_exec_cur_ioctl(void __user *arg_user); +static int cachepc_kvm_poll_event_ioctl(void __user *arg_user); +static int cachepc_kvm_ack_event_ioctl(void __user *arg_user); + void cachepc_kvm_prime_probe_test(void *p) { @@ -130,7 +136,6 @@ cachepc_kvm_prime_probe_test(void *p) cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); lines = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size); - BUG_ON(lines == NULL); max = cachepc_ctx->nr_of_cachelines; @@ -150,7 +155,7 @@ cachepc_kvm_prime_probe_test(void *p) cl = cl->next; } while 
(cl != head); - printk(KERN_WARNING "CachePC: Prime-probe test done (%u vs. %u => %s)\n", + CPC_WARN("Prime-probe test done (%u vs. %u => %s)\n", count, 0, (count == 0) ? "passed" : "failed"); if (arg) *arg = (count == 0); @@ -164,8 +169,7 @@ cachepc_kvm_stream_hwpf_test(void *p) cacheline *lines; uint32_t count; uint32_t *arg; - uint32_t i, max; - bool pass; + uint32_t max; arg = p; @@ -173,21 +177,26 @@ cachepc_kvm_stream_hwpf_test(void *p) cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); lines = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size); - BUG_ON(lines == NULL); - - max = cachepc_ctx->nr_of_cachelines; + max = 10; count = 0; cachepc_prime(cachepc_ds); count -= cachepc_read_pmc(CPC_L1MISS_PMC); - for (i = 0; i < max; i++) - asm volatile ("mov (%0), %%rbx" : : "r"(lines + i) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 0) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 1) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 2) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 3) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 4) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 5) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 6) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 0) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 1) : "rbx"); + asm volatile ("mov (%0), %%rbx" : : "r"(lines + 2) : "rbx"); count += cachepc_read_pmc(CPC_L1MISS_PMC); - pass = (count == max) || (count == max + 1); /* +1 for pot. counter miss */ - printk(KERN_WARNING "CachePC: HWPF test done (%u vs. %u => %s)\n", - count, max, pass ? "passed" : "failed"); + CPC_WARN("HWPF test done (%u vs. %u => %s)\n", + count, max, count == max ? 
"passed" : "failed"); if (arg) *arg = (count == max); @@ -195,36 +204,6 @@ cachepc_kvm_stream_hwpf_test(void *p) } void -cachepc_kvm_single_access_test(void *p) -{ - cacheline *ptr; - uint64_t pre, post; - uint32_t *arg; - - /* l2 data cache hit & miss */ - cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); - - arg = p; - - WARN_ON(arg && *arg >= L1_SETS); - if (arg && *arg >= L1_SETS) return; - ptr = cachepc_prepare_victim(cachepc_ctx, arg ? *arg : 48); - - cachepc_prime(cachepc_ds); - - pre = cachepc_read_pmc(CPC_L1MISS_PMC); - cachepc_victim(ptr); - post = cachepc_read_pmc(CPC_L1MISS_PMC); - - printk(KERN_WARNING "CachePC: Single access test done (%llu vs %u => %s)", - post - pre, 1, (post - pre == 1) ? "passed" : "failed"); - - if (arg) *arg = post - pre; - - cachepc_release_victim(cachepc_ctx, ptr); -} - -void cachepc_kvm_single_eviction_test(void *p) { cacheline *head, *cl, *evicted; @@ -239,7 +218,7 @@ cachepc_kvm_single_eviction_test(void *p) cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); WARN_ON(arg && *arg >= L1_SETS); - if (arg && *arg >= L1_SETS) return; + if (arg && *arg >= L1_SETS) return; target = arg ? *arg : 48; ptr = cachepc_prepare_victim(cachepc_ctx, target); @@ -259,8 +238,9 @@ cachepc_kvm_single_eviction_test(void *p) cl = cl->next; } while (cl != head); - printk(KERN_WARNING "CachePC: Single eviction test done (%u vs %u => %s)\n", - count, 1, (count == 1 && evicted->cache_set == target) ? "passed" : "failed"); + CPC_WARN("Single eviction test done (%u vs %u => %s)\n", + count, 1, (count == 1 && evicted->cache_set == target) + ? 
"passed" : "failed"); cachepc_save_msrmts(head); if (arg) *arg = count; @@ -274,12 +254,16 @@ cachepc_kvm_system_setup(void) uint64_t reg_addr, val; uint32_t lo, hi; + /* NOTE: since most of these MSRs are poorly documented and some + * guessing work was involved, it is likely that one or more of + * these operations are not needed */ + /* disable streaming store */ reg_addr = 0xc0011020; asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr)); val = (uint64_t) lo | ((uint64_t) hi << 32); val |= 1 << 13; - printk("CachePC: Disabling streaming store (MSR %08llX: %016llX)\n", + CPC_WARN("Disabling streaming store (MSR %08llX: %016llX)\n", reg_addr, val); asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00)); @@ -288,7 +272,7 @@ cachepc_kvm_system_setup(void) asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr)); val = (uint64_t) lo | ((uint64_t) hi << 32); val |= 1 << 4; - printk("CachePC: Disabling speculative reloads (MSR %08llX: %016llX)\n", + CPC_WARN("Disabling speculative reloads (MSR %08llX: %016llX)\n", reg_addr, val); asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00)); @@ -297,7 +281,7 @@ cachepc_kvm_system_setup(void) asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr)); val = (uint64_t) lo | ((uint64_t) hi << 32); val |= 1 << 13; - printk("CachePC: Disabling DATA HWPF (MSR %08llX: %016llX)\n", + CPC_WARN("Disabling DATA HWPF (MSR %08llX: %016llX)\n", reg_addr, val); asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00)); @@ -306,7 +290,7 @@ cachepc_kvm_system_setup(void) asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr)); val = (uint64_t) lo | ((uint64_t) hi << 32); val |= 1 << 13; - printk("CachePC: Disabling INST HWPF (MSR %08llX: %016llX)\n", + CPC_WARN("Disabling INST HWPF (MSR %08llX: %016llX)\n", reg_addr, val); asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00)); @@ -333,96 +317,63 @@ cachepc_kvm_system_setup(void) } int -cachepc_kvm_test_access_ioctl(void __user *arg_user) 
+cachepc_kvm_reset_ioctl(void __user *arg_user) { - uint32_t u32; - int ret; - - if (!arg_user) return -EINVAL; + int cpu; - if (copy_from_user(&u32, arg_user, sizeof(u32))) - return -EFAULT; + if (arg_user) return -EINVAL; - ret = smp_call_function_single(2, - cachepc_kvm_single_access_test, &u32, true); - WARN_ON(ret != 0); + cpu = get_cpu(); - if (copy_to_user(arg_user, &u32, sizeof(u32))) + if (cpu != CPC_ISOLCPU) { + put_cpu(); return -EFAULT; + } - return 0; -} - -int -cachepc_kvm_test_eviction_ioctl(void __user *arg_user) -{ - uint32_t u32; - int ret; - - if (!arg_user) return -EINVAL; - - if (copy_from_user(&u32, arg_user, sizeof(u32))) - return -EFAULT; + /* L1 misses in host kernel */ + cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, + PMC_HOST, PMC_KERNEL); - ret = smp_call_function_single(2, - cachepc_kvm_single_eviction_test, &u32, true); - WARN_ON(ret != 0); + /* retired instructions in guest */ + cachepc_init_pmc(CPC_RETINST_PMC, 0xC0, 0x00, + PMC_GUEST, PMC_KERNEL | PMC_USER); - if (copy_to_user(arg_user, &u32, sizeof(u32))) - return -EFAULT; + put_cpu(); return 0; } int -cachepc_kvm_init_pmc_ioctl(void __user *arg_user) +cachepc_kvm_debug_ioctl(void __user *arg_user) { - uint8_t index, event_no, event_mask; - uint8_t host_guest, kernel_user; - uint32_t event; - int cpu; + uint32_t debug; if (!arg_user) return -EINVAL; - cpu = get_cpu(); - if (cpu != CPC_ISOLCPU) { - put_cpu(); - return -EFAULT; - } - - if (copy_from_user(&event, arg_user, sizeof(event))) { - put_cpu(); + if (copy_from_user(&debug, arg_user, sizeof(uint32_t))) return -EFAULT; - } - - index = (event & 0xFF000000) >> 24; - host_guest = (event & 0x00300000) >> 20; - kernel_user = (event & 0x00030000) >> 16; - event_no = (event & 0x0000FF00) >> 8; - event_mask = (event & 0x000000FF) >> 0; - cachepc_init_pmc(index, event_no, event_mask, - host_guest, kernel_user); - cachepc_reset_pmc(index); - - put_cpu(); + cachepc_debug = debug; return 0; } int -cachepc_kvm_read_pmc_ioctl(void __user 
*arg_user) +cachepc_kvm_test_eviction_ioctl(void __user *arg_user) { - uint64_t count; - uint32_t event; + uint32_t u32; + int ret; if (!arg_user) return -EINVAL; - if (copy_from_user(&event, arg_user, sizeof(event))) + if (copy_from_user(&u32, arg_user, sizeof(u32))) return -EFAULT; - count = cachepc_read_pmc(event); - if (copy_to_user(arg_user, &count, sizeof(count))) + ret = smp_call_function_single(2, + cachepc_kvm_single_eviction_test, &u32, true); + WARN_ON(ret != 0); + + if (copy_to_user(arg_user, &u32, sizeof(u32))) return -EFAULT; return 0; @@ -433,57 +384,34 @@ cachepc_kvm_read_counts_ioctl(void __user *arg_user) { if (!arg_user) return -EINVAL; - if (copy_to_user(arg_user, cachepc_msrmts, - cachepc_msrmts_count * sizeof(cpc_msrmt_t))) + if (copy_to_user(arg_user, cachepc_msrmts, L1_SETS)) return -EFAULT; return 0; } int -cachepc_kvm_setup_pmc_ioctl(void __user *arg_user) +cachepc_kvm_reset_baseline_ioctl(void __user *arg_user) { - int cpu; - - cpu = get_cpu(); + if (arg_user) return -EINVAL; - if (cpu != CPC_ISOLCPU) { - put_cpu(); - return -EFAULT; - } - - /* L1 Misses in Host Kernel */ - cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, - PMC_HOST, PMC_KERNEL); - - /* Retired Instructions in Guest */ - cachepc_init_pmc(CPC_RETINST_PMC, 0xC0, 0x00, - PMC_GUEST, PMC_KERNEL | PMC_USER); - - put_cpu(); + memset(cachepc_baseline, 0xff, L1_SETS); return 0; } int -cachepc_kvm_measure_baseline_ioctl(void __user *arg_user) +cachepc_kvm_calc_baseline_ioctl(void __user *arg_user) { uint32_t state; - size_t i; if (!arg_user) return -EINVAL; if (copy_from_user(&state, arg_user, sizeof(state))) return -EFAULT; - if (state) { - for (i = 0; i < cachepc_msrmts_count; i++) - cachepc_baseline[i] = CPC_MSRMT_MAX; - } - cachepc_baseline_measure = state; - return 0; } @@ -492,15 +420,14 @@ cachepc_kvm_read_baseline_ioctl(void __user *arg_user) { if (!arg_user) return -EINVAL; - if (copy_to_user(arg_user, cachepc_baseline, - cachepc_msrmts_count * sizeof(cpc_msrmt_t))) + if 
(copy_to_user(arg_user, cachepc_baseline, L1_SETS)) return -EFAULT; return 0; } int -cachepc_kvm_sub_baseline_ioctl(void __user *arg_user) +cachepc_kvm_apply_baseline_ioctl(void __user *arg_user) { uint32_t state; @@ -544,23 +471,18 @@ cachepc_kvm_track_page_ioctl(void __user *arg_user) struct cpc_track_config cfg; struct kvm_vcpu *vcpu; - if (!arg_user) return -EINVAL; + if (!main_vm || !arg_user) return -EINVAL; if (copy_from_user(&cfg, arg_user, sizeof(cfg))) return -EFAULT; - if (main_vm == NULL) - return -EFAULT; - if (cfg.mode < 0 || cfg.mode >= KVM_PAGE_TRACK_MAX) return -EINVAL; BUG_ON(xa_empty(&main_vm->vcpu_array)); vcpu = xa_load(&main_vm->vcpu_array, 0); - if (!cachepc_track_single(vcpu, cfg.gfn, cfg.mode)) { - printk("KVM_TRACK_PAGE: cachepc_track_single failed"); + if (!cachepc_track_single(vcpu, cfg.gfn, cfg.mode)) return -EFAULT; - } return 0; } @@ -607,34 +529,16 @@ cachepc_kvm_svme_read_ioctl(void __user *arg_user) } int -cachepc_kvm_debug_ioctl(void __user *arg_user) -{ - uint32_t debug; - - if (!arg_user) return -EINVAL; - - if (copy_from_user(&debug, arg_user, sizeof(uint32_t))) - return -EFAULT; - - cachepc_debug = debug; - - return 0; -} - -int cachepc_kvm_track_all_ioctl(void __user *arg_user) { struct kvm_vcpu *vcpu; uint32_t mode; - if (!arg_user) return -EINVAL; + if (!main_vm || !arg_user) return -EINVAL; if (copy_from_user(&mode, arg_user, sizeof(mode))) return -EFAULT; - if (main_vm == NULL) - return -EFAULT; - if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX) return -EINVAL; @@ -652,14 +556,11 @@ cachepc_kvm_untrack_all_ioctl(void __user *arg_user) struct kvm_vcpu *vcpu; uint32_t mode; - if (!arg_user) return -EINVAL; + if (!main_vm || !arg_user) return -EINVAL; if (copy_from_user(&mode, arg_user, sizeof(mode))) return -EFAULT; - if (main_vm == NULL) - return -EFAULT; - if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX) return -EINVAL; @@ -771,34 +672,30 @@ cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) arg_user = 
(void __user *)arg; switch (ioctl) { - case KVM_CPC_TEST_ACCESS: - return cachepc_kvm_test_access_ioctl(arg_user); + case KVM_CPC_RESET: + return cachepc_kvm_reset_ioctl(arg_user); + case KVM_CPC_DEBUG: + return cachepc_kvm_debug_ioctl(arg_user); case KVM_CPC_TEST_EVICTION: return cachepc_kvm_test_eviction_ioctl(arg_user); - case KVM_CPC_INIT_PMC: - return cachepc_kvm_init_pmc_ioctl(arg_user); - case KVM_CPC_READ_PMC: - return cachepc_kvm_read_pmc_ioctl(arg_user); case KVM_CPC_READ_COUNTS: return cachepc_kvm_read_counts_ioctl(arg_user); - case KVM_CPC_SETUP_PMC: - return cachepc_kvm_setup_pmc_ioctl(arg_user); - case KVM_CPC_MEASURE_BASELINE: - return cachepc_kvm_measure_baseline_ioctl(arg_user); + case KVM_CPC_RESET_BASELINE: + return cachepc_kvm_reset_baseline_ioctl(arg_user); case KVM_CPC_READ_BASELINE: return cachepc_kvm_read_baseline_ioctl(arg_user); - case KVM_CPC_SUB_BASELINE: - return cachepc_kvm_sub_baseline_ioctl(arg_user); + case KVM_CPC_CALC_BASELINE: + return cachepc_kvm_calc_baseline_ioctl(arg_user); + case KVM_CPC_APPLY_BASELINE: + return cachepc_kvm_apply_baseline_ioctl(arg_user); case KVM_CPC_SINGLE_STEP: return cachepc_kvm_single_step_ioctl(arg_user); - case KVM_CPC_TRACK_MODE: - return cachepc_kvm_track_mode_ioctl(arg_user); case KVM_CPC_VMSA_READ: return cachepc_kvm_vmsa_read_ioctl(arg_user); case KVM_CPC_SVME_READ: return cachepc_kvm_svme_read_ioctl(arg_user); - case KVM_CPC_DEBUG: - return cachepc_kvm_debug_ioctl(arg_user); + case KVM_CPC_TRACK_MODE: + return cachepc_kvm_track_mode_ioctl(arg_user); case KVM_CPC_TRACK_PAGE: return cachepc_kvm_track_page_ioctl(arg_user); case KVM_CPC_TRACK_ALL: @@ -829,7 +726,7 @@ cachepc_kvm_setup_test(void *p) cpu = get_cpu(); - pr_warn("CachePC: Running on core %i\n", cpu); + CPC_WARN("Running on core %i\n", cpu); if (cachepc_verify_topology()) goto exit; @@ -840,7 +737,6 @@ cachepc_kvm_setup_test(void *p) cachepc_kvm_system_setup(); cachepc_kvm_prime_probe_test(NULL); - cachepc_kvm_single_access_test(NULL); 
cachepc_kvm_single_eviction_test(NULL); cachepc_kvm_stream_hwpf_test(NULL); @@ -867,14 +763,13 @@ cachepc_kvm_init(void) INIT_LIST_HEAD(&cachepc_faults); - cachepc_msrmts_count = L1_SETS; - cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(cpc_msrmt_t), GFP_KERNEL); - BUG_ON(cachepc_msrmts == NULL); + cachepc_msrmts = kzalloc(L1_SETS, GFP_KERNEL); + BUG_ON(!cachepc_msrmts); cachepc_baseline_active = false; cachepc_baseline_measure = false; - cachepc_baseline = kzalloc(cachepc_msrmts_count * sizeof(cpc_msrmt_t), GFP_KERNEL); - BUG_ON(cachepc_baseline == NULL); + cachepc_baseline = kzalloc(L1_SETS, GFP_KERNEL); + BUG_ON(!cachepc_baseline); cachepc_events_reset(); diff --git a/cachepc/uapi.h b/cachepc/uapi.h @@ -19,48 +19,43 @@ #define L2_SETS 1024 #define L2_SIZE (L2_SETS * L2_ASSOC * L2_LINESIZE) -#define CPC_MSRMT_MAX (~((cpc_msrmt_t) 0)) - #define CPC_VMSA_MAGIC_ADDR ((void *) 0xC0FFEE) -#define KVM_HC_CPC_VMMCALL 0xC0FFEE00 -#define CPC_DO_VMMCALL(type, val) \ - asm volatile("vmmcall" : : "a" (KVM_HC_CPC_VMMCALL), \ +#define KVM_HC_CPC_VMMCALL_SIGNAL 0xC0FFEE00 +#define KVM_HC_CPC_VMMCALL_EXIT 0xC0FFEE01 +#define CPC_DO_VMMCALL(action, type, val) \ + asm volatile("vmmcall" : : "a" (KVM_HC_CPC_VMMCALL_ ## action), \ "b"(type), "c" (val) : "rdx") -#define KVM_CPC_TEST_ACCESS _IOWR(KVMIO, 0x20, __u32) -#define KVM_CPC_TEST_EVICTION _IOWR(KVMIO, 0x21, __u32) +#define KVM_CPC_RESET _IOWR(KVMIO, 0x20, __u32) +#define KVM_CPC_DEBUG _IOW(KVMIO, 0x21, __u32) -#define KVM_CPC_SETUP_PMC _IO(KVMIO, 0x22) -#define KVM_CPC_INIT_PMC _IOW(KVMIO, 0x23, __u32) -#define KVM_CPC_READ_PMC _IOWR(KVMIO, 0x24, __u32) +#define KVM_CPC_TEST_EVICTION _IOWR(KVMIO, 0x22, __u32) #define KVM_CPC_READ_COUNTS _IOR(KVMIO, 0x25, __u64) #define KVM_CPC_RESET_BASELINE _IO(KVMIO, 0x26) -#define KVM_CPC_MEASURE_BASELINE _IOW(KVMIO, 0x27, __u32) -#define KVM_CPC_READ_BASELINE _IOR(KVMIO, 0x28, __u64) -#define KVM_CPC_SUB_BASELINE _IOR(KVMIO, 0x29, __u32) - -#define KVM_CPC_SINGLE_STEP _IO(KVMIO, 
0x29) +#define KVM_CPC_READ_BASELINE _IOR(KVMIO, 0x27, __u64) +#define KVM_CPC_CALC_BASELINE _IOR(KVMIO, 0x28, __u32) +#define KVM_CPC_APPLY_BASELINE _IOR(KVMIO, 0x29, __u32) -#define KVM_CPC_TRACK_MODE _IOWR(KVMIO, 0x2A, __u32) +#define KVM_CPC_SINGLE_STEP _IO(KVMIO, 0x2A) -#define KVM_CPC_VMSA_READ _IOR(KVMIO, 0x2B, __u64) -#define KVM_CPC_SVME_READ _IOR(KVMIO, 0x2C, __u32) +#define KVM_CPC_VMSA_READ _IOR(KVMIO, 0x2C, __u64) +#define KVM_CPC_SVME_READ _IOR(KVMIO, 0x2D, __u32) -#define KVM_CPC_DEBUG _IOW(KVMIO, 0x2D, __u32) +#define KVM_CPC_TRACK_MODE _IOWR(KVMIO, 0x40, __u32) +#define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x41, struct cpc_track_config) +#define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x42, __u32) +#define KVM_CPC_UNTRACK_ALL _IOWR(KVMIO, 0x43, __u32) +#define KVM_CPC_RESET_TRACKING _IO(KVMIO, 0x44) +#define KVM_CPC_TRACK_RANGE_START _IOWR(KVMIO, 0x45, __u64) +#define KVM_CPC_TRACK_RANGE_END _IOWR(KVMIO, 0x46, __u64) +#define KVM_CPC_TRACK_EXEC_CUR _IOWR(KVMIO, 0x47, __u64) -#define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x30, struct cpc_track_config) -#define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x31, __u32) -#define KVM_CPC_UNTRACK_ALL _IOWR(KVMIO, 0x32, __u32) -#define KVM_CPC_RESET_TRACKING _IO(KVMIO, 0x33) -#define KVM_CPC_TRACK_RANGE_START _IOWR(KVMIO, 0x34, __u64) -#define KVM_CPC_TRACK_RANGE_END _IOWR(KVMIO, 0x35, __u64) -#define KVM_CPC_TRACK_EXEC_CUR _IOWR(KVMIO, 0x36, __u64) +#define KVM_CPC_POLL_EVENT _IOWR(KVMIO, 0x48, struct cpc_event) +#define KVM_CPC_ACK_EVENT _IOWR(KVMIO, 0x49, __u64) -#define KVM_CPC_POLL_EVENT _IOWR(KVMIO, 0x37, struct cpc_event) -#define KVM_CPC_ACK_EVENT _IOWR(KVMIO, 0x38, __u64) enum { CPC_EVENT_NONE, @@ -124,5 +119,3 @@ struct cpc_event { struct cpc_guest_event guest; }; }; - -typedef __u64 cpc_msrmt_t; diff --git a/test/.gitignore b/test/.gitignore @@ -1,13 +1,7 @@ -access eviction -kvm -sev -sev-es -sev-snp -fullstep -execstep -aes-detect_guest -aes-detect_host -access-detect_guest -access-detect_host -readsvme +kvm-eviction 
+kvm-step +kvm-pagestep +qemu-eviction +qemu-aes +qemu-poc diff --git a/test/access-detect_guest.c b/test/access-detect_guest.c @@ -1,26 +0,0 @@ -#include "cachepc/uapi.h" - -#include <err.h> -#include <unistd.h> -#include <stdint.h> -#include <string.h> -#include <stdio.h> -#include <stdlib.h> - -int -main(int argc, const char **argv) -{ - void *buf; - - buf = NULL; - if (posix_memalign(&buf, L1_LINESIZE * L1_SETS, L1_LINESIZE * L1_SETS)) - err(1, "memalign"); - memset(buf, 0, L1_LINESIZE * L1_SETS); - - while (1) { - printf("LOOP\n"); - CPC_DO_VMMCALL(CPC_GUEST_START_TRACK, 0); - *(uint8_t *)(buf + L1_LINESIZE * 15) = 1; - CPC_DO_VMMCALL(CPC_GUEST_STOP_TRACK, 0); - } -} diff --git a/test/access.c b/test/access.c @@ -1,30 +0,0 @@ -#include "cachepc/uapi.h" - -#include <sys/ioctl.h> -#include <stdlib.h> -#include <stdio.h> -#include <fcntl.h> -#include <stdint.h> -#include <err.h> -#include <fcntl.h> -#include <unistd.h> - -int -main(int argc, const char **argv) -{ - uint32_t arg; - int fd, ret; - size_t i; - - fd = open("/dev/kvm", O_RDONLY); - if (fd < 0) err(1, "open"); - - for (i = 0; i < 100; i++) { - arg = 48; /* target set */ - ret = ioctl(fd, KVM_CPC_TEST_ACCESS, &arg); - if (ret == -1) err(1, "ioctl TEST_ACCESS"); - if (arg != 1) errx(1, "access result (%i) != 1", arg); - } - - close(fd); -} diff --git a/test/aes-detect.c b/test/aes-detect.c @@ -1,85 +0,0 @@ -#include "cachepc/uapi.h" - -#include <stdint.h> - -#define ACCESS_LINE(n) \ - asm volatile ("mov (%0), %%rbx" \ - : : "r"(((uint8_t*) L1) + n * L1_LINESIZE) : "rbx"); - -#define DO_ACCESS_PATTERN() \ - ACCESS_LINE(60) \ - ACCESS_LINE(13) \ - ACCESS_LINE(24) \ - ACCESS_LINE(19) \ - ACCESS_LINE(38) \ - ACCESS_LINE(17) \ - ACCESS_LINE( 2) \ - ACCESS_LINE(12) \ - ACCESS_LINE(22) \ - ACCESS_LINE(46) \ - ACCESS_LINE( 4) \ - ACCESS_LINE(61) \ - ACCESS_LINE( 5) \ - ACCESS_LINE(14) \ - ACCESS_LINE(11) \ - ACCESS_LINE(35) \ - ACCESS_LINE(45) \ - ACCESS_LINE(10) \ - ACCESS_LINE(49) \ - ACCESS_LINE(56) \ - 
ACCESS_LINE(27) \ - ACCESS_LINE(37) \ - ACCESS_LINE(63) \ - ACCESS_LINE(54) \ - ACCESS_LINE(55) \ - ACCESS_LINE(29) \ - ACCESS_LINE(48) \ - ACCESS_LINE( 9) \ - ACCESS_LINE(16) \ - ACCESS_LINE(39) \ - ACCESS_LINE(20) \ - ACCESS_LINE(21) \ - ACCESS_LINE(62) \ - ACCESS_LINE( 0) \ - ACCESS_LINE(34) \ - ACCESS_LINE( 8) \ - ACCESS_LINE(53) \ - ACCESS_LINE(42) \ - ACCESS_LINE(51) \ - ACCESS_LINE(50) \ - ACCESS_LINE(57) \ - ACCESS_LINE( 7) \ - ACCESS_LINE( 6) \ - ACCESS_LINE(33) \ - ACCESS_LINE(26) \ - ACCESS_LINE(40) \ - ACCESS_LINE(58) \ - ACCESS_LINE( 1) \ - ACCESS_LINE(44) \ - ACCESS_LINE(23) \ - ACCESS_LINE(25) \ - ACCESS_LINE(47) \ - ACCESS_LINE(15) \ - ACCESS_LINE(36) \ - ACCESS_LINE( 3) \ - ACCESS_LINE(41) \ - ACCESS_LINE(52) \ - ACCESS_LINE(59) \ - ACCESS_LINE(18) \ - ACCESS_LINE(31) \ - ACCESS_LINE(28) \ - ACCESS_LINE(32) \ - ACCESS_LINE(30) \ - ACCESS_LINE(43) - -/* corresponding physical memory will also be page-aligned, - * in our case PAGE_SIZE is the size of our L1 without associativity */ -#pragma DATA_ALIGN(L1, PAGE_SIZE) -uint8_t L1[L1_SETS * L1_LINESIZE]; - -int sync_access_pattern[] = { - 60, 13, 24, 19, 38, 17, 2, 12, 22, 46, 4, 61, 5, 14, 11, 35, - 45, 10, 49, 56, 27, 37, 63, 54, 55, 29, 48, 9, 16, 39, 20, 21, - 62, 0, 34, 8, 53, 42, 51, 50, 57, 7, 6, 33, 26, 40, 58, 1, - 44, 23, 25, 47, 15, 36, 3, 41, 52, 59, 18, 31, 28, 32, 30, 43 -}; diff --git a/test/aes-detect.h b/test/aes-detect.h @@ -1,3 +0,0 @@ -#pragma once - -extern int sync_access_pattern[]; diff --git a/test/aes-detect_guest.c b/test/aes-detect_guest.c @@ -1,78 +0,0 @@ -#include "cachepc/uapi.h" -#include "kcapi.h" - -#include <sys/random.h> -#include <err.h> -#include <time.h> -#include <assert.h> -#include <unistd.h> -#include <string.h> -#include <stdio.h> -#include <stdint.h> -#include <stdlib.h> - -// #include "aes-detect.c" - -static uint8_t key[16]; - -void -printhex(uint8_t *buf, size_t size) -{ - size_t i; - - for (i = 0; i < size; i++) - printf("%02X", buf[i]); - printf("\n"); -} 
- -int -main(int argc, const char **argv) -{ - struct kcapi_handle *kcapi; - uint8_t block[128]; - uint8_t *buf; - size_t n; - - buf = NULL; - if (posix_memalign((void *)&buf, L1_LINESIZE * L1_SETS, L1_LINESIZE * L1_SETS)) - err(1, "memalign"); - memset(buf, 0, L1_LINESIZE * L1_SETS); - - while (1) { - CPC_DO_VMMCALL(CPC_GUEST_START_TRACK, 0); - buf[L1_LINESIZE * 5] += 1; - CPC_DO_VMMCALL(CPC_GUEST_STOP_TRACK, 0); - } - - return 0; - - kcapi = NULL; - if (kcapi_cipher_init(&kcapi, "ecb(aes)", 0)) - err(1, "kcapi init"); - - for (n = 0; n < 16; n++) - key[n] = (uint8_t) n; - - if (kcapi_cipher_setkey(kcapi, key, sizeof(key))) - err(1, "kcapi setkey"); - - while (1) { - printf("RUN %li\n", time(NULL)); - - memset(block, 0, sizeof(block)); - strncpy((char *) block, "Hello world", sizeof(block)); - - // DO_ACCESS_PATTERN(); - - printhex(block, sizeof(block)); - n = kcapi_cipher_encrypt(kcapi, block, sizeof(block), NULL, - block, sizeof(block), KCAPI_ACCESS_HEURISTIC); - if (n != sizeof(block)) - err(1, "encrypt"); - printhex(block, sizeof(block)); - - sleep(1); - } - - kcapi_cipher_destroy(kcapi); -} diff --git a/test/eviction.c b/test/eviction.c @@ -2,31 +2,31 @@ #include <sys/ioctl.h> #include <fcntl.h> -#include <stdint.h> #include <assert.h> #include <unistd.h> #include <err.h> -#include <stdlib.h> +#include <stdint.h> #include <stdio.h> +#include <stdlib.h> int main(int argc, const char **argv) { - cpc_msrmt_t counts[L1_SETS]; - uint32_t arg; + uint8_t counts[L1_SETS]; + uint32_t set; int i, fd, ret; fd = open("/dev/kvm", O_RDONLY); if (fd < 0) err(1, "open"); - arg = 48; - if (argc == 2) arg = atoi(argv[1]); + set = 48; + if (argc > 1) set = atoi(argv[1]); - ret = ioctl(fd, KVM_CPC_TEST_EVICTION, &arg); - if (ret == -1) err(1, "ioctl TEST_EVICTION"); + ret = ioctl(fd, KVM_CPC_TEST_EVICTION, &set); + if (ret == -1) err(1, "ioctl KVM_CPC_TEST_EVICTION"); ret = ioctl(fd, KVM_CPC_READ_COUNTS, counts); - if (ret == -1) err(1, "ioctl READ_COUNTS"); + if (ret == -1) 
err(1, "ioctl KVM_CPC_READ_COUNTS"); for (i = 0; i < 64; i++) { if (i % 16 == 0 && i) @@ -38,6 +38,6 @@ main(int argc, const char **argv) printf("\x1b[0m"); } printf("\n"); - + close(fd); } diff --git a/test/fullstep.c b/test/fullstep.c @@ -1,605 +0,0 @@ -#define _GNU_SOURCE - -#include "cachepc/uapi.h" - -#include <linux/psp-sev.h> -#include <linux/kvm.h> -#include <sys/syscall.h> -#include <sys/ioctl.h> -#include <sys/user.h> -#include <sys/wait.h> -#include <sys/ioctl.h> -#include <sys/mman.h> -#include <sys/stat.h> -#include <sys/types.h> -#include <unistd.h> -#include <signal.h> -#include <dirent.h> -#include <assert.h> -#include <errno.h> -#include <err.h> -#include <fcntl.h> -#include <sched.h> -#include <string.h> -#include <stdbool.h> -#include <stdlib.h> -#include <stdint.h> -#include <stdio.h> -#include <stdarg.h> - -#define ARRLEN(x) (sizeof(x) / sizeof((x)[0])) -#define MIN(a,b) ((a) > (b) ? (b) : (a)) - -#define TARGET_CORE 2 -#define SECONDARY_CORE 3 - -#define TARGET_SET 15 - -struct kvm { - int vmfd, vcpufd; - void *mem; - size_t memsize; - struct kvm_run *run; -}; - -/* start and end for guest assembly */ -extern uint8_t __start_guest_with[]; -extern uint8_t __stop_guest_with[]; - -/* ioctl dev fds */ -static int kvm_dev, sev_dev, kvm_dev; -static int faultcnt; - -enum { - GSTATE_INIT, - GSTATE_LAUNCH, - GSTATE_RUNNING, -}; - -const char *sev_fwerr_strs[] = { - [0x00] = "Success", - [0x01] = "Platform state is invalid", - [0x02] = "Guest state is invalid", - [0x03] = "Platform configuration is invalid", - [0x04] = "Buffer too small", - [0x05] = "Platform is already owned", - [0x06] = "Certificate is invalid", - [0x07] = "Request not allowed by policy", - [0x08] = "Guest is inactive", - [0x09] = "Invalid address", - [0x0A] = "Bad signature", - [0x0B] = "Bad measurement", - [0x0C] = "Asid is already owned", - [0x0D] = "Invalid ASID", - [0x0E] = "WBINVD is required", - [0x0F] = "DF_FLUSH is required", - [0x10] = "Guest handle is invalid", - [0x11] = 
"Invalid command", - [0x12] = "Guest is active", - [0x13] = "Hardware error", - [0x14] = "Hardware unsafe", - [0x15] = "Feature not supported", - [0x16] = "Invalid parameter", - [0x17] = "Out of resources", - [0x18] = "Integrity checks failed" -}; - -const char *sev_gstate_strs[] = { - "INIT", - "LAUNCH", - "RUNNING", -}; - -void -hexdump(void *data, int len) -{ - int i; - - for (i = 0; i < len; i++) { - if (i % 16 == 0 && i) - printf("\n"); - printf("%02X ", *(uint8_t *)(data + i)); - } - printf("\n"); -} - -__attribute__((section("guest_with"))) void -vm_guest_with(void) -{ - while (1) { - asm volatile("mov (%0), %%eax" : : - "r" (L1_LINESIZE * (L1_SETS * 3 + TARGET_SET)) : "rax"); - asm volatile("nop"); - asm volatile("mov (%0), %%eax" : : - "r" (L1_LINESIZE * (L1_SETS * 3 + TARGET_SET)) : "rax"); - } -} - -bool -pin_process(pid_t pid, int cpu, bool assert) -{ - cpu_set_t cpuset; - int ret; - - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); - ret = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset); - if (ret < 0) { - if (assert) err(1, "sched_setaffinity"); - return false; - } - - return true; -} - -int -read_stat_core(pid_t pid) -{ - char path[256]; - char line[2048]; - FILE *file; - char *p; - int i, cpu; - - snprintf(path, sizeof(path), "/proc/%u/stat", pid); - file = fopen(path, "r"); - if (!file) return -1; - - if (!fgets(line, sizeof(line), file)) - err(1, "read stat"); - - p = line; - for (i = 0; i < 38 && (p = strchr(p, ' ')); i++) - p += 1; - - if (!p) errx(1, "stat format"); - cpu = atoi(p); - - fclose(file); - - return cpu; -} - -const char * -sev_fwerr_str(int code) -{ - if (code < 0 || code >= ARRLEN(sev_fwerr_strs)) - return "Unknown error"; - - return sev_fwerr_strs[code]; -} - -const char * -sev_gstate_str(int code) -{ - if (code < 0 || code >= ARRLEN(sev_gstate_strs)) - return "Unknown gstate"; - - return sev_gstate_strs[code]; -} - -int -sev_ioctl(int vmfd, int cmd, void *data, int *error) -{ - struct kvm_sev_cmd input; - int ret; - - 
memset(&input, 0, sizeof(input)); - input.id = cmd; - input.sev_fd = sev_dev; - input.data = (uintptr_t) data; - - ret = ioctl(vmfd, KVM_MEMORY_ENCRYPT_OP, &input); - if (error) *error = input.error; - - return ret; -} - -uint8_t -snp_guest_state(int vmfd) -{ - struct kvm_sev_guest_status status; - int ret, fwerr; - - assert(false); /* ioctl not implemented yet */ - - ret = sev_ioctl(vmfd, KVM_SEV_GUEST_STATUS, &status, &fwerr); - if (ret < 0) errx(1, "KVM_SEV_GUEST_STATUS: (%s) %s", - strerror(errno), sev_fwerr_str(fwerr)); - - return status.state; -} - -void -snp_dbg_encrypt(int vmfd, void *dst, void *src, size_t size) -{ - struct kvm_sev_dbg enc; - int ret, fwerr; - - assert(false); /* ioctl not implemented yet */ - - memset(&enc, 0, sizeof(struct kvm_sev_dbg)); - enc.src_uaddr = (uintptr_t) src; - enc.dst_uaddr = (uintptr_t) dst; - enc.len = size; - - ret = sev_ioctl(vmfd, KVM_SEV_DBG_ENCRYPT, &enc, &fwerr); - if (ret < 0) errx(1, "KVM_SEV_DBG_ENCRYPT: (%s) %s", - strerror(errno), sev_fwerr_str(fwerr)); -} - -void -snp_dbg_decrypt(int vmfd, void *dst, void *src, size_t size) -{ - struct kvm_sev_dbg enc; - int ret, fwerr; - - // assert(false); /* ioctl not implemented yet */ - - memset(&enc, 0, sizeof(struct kvm_sev_dbg)); - enc.src_uaddr = (uintptr_t) src; - enc.dst_uaddr = (uintptr_t) dst; - enc.len = size; - - ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr); - if (ret < 0) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s", - strerror(errno), sev_fwerr_str(fwerr)); -} - -uint64_t -snp_dbg_rip(int vmfd) -{ - uint8_t vmsa[PAGE_SIZE]; - uint64_t rip; - - memset(vmsa, 0, PAGE_SIZE); - snp_dbg_decrypt(vmfd, vmsa, CPC_VMSA_MAGIC_ADDR, PAGE_SIZE); - - rip = *(uint64_t *)(vmsa + 0x178); - - return rip; -} - -void -snp_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop) -{ - // REF: https://www.amd.com/system/files/TechDocs/55766_SEV-KM_API_Specification.pdf - struct kvm_sev_snp_launch_update update; - struct kvm_sev_snp_launch_start start; - 
struct kvm_sev_snp_launch_finish finish; - struct kvm_snp_init init; - struct kvm_userspace_memory_region region; - struct kvm_enc_region enc_region; - struct kvm_regs regs; - struct kvm_sregs sregs; - int ret, fwerr; - - /* Create a kvm instance */ - kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0); - if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM"); - - /* Allocate guest memory */ - kvm->memsize = ramsize; - kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, -1, 0); - if (!kvm->mem) err(1, "Allocating guest memory"); - assert(code_stop - code_start <= kvm->memsize); - memcpy(kvm->mem, code_start, code_stop - code_start); - - /* Map it into the vm */ - memset(&region, 0, sizeof(region)); - region.slot = 0; - region.memory_size = kvm->memsize; - region.guest_phys_addr = 0; - region.userspace_addr = (uintptr_t) kvm->mem; - ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region); - if (ret < 0) err(1, "KVM_SET_USER_MEMORY_REGION"); - - /* Enable SEV for vm */ - memset(&init, 0, sizeof(init)); - ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_INIT, &init, &fwerr); - if (ret < 0) errx(1, "KVM_SEV_SNP_INIT: (%s) %s", - strerror(errno), sev_fwerr_str(fwerr)); - - /* Register memory region */ - memset(&enc_region, 0, sizeof(enc_region)); - enc_region.addr = (uintptr_t) kvm->mem; - enc_region.size = kvm->memsize; - ret = ioctl(kvm->vmfd, KVM_MEMORY_ENCRYPT_REG_REGION, &enc_region); - if (ret < 0) err(1, "KVM_MEMORY_ENCRYPT_REG_REGION"); - - /* Create virtual cpu */ - kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0); - if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU"); - - /* Map the shared kvm_run structure and following data */ - ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL); - if (ret < 0) err(1, "KVM_GET_VCPU_MMAP_SIZE"); - if (ret < sizeof(struct kvm_run)) - errx(1, "KVM_GET_VCPU_MMAP_SIZE too small"); - kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE, - MAP_SHARED, kvm->vcpufd, 0); - if (!kvm->run) err(1, "mmap vcpu"); - - /* Initialize 
segment regs */ - memset(&sregs, 0, sizeof(sregs)); - ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs); - if (ret < 0) err(1, "KVM_GET_SREGS"); - sregs.cs.base = 0; - sregs.cs.selector = 0; - ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs); - if (ret < 0) err(1, "KVM_SET_SREGS"); - - /* Initialize rest of registers */ - memset(&regs, 0, sizeof(regs)); - regs.rip = 0; - regs.rsp = kvm->memsize - L1_SETS * L1_LINESIZE - 8; - regs.rbp = kvm->memsize - L1_SETS * L1_LINESIZE - 8; - ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs); - if (ret < 0) err(1, "KVM_SET_REGS"); - - /* Generate encryption keys and set policy */ - memset(&start, 0, sizeof(start)); - start.policy = 1 << 17; /* must be set */ - start.policy |= 1 << 19; /* allow debug */ - start.policy |= 1 << 16; /* allow simultaneous multi-threading */ - ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_START, &start, &fwerr); - if (ret < 0) errx(1, "KVM_SEV_SNP_LAUNCH_START: (%s) %s", - strerror(errno), sev_fwerr_str(fwerr)); - - /* Prepare the vm memory */ - memset(&update, 0, sizeof(update)); - update.uaddr = (uintptr_t) kvm->mem; - update.len = ramsize; - update.start_gfn = 0; - update.page_type = KVM_SEV_SNP_PAGE_TYPE_NORMAL; - ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_UPDATE, &update, &fwerr); - if (ret < 0) errx(1, "KVM_SEV_SNP_LAUNCH_UPDATE: (%s) %s", - strerror(errno), sev_fwerr_str(fwerr)); - - /* Finalize launch process */ - memset(&finish, 0, sizeof(finish)); - ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_FINISH, &finish, &fwerr); - if (ret < 0) errx(1, "KVM_SEV_SNP_LAUNCH_FINISH: (%s) %s", - strerror(errno), sev_fwerr_str(fwerr)); -} - -void -snp_kvm_deinit(struct kvm *kvm) -{ - close(kvm->vmfd); - close(kvm->vcpufd); - munmap(kvm->mem, kvm->memsize); -} - -cpc_msrmt_t * -read_counts() -{ - cpc_msrmt_t *counts; - int i, ret; - - counts = malloc(L1_SETS * sizeof(cpc_msrmt_t)); - if (!counts) err(1, "malloc"); - - ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts); - if (ret == -1) err(1, "ioctl 
READ_COUNTS"); - - for (i = 0; i < L1_SETS; i++) { - if (counts[i] > 8) - errx(1, "Invalid counts set %i", i); - } - - return counts; -} - -void -print_counts(cpc_msrmt_t *counts) -{ - int i; - - for (i = 0; i < 64; i++) { - if (i % 16 == 0 && i) - printf("\n"); - if (counts[i] == 1) - printf("\x1b[38;5;88m"); - else if (counts[i] > 1) - printf("\x1b[38;5;196m"); - printf("%2i ", i); - if (counts[i] > 0) - printf("\x1b[0m"); - } - printf("\n"); -} - -void -print_counts_raw(cpc_msrmt_t *counts) -{ - int i; - - for (i = 0; i < 64; i++) { - if (i % 16 == 0 && i) - printf("\n"); - if (counts[i] == 1) - printf("\x1b[38;5;88m"); - else if (counts[i] > 1) - printf("\x1b[38;5;196m"); - printf("%02X ", (uint8_t) counts[i]); - if (counts[i] > 0) - printf("\x1b[0m"); - } - printf("\n"); -} - -void -runonce(struct kvm *kvm) -{ - int ret; - - ret = ioctl(kvm->vcpufd, KVM_RUN, NULL); - if (ret < 0) err(1, "KVM_RUN"); -} - -int -monitor(struct kvm *kvm, bool baseline) -{ - struct cpc_event event; - cpc_msrmt_t counts[64]; - uint64_t rip; - int ret, i; - - /* Get page fault info */ - ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event); - if (ret) { - if (errno == EAGAIN) - return 0; - perror("ioctl POLL_EVENT"); - return 1; - } - - if (event.type == CPC_EVENT_TRACK_STEP) { - ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts); - if (ret == -1) err(1, "ioctl READ_COUNTS"); - - if (!baseline) { - rip = snp_dbg_rip(kvm->vmfd); - printf("Event: cnt:%llu inst:%llu data:%llu retired:%llu rip:%lu\n", - event.step.fault_count, - event.step.fault_gfns[0], - event.step.fault_gfns[1], - event.step.retinst, rip); - print_counts(counts); - printf("\n"); - } - faultcnt++; - - for (i = 0; i < 64; i++) { - if (counts[i] > 8) { - errx(1, "Invalid count for set %i (%llu)", - i, counts[i]); - } - } - } - - ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id); - if (ret == -1) err(1, "ioctl ACK_EVENT"); - - return 0; -} - -int -main(int argc, const char **argv) -{ - struct kvm kvm_with_access; - uint64_t 
track_mode; - pid_t ppid, pid; - uint32_t arg; - struct cpc_event event; - cpc_msrmt_t baseline[64]; - int ret, i; - - setvbuf(stdout, NULL, _IONBF, 0); - - pin_process(0, TARGET_CORE, true); - - sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC); - if (sev_dev < 0) err(1, "open /dev/sev"); - - kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC); - if (kvm_dev < 0) err(1, "open /dev/kvm"); - - /* Make sure we have the stable version of the API */ - ret = ioctl(kvm_dev, KVM_GET_API_VERSION, NULL); - if (ret < 0) err(1, "KVM_GET_API_VERSION"); - if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret); - - /* Setup needed performance counters */ - ret = ioctl(kvm_dev, KVM_CPC_SETUP_PMC, NULL); - if (ret < 0) err(1, "ioctl SETUP_PMC"); - - snp_kvm_init(&kvm_with_access, L1_SIZE * 2, - __start_guest_with, __stop_guest_with); - - /* Page tracking init needs to happen after kvm - * init so main_kvm is set.. */ - - ret = ioctl(kvm_dev, KVM_CPC_RESET_TRACKING, NULL); - if (ret == -1) err(1, "ioctl RESET_TRACKING"); - - arg = CPC_TRACK_FULL; - ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg); - if (ret == -1) err(1, "ioctl TRACK_MODE"); - - /* Init page tracking */ - track_mode = KVM_PAGE_TRACK_ACCESS; - ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_mode); - if (ret == -1) err(1, "ioctl TRACK_ALL"); - - arg = true; - ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg); - if (ret == -1) err(1, "ioctl MEASURE_BASELINE"); - - ppid = getpid(); - if ((pid = fork())) { - if (pid < 0) err(1, "fork"); - - sleep(1); /* wait for child to pin other core */ - - printf("VMRUN\n"); - runonce(&kvm_with_access); - printf("VMRUN DONE\n"); - } else { - pin_process(0, SECONDARY_CORE, true); - printf("PINNED\n"); - - faultcnt = 0; - while (faultcnt < 300) { - if (monitor(&kvm_with_access, true)) break; - } - - do { - ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event); - if (ret == -1 && errno != EAGAIN) - err(1, "ioctl POLL_EVENT"); - } while (ret == -1 && errno == EAGAIN); - - arg = false; - 
ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg); - if (ret == -1) err(1, "ioctl MEASURE_BASELINE"); - - ret = ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline); - if (ret == -1) err(1, "ioctl READ_BASELINE"); - - printf("\n>>> BASELINE:\n"); - print_counts(baseline); - printf("\n"); - print_counts_raw(baseline); - printf("\n"); - - /* Check baseline for saturated sets */ - for (i = 0; i < 64; i++) { - if (baseline[i] >= 8) - errx(1, "!!! Baseline set %i full\n", i); - } - - arg = true; - ret = ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &arg); - if (ret == -1) err(1, "ioctl SUB_BASELINE"); - - ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id); - if (ret == -1) err(1, "ioctl ACK_EVENT"); - - faultcnt = 0; - while (faultcnt < 20) { - if (monitor(&kvm_with_access, false)) break; - } - - kill(ppid, SIGTERM); - exit(0); - } - - snp_kvm_deinit(&kvm_with_access); - - close(kvm_dev); - close(sev_dev); -} - diff --git a/test/fulltrack b/test/fulltrack Binary files differ. diff --git a/test/kvm-eviction.c b/test/kvm-eviction.c @@ -0,0 +1,613 @@ +#define _GNU_SOURCE + +#include "cachepc/uapi.h" + +#include <linux/psp-sev.h> +#include <linux/kvm.h> +#include <sys/syscall.h> +#include <sys/ioctl.h> +#include <sys/user.h> +#include <sys/wait.h> +#include <sys/ioctl.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#include <signal.h> +#include <dirent.h> +#include <assert.h> +#include <errno.h> +#include <err.h> +#include <fcntl.h> +#include <sched.h> +#include <string.h> +#include <stdbool.h> +#include <stdlib.h> +#include <stdint.h> +#include <stdio.h> +#include <stdarg.h> + +#define ARRLEN(x) (sizeof(x) / sizeof((x)[0])) +#define MIN(a,b) ((a) > (b) ? 
(b) : (a)) + +#define SAMPLE_COUNT 64 + +#define TARGET_CORE 2 +#define SECONDARY_CORE 3 + +#define TARGET_SET 15 + +enum { + WITH, + WITHOUT +}; + +struct kvm { + int fd, vmfd, vcpufd; + void *mem; + size_t memsize; + struct kvm_run *run; +}; + +/* start and end for guest assembly */ +extern uint8_t __start_guest_with[]; +extern uint8_t __stop_guest_with[]; +extern uint8_t __start_guest_without[]; +extern uint8_t __stop_guest_without[]; + +static const char *vmtype; + +static int kvm_dev, sev_dev; + +enum { + GSTATE_UNINIT, + GSTATE_LUPDATE, + GSTATE_LSECRET, + GSTATE_RUNNING, + GSTATE_SUPDATE, + GSTATE_RUPDATE, + GSTATE_SENT +}; + +const char *sev_fwerr_strs[] = { + "Success", + "Platform state is invalid", + "Guest state is invalid", + "Platform configuration is invalid", + "Buffer too small", + "Platform is already owned", + "Certificate is invalid", + "Policy is not allowed", + "Guest is not active", + "Invalid address", + "Bad signature", + "Bad measurement", + "Asid is already owned", + "Invalid ASID", + "WBINVD is required", + "DF_FLUSH is required", + "Guest handle is invalid", + "Invalid command", + "Guest is active", + "Hardware error", + "Hardware unsafe", + "Feature not supported", + "Invalid parameter", + "Out of resources", + "Integrity checks failed" +}; + +const char *sev_gstate_strs[] = { + "UNINIT", + "LUPDATE", + "LSECRET", + "RUNNING", + "SUPDATE", + "RUPDATE", + "SEND" +}; + +void +hexdump(void *data, int len) +{ + int i; + + for (i = 0; i < len; i++) { + if (i % 16 == 0 && i) + printf("\n"); + printf("%02X ", *(uint8_t *)(data + i)); + } + printf("\n"); +} + +__attribute__((section("guest_with"))) void +vm_guest_with(void) +{ + while (1) { + asm volatile("mov (%[v]), %%bl" + : : [v] "r" (L1_LINESIZE * TARGET_SET)); + CPC_DO_VMMCALL(EXIT, 0, 0); + } +} + +__attribute__((section("guest_without"))) void +vm_guest_without(void) +{ + while (1) { + CPC_DO_VMMCALL(EXIT, 0, 0); + } +} + +bool +pin_process(pid_t pid, int cpu, bool assert) +{ + 
cpu_set_t cpuset; + int ret; + + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + ret = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset); + if (ret < 0) { + if (assert) err(1, "sched_setaffinity"); + return false; + } + + return true; +} + +int +read_stat_core(pid_t pid) +{ + char path[256]; + char line[2048]; + FILE *file; + char *p; + int i, cpu; + + snprintf(path, sizeof(path), "/proc/%u/stat", pid); + file = fopen(path, "r"); + if (!file) return -1; + + if (!fgets(line, sizeof(line), file)) + err(1, "read stat"); + + p = line; + for (i = 0; i < 38 && (p = strchr(p, ' ')); i++) + p += 1; + + if (!p) errx(1, "stat format"); + cpu = atoi(p); + + fclose(file); + + return cpu; +} + +const char * +sev_fwerr_str(int code) +{ + if (code < 0 || code >= ARRLEN(sev_fwerr_strs)) + return "Unknown error"; + + return sev_fwerr_strs[code]; +} + +const char * +sev_gstate_str(int code) +{ + if (code < 0 || code >= ARRLEN(sev_gstate_strs)) + return "Unknown gstate"; + + return sev_gstate_strs[code]; +} + +int +sev_ioctl(int vmfd, int cmd, void *data, int *error) +{ + struct kvm_sev_cmd input; + int ret; + + memset(&input, 0, sizeof(input)); + input.id = cmd; + input.sev_fd = sev_dev; + input.data = (uintptr_t) data; + + ret = ioctl(vmfd, KVM_MEMORY_ENCRYPT_OP, &input); + if (error) *error = input.error; + + return ret; +} + +void +sev_get_measure(int vmfd) +{ + struct kvm_sev_launch_measure msrmt; + int ret, fwerr; + uint8_t *data; + + memset(&msrmt, 0, sizeof(msrmt)); + ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr); + if (ret < 0 && fwerr != SEV_RET_INVALID_LEN) + errx(1, "KVM_SEV_LAUNCH_MEASURE: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + + data = malloc(msrmt.len); + msrmt.uaddr = (uintptr_t) data; + + ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr); + if (ret < 0) + errx(1, "KVM_SEV_LAUNCH_MEASURE: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + + free(data); +} + +uint8_t +sev_guest_state(int vmfd, uint32_t handle) +{ + struct 
kvm_sev_guest_status status; + int ret, fwerr; + + status.handle = handle; + ret = sev_ioctl(vmfd, KVM_SEV_GUEST_STATUS, &status, &fwerr); + if (ret < 0) { + errx(1, "KVM_SEV_GUEST_STATUS: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + } + + return status.state; +} + +void +sev_debug_encrypt(int vmfd, void *src, void *dst, size_t size) +{ + struct kvm_sev_dbg enc; + int ret, fwerr; + + enc.src_uaddr = (uintptr_t) src; + enc.dst_uaddr = (uintptr_t) dst; + enc.len = size; + ret = sev_ioctl(vmfd, KVM_SEV_DBG_ENCRYPT, &enc, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_DBG_ENCRYPT: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); +} + +void +sev_debug_decrypt(int vmfd, void *src, void *dst, size_t size) +{ + struct kvm_sev_dbg enc; + int ret, fwerr; + + enc.src_uaddr = (uintptr_t) src; + enc.dst_uaddr = (uintptr_t) dst; + enc.len = size; + ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); +} + +void +kvm_init(struct kvm *kvm, size_t ramsize, + void *code_start, void *code_stop) +{ + struct kvm_userspace_memory_region region; + struct kvm_regs regs; + struct kvm_sregs sregs; + int ret; + + /* Create a kvm instance */ + kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0); + if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM"); + + /* Allocate guest memory */ + kvm->memsize = ramsize; + kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (!kvm->mem) err(1, "Allocating guest memory"); + assert(code_stop - code_start <= kvm->memsize); + memcpy(kvm->mem, code_start, code_stop - code_start); + + /* Map it into the vm */ + memset(&region, 0, sizeof(region)); + region.slot = 0; + region.memory_size = kvm->memsize; + region.guest_phys_addr = 0x0000; + region.userspace_addr = (uintptr_t) kvm->mem; + ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region); + if (ret < 0) err(1, "KVM_SET_USER_MEMORY_REGION"); + + /* Create virtual cpu 
core */ + kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0); + if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU"); + + /* Initialize segment regs */ + memset(&sregs, 0, sizeof(sregs)); + ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs); + if (ret < 0) err(1, "KVM_GET_SREGS"); + sregs.cs.base = 0; + sregs.cs.selector = 0; + ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs); + if (ret < 0) err(1, "KVM_SET_SREGS"); + + /* Initialize rest of registers */ + memset(&regs, 0, sizeof(regs)); + regs.rip = 0x0; + regs.rsp = kvm->memsize - 8; + regs.rbp = kvm->memsize - 8; + regs.rax = 0; + regs.rdx = 0; + regs.rflags = 0x2; + ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs); + if (ret < 0) err(1, "KVM_SET_REGS"); +} + +void +sev_kvm_init(struct kvm *kvm, size_t ramsize, + void *code_start, void *code_stop) +{ + struct kvm_userspace_memory_region region; + struct kvm_sev_launch_update_data update; + struct kvm_sev_launch_start start; + struct kvm_regs regs; + struct kvm_sregs sregs; + int ret, fwerr; + + /* Create a kvm instance */ + kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0); + if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM"); + + /* Allocate guest memory */ + kvm->memsize = ramsize; + kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (!kvm->mem) err(1, "Allocating guest memory"); + assert(code_stop - code_start <= kvm->memsize); + memcpy(kvm->mem, code_start, code_stop - code_start); + + /* Map it into the vm */ + memset(&region, 0, sizeof(region)); + region.slot = 0; + region.memory_size = kvm->memsize; + region.guest_phys_addr = 0x0000; + region.userspace_addr = (uintptr_t) kvm->mem; + ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region); + if (ret < 0) err(1, "KVM_SET_USER_MEMORY_REGION"); + + /* Enable SEV for vm */ + ret = sev_ioctl(kvm->vmfd, KVM_SEV_INIT, NULL, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_INIT: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + + /* Generate encryption keys and set policy */ + 
memset(&start, 0, sizeof(start)); + start.handle = 0; + start.policy = 0; + ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + + /* Prepare the vm memory (by encrypting it) */ + memset(&update, 0, sizeof(update)); + update.uaddr = (uintptr_t) kvm->mem; + update.len = ramsize; + ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + + /* Collect a measurement (necessary) */ + sev_get_measure(kvm->vmfd); + + /* Finalize launch process */ + ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + ret = sev_guest_state(kvm->vmfd, start.handle); + if (ret != GSTATE_RUNNING) + errx(1, "Bad guest state: %s", sev_gstate_str(fwerr)); + + /* Create virtual cpu core */ + kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0); + if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU"); + + /* Map the shared kvm_run structure and following data */ + ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL); + if (ret < 0) err(1, "KVM_GET_VCPU_MMAP_SIZE"); + if (ret < sizeof(struct kvm_run)) + errx(1, "KVM_GET_VCPU_MMAP_SIZE too small"); + kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE, + MAP_SHARED, kvm->vcpufd, 0); + if (!kvm->run) err(1, "mmap vcpu"); + + /* Initialize segment regs */ + memset(&sregs, 0, sizeof(sregs)); + ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs); + if (ret < 0) err(1, "KVM_GET_SREGS"); + sregs.cs.base = 0; + sregs.cs.selector = 0; + ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs); + if (ret < 0) err(1, "KVM_SET_SREGS"); + + /* Initialize rest of registers */ + memset(&regs, 0, sizeof(regs)); + regs.rip = 0x0; + regs.rsp = kvm->memsize - 8; + regs.rbp = kvm->memsize - 8; + regs.rax = 0; + regs.rdx = 0; + regs.rflags = 0x2; + ret = 
ioctl(kvm->vcpufd, KVM_SET_REGS, &regs); + if (ret < 0) err(1, "KVM_SET_REGS"); +} + +void +sev_es_kvm_init(struct kvm *kvm, size_t ramsize, + void *code_start, void *code_stop) +{ + errx(1, "Not implemented"); +} + +void +sev_snp_kvm_init(struct kvm *kvm, size_t ramsize, + void *code_start, void *code_stop) +{ + errx(1, "Not implemented"); +} + +void +vm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_end) +{ + if (!strcmp(vmtype, "kvm")) { + kvm_init(kvm, ramsize, code_start, code_end); + } else if (!strcmp(vmtype, "sev")) { + sev_kvm_init(kvm, ramsize, code_start, code_end); + } else if (!strcmp(vmtype, "sev-es")) { + sev_es_kvm_init(kvm, ramsize, code_start, code_end); + } else if (!strcmp(vmtype, "sev-snp")) { + sev_snp_kvm_init(kvm, ramsize, code_start, code_end); + } else { + errx(1, "invalid version"); + } +} + +void +vm_deinit(struct kvm *kvm) +{ + close(kvm->vmfd); + close(kvm->vcpufd); + munmap(kvm->mem, kvm->memsize); +} + +void +print_counts(uint8_t *counts) +{ + int i; + + for (i = 0; i < 64; i++) { + if (i % 16 == 0 && i) + printf("\n"); + if (counts[i] == 1) + printf("\x1b[38;5;88m"); + else if (counts[i] > 1) + printf("\x1b[38;5;196m"); + printf("%2i ", i); + if (counts[i] > 0) + printf("\x1b[0m"); + } + + printf("\nTarget Set %i Count: %u\n\n", + TARGET_SET, counts[TARGET_SET]); +} + +void +collect(struct kvm *kvm, uint8_t *counts) +{ + struct kvm_regs regs; + int ret; + + /* run vm twice, use count without initial stack setup */ + ret = ioctl(kvm->vcpufd, KVM_RUN, NULL); + if (ret < 0) err(1, "KVM_RUN"); + + if (kvm->run->exit_reason == KVM_EXIT_MMIO) { + memset(&regs, 0, sizeof(regs)); + ret = ioctl(kvm->vcpufd, KVM_GET_REGS, &regs); + if (ret < 0) err(1, "KVM_GET_REGS"); + errx(1, "Victim access OOB: %llu %08llx => %02X\n", + kvm->run->mmio.phys_addr, regs.rip, + ((uint8_t *)kvm->mem)[regs.rip]); + } else if (kvm->run->exit_reason != KVM_EXIT_IO) { + errx(1, "KVM died: %i\n", kvm->run->exit_reason); + } + + ret = 
ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts); + if (ret == -1) err(1, "ioctl KVM_CPC_READ_COUNTS"); +} + +int +main(int argc, const char **argv) +{ + struct kvm vms[2]; + uint8_t counts[2][SAMPLE_COUNT][64]; + uint8_t baseline[64]; + uint32_t calc; + int i, k, ret; + + vmtype = "kvm"; + if (argc > 1) vmtype = argv[1]; + if (strcmp(vmtype, "kvm") && strcmp(vmtype, "sev") + && strcmp(vmtype, "sev-es") + && strcmp(vmtype, "sev-snp")) + errx(1, "invalid version: %s", vmtype); + + setvbuf(stdout, NULL, _IONBF, 0); + + pin_process(0, TARGET_CORE, true); + + kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC); + if (kvm_dev < 0) err(1, "open /dev/kvm"); + + sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC); + if (sev_dev < 0) err(1, "open /dev/sev"); + + /* Make sure we have the stable version of the API */ + ret = ioctl(kvm_dev, KVM_GET_API_VERSION, NULL); + if (ret < 0) err(1, "KVM_GET_API_VERSION"); + if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret); + + /* Reset kernel module state */ + ret = ioctl(kvm_dev, KVM_CPC_RESET); + if (ret < 0) err(1, "ioctl KVM_CPC_RESET"); + + vm_init(&vms[WITH], 64 * 64 * 8 * 2, + __start_guest_with, __stop_guest_with); + vm_init(&vms[WITHOUT], 64 * 64 * 8 * 2, + __start_guest_without, __stop_guest_without); + + calc = true; + ret = ioctl(kvm_dev, KVM_CPC_CALC_BASELINE, &calc); + if (ret == -1) err(1, "ioctl KVM_CPC_CALC_BASELINE"); + + for (i = 0; i < SAMPLE_COUNT; i++) { + collect(&vms[WITH], counts[WITH][i]); + collect(&vms[WITHOUT], counts[WITHOUT][i]); + } + + calc = false; + ret = ioctl(kvm_dev, KVM_CPC_CALC_BASELINE, &calc); + if (ret == -1) err(1, "ioctl KVM_CPC_CALC_BASELINE"); + + ret = ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline); + if (ret == -1) err(1, "ioctl KVM_CPC_READ_BASELINE"); + + for (i = 0; i < SAMPLE_COUNT; i++) { + printf("Evictions with access:\n"); + print_counts(counts[WITH][i]); + + printf("Evictions without access:\n"); + print_counts(counts[WITHOUT][i]); + + for (k = 0; k < 64; k++) { + if 
(counts[WITH][i][k] >= 8) + warnx("with: Count OOB (%i, %i)", i, k); + if (baseline[k] > counts[WITH][i][k]) + warnx("with: Baseline OOB (%i, %i)", i, k); + counts[WITH][i][k] -= baseline[k]; + + if (counts[WITHOUT][i][k] >= 8) + warnx("without: Count OOB (%i, %i)", i, k); + if (baseline[k] > counts[WITHOUT][i][k]) + warnx("without: Baseline OOB (%i, %i)", i, k); + counts[WITHOUT][i][k] -= baseline[k]; + } + + if (!counts[WITH][i][TARGET_SET]) + warnx("with: Missing eviction in target set"); + } + + vm_deinit(&vms[WITH]); + vm_deinit(&vms[WITHOUT]); + + close(kvm_dev); + close(sev_dev); +} + diff --git a/test/execstep.c b/test/kvm-pagestep.c diff --git a/test/execstep.c b/test/kvm-step.c diff --git a/test/qemu-aes_guest.c b/test/qemu-aes_guest.c @@ -0,0 +1,74 @@ +#include "cachepc/uapi.h" +#include "kcapi.h" + +#include <sys/random.h> +#include <err.h> +#include <time.h> +#include <assert.h> +#include <unistd.h> +#include <string.h> +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> + +static uint8_t key[16]; + +void +printhex(uint8_t *buf, size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + printf("%02X", buf[i]); + printf("\n"); +} + +int +main(int argc, const char **argv) +{ + struct kcapi_handle *kcapi; + uint8_t block[128]; + uint8_t *buf; + size_t n; + + buf = NULL; + if (posix_memalign((void *)&buf, L1_LINESIZE * L1_SETS, L1_LINESIZE * L1_SETS)) + err(1, "memalign"); + memset(buf, 0, L1_LINESIZE * L1_SETS); + + while (1) { + CPC_DO_VMMCALL(CPC_GUEST_START_TRACK, 0); + buf[L1_LINESIZE * 5] += 1; + CPC_DO_VMMCALL(CPC_GUEST_STOP_TRACK, 0); + } + + return 0; + + kcapi = NULL; + if (kcapi_cipher_init(&kcapi, "ecb(aes)", 0)) + err(1, "kcapi init"); + + for (n = 0; n < 16; n++) + key[n] = (uint8_t) n; + + if (kcapi_cipher_setkey(kcapi, key, sizeof(key))) + err(1, "kcapi setkey"); + + while (1) { + printf("RUN %li\n", time(NULL)); + + memset(block, 0, sizeof(block)); + strncpy((char *) block, "Hello world", sizeof(block)); + + printhex(block, 
sizeof(block)); + n = kcapi_cipher_encrypt(kcapi, block, sizeof(block), NULL, + block, sizeof(block), KCAPI_ACCESS_HEURISTIC); + if (n != sizeof(block)) + err(1, "encrypt"); + printhex(block, sizeof(block)); + + sleep(1); + } + + kcapi_cipher_destroy(kcapi); +} diff --git a/test/aes-detect_host.c b/test/qemu-aes_host.c diff --git a/test/qemu-eviction_guest.c b/test/qemu-eviction_guest.c @@ -0,0 +1,30 @@ +#include "cachepc/uapi.h" + +#include <sys/time.h> +#include <sys/resource.h> +#include <err.h> +#include <unistd.h> +#include <stdint.h> +#include <string.h> +#include <stdio.h> +#include <stdlib.h> + +int +main(int argc, const char **argv) +{ + void *buf; + + buf = NULL; + if (posix_memalign(&buf, L1_LINESIZE * L1_SETS, L1_LINESIZE * L1_SETS)) + err(1, "memalign"); + memset(buf, 0, L1_LINESIZE * L1_SETS); + + setpriority(PRIO_PROCESS, 0, -20); + + while (1) { + printf("LOOP\n"); + CPC_DO_VMMCALL(CPC_GUEST_START_TRACK, 0); + *(uint8_t *)(buf + L1_LINESIZE * 15) = 1; + CPC_DO_VMMCALL(CPC_GUEST_STOP_TRACK, 0); + } +} diff --git a/test/access-detect_host.c b/test/qemu-eviction_host.c diff --git a/test/readsvme.c b/test/readsvme.c @@ -1,26 +0,0 @@ -#include "cachepc/uapi.h" - -#include <sys/ioctl.h> -#include <err.h> -#include <fcntl.h> -#include <unistd.h> -#include <stdint.h> -#include <stdio.h> -#include <stdlib.h> - -int -main(int argc, const char **argv) -{ - uint32_t svme; - int kvm_fd; - - kvm_fd = open("/dev/kvm", O_RDWR); - if (kvm_fd < 0) err(1, "open /dev/kvm"); - - if (ioctl(kvm_fd, KVM_CPC_SVME_READ, &svme)) - err(1, "ioctl SVME_READ"); - - printf("%u\n", svme); - - close(kvm_fd); -} diff --git a/util/.gitignore b/util/.gitignore @@ -1,2 +1,3 @@ debug +svme reset diff --git a/util/debug.c b/util/debug.c @@ -1,13 +1,12 @@ #include "cachepc/uapi.h" #include <sys/ioctl.h> -#include <stdlib.h> -#include <stdio.h> #include <fcntl.h> -#include <stdint.h> #include <err.h> #include <fcntl.h> #include <unistd.h> +#include <stdint.h> +#include <stdlib.h> int 
main(int argc, const char **argv) @@ -18,9 +17,9 @@ main(int argc, const char **argv) fd = open("/dev/kvm", O_RDONLY); if (fd < 0) err(1, "open"); - arg = argc > 1 ? atoi(argv[1]) : 1; + arg = argc > 1 ? atoi(argv[1]) : 1; ret = ioctl(fd, KVM_CPC_DEBUG, &arg); - if (ret == -1) err(1, "ioctl DEBUG"); + if (ret == -1) err(1, "ioctl KVM_CPC_DEBUG"); close(fd); } diff --git a/util/reset.c b/util/reset.c @@ -1,29 +1,20 @@ #include "cachepc/uapi.h" #include <sys/ioctl.h> -#include <stdlib.h> -#include <stdio.h> #include <fcntl.h> -#include <stdint.h> #include <err.h> -#include <fcntl.h> #include <unistd.h> int main(int argc, const char **argv) { - uint64_t arg; int fd, ret; fd = open("/dev/kvm", O_RDONLY); if (fd < 0) err(1, "open"); - ret = ioctl(fd, KVM_CPC_RESET_TRACKING); - if (ret) warn("ioctl RESET_TRACKING"); - - arg = 0; - ret = ioctl(fd, KVM_CPC_ACK_EVENT, &arg); - if (ret) warn("ioctl ACK_EVENT"); + ret = ioctl(fd, KVM_CPC_RESET); + if (ret) warn("ioctl KVM_CPC_RESET"); close(fd); } diff --git a/util/svme.c b/util/svme.c @@ -0,0 +1,25 @@ +#include "cachepc/uapi.h" + +#include <sys/ioctl.h> +#include <err.h> +#include <fcntl.h> +#include <unistd.h> +#include <stdint.h> +#include <stdio.h> + +int +main(int argc, const char **argv) +{ + uint32_t svme; + int kvm_fd; + + kvm_fd = open("/dev/kvm", O_RDWR); + if (kvm_fd < 0) err(1, "open /dev/kvm"); + + if (ioctl(kvm_fd, KVM_CPC_SVME_READ, &svme)) + err(1, "ioctl KVM_CPC_SVME_READ"); + + printf("%u\n", svme); + + close(kvm_fd); +}