cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc

commit 61d67ae0927eeade04171374a91a1adaa4d94a28
parent 434a6210041dd447acae38b95561a2d990efa153
Author: Louis Burda <quent.burda@gmail.com>
Date:   Wed,  2 Nov 2022 14:21:35 +0100

Outline aes-detection guest host setup

Diffstat:
A .gitmodules | 3 +++
M Makefile | 11 ++++++++---
M cachepc/cachepc.c | 6 ++++++
M cachepc/cachepc.h | 11 ++---------
M cachepc/kvm.c | 13 +++++++++----
M cachepc/mmu.c | 15 ++++++---------
M cachepc/uapi.h | 20 ++++++++++++--------
M cachepc/uspt.c | 13 +++++--------
M cachepc/uspt.h | 6 +-----
M test/access.c | 0
A test/aes-detect.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A test/aes-detect.h | 3 +++
A test/aes-detect_guest | 0
A test/aes-detect_guest.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A test/aes-detect_host | 0
A test/aes-detect_host.c | 527 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M test/eviction.c | 0
M test/kvm.c | 0
A test/libkcapi | 1 +
M test/sev-es.c | 0
M test/sev.c | 0
M test/sevstep.c | 4 ++--
22 files changed, 734 insertions(+), 48 deletions(-)
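
Editor's note: the commit wires up a guest/host pair for AES detection. The guest (test/aes-detect_guest.c) performs a fixed "sync" access pattern across all 64 L1 sets before each libkcapi AES encryption, and the host (test/aes-detect_host.c) tracks guest page accesses and reads per-set Prime+Probe counts for each fault. As orientation for the pattern code added in test/aes-detect.c below, here is a minimal sketch of the same idea written as a loop instead of the unrolled ACCESS_LINE() macros; the buffer and function names are illustrative, and the constants mirror the values this commit moves into cachepc/uapi.h.

#include <stddef.h>
#include <stdint.h>

/* mirror of the L1 geometry constants from cachepc/uapi.h */
#define L1_LINESIZE 64
#define L1_SETS 64

/* page-aligned buffer spanning exactly one way of the L1 (one line per set),
 * so offset n * L1_LINESIZE maps to L1 set n */
static uint8_t buf[L1_SETS * L1_LINESIZE] __attribute__((aligned(4096)));

/* same order as sync_access_pattern[] in test/aes-detect.c */
static const int pattern[L1_SETS] = {
	60, 13, 24, 19, 38, 17,  2, 12, 22, 46,  4, 61,  5, 14, 11, 35,
	45, 10, 49, 56, 27, 37, 63, 54, 55, 29, 48,  9, 16, 39, 20, 21,
	62,  0, 34,  8, 53, 42, 51, 50, 57,  7,  6, 33, 26, 40, 58,  1,
	44, 23, 25, 47, 15, 36,  3, 41, 52, 59, 18, 31, 28, 32, 30, 43
};

static void
do_access_pattern(void)
{
	size_t i;

	/* one load per cacheline; the host sees the resulting per-set
	 * footprint in its Prime+Probe counts */
	for (i = 0; i < L1_SETS; i++)
		asm volatile ("mov (%0), %%rbx" : :
			"r" (buf + pattern[i] * L1_LINESIZE) : "rbx");
}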

diff --git a/.gitmodules b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "guest/libkcapi"] + path = test/libkcapi + url = https://github.com/smuellerDD/libkcapi.git diff --git a/Makefile b/Makefile @@ -1,7 +1,10 @@ LINUX ?= /usr/src/linux PWD := $(shell pwd) -all: build test/eviction test/access test/kvm test/sev test/sev-es test/sevstep +TARGETS = build test/eviction test/access test/kvm test/sev test/sev-es test/sevstep +TARGETS += test/aes-detect_guest test/aes-detect_host + +all: $(TARGETS) clean: $(MAKE) -C $(LINUX) SUBDIRS=arch/x86/kvm clean @@ -18,9 +21,11 @@ load: sudo insmod $(LINUX)/arch/x86/kvm/kvm.ko sudo insmod $(LINUX)/arch/x86/kvm/kvm-amd.ko -test/%: test/%.c cachepc/uapi.h - clang -o $@ $< -fsanitize=address -I . -Wunused-variable +test/aes-detect_%: test/aes-detect_%.c test/aes-detect.c + clang -o $@ $< -I . -I test/libkcapi/lib -L test/libkcapi/.libs -lkcapi -static +test/%: test/%.c cachepc/uapi.h + clang -o $@ $< -fsanitize=address -I . -I test -Wunused-variable update: git -C $(LINUX) diff 0aaa1e599bee256b3b15643bbb95e80ce7aa9be5 -G. > patch.diff diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c @@ -1,4 +1,5 @@ #include "cachepc.h" +#include "uapi.h" #include <linux/kernel.h> #include <linux/types.h> @@ -35,6 +36,11 @@ cachepc_verify_topology(void) uint32_t size; uint32_t sets; + if (PAGE_SIZE != L1_SETS * L1_LINESIZE) + pr_warn("Cachepc: System pagesize does not guarentee " + "virtual memory access will hit corresponding " + "physical cacheline, PAGE_SIZE != L1_SETS * L1_LINESIZE\n"); + /* REF: https://developer.amd.com/resources/developer-guides-manuals * (PPR 17H 31H, P.81) */ diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h @@ -10,16 +10,7 @@ #define PHYSICAL_ADDRESSING 1 #define L1_ADDRESSING VIRTUAL_ADDRESSING -#define L1_ASSOC 8 -#define L1_LINESIZE 64 -#define L1_SETS 64 -#define L1_SIZE (L1_SETS * L1_ASSOC * L1_LINESIZE) - #define L2_ADDRESSING PHYSICAL_ADDRESSING -#define L2_ASSOC 8 -#define L2_LINESIZE 64 -#define L2_SETS 1024 -#define L2_SIZE (L2_SETS * L2_ASSOC * L2_LINESIZE) #define CACHELINE_SIZE L1_LINESIZE #define CACHE_GROUP_SIZE (PAGE_SIZE / CACHELINE_SIZE) @@ -127,6 +118,8 @@ extern cpc_msrmt_t *cachepc_baseline; extern bool cachepc_baseline_measure; extern bool cachepc_baseline_active; +extern uint64_t cachepc_retinst; + extern cache_ctx *cachepc_ctx; extern cacheline *cachepc_ds; diff --git a/cachepc/kvm.c b/cachepc/kvm.c @@ -22,6 +22,9 @@ EXPORT_SYMBOL(cachepc_baseline); EXPORT_SYMBOL(cachepc_baseline_measure); EXPORT_SYMBOL(cachepc_baseline_active); +uint64_t cachepc_retinst = 0; +EXPORT_SYMBOL(cachepc_retinst); + cache_ctx *cachepc_ctx = NULL; cacheline *cachepc_ds = NULL; EXPORT_SYMBOL(cachepc_ctx); @@ -65,7 +68,7 @@ cachepc_kvm_prime_probe_test(void *p) arg = p; /* l2 data cache hit & miss */ - cachepc_init_pmc(0, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); + cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); lines = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size); BUG_ON(lines == NULL); @@ -108,7 +111,7 @@ cachepc_kvm_stream_hwpf_test(void *p) arg = p; /* l2 data cache hit & miss */ - cachepc_init_pmc(0, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); + cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); lines = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size); BUG_ON(lines == NULL); @@ -140,7 +143,7 @@ cachepc_kvm_single_access_test(void *p) uint32_t *arg; /* l2 data cache hit & miss */ - cachepc_init_pmc(0, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); + cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); 
arg = p; @@ -174,7 +177,7 @@ cachepc_kvm_single_eviction_test(void *p) arg = p; /* l2 data cache hit & miss */ - cachepc_init_pmc(0, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); + cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); WARN_ON(arg && *arg >= L1_SETS); if (arg && *arg >= L1_SETS) return; @@ -608,6 +611,8 @@ cachepc_kvm_init(void) cachepc_ctx = NULL; cachepc_ds = NULL; + cachepc_retinst = 0; + cachepc_msrmts_count = L1_SETS; cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(cpc_msrmt_t), GFP_KERNEL); BUG_ON(cachepc_msrmts == NULL); diff --git a/cachepc/mmu.c b/cachepc/mmu.c @@ -10,10 +10,9 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu, KVM_PAGE_TRACK_ACCESS, KVM_PAGE_TRACK_EXEC }; - uint64_t current_rip; bool was_tracked; - int have_rip, i; - int send_err; + int i; + int err; pr_warn("Sevstep: Got page fault (gfn:%llu)", fault->gfn); @@ -27,12 +26,10 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu, } if (was_tracked) { - have_rip = false; - send_err = sevstep_uspt_send_and_block(fault->gfn << PAGE_SHIFT, - fault->error_code, have_rip, current_rip); - if (send_err) { - printk("Sevstep: uspt_send_and_block failed with %d\n", - send_err); + err = sevstep_uspt_send_and_block(fault->gfn << PAGE_SHIFT, + fault->error_code); + if (err) { + printk("Sevstep: uspt_send_and_block failed (%d)\n", err); } } } diff --git a/cachepc/uapi.h b/cachepc/uapi.h @@ -9,6 +9,16 @@ #define CPC_L1MISS_PMC 0 #define CPC_RETINST_PMC 1 +#define L1_ASSOC 8 +#define L1_LINESIZE 64 +#define L1_SETS 64 +#define L1_SIZE (L1_SETS * L1_ASSOC * L1_LINESIZE) + +#define L2_ASSOC 8 +#define L2_LINESIZE 64 +#define L2_SETS 1024 +#define L2_SIZE (L2_SETS * L2_ASSOC * L2_LINESIZE) + #define CPC_MSRMT_MAX (~((cpc_msrmt_t) 0)) #define KVM_CPC_TEST_ACCESS _IOWR(KVMIO, 0x20, __u32) @@ -28,9 +38,6 @@ #define KVM_CPC_POLL_EVENT _IOWR(KVMIO, 0x34, struct cpc_track_event) #define KVM_CPC_ACK_EVENT _IOWR(KVMIO, 0x35, __u64) -#define CPC_USPT_POLL_EVENT_NO_EVENT 1000 -#define CPC_USPT_POLL_EVENT_GOT_EVENT 0 - enum kvm_page_track_mode { KVM_PAGE_TRACK_WRITE, KVM_PAGE_TRACK_ACCESS, @@ -49,11 +56,8 @@ struct cpc_track_event { __u64 id; /* filled automatically */ __u64 faulted_gpa; __u32 error_code; - __u8 have_rip_info; - __u64 rip; - __u64 ns_timestamp; - __u8 have_retired_instructions; - __u64 retired_instructions; + __u64 timestamp_ns; + __u64 retinst; }; typedef __u64 cpc_msrmt_t; diff --git a/cachepc/uspt.c b/cachepc/uspt.c @@ -40,8 +40,7 @@ sevstep_uspt_is_initialiized() } int -sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code, - bool have_rip, uint64_t rip) +sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code) { struct cpc_track_event event; ktime_t deadline; @@ -66,10 +65,8 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code, event.id = last_sent_eventid; event.faulted_gpa = faulted_gpa; event.error_code = error_code; - event.have_rip_info = have_rip; - event.rip = rip; - event.ns_timestamp = ktime_get_real_ns(); - event.have_retired_instructions = false; + event.timestamp_ns = ktime_get_real_ns(); + event.retinst = cachepc_retinst; have_event = true; sent_event = event; @@ -110,7 +107,7 @@ sevstep_uspt_handle_poll_event(struct cpc_track_event __user *event) read_lock(&event_lock); if (!have_event) { read_unlock(&event_lock); - return CPC_USPT_POLL_EVENT_NO_EVENT; + return -EAGAIN; } read_unlock(&event_lock); @@ -120,7 +117,7 @@ sevstep_uspt_handle_poll_event(struct cpc_track_event __user *event) sizeof(struct cpc_track_event)); have_event 
= false; } else { - err = CPC_USPT_POLL_EVENT_NO_EVENT; + err = -EAGAIN; } write_unlock(&event_lock); diff --git a/cachepc/uspt.h b/cachepc/uspt.h @@ -10,12 +10,8 @@ bool sevstep_uspt_is_initialiized(void); void sevstep_uspt_clear(void); -bool sevstep_uspt_should_get_rip(void); - -int sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code, - bool have_rip, uint64_t rip); +int sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code); int sevstep_uspt_is_event_done(uint64_t id); int sevstep_uspt_handle_poll_event(struct cpc_track_event *userpace_mem); - int sevstep_uspt_handle_ack_event_ioctl(uint64_t eventid); diff --git a/test/access.c b/test/access.c diff --git a/test/aes-detect.c b/test/aes-detect.c @@ -0,0 +1,85 @@ +#include "cachepc/uapi.h" + +#include <stdint.h> + +#define ACCESS_LINE(n) \ + asm volatile ("mov (%0), %%rbx" \ + : : "r"(((uint8_t*) L1) + n * L1_LINESIZE) : "rbx"); + +#define DO_ACCESS_PATTERN() \ + ACCESS_LINE(60) \ + ACCESS_LINE(13) \ + ACCESS_LINE(24) \ + ACCESS_LINE(19) \ + ACCESS_LINE(38) \ + ACCESS_LINE(17) \ + ACCESS_LINE( 2) \ + ACCESS_LINE(12) \ + ACCESS_LINE(22) \ + ACCESS_LINE(46) \ + ACCESS_LINE( 4) \ + ACCESS_LINE(61) \ + ACCESS_LINE( 5) \ + ACCESS_LINE(14) \ + ACCESS_LINE(11) \ + ACCESS_LINE(35) \ + ACCESS_LINE(45) \ + ACCESS_LINE(10) \ + ACCESS_LINE(49) \ + ACCESS_LINE(56) \ + ACCESS_LINE(27) \ + ACCESS_LINE(37) \ + ACCESS_LINE(63) \ + ACCESS_LINE(54) \ + ACCESS_LINE(55) \ + ACCESS_LINE(29) \ + ACCESS_LINE(48) \ + ACCESS_LINE( 9) \ + ACCESS_LINE(16) \ + ACCESS_LINE(39) \ + ACCESS_LINE(20) \ + ACCESS_LINE(21) \ + ACCESS_LINE(62) \ + ACCESS_LINE( 0) \ + ACCESS_LINE(34) \ + ACCESS_LINE( 8) \ + ACCESS_LINE(53) \ + ACCESS_LINE(42) \ + ACCESS_LINE(51) \ + ACCESS_LINE(50) \ + ACCESS_LINE(57) \ + ACCESS_LINE( 7) \ + ACCESS_LINE( 6) \ + ACCESS_LINE(33) \ + ACCESS_LINE(26) \ + ACCESS_LINE(40) \ + ACCESS_LINE(58) \ + ACCESS_LINE( 1) \ + ACCESS_LINE(44) \ + ACCESS_LINE(23) \ + ACCESS_LINE(25) \ + ACCESS_LINE(47) \ + ACCESS_LINE(15) \ + ACCESS_LINE(36) \ + ACCESS_LINE( 3) \ + ACCESS_LINE(41) \ + ACCESS_LINE(52) \ + ACCESS_LINE(59) \ + ACCESS_LINE(18) \ + ACCESS_LINE(31) \ + ACCESS_LINE(28) \ + ACCESS_LINE(32) \ + ACCESS_LINE(30) \ + ACCESS_LINE(43) + +/* corresponding physical memory will also be page-aligned, + * in our case PAGE_SIZE is the size of our L1 without associativity */ +#pragma DATA_ALIGN(L1, PAGE_SIZE) +uint8_t L1[L1_SETS * L1_LINESIZE]; + +int sync_access_pattern[] = { + 60, 13, 24, 19, 38, 17, 2, 12, 22, 46, 4, 61, 5, 14, 11, 35, + 45, 10, 49, 56, 27, 37, 63, 54, 55, 29, 48, 9, 16, 39, 20, 21, + 62, 0, 34, 8, 53, 42, 51, 50, 57, 7, 6, 33, 26, 40, 58, 1, + 44, 23, 25, 47, 15, 36, 3, 41, 52, 59, 18, 31, 28, 32, 30, 43 +}; diff --git a/test/aes-detect.h b/test/aes-detect.h @@ -0,0 +1,3 @@ +#pragma once + +extern int sync_access_pattern[]; diff --git a/test/aes-detect_guest b/test/aes-detect_guest Binary files differ. 
diff --git a/test/aes-detect_guest.c b/test/aes-detect_guest.c @@ -0,0 +1,64 @@ +#include "cachepc/uapi.h" +#include "kcapi.h" + +#include <sys/random.h> +#include <err.h> +#include <time.h> +#include <assert.h> +#include <unistd.h> +#include <string.h> +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> + +#include "aes-detect.c" + +static uint8_t key[16]; + +void +printhex(uint8_t *buf, size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + printf("%02X", buf[i]); + printf("\n"); +} + +int +main(int argc, const char **argv) +{ + struct kcapi_handle *kcapi; + uint8_t block[128]; + size_t n; + + kcapi = NULL; + if (kcapi_cipher_init(&kcapi, "ecb(aes)", 0)) + err(1, "kcapi init"); + + for (n = 0; n < 16; n++) + key[n] = (uint8_t) n; + + if (kcapi_cipher_setkey(kcapi, key, sizeof(key))) + err(1, "kcapi setkey"); + + while (1) { + printf("RUN %li\n", time(NULL)); + + memset(block, 0, sizeof(block)); + strncpy((char *) block, "Hello world", sizeof(block)); + + DO_ACCESS_PATTERN(); + + // printhex(block, sizeof(block)); + n = kcapi_cipher_encrypt(kcapi, block, sizeof(block), NULL, + block, sizeof(block), KCAPI_ACCESS_HEURISTIC); + if (n != sizeof(block)) + err(1, "encrypt"); + // printhex(block, sizeof(block)); + + sleep(1); + } + + kcapi_cipher_destroy(kcapi); +} diff --git a/test/aes-detect_host b/test/aes-detect_host Binary files differ. diff --git a/test/aes-detect_host.c b/test/aes-detect_host.c @@ -0,0 +1,527 @@ +#define _GNU_SOURCE + +#include "cachepc/uapi.h" + +#include <linux/psp-sev.h> +#include <linux/kvm.h> +#include <sys/syscall.h> +#include <sys/ioctl.h> +#include <sys/user.h> +#include <sys/wait.h> +#include <sys/ioctl.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#include <signal.h> +#include <dirent.h> +#include <assert.h> +#include <errno.h> +#include <err.h> +#include <fcntl.h> +#include <sched.h> +#include <string.h> +#include <stdbool.h> +#include <stdlib.h> +#include <stdint.h> +#include <stdio.h> +#include <stdarg.h> + +#define ARRLEN(x) (sizeof(x) / sizeof((x)[0])) +#define MIN(a,b) ((a) > (b) ? 
(b) : (a)) + +#define SAMPLE_COUNT 100 + +#define TARGET_CORE 2 +#define SECONDARY_CORE 3 + +#define TARGET_CACHE_LINESIZE 64 +#define TARGET_SET 15 + +struct kvm { + int vmfd, vcpufd; + void *mem; + size_t memsize; + struct kvm_run *run; +}; + +/* start and end for guest assembly */ +extern uint8_t __start_guest_with[]; +extern uint8_t __stop_guest_with[]; + +/* ioctl dev fds */ +static int kvm_dev, sev_dev, kvm_dev; +static int faultcnt; + +enum { + GSTATE_UNINIT, + GSTATE_LUPDATE, + GSTATE_LSECRET, + GSTATE_RUNNING, + GSTATE_SUPDATE, + GSTATE_RUPDATE, + GSTATE_SENT +}; + +const char *sev_fwerr_strs[] = { + "Success", + "Platform state is invalid", + "Guest state is invalid", + "Platform configuration is invalid", + "Buffer too small", + "Platform is already owned", + "Certificate is invalid", + "Policy is not allowed", + "Guest is not active", + "Invalid address", + "Bad signature", + "Bad measurement", + "Asid is already owned", + "Invalid ASID", + "WBINVD is required", + "DF_FLUSH is required", + "Guest handle is invalid", + "Invalid command", + "Guest is active", + "Hardware error", + "Hardware unsafe", + "Feature not supported", + "Invalid parameter", + "Out of resources", + "Integrity checks failed" +}; + +const char *sev_gstate_strs[] = { + "UNINIT", + "LUPDATE", + "LSECRET", + "RUNNING", + "SUPDATE", + "RUPDATE", + "SEND" +}; + +void +hexdump(void *data, int len) +{ + int i; + + for (i = 0; i < len; i++) { + if (i % 16 == 0 && i) + printf("\n"); + printf("%02X ", *(uint8_t *)(data + i)); + } + printf("\n"); +} + +__attribute__((section("guest_with"))) void +vm_guest_with(void) +{ + asm volatile("hlt"); + while (1) { + asm volatile("mov (%[v]), %%bl" + : : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET)); + } +} + +bool +pin_process(pid_t pid, int cpu, bool assert) +{ + cpu_set_t cpuset; + int ret; + + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + ret = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset); + if (ret < 0) { + if (assert) err(1, "sched_setaffinity"); + return false; + } + + return true; +} + +int +read_stat_core(pid_t pid) +{ + char path[256]; + char line[2048]; + FILE *file; + char *p; + int i, cpu; + + snprintf(path, sizeof(path), "/proc/%u/stat", pid); + file = fopen(path, "r"); + if (!file) return -1; + + if (!fgets(line, sizeof(line), file)) + err(1, "read stat"); + + p = line; + for (i = 0; i < 38 && (p = strchr(p, ' ')); i++) + p += 1; + + if (!p) errx(1, "stat format"); + cpu = atoi(p); + + fclose(file); + + return cpu; +} + +const char * +sev_fwerr_str(int code) +{ + if (code < 0 || code >= ARRLEN(sev_fwerr_strs)) + return "Unknown error"; + + return sev_fwerr_strs[code]; +} + +const char * +sev_gstate_str(int code) +{ + if (code < 0 || code >= ARRLEN(sev_gstate_strs)) + return "Unknown gstate"; + + return sev_gstate_strs[code]; +} + +int +sev_ioctl(int vmfd, int cmd, void *data, int *error) +{ + struct kvm_sev_cmd input; + int ret; + + memset(&input, 0, sizeof(input)); + input.id = cmd; + input.sev_fd = sev_dev; + input.data = (uintptr_t) data; + + ret = ioctl(vmfd, KVM_MEMORY_ENCRYPT_OP, &input); + if (error) *error = input.error; + + return ret; +} + +uint8_t * +sev_get_measure(int vmfd) +{ + struct kvm_sev_launch_measure msrmt; + int ret, fwerr; + uint8_t *data; + + memset(&msrmt, 0, sizeof(msrmt)); + ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr); + if (ret < 0 && fwerr != SEV_RET_INVALID_LEN) + errx(1, "LAUNCH_MEASURE: (%s) %s", strerror(errno), sev_fwerr_str(fwerr)); + + data = malloc(msrmt.len); + msrmt.uaddr = (uintptr_t) data; + + ret = 
sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr); + if (ret < 0) + errx(1, "LAUNCH_MEASURE: (%s) %s", strerror(errno), sev_fwerr_str(fwerr)); + + return data; +} + +uint8_t +sev_guest_state(int vmfd, uint32_t handle) +{ + struct kvm_sev_guest_status status; + int ret, fwerr; + + status.handle = handle; + ret = sev_ioctl(vmfd, KVM_SEV_GUEST_STATUS, &status, &fwerr); + if (ret < 0) { + errx(1, "KVM_SEV_GUEST_STATUS: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + } + + return status.state; +} + +void +sev_dbg_encrypt(int vmfd, void *dst, void *src, size_t size) +{ + struct kvm_sev_dbg enc; + int ret, fwerr; + + enc.src_uaddr = (uintptr_t) src; + enc.dst_uaddr = (uintptr_t) dst; + enc.len = size; + ret = sev_ioctl(vmfd, KVM_SEV_DBG_ENCRYPT, &enc, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_DBG_ENCRYPT: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); +} + +void +sev_dbg_decrypt(int vmfd, void *dst, void *src, size_t size) +{ + struct kvm_sev_dbg enc; + int ret, fwerr; + + enc.src_uaddr = (uintptr_t) src; + enc.dst_uaddr = (uintptr_t) dst; + enc.len = size; + ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); +} + +void +sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop) +{ + // REF: https://www.amd.com/system/files/TechDocs/55766_SEV-KM_API_Specification.pdf + struct kvm_sev_launch_update_data update; + struct kvm_sev_launch_start start; + struct kvm_userspace_memory_region region; + struct kvm_regs regs; + struct kvm_sregs sregs; + uint8_t *msrmt; + int ret, fwerr; + + /* Create a kvm instance */ + kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0); + if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM"); + + /* Allocate guest memory */ + kvm->memsize = ramsize; + kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (!kvm->mem) err(1, "Allocating guest memory"); + assert(code_stop - code_start <= kvm->memsize); + memcpy(kvm->mem, code_start, code_stop - code_start); + + /* Map it into the vm */ + memset(&region, 0, sizeof(region)); + region.slot = 0; + region.memory_size = kvm->memsize; + region.guest_phys_addr = 0; + region.userspace_addr = (uintptr_t) kvm->mem; + ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region); + if (ret < 0) err(1, "KVM_SET_USER_MEMORY_REGION"); + + /* Enable SEV for vm */ + ret = sev_ioctl(kvm->vmfd, KVM_SEV_ES_INIT, NULL, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_ES_INIT: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + + /* Create virtual cpu */ + kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0); + if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU"); + + /* Map the shared kvm_run structure and following data */ + ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL); + if (ret < 0) err(1, "KVM_GET_VCPU_MMAP_SIZE"); + if (ret < sizeof(struct kvm_run)) + errx(1, "KVM_GET_VCPU_MMAP_SIZE too small"); + kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE, + MAP_SHARED, kvm->vcpufd, 0); + if (!kvm->run) err(1, "mmap vcpu"); + + /* Initialize segment regs */ + memset(&sregs, 0, sizeof(sregs)); + ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs); + if (ret < 0) err(1, "KVM_GET_SREGS"); + sregs.cs.base = 0; + sregs.cs.selector = 0; + ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs); + if (ret < 0) err(1, "KVM_SET_SREGS"); + + /* Initialize rest of registers */ + memset(&regs, 0, sizeof(regs)); + regs.rip = 0; + regs.rsp = kvm->memsize - 8; + regs.rbp = kvm->memsize - 8; + ret = ioctl(kvm->vcpufd, 
KVM_SET_REGS, &regs); + if (ret < 0) err(1, "KVM_SET_REGS"); + + /* Generate encryption keys and set policy */ + memset(&start, 0, sizeof(start)); + start.handle = 0; + start.policy = 1 << 2; /* require ES */ + ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + + /* Prepare the vm memory (by encrypting it) */ + memset(&update, 0, sizeof(update)); + update.uaddr = (uintptr_t) kvm->mem; + update.len = ramsize; + ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + + /* Prepare the vm save area */ + ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + + /* Collect a measurement (necessary) */ + msrmt = sev_get_measure(kvm->vmfd); + free(msrmt); + + /* Finalize launch process */ + ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr); + if (ret < 0) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s", + strerror(errno), sev_fwerr_str(fwerr)); + ret = sev_guest_state(kvm->vmfd, start.handle); + if (ret != GSTATE_RUNNING) + errx(1, "Bad guest state: %s", sev_gstate_str(fwerr)); +} + +void +sev_kvm_deinit(struct kvm *kvm) +{ + close(kvm->vmfd); + close(kvm->vcpufd); + munmap(kvm->mem, kvm->memsize); +} + +uint16_t * +read_counts() +{ + uint16_t *counts; + int ret; + + counts = malloc(64 * sizeof(uint16_t)); + if (!counts) err(1, "malloc"); + ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts); + if (ret == -1) err(1, "ioctl READ_COUNTS"); + + return counts; +} + +void +print_counts(uint16_t *counts) +{ + int i; + + for (i = 0; i < 64; i++) { + if (i % 16 == 0 && i) + printf("\n"); + if (counts[i] == 1) + printf("\x1b[38;5;88m"); + else if (counts[i] > 1) + printf("\x1b[38;5;196m"); + printf("%2i ", i); + if (counts[i] > 0) + printf("\x1b[0m"); + } + printf("\n Target Set %i Count: %hu\n", TARGET_SET, counts[TARGET_SET]); + printf("\n"); +} + +void +runonce(struct kvm *kvm) +{ + struct kvm_regs regs; + int ret; + + ret = ioctl(kvm->vcpufd, KVM_RUN, NULL); + if (ret < 0) err(1, "KVM_RUN"); + printf("VMEXIT\n"); + + if (kvm->run->exit_reason == KVM_EXIT_MMIO) { + memset(&regs, 0, sizeof(regs)); + ret = ioctl(kvm->vcpufd, KVM_GET_REGS, &regs); + if (ret < 0) err(1, "KVM_GET_REGS"); + errx(1, "KVM_EXTI_MMIO: Victim %s at 0x%08llx: rip=0x%08llx\n", + kvm->run->mmio.is_write ? "write" : "read", + kvm->run->mmio.phys_addr, regs.rip); + } else if (kvm->run->exit_reason != KVM_EXIT_HLT) { + errx(1, "KVM died: %i\n", kvm->run->exit_reason); + } +} + +int +monitor(void) +{ + struct cpc_track_event event; + int ret; + + /* Get page fault info */ + ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event); + if (!ret) { + printf("Got page fault! 
%llu retired insts\n", + event.retinst); + faultcnt++; + + printf("Acking event %llu\n", event.id); + ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id); + if (ret == -1) err(1, "ioctl ACK_EVENT"); + } else if (errno != EAGAIN) { + perror("ioctl POLL_EVENT"); + return 1; + } + + return 0; +} + +int +main(int argc, const char **argv) +{ + struct kvm kvm_with_access; + uint64_t track_mode; + pid_t ppid, pid; + int ret; + + setvbuf(stdout, NULL, _IONBF, 0); + + pin_process(0, TARGET_CORE, true); + + sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC); + if (sev_dev < 0) err(1, "open /dev/sev"); + + kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC); + if (kvm_dev < 0) err(1, "open /dev/kvm"); + + /* Make sure we have the stable version of the API */ + ret = ioctl(kvm_dev, KVM_GET_API_VERSION, NULL); + if (ret < 0) err(1, "KVM_GET_API_VERSION"); + if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret); + + /* Setup needed performance counters */ + ret = ioctl(kvm_dev, KVM_CPC_SETUP_PMC, NULL); + if (ret < 0) err(1, "ioctl SETUP_PMC"); + + sev_kvm_init(&kvm_with_access, 64 * 64 * 8 * 2, + __start_guest_with, __stop_guest_with); + + /* One run to skip stack setup */ + ioctl(kvm_with_access.vcpufd, KVM_RUN, NULL); + + /* Page tracking init needs to happen after kvm + * init so main_kvm is set.. */ + + /* Reset previous tracking */ + ret = ioctl(kvm_dev, KVM_CPC_RESET_TRACKING, NULL); + if (ret == -1) err(1, "ioctl RESET_TRACKING"); + + /* Init page tracking */ + track_mode = KVM_PAGE_TRACK_ACCESS; + ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_mode); + if (ret == -1) err(1, "ioctl TRACK_ALL"); + + ppid = getpid(); + if ((pid = fork())) { + if (pid < 0) err(1, "fork"); + runonce(&kvm_with_access); + } else { + pin_process(0, SECONDARY_CORE, true); + faultcnt = 0; + while (faultcnt < SAMPLE_COUNT) { + if (monitor()) break; + } + kill(ppid, SIGTERM); + exit(0); + } + + sev_kvm_deinit(&kvm_with_access); + + close(kvm_dev); + close(sev_dev); +} + diff --git a/test/eviction.c b/test/eviction.c diff --git a/test/kvm.c b/test/kvm.c diff --git a/test/libkcapi b/test/libkcapi @@ -0,0 +1 @@ +Subproject commit c2af62dcc7a287f3c14f6aaec5724401c1ea470a diff --git a/test/sev-es.c b/test/sev-es.c diff --git a/test/sev.c b/test/sev.c diff --git a/test/sevstep.c b/test/sevstep.c @@ -446,13 +446,13 @@ monitor(void) ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event); if (!ret) { printf("Got page fault! %llu retired insts\n", - event.retired_instructions); + event.retinst); faultcnt++; printf("Acking event %llu\n", event.id); ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id); if (ret == -1) err(1, "ioctl ACK_EVENT"); - } else if (ret != CPC_USPT_POLL_EVENT_NO_EVENT) { + } else if (errno != EAGAIN) { perror("ioctl POLL_EVENT"); return 1; }
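
Editor's note on the uapi rework visible in the sevstep.c hunk above: KVM_CPC_POLL_EVENT now reports "no event pending" via errno EAGAIN instead of the removed CPC_USPT_POLL_EVENT_* constants, and struct cpc_track_event is slimmed down to id, faulted_gpa, error_code, timestamp_ns and retinst. A minimal host-side consumer following that convention might look like the sketch below; the helper name and the explicit KVM_CPC_READ_COUNTS call are illustrative (modeled on the monitor()/read_counts() code in test/aes-detect_host.c), not prescribed by the module.

#include "cachepc/uapi.h"

#include <sys/ioctl.h>
#include <errno.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative helper, not part of the commit;
 * returns 1 if an event was handled, 0 if none was pending */
static int
handle_one_event(int kvm_dev)
{
	struct cpc_track_event event;
	uint16_t counts[L1_SETS];

	if (ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event)) {
		if (errno == EAGAIN)
			return 0; /* nothing queued yet, poll again later */
		err(1, "KVM_CPC_POLL_EVENT");
	}

	printf("gpa=%llx error_code=%u retinst=%llu\n",
		event.faulted_gpa, event.error_code, event.retinst);

	/* per-set Prime+Probe counts gathered for this tracked access */
	if (ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts) == -1)
		err(1, "KVM_CPC_READ_COUNTS");
	printf("set 15 count: %hu\n", counts[15]); /* TARGET_SET in aes-detect_host.c */

	/* acking unblocks the vcpu waiting in sevstep_uspt_send_and_block() */
	if (ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id) == -1)
		err(1, "KVM_CPC_ACK_EVENT");

	return 1;
}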