| author | Louis Burda <quent.burda@gmail.com> | 2023-01-17 16:30:33 +0100 |
|---|---|---|
| committer | Louis Burda <quent.burda@gmail.com> | 2023-01-17 16:30:33 +0100 |
| commit | f2ea010b8180b4160d85c92e312971d0cd8a34d4 (patch) | |
| tree | 64e2598df2a0860187b1516e196ce7124de459a9 /test/kvm.c | |
| parent | 89785aa3c8d5d4007f856b14543a9b8aef31d661 (diff) | |
| download | cachepc-f2ea010b8180b4160d85c92e312971d0cd8a34d4.tar.gz cachepc-f2ea010b8180b4160d85c92e312971d0cd8a34d4.zip | |
Fixup kvm-eviction and refactor kvm-step
Diffstat (limited to 'test/kvm.c')
| -rw-r--r-- | test/kvm.c | 730 |
1 file changed, 501 insertions, 229 deletions
```diff
@@ -1,7 +1,10 @@
 #define _GNU_SOURCE
 
+#include "test/kvm.h"
+#include "test/util.h"
 #include "cachepc/uapi.h"
 
+#include <linux/psp-sev.h>
 #include <linux/kvm.h>
 #include <sys/syscall.h>
 #include <sys/ioctl.h>
@@ -21,309 +24,578 @@
 #include <sched.h>
 #include <string.h>
 #include <stdbool.h>
-#include <stdlib.h>
 #include <stdint.h>
 #include <stdio.h>
+#include <stdlib.h>
 
-#define ARRLEN(x) (sizeof(x) / sizeof((x)[0]))
-#define MIN(a,b) ((a) > (b) ? (b) : (a))
+int kvm_dev, sev_dev;
+
+const char *sev_fwerr_strs[] = {
+	[0x00] = "Success",
+	[0x01] = "Platform state is invalid",
+	[0x02] = "Guest state is invalid",
+	[0x03] = "Platform configuration is invalid",
+	[0x04] = "Buffer too small",
+	[0x05] = "Platform is already owned",
+	[0x06] = "Certificate is invalid",
+	[0x07] = "Request not allowed by policy",
+	[0x08] = "Guest is inactive",
+	[0x09] = "Invalid address",
+	[0x0A] = "Bad signature",
+	[0x0B] = "Bad measurement",
+	[0x0C] = "Asid is already owned",
+	[0x0D] = "Invalid ASID",
+	[0x0E] = "WBINVD is required",
+	[0x0F] = "DF_FLUSH is required",
+	[0x10] = "Guest handle is invalid",
+	[0x11] = "Invalid command",
+	[0x12] = "Guest is active",
+	[0x13] = "Hardware error",
+	[0x14] = "Hardware unsafe",
+	[0x15] = "Feature not supported",
+	[0x16] = "Invalid parameter",
+	[0x17] = "Out of resources",
+	[0x18] = "Integrity checks failed",
+	[0x19] = "RMP page size is incorrect",
+	[0x1A] = "RMP page state is incorrect",
+};
 
-#define SAMPLE_COUNT 100
+const char *sev_gstate_strs[] = {
+	"UNINIT",
+	"LUPDATE",
+	"LSECRET",
+	"RUNNING",
+	"SUPDATE",
+	"RUPDATE",
+	"SEND"
+};
 
-#define TARGET_CORE 2
-#define SECONDARY_CORE 3
+const char *
+sev_fwerr_str(int code)
+{
+	if (code < 0 || code >= ARRLEN(sev_fwerr_strs))
+		return "Unknown error";
 
-struct kvm {
-	int fd;
-	int vmfd;
-	int vcpufd;
-	void *mem;
-};
+	return sev_fwerr_strs[code];
+}
 
-/* start and end for guest assembly */
-extern uint8_t __start_guest_with[];
-extern uint8_t __stop_guest_with[];
-extern uint8_t __start_guest_without[];
-extern uint8_t __stop_guest_without[];
+const char *
+sev_gstate_str(int code)
+{
+	if (code < 0 || code >= ARRLEN(sev_gstate_strs))
+		return "Unknown gstate";
 
-static struct kvm kvm;
-static struct kvm_run *kvm_run;
+	return sev_gstate_strs[code];
+}
 
-static int kvm_fd;
+int
+sev_ioctl(int vmfd, int cmd, void *data, int *error)
+{
+	struct kvm_sev_cmd input;
+	int ret;
+
+	memset(&input, 0, sizeof(input));
+	input.id = cmd;
+	input.sev_fd = sev_dev;
+	input.data = (uintptr_t) data;
 
-#define TARGET_CACHE_LINESIZE 64
-#define TARGET_SET 15
+	ret = ioctl(vmfd, KVM_MEMORY_ENCRYPT_OP, &input);
+	if (error) *error = input.error;
 
-__attribute__((section("guest_with"))) void
-vm_guest_with(void)
+	return ret;
+}
+
+void
+sev_get_measure(int vmfd)
 {
-	while (1) {
-		asm volatile("mov (%[v]), %%bl"
-			: : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET));
-		asm volatile("out %%al, (%%dx)" : : );
-	}
+	struct kvm_sev_launch_measure msrmt;
+	int ret, fwerr;
+	uint8_t *data;
+
+	memset(&msrmt, 0, sizeof(msrmt));
+	ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
+	if (ret == -1 && fwerr != SEV_RET_INVALID_LEN)
+		errx(1, "KVM_SEV_LAUNCH_MEASURE: (%s) %s",
+			strerror(errno), sev_fwerr_str(fwerr));
+
+	data = malloc(msrmt.len);
+	msrmt.uaddr = (uintptr_t) data;
+
+	ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_LAUNCH_MEASURE: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	free(data);
 }
 
-__attribute__((section("guest_without"))) void
-vm_guest_without(void)
+uint8_t
+sev_guest_state(int vmfd, uint32_t handle)
 {
-	while (1) {
-		asm volatile("out %%al, (%%dx)" : : );
-	}
+	struct kvm_sev_guest_status status;
+	int ret, fwerr;
+
+	status.handle = handle;
+	ret = sev_ioctl(vmfd, KVM_SEV_GUEST_STATUS, &status, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_GUEST_STATUS: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	return status.state;
 }
 
-bool
-pin_process(pid_t pid, int cpu, bool assert)
+void
+sev_dbg_decrypt(int vmfd, void *src, void *dst, size_t size)
 {
-	cpu_set_t cpuset;
-	int status;
-
-	CPU_ZERO(&cpuset);
-	CPU_SET(cpu, &cpuset);
-	status = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset);
-	if (status < 0) {
-		if (assert) err(1, "sched_setaffinity");
-		return false;
-	}
-
-	return true;
+	struct kvm_sev_dbg enc;
+	int ret, fwerr;
+
+	enc.src_uaddr = (uintptr_t) src;
+	enc.dst_uaddr = (uintptr_t) dst;
+	enc.len = size;
+	ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
}
 
-int
-read_stat_core(pid_t pid)
+uint64_t
+sev_dbg_decrypt_rip(int vmfd)
 {
-	char path[256];
-	char line[2048];
-	FILE *file;
-	char *p;
-	int i, cpu;
+	uint8_t vmsa[PAGE_SIZE];
+	uint64_t rip;
 
-	snprintf(path, sizeof(path), "/proc/%u/stat", pid);
-	file = fopen(path, "r");
-	if (!file) return -1;
+	memset(vmsa, 0, PAGE_SIZE);
+	sev_dbg_decrypt(vmfd, vmsa, CPC_VMSA_MAGIC_ADDR, PAGE_SIZE);
 
-	if (!fgets(line, sizeof(line), file))
-		err(1, "read stat");
+	rip = *(uint64_t *)(vmsa + 0x178);
 
-	p = line;
-	for (i = 0; i < 38 && (p = strchr(p, ' ')); i++)
-		p += 1;
+	return rip;
+}
 
-	if (!p) errx(1, "stat format");
-	cpu = atoi(p);
+void
+snp_dbg_decrypt(int vmfd, void *src, void *dst, size_t size)
+{
+	struct kvm_sev_dbg enc;
+	int ret, fwerr;
 
-	fclose(file);
+	assert(src == CPC_VMSA_MAGIC_ADDR);
 
-	return cpu;
+	memset(&enc, 0, sizeof(struct kvm_sev_dbg));
+	enc.src_uaddr = (uintptr_t) src;
+	enc.dst_uaddr = (uintptr_t) dst;
+	enc.len = size;
+
+	ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr);
+	if (ret < 0) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+}
+
+uint64_t
+snp_dbg_decrypt_rip(int vmfd)
+{
+	uint8_t vmsa[PAGE_SIZE];
+	uint64_t rip;
+
+	memset(vmsa, 0, PAGE_SIZE);
+	snp_dbg_decrypt(vmfd, CPC_VMSA_MAGIC_ADDR, vmsa, PAGE_SIZE);
+
+	rip = *(uint64_t *)(vmsa + 0x178);
+
+	return rip;
 }
 
 void
-kvm_init(size_t ramsize, void *code_start, void *code_stop)
+kvm_init(struct kvm *kvm, size_t ramsize,
+	void *code_start, void *code_stop)
 {
 	struct kvm_userspace_memory_region region;
 	struct kvm_regs regs;
 	struct kvm_sregs sregs;
 	int ret;
 
-	kvm.fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
-	if (kvm.fd < 0) err(1, "/dev/kvm");
+	/* Create a kvm instance */
+	kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
+	if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
 
-	/* Make sure we have the stable version of the API */
-	ret = ioctl(kvm.fd, KVM_GET_API_VERSION, NULL);
-	if (ret == -1) err(1, "KVM_GET_API_VERSION");
-	if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
-
-	kvm.vmfd = ioctl(kvm.fd, KVM_CREATE_VM, 0);
-	if (kvm.vmfd < 0) err(1, "KVM_CREATE_VM");
-
-	/* Allocate one aligned page of guest memory to hold the code. */
-	kvm.mem = mmap(NULL, ramsize, PROT_READ | PROT_WRITE,
+	/* Allocate guest memory */
+	kvm->memsize = ramsize;
+	kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
 		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
-	if (!kvm.mem) err(1, "allocating guest memory");
-	assert(code_stop - code_start <= ramsize);
-	memcpy(kvm.mem, code_start, code_stop - code_start);
+	if (!kvm->mem) err(1, "Allocating guest memory");
+	assert(code_stop - code_start <= kvm->memsize);
+	memcpy(kvm->mem, code_start, code_stop - code_start);
 
-	/* Map it into vm memory */
+	/* Map it into the vm */
 	memset(&region, 0, sizeof(region));
 	region.slot = 0;
-	region.memory_size = ramsize;
+	region.memory_size = kvm->memsize;
 	region.guest_phys_addr = 0x0000;
-	region.userspace_addr = (uint64_t) kvm.mem;
-
-	ret = ioctl(kvm.vmfd, KVM_SET_USER_MEMORY_REGION, &region);
-	if (ret < 0) err(1, "KVM_SET_USER_MEMORY_REGION");
-
-	kvm.vcpufd = ioctl(kvm.vmfd, KVM_CREATE_VCPU, 0);
-	if (kvm.vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+	region.userspace_addr = (uintptr_t) kvm->mem;
+	ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region);
+	if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
 
-	/* Map the shared kvm_run structure and following data. */
-	ret = ioctl(kvm.fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
-	if (ret < 0) err(1, "KVM_GET_VCPU_MMAP_SIZE");
+	/* Create virtual cpu core */
+	kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
+	if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
 
+	/* Map the shared kvm_run structure and following data */
+	ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
+	if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
 	if (ret < sizeof(struct kvm_run))
 		errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
-	kvm_run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
-		MAP_SHARED, kvm.vcpufd, 0);
-	if (!kvm_run) err(1, "mmap vcpu");
+	kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
+		MAP_SHARED, kvm->vcpufd, 0);
+	if (!kvm->run) err(1, "mmap vcpu");
 
-	/* Initialize CS to point at 0, via a read-modify-write of sregs. */
+	/* Initialize segment regs */
 	memset(&sregs, 0, sizeof(sregs));
-	ret = ioctl(kvm.vcpufd, KVM_GET_SREGS, &sregs);
-	if (ret < 0) err(1, "KVM_GET_SREGS");
+	ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
+	if (ret == -1) err(1, "KVM_GET_SREGS");
 	sregs.cs.base = 0;
 	sregs.cs.selector = 0;
-	ret = ioctl(kvm.vcpufd, KVM_SET_SREGS, &sregs);
-	if (ret < 0) err(1, "KVM_SET_SREGS");
+	ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
+	if (ret == -1) err(1, "KVM_SET_SREGS");
 
-	/* Initialize registers: instruction pointer for our code, addends, and
-	 * initial flags required by x86 architecture. */
+	/* Initialize rest of registers */
 	memset(&regs, 0, sizeof(regs));
-	regs.rip = 0x0;
-	regs.rsp = ramsize - 1;
-	regs.rbp = ramsize - 1;
-	regs.rax = 0;
-	regs.rdx = 0;
+	regs.rip = 0;
+	regs.rsp = kvm->memsize - 8;
+	regs.rbp = kvm->memsize - 8;
 	regs.rflags = 0x2;
-	ret = ioctl(kvm.vcpufd, KVM_SET_REGS, &regs);
-	if (ret < 0) err(1, "KVM_SET_REGS");
+	ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
+	if (ret == -1) err(1, "KVM_SET_REGS");
 }
 
-cpc_msrmt_t *
-read_counts()
+void
+sev_kvm_init(struct kvm *kvm, size_t ramsize,
+	void *code_start, void *code_stop)
 {
-	cpc_msrmt_t *counts;
-	int ret;
+	struct kvm_userspace_memory_region region;
+	struct kvm_sev_launch_update_data update;
+	struct kvm_sev_launch_start start;
+	struct kvm_regs regs;
+	struct kvm_sregs sregs;
+	int ret, fwerr;
+
+	/* Create a kvm instance */
+	kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
+	if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
 
-	counts = malloc(64 * sizeof(cpc_msrmt_t));
-	if (!counts) err(1, "malloc");
-	ret = ioctl(kvm_fd, KVM_CPC_READ_COUNTS, counts);
-	if (ret == -1) err(1, "ioctl READ_COUNTS");
+	/* Allocate guest memory */
+	kvm->memsize = ramsize;
+	kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
+		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (!kvm->mem) err(1, "Allocating guest memory");
+	assert(code_stop - code_start <= kvm->memsize);
+	memcpy(kvm->mem, code_start, code_stop - code_start);
 
-	return counts;
+	/* Map it into the vm */
+	memset(&region, 0, sizeof(region));
+	region.slot = 0;
+	region.memory_size = kvm->memsize;
+	region.guest_phys_addr = 0;
+	region.userspace_addr = (uintptr_t) kvm->mem;
+	ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region);
+	if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
+
+	/* Enable SEV for vm */
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_INIT, NULL, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_INIT: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Create virtual cpu core */
+	kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
+	if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+
+	/* Map the shared kvm_run structure and following data */
+	ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
+	if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
+	if (ret < sizeof(struct kvm_run))
+		errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
+	kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
+		MAP_SHARED, kvm->vcpufd, 0);
+	if (!kvm->run) err(1, "mmap vcpu");
+
+	/* Initialize segment regs */
+	memset(&sregs, 0, sizeof(sregs));
+	ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
+	if (ret == -1) err(1, "KVM_GET_SREGS");
+	sregs.cs.base = 0;
+	sregs.cs.selector = 0;
+	ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
+	if (ret == -1) err(1, "KVM_SET_SREGS");
+
+	/* Initialize rest of registers */
+	memset(&regs, 0, sizeof(regs));
+	regs.rip = 0;
+	regs.rsp = kvm->memsize - 8;
+	regs.rbp = kvm->memsize - 8;
+	regs.rflags = 0x2;
+	ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
+	if (ret == -1) err(1, "KVM_SET_REGS");
+
+	/* Generate encryption keys and set policy */
+	memset(&start, 0, sizeof(start));
+	start.handle = 0;
+	start.policy = 0;
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Prepare the vm memory (by encrypting it) */
+	memset(&update, 0, sizeof(update));
+	update.uaddr = (uintptr_t) kvm->mem;
+	update.len = ramsize;
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Collect a measurement (necessary) */
+	sev_get_measure(kvm->vmfd);
+
+	/* Finalize launch process */
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	ret = sev_guest_state(kvm->vmfd, start.handle);
+	if (ret != GSTATE_RUNNING)
+		errx(1, "Bad guest state: %s", sev_gstate_str(fwerr));
 }
 
 void
-print_counts(cpc_msrmt_t *counts)
+sev_es_kvm_init(struct kvm *kvm, size_t ramsize,
+	void *code_start, void *code_stop)
 {
-	int i;
-
-	for (i = 0; i < 64; i++) {
-		if (i % 16 == 0 && i)
-			printf("\n");
-		if (counts[i] == 1)
-			printf("\x1b[38;5;88m");
-		else if (counts[i] > 1)
-			printf("\x1b[38;5;196m");
-		printf("%2i ", i);
-		if (counts[i] > 0)
-			printf("\x1b[0m");
-	}
-	printf("\n Target Set Count: %d %llu \n", TARGET_SET, counts[TARGET_SET]);
-	printf("\n");
+	struct kvm_userspace_memory_region region;
+	struct kvm_sev_launch_update_data update;
+	struct kvm_sev_launch_start start;
+	struct kvm_regs regs;
+	struct kvm_sregs sregs;
+	int ret, fwerr;
+
+	/* Create a kvm instance */
+	kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
+	if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
+
+	/* Allocate guest memory */
+	kvm->memsize = ramsize;
+	kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
+		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (!kvm->mem) err(1, "Allocating guest memory");
+	assert(code_stop - code_start <= kvm->memsize);
+	memcpy(kvm->mem, code_start, code_stop - code_start);
+
+	/* Map it into the vm */
+	memset(&region, 0, sizeof(region));
+	region.slot = 0;
+	region.memory_size = kvm->memsize;
+	region.guest_phys_addr = 0;
+	region.userspace_addr = (uintptr_t) kvm->mem;
+	ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region);
+	if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
+
+	/* Enable SEV for vm */
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_ES_INIT, NULL, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_ES_INIT: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Create virtual cpu core */
+	kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
+	if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+
+	/* Map the shared kvm_run structure and following data */
+	ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
+	if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
+	if (ret < sizeof(struct kvm_run))
+		errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
+	kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
+		MAP_SHARED, kvm->vcpufd, 0);
+	if (!kvm->run) err(1, "mmap vcpu");
+
+	/* Initialize segment regs */
+	memset(&sregs, 0, sizeof(sregs));
+	ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
+	if (ret == -1) err(1, "KVM_GET_SREGS");
+	sregs.cs.base = 0;
+	sregs.cs.selector = 0;
+	ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
+	if (ret == -1) err(1, "KVM_SET_SREGS");
+
+	/* Initialize rest of registers */
+	memset(&regs, 0, sizeof(regs));
+	regs.rip = 0;
+	regs.rsp = kvm->memsize - 8;
+	regs.rbp = kvm->memsize - 8;
+	regs.rflags = 0x2;
+	ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
+	if (ret == -1) err(1, "KVM_SET_REGS");
+
+	/* Generate encryption keys and set policy */
+	memset(&start, 0, sizeof(start));
+	start.handle = 0;
+	start.policy = 1 << 2; /* require SEV-ES */
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Prepare the vm memory (by encrypting it) */
+	memset(&update, 0, sizeof(update));
+	update.uaddr = (uintptr_t) kvm->mem;
+	update.len = ramsize;
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Prepare the vm save area */
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Collect a measurement (necessary) */
+	sev_get_measure(kvm->vmfd);
+
+	/* Finalize launch process */
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	ret = sev_guest_state(kvm->vmfd, start.handle);
+	if (ret != GSTATE_RUNNING)
+		errx(1, "Bad guest state: %s", sev_gstate_str(fwerr));
 }
 
-cpc_msrmt_t *
-collect(const char *prefix, void *code_start, void *code_stop)
+void
+sev_snp_kvm_init(struct kvm *kvm, size_t ramsize,
+	void *code_start, void *code_stop)
 {
+	struct kvm_sev_snp_launch_update update;
+	struct kvm_sev_snp_launch_start start;
+	struct kvm_sev_snp_launch_finish finish;
+	struct kvm_snp_init init;
+	struct kvm_userspace_memory_region region;
+	struct kvm_enc_region enc_region;
+	struct kvm_sregs sregs;
 	struct kvm_regs regs;
-	cpc_msrmt_t *counts;
-	int ret;
-
-	/* using cache size for alignment of kvm memory access */
-	kvm_init(64 * 64 * 8 * 2, code_start, code_stop);
+	int ret, fwerr;
 
-	ret = 0;
-	kvm_run->exit_reason = 0;
+	/* Create a kvm instance */
+	kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
+	if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
 
-	/* run vm twice, use count without initial stack setup */
-	ret = ioctl(kvm.vcpufd, KVM_RUN, NULL);
-	ret = ioctl(kvm.vcpufd, KVM_RUN, NULL);
-	if (ret == -1) err(1, "KVM_RUN");
+	/* Allocate guest memory */
+	kvm->memsize = ramsize;
+	kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
+		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (!kvm->mem) err(1, "Allocating guest memory");
+	assert(code_stop - code_start <= kvm->memsize);
+	memcpy(kvm->mem, code_start, code_stop - code_start);
 
-	if (kvm_run->exit_reason == KVM_EXIT_MMIO || kvm_run->exit_reason == KVM_EXIT_HLT) {
-		memset(&regs, 0, sizeof(regs));
-		ret = ioctl(kvm.vcpufd, KVM_GET_REGS, &regs);
-		if (ret < 0) err(1, "KVM_GET_REGS");
-		errx(1, "Victim access OOB: %llu %08llx => %02X\n",
-			kvm_run->mmio.phys_addr, regs.rip,
-			((uint8_t *)kvm.mem)[regs.rip]);
-	} else if (kvm_run->exit_reason != KVM_EXIT_IO) {
-		errx(1, "KVM died: %i\n", kvm_run->exit_reason);
-	}
+	/* Map it into the vm */
+	memset(&region, 0, sizeof(region));
+	region.slot = 0;
+	region.memory_size = kvm->memsize;
+	region.guest_phys_addr = 0;
+	region.userspace_addr = (uintptr_t) kvm->mem;
+	ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region);
+	if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
+
+	/* Enable SEV for vm */
+	memset(&init, 0, sizeof(init));
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_INIT, &init, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_SNP_INIT: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Register memory region */
+	memset(&enc_region, 0, sizeof(enc_region));
+	enc_region.addr = (uintptr_t) kvm->mem;
+	enc_region.size = kvm->memsize;
+	ret = ioctl(kvm->vmfd, KVM_MEMORY_ENCRYPT_REG_REGION, &enc_region);
+	if (ret == -1) err(1, "KVM_MEMORY_ENCRYPT_REG_REGION");
+
+	/* Create virtual cpu */
+	kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
+	if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+
+	/* Map the shared kvm_run structure and following data */
+	ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
+	if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
+	if (ret < sizeof(struct kvm_run))
+		errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
+	kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
+		MAP_SHARED, kvm->vcpufd, 0);
+	if (!kvm->run) err(1, "mmap vcpu");
 
-	counts = read_counts();
+	/* Initialize segment regs */
+	memset(&sregs, 0, sizeof(sregs));
+	ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
+	if (ret == -1) err(1, "KVM_GET_SREGS");
+	sregs.cs.base = 0;
+	sregs.cs.selector = 0;
+	ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
+	if (ret == -1) err(1, "KVM_SET_SREGS");
 
-	close(kvm.fd);
-	close(kvm.vmfd);
-	close(kvm.vcpufd);
+	/* Initialize rest of registers */
+	memset(&regs, 0, sizeof(regs));
+	regs.rip = 0;
+	regs.rsp = kvm->memsize - 8 - L1_LINESIZE * L1_SETS;
+	regs.rbp = kvm->memsize - 8 - L1_LINESIZE * L1_SETS;
+	ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
+	if (ret == -1) err(1, "KVM_SET_REGS");
+
+	/* Generate encryption keys and set policy */
+	memset(&start, 0, sizeof(start));
+	start.policy = 1 << 17; /* must be set */
+	start.policy |= 1 << 19; /* allow debug */
+	start.policy |= 1 << 16; /* allow simultaneous multi-threading */
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_START, &start, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_SNP_LAUNCH_START: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Prepare the vm memory */
+	memset(&update, 0, sizeof(update));
+	update.uaddr = (uintptr_t) kvm->mem;
+	update.len = ramsize;
+	update.start_gfn = 0;
+	update.page_type = KVM_SEV_SNP_PAGE_TYPE_NORMAL;
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_UPDATE, &update, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_SNP_LAUNCH_UPDATE: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+
+	/* Finalize launch process */
+	memset(&finish, 0, sizeof(finish));
+	ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_FINISH, &finish, &fwerr);
+	if (ret == -1) errx(1, "KVM_SEV_SNP_LAUNCH_FINISH: (%s) %s",
+		strerror(errno), sev_fwerr_str(fwerr));
+}
 
-	return counts;
+void
+kvm_deinit(struct kvm *kvm)
+{
+	close(kvm->vmfd);
+	close(kvm->vcpufd);
+	munmap(kvm->mem, kvm->memsize);
 }
 
-int
-main(int argc, const char **argv)
+void
+kvm_setup_init(void)
 {
-	cpc_msrmt_t without_access[SAMPLE_COUNT][64];
-	cpc_msrmt_t with_access[SAMPLE_COUNT][64];
-	cpc_msrmt_t *counts, *baseline;
-	uint32_t arg, measure;
-	int i, k, ret;
-
-	setvbuf(stdout, NULL, _IONBF, 0);
-
-	pin_process(0, TARGET_CORE, true);
-
-	kvm_fd = open("/dev/kvm", O_RDONLY);
-	if (kvm_fd < 0) err(1, "open");
-
-	/* init L1 miss counter for host kernel */
-	arg = 0x002264D8;
-	ret = ioctl(kvm_fd, KVM_CPC_INIT_PMC, &arg);
-	if (ret == -1) err(1, "ioctl INIT_PMC");
-
-	baseline = calloc(sizeof(cpc_msrmt_t), 64);
-	if (!baseline) err(1, "calloc");
-
-	measure = true;
-	ret = ioctl(kvm_fd, KVM_CPC_MEASURE_BASELINE, &measure);
-	if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
-
-	for (i = 0; i < SAMPLE_COUNT; i++) {
-		counts = collect("without", __start_guest_without, __stop_guest_without);
-		memcpy(without_access[i], counts, 64 * sizeof(cpc_msrmt_t));
-		free(counts);
-
-		counts = collect("with", __start_guest_with, __stop_guest_with);
-		memcpy(with_access[i], counts, 64 * sizeof(cpc_msrmt_t));
-		free(counts);
-	}
-
-	measure = false;
-	ret = ioctl(kvm_fd, KVM_CPC_MEASURE_BASELINE, &measure);
-	if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
-
-	ret = ioctl(kvm_fd, KVM_CPC_READ_BASELINE, baseline);
-	if (ret == -1) err(1, "ioctl READ_BASELINE");
-
-	for (i = 0; i < SAMPLE_COUNT; i++) {
-		for (k = 0; k < 64; k++) {
-			with_access[i][k] -= baseline[k];
-			without_access[i][k] -= baseline[k];
-		}
-
-		printf("Evictions with access:\n");
-		print_counts(with_access[i]);
-
-		printf("Evictions without access:\n");
-		print_counts(without_access[i]);
-	}
-
-	for (i = 0; i < SAMPLE_COUNT; i++) {
-		assert(with_access[i][TARGET_SET] > 0);
-		//assert(without_access[i][TARGET_SET] == 0);
-	}
-
-	free(baseline);
-	close(kvm_fd);
+	int ret;
+
+	kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC);
+	if (kvm_dev < 0) err(1, "open /dev/kvm");
+
+	sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC);
+	if (sev_dev < 0) err(1, "open /dev/sev");
+
+	/* ensure we have the stable version of the api */
+	ret = ioctl(kvm_dev, KVM_GET_API_VERSION, NULL);
+	if (ret == -1) err(1, "KVM_GET_API_VERSION");
+	if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
+
 }
+
+void
+kvm_setup_deinit(void)
+{
+	close(kvm_dev);
+	close(sev_dev);
+}
```
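The refactor turns test/kvm.c into a set of shared setup helpers (kvm_setup_init, kvm_init plus its SEV/SEV-ES/SEV-SNP variants, kvm_deinit, kvm_setup_deinit) that the kvm-eviction and kvm-step tests build on. Below is a minimal sketch of how a caller might drive them; the guest-code symbols, the plain-SEV variant, and the ramsize are assumptions for illustration and are not part of this diff (struct kvm and the prototypes are taken to be declared in test/kvm.h, which the new code includes).

```c
/* Illustrative only: driving the helpers added in this diff.
 * Assumptions: test/kvm.h declares struct kvm and these prototypes;
 * __start_guest_with/__stop_guest_with are linker-provided section
 * bounds for the guest code (now kept in the test programs);
 * the 2 MiB ramsize is arbitrary. */
#include "test/kvm.h"

#include <stdint.h>

extern uint8_t __start_guest_with[], __stop_guest_with[];

int
main(void)
{
	struct kvm kvm;

	kvm_setup_init();           /* open /dev/kvm and /dev/sev */

	sev_kvm_init(&kvm, 2 << 20, /* hypothetical 2 MiB of guest ram */
		__start_guest_with, __stop_guest_with);

	/* ... issue KVM_RUN on kvm.vcpufd and read CachePC counts here ... */

	kvm_deinit(&kvm);
	kvm_setup_deinit();         /* close the device handles */

	return 0;
}
```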
