cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc

commit 6907b53a850bde2faaa9c2c9342501b775008721
parent 13ff10bd81a3a6d7e13c43596a2f0efb6be39088
Author: Louis Burda <quent.burda@gmail.com>
Date:   Sat, 12 Nov 2022 02:06:38 +0100

Rename sevstep to cachepc tracking

Diffstat:
M cachepc/events.c   |   2 +-
M cachepc/kvm.c      |  16 ++++++++--------
M cachepc/mmu.c      |  28 ++++++++++++++--------------
D cachepc/sevstep.c  | 149 -------------------------------------------------------------------------------
D cachepc/sevstep.h  |  32 --------------------------------
A cachepc/tracking.c | 149 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A cachepc/tracking.h |  32 ++++++++++++++++++++++++++++++++
7 files changed, 204 insertions(+), 204 deletions(-)

diff --git a/cachepc/events.c b/cachepc/events.c
@@ -1,5 +1,5 @@
 #include "events.h"
-#include "sevstep.h"
+#include "tracking.h"
 #include "cachepc.h"
 #include "uapi.h"
 
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -1,7 +1,7 @@
 #include "kvm.h"
 #include "events.h"
 #include "cachepc.h"
-#include "sevstep.h"
+#include "tracking.h"
 #include "uapi.h"
 
 #include "svm/svm.h"
@@ -510,8 +510,8 @@ cachepc_kvm_track_page_ioctl(void __user *arg_user)
 		return -EINVAL;
 
 	vcpu = xa_load(&main_vm->vcpu_array, 0);
-	if (!sevstep_track_single(vcpu, cfg.gfn, cfg.mode)) {
-		printk("KVM_TRACK_PAGE: sevstep_track_single failed");
+	if (!cachepc_track_single(vcpu, cfg.gfn, cfg.mode)) {
+		printk("KVM_TRACK_PAGE: cachepc_track_single failed");
 		return -EFAULT;
 	}
 
@@ -553,7 +553,7 @@ cachepc_kvm_track_all_ioctl(void __user *arg_user)
 		return -EINVAL;
 
 	vcpu = xa_load(&main_vm->vcpu_array, 0);
-	if (!sevstep_track_all(vcpu, mode))
+	if (!cachepc_track_all(vcpu, mode))
 		return -EFAULT;
 
 	return 0;
@@ -577,7 +577,7 @@ cachepc_kvm_untrack_all_ioctl(void __user *arg_user)
 		return -EINVAL;
 
 	vcpu = xa_load(&main_vm->vcpu_array, 0);
-	if (!sevstep_untrack_all(vcpu, mode))
+	if (!cachepc_untrack_all(vcpu, mode))
 		return -EFAULT;
 
 	return 0;
@@ -590,9 +590,9 @@ cachepc_kvm_uspt_reset_ioctl(void __user *arg_user)
 	cachepc_events_reset();
 
 	vcpu = xa_load(&main_vm->vcpu_array, 0);
-	sevstep_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
-	sevstep_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
-	sevstep_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE);
+	cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
+	cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
+	cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE);
 
 	return 0;
 }
diff --git a/cachepc/mmu.c b/cachepc/mmu.c
@@ -1,9 +1,9 @@
-#include "../cachepc/sevstep.h"
+#include "../cachepc/tracking.h"
 #include "../cachepc/cachepc.h"
 #include "../cachepc/events.h"
 
 static void
-sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
+cachepc_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
 	struct kvm_page_fault *fault)
 {
 	if (!kvm_slot_page_track_is_active(vcpu->kvm,
@@ -15,7 +15,7 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
 	//pr_warn("Sevstep: Tracked page fault attrs %i %i %i\n",
 	//	fault->present, fault->write, fault->user);
 
-	sevstep_untrack_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS);
+	cachepc_untrack_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS);
 
 	if (cachepc_track_mode == CPC_TRACK_DATA_ACCESS) {
 		if (cachepc_single_step && cachepc_inst_fault_avail) {
@@ -45,20 +45,20 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
 		/* TODO: skip if not exec */
 		/* TODO: calculate retired instructions (save and subtract global counter) */
 		if (cachepc_inst_fault_avail) {
-			sevstep_track_single(vcpu, cachepc_inst_fault_gfn,
+			cachepc_track_single(vcpu, cachepc_inst_fault_gfn,
 				KVM_PAGE_TRACK_ACCESS);
 		}
 		cachepc_inst_fault_gfn = fault->gfn;
 		cachepc_inst_fault_err = fault->error_code;
 		cachepc_send_tracking_event(fault->gfn, fault->error_code, 0, 0);
 	} else if (cachepc_track_mode == CPC_TRACK_ACCESS) {
-		sevstep_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS);
+		cachepc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS);
 		cachepc_send_tracking_event(fault->gfn, fault->error_code, 0, 0);
 	}
 }
 
 bool
-sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
+cachepc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
 {
 	u64 spte;
 	bool flush;
@@ -102,9 +102,9 @@ sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
 
 	return flush;
 }
-EXPORT_SYMBOL(sevstep_spte_protect);
+EXPORT_SYMBOL(cachepc_spte_protect);
 
-bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head,
+bool cachepc_rmap_protect(struct kvm_rmap_head *rmap_head,
 	bool pt_protect, enum kvm_page_track_mode mode)
 {
 	struct rmap_iterator iter;
@@ -113,15 +113,15 @@ bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head,
 
 	flush = false;
 	for_each_rmap_spte(rmap_head, &iter, sptep) {
-		flush |= sevstep_spte_protect(sptep, pt_protect, mode);
+		flush |= cachepc_spte_protect(sptep, pt_protect, mode);
 	}
 
 	return flush;
 }
-EXPORT_SYMBOL(sevstep_rmap_protect);
+EXPORT_SYMBOL(cachepc_rmap_protect);
 
 bool
-sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
+cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
 	uint64_t gfn, int min_level, enum kvm_page_track_mode mode)
 {
 	struct kvm_rmap_head *rmap_head;
@@ -135,10 +135,10 @@ sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
 	if (kvm_memslots_have_rmaps(kvm)) {
 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 			rmap_head = gfn_to_rmap(gfn, i, slot);
-			protected |= sevstep_rmap_protect(rmap_head, true, mode);
+			protected |= cachepc_rmap_protect(rmap_head, true, mode);
 		}
 	} else if (is_tdp_mmu_enabled(kvm)) {
-		protected |= sevstep_tdp_protect_gfn(kvm,
+		protected |= cachepc_tdp_protect_gfn(kvm,
 			slot, gfn, min_level, mode);
 	} else {
 		pr_err("CachePC: Tracking unsupported!\n");
@@ -147,5 +147,5 @@ sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
 	}
 
 	return true; //return protected;
 }
-EXPORT_SYMBOL(sevstep_kvm_mmu_slot_gfn_protect);
+EXPORT_SYMBOL(cachepc_kvm_mmu_slot_gfn_protect);
diff --git a/cachepc/sevstep.c b/cachepc/sevstep.c
@@ -1,149 +0,0 @@
-#include "sevstep.h"
-#include "cachepc.h"
-
-#include "mmu/mmu_internal.h"
-#include "mmu.h"
-
-#include "irq.h"
-#include "ioapic.h"
-#include "mmu.h"
-#include "mmu/tdp_mmu.h"
-#include "x86.h"
-#include "kvm_cache_regs.h"
-#include "kvm_emulate.h"
-#include "cpuid.h"
-#include "mmu/spte.h"
-
-#include <linux/kvm_host.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/moduleparam.h>
-#include <linux/export.h>
-#include <linux/swap.h>
-#include <linux/hugetlb.h>
-#include <linux/compiler.h>
-#include <linux/srcu.h>
-#include <linux/slab.h>
-#include <linux/sched/signal.h>
-#include <linux/uaccess.h>
-#include <linux/hash.h>
-#include <linux/kern_levels.h>
-#include <linux/kthread.h>
-#include <linux/sev.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-
-#include "kvm_cache_regs.h"
-#include "svm/svm.h"
-
-struct kvm* main_vm;
-EXPORT_SYMBOL(main_vm);
-
-bool
-sevstep_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
-	enum kvm_page_track_mode mode)
-{
-	struct kvm_memory_slot *slot;
-	int idx;
-
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
-		write_lock(&vcpu->kvm->mmu_lock);
-		kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
-		write_unlock(&vcpu->kvm->mmu_lock);
-	}
-
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-	if (!slot) pr_err("Sevstep: Failed to track gfn %llu\n", gfn);
-
-	return slot != NULL;
-}
-EXPORT_SYMBOL(sevstep_track_single);
-
-bool
-sevstep_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
-	enum kvm_page_track_mode mode)
-{
-	struct kvm_memory_slot *slot;
-	int idx;
-
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
-		write_lock(&vcpu->kvm->mmu_lock);
-		kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
-		write_unlock(&vcpu->kvm->mmu_lock);
-	}
-
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-	if (!slot) pr_err("Sevstep: Failed to untrack gfn %llu\n", gfn);
-
-	return slot != NULL;
-}
-EXPORT_SYMBOL(sevstep_untrack_single);
-
-long
-sevstep_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
-{
-	struct kvm_memory_slot *slot;
-	struct kvm_memslots *slots;
-	long count = 0;
-	int bkt;
-	u64 gfn;
-
-	pr_warn("Sevstep: Start tracking (mode:%i)\n", mode);
-
-	slots = kvm_vcpu_memslots(vcpu);
-	kvm_for_each_memslot(slot, bkt, slots) {
-		pr_warn("Sevstep: Slot page count: %lu\n", slot->npages);
-		for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
-			if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
-				write_lock(&vcpu->kvm->mmu_lock);
-				kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
-				write_unlock(&vcpu->kvm->mmu_lock);
-				count++;
-			}
-		}
-	}
-
-	return count;
-}
-EXPORT_SYMBOL(sevstep_track_all);
-
-long
-sevstep_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
-{
-	struct kvm_memory_slot *slot;
-	struct kvm_memslots *slots;
-	long count = 0;
-	int bkt;
-	u64 gfn;
-
-	pr_warn("Sevstep: Stop tracking (mode:%i)\n", mode);
-
-	slots = kvm_vcpu_memslots(vcpu);
-	kvm_for_each_memslot(slot, bkt, slots) {
-		for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
-			if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
-				write_lock(&vcpu->kvm->mmu_lock);
-				kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
-				write_unlock(&vcpu->kvm->mmu_lock);
-				count++;
-			}
-		}
-	}
-
-	return count;
-}
-EXPORT_SYMBOL(sevstep_untrack_all);
-
diff --git a/cachepc/sevstep.h b/cachepc/sevstep.h
@@ -1,32 +0,0 @@
-#pragma once
-
-#include <linux/types.h>
-#include <linux/spinlock_types.h>
-#include <asm/atomic.h>
-#include <linux/kvm_types.h>
-#include <asm/kvm_page_track.h>
-
-#include <linux/kvm_host.h>
-#include <linux/pid.h>
-#include <linux/psp-sev.h>
-
-extern struct kvm* main_vm;
-
-/* defined in mmu.c as they rely on static mmu-internal functions */
-bool sevstep_spte_protect(u64 *sptep,
-	bool pt_protect, enum kvm_page_track_mode mode);
-bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head,
-	bool pt_protect, enum kvm_page_track_mode mode);
-bool sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
-	uint64_t gfn, int min_level, enum kvm_page_track_mode mode);
-
-bool sevstep_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-	gfn_t gfn, int min_level, int mode);
-
-bool sevstep_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
-	enum kvm_page_track_mode mode);
-bool sevstep_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
-	enum kvm_page_track_mode mode);
-
-long sevstep_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
-long sevstep_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
diff --git a/cachepc/tracking.c b/cachepc/tracking.c
@@ -0,0 +1,149 @@
+#include "tracking.h"
+#include "cachepc.h"
+
+#include "mmu/mmu_internal.h"
+#include "mmu.h"
+
+#include "irq.h"
+#include "ioapic.h"
+#include "mmu.h"
+#include "mmu/tdp_mmu.h"
+#include "x86.h"
+#include "kvm_cache_regs.h"
+#include "kvm_emulate.h"
+#include "cpuid.h"
+#include "mmu/spte.h"
+
+#include <linux/kvm_host.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/moduleparam.h>
+#include <linux/export.h>
+#include <linux/swap.h>
+#include <linux/hugetlb.h>
+#include <linux/compiler.h>
+#include <linux/srcu.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <linux/hash.h>
+#include <linux/kern_levels.h>
+#include <linux/kthread.h>
+#include <linux/sev.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "kvm_cache_regs.h"
+#include "svm/svm.h"
+
+struct kvm* main_vm;
+EXPORT_SYMBOL(main_vm);
+
+bool
+cachepc_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
+	enum kvm_page_track_mode mode)
+{
+	struct kvm_memory_slot *slot;
+	int idx;
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+		write_lock(&vcpu->kvm->mmu_lock);
+		kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
+		write_unlock(&vcpu->kvm->mmu_lock);
+	}
+
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+	if (!slot) pr_err("Sevstep: Failed to track gfn %llu\n", gfn);
+
+	return slot != NULL;
+}
+EXPORT_SYMBOL(cachepc_track_single);
+
+bool
+cachepc_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
+	enum kvm_page_track_mode mode)
+{
+	struct kvm_memory_slot *slot;
+	int idx;
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+		write_lock(&vcpu->kvm->mmu_lock);
+		kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
+		write_unlock(&vcpu->kvm->mmu_lock);
+	}
+
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+	if (!slot) pr_err("Sevstep: Failed to untrack gfn %llu\n", gfn);
+
+	return slot != NULL;
+}
+EXPORT_SYMBOL(cachepc_untrack_single);
+
+long
+cachepc_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
+{
+	struct kvm_memory_slot *slot;
+	struct kvm_memslots *slots;
+	long count = 0;
+	int bkt;
+	u64 gfn;
+
+	pr_warn("Sevstep: Start tracking (mode:%i)\n", mode);
+
+	slots = kvm_vcpu_memslots(vcpu);
+	kvm_for_each_memslot(slot, bkt, slots) {
+		pr_warn("Sevstep: Slot page count: %lu\n", slot->npages);
+		for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
+			if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+				write_lock(&vcpu->kvm->mmu_lock);
+				kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
+				write_unlock(&vcpu->kvm->mmu_lock);
+				count++;
+			}
+		}
+	}
+
+	return count;
+}
+EXPORT_SYMBOL(cachepc_track_all);
+
+long
+cachepc_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
+{
+	struct kvm_memory_slot *slot;
+	struct kvm_memslots *slots;
+	long count = 0;
+	int bkt;
+	u64 gfn;
+
+	pr_warn("Sevstep: Stop tracking (mode:%i)\n", mode);
+
+	slots = kvm_vcpu_memslots(vcpu);
+	kvm_for_each_memslot(slot, bkt, slots) {
+		for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
+			if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+				write_lock(&vcpu->kvm->mmu_lock);
+				kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
+				write_unlock(&vcpu->kvm->mmu_lock);
+				count++;
+			}
+		}
+	}
+
+	return count;
+}
+EXPORT_SYMBOL(cachepc_untrack_all);
+
diff --git a/cachepc/tracking.h b/cachepc/tracking.h
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <linux/types.h>
+#include <linux/spinlock_types.h>
+#include <asm/atomic.h>
+#include <linux/kvm_types.h>
+#include <asm/kvm_page_track.h>
+
+#include <linux/kvm_host.h>
+#include <linux/pid.h>
+#include <linux/psp-sev.h>
+
+extern struct kvm* main_vm;
+
+/* defined in mmu.c as they rely on static mmu-internal functions */
+bool cachepc_spte_protect(u64 *sptep,
+	bool pt_protect, enum kvm_page_track_mode mode);
+bool cachepc_rmap_protect(struct kvm_rmap_head *rmap_head,
+	bool pt_protect, enum kvm_page_track_mode mode);
+bool cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
+	uint64_t gfn, int min_level, enum kvm_page_track_mode mode);
+
+bool cachepc_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+	gfn_t gfn, int min_level, int mode);
+
+bool cachepc_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
+	enum kvm_page_track_mode mode);
+bool cachepc_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
+	enum kvm_page_track_mode mode);
+
+long cachepc_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
+long cachepc_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
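
Usage sketch (not part of the commit above): a minimal illustration of how host-side code might drive the renamed tracking API, assuming only the declarations from cachepc/tracking.h and the main_vm / vcpu_array lookup pattern used in cachepc/kvm.c. The wrapper function names below are hypothetical.

/* Sketch, assuming cachepc/tracking.h from this commit. Only the
 * cachepc_* calls, main_vm, and KVM_PAGE_TRACK_ACCESS come from the
 * diff above; the wrappers themselves are illustrative. */
#include "tracking.h"

#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

/* Begin access-tracking every gfn of vcpu 0, mirroring the pattern in
 * cachepc_kvm_track_all_ioctl(); returns the number of pages tracked. */
static long example_track_all_access(void)
{
	struct kvm_vcpu *vcpu;

	if (!main_vm)
		return -ENODEV;

	/* Same vcpu lookup as the ioctl handlers in cachepc/kvm.c. */
	vcpu = xa_load(&main_vm->vcpu_array, 0);
	if (!vcpu)
		return -ENODEV;

	/* Subsequent guest accesses fault and are reported through
	 * cachepc_uspt_page_fault_handle() in cachepc/mmu.c. */
	return cachepc_track_all(vcpu, KVM_PAGE_TRACK_ACCESS);
}

/* Stop tracking a single gfn again, e.g. after its fault was handled. */
static bool example_untrack_one(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return cachepc_untrack_single(vcpu, gfn, KVM_PAGE_TRACK_ACCESS);
}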