Diffstat:
 cachepc/events.c                  |  2 +-
 cachepc/kvm.c                     | 16 ++++++++--------
 cachepc/mmu.c                     | 28 ++++++++++++++--------------
 cachepc/{sevstep.c => tracking.c} | 18 +++++++++---------
 cachepc/{sevstep.h => tracking.h} | 16 ++++++++--------
 5 files changed, 40 insertions(+), 40 deletions(-)
diff --git a/cachepc/events.c b/cachepc/events.c index d3e1475..b06e0c0 100644 --- a/cachepc/events.c +++ b/cachepc/events.c @@ -1,5 +1,5 @@ #include "events.h" -#include "sevstep.h" +#include "tracking.h" #include "cachepc.h" #include "uapi.h" diff --git a/cachepc/kvm.c b/cachepc/kvm.c index c8ea494..04421f3 100644 --- a/cachepc/kvm.c +++ b/cachepc/kvm.c @@ -1,7 +1,7 @@ #include "kvm.h" #include "events.h" #include "cachepc.h" -#include "sevstep.h" +#include "tracking.h" #include "uapi.h" #include "svm/svm.h" @@ -510,8 +510,8 @@ cachepc_kvm_track_page_ioctl(void __user *arg_user) return -EINVAL; vcpu = xa_load(&main_vm->vcpu_array, 0); - if (!sevstep_track_single(vcpu, cfg.gfn, cfg.mode)) { - printk("KVM_TRACK_PAGE: sevstep_track_single failed"); + if (!cachepc_track_single(vcpu, cfg.gfn, cfg.mode)) { + printk("KVM_TRACK_PAGE: cachepc_track_single failed"); return -EFAULT; } @@ -553,7 +553,7 @@ cachepc_kvm_track_all_ioctl(void __user *arg_user) return -EINVAL; vcpu = xa_load(&main_vm->vcpu_array, 0); - if (!sevstep_track_all(vcpu, mode)) + if (!cachepc_track_all(vcpu, mode)) return -EFAULT; return 0; @@ -577,7 +577,7 @@ cachepc_kvm_untrack_all_ioctl(void __user *arg_user) return -EINVAL; vcpu = xa_load(&main_vm->vcpu_array, 0); - if (!sevstep_untrack_all(vcpu, mode)) + if (!cachepc_untrack_all(vcpu, mode)) return -EFAULT; return 0; @@ -590,9 +590,9 @@ cachepc_kvm_uspt_reset_ioctl(void __user *arg_user) cachepc_events_reset(); vcpu = xa_load(&main_vm->vcpu_array, 0); - sevstep_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC); - sevstep_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS); - sevstep_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE); + cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC); + cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS); + cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE); return 0; } diff --git a/cachepc/mmu.c b/cachepc/mmu.c index 454dff1..695c882 100644 --- a/cachepc/mmu.c +++ b/cachepc/mmu.c @@ -1,9 +1,9 @@ -#include "../cachepc/sevstep.h" +#include 
"../cachepc/tracking.h" #include "../cachepc/cachepc.h" #include "../cachepc/events.h" static void -sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu, +cachepc_uspt_page_fault_handle(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { if (!kvm_slot_page_track_is_active(vcpu->kvm, @@ -15,7 +15,7 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu, //pr_warn("Sevstep: Tracked page fault attrs %i %i %i\n", // fault->present, fault->write, fault->user); - sevstep_untrack_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS); + cachepc_untrack_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS); if (cachepc_track_mode == CPC_TRACK_DATA_ACCESS) { if (cachepc_single_step && cachepc_inst_fault_avail) { @@ -45,20 +45,20 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu, /* TODO: skip if not exec */ /* TODO: calculate retired instructions (save and subtract global counter) */ if (cachepc_inst_fault_avail) { - sevstep_track_single(vcpu, cachepc_inst_fault_gfn, + cachepc_track_single(vcpu, cachepc_inst_fault_gfn, KVM_PAGE_TRACK_ACCESS); } cachepc_inst_fault_gfn = fault->gfn; cachepc_inst_fault_err = fault->error_code; cachepc_send_tracking_event(fault->gfn, fault->error_code, 0, 0); } else if (cachepc_track_mode == CPC_TRACK_ACCESS) { - sevstep_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS); + cachepc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS); cachepc_send_tracking_event(fault->gfn, fault->error_code, 0, 0); } } bool -sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode) +cachepc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode) { u64 spte; bool flush; @@ -102,9 +102,9 @@ sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode) return flush; } -EXPORT_SYMBOL(sevstep_spte_protect); +EXPORT_SYMBOL(cachepc_spte_protect); -bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head, +bool cachepc_rmap_protect(struct kvm_rmap_head *rmap_head, bool pt_protect, enum kvm_page_track_mode 
mode) { struct rmap_iterator iter; @@ -113,15 +113,15 @@ bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head, flush = false; for_each_rmap_spte(rmap_head, &iter, sptep) { - flush |= sevstep_spte_protect(sptep, pt_protect, mode); + flush |= cachepc_spte_protect(sptep, pt_protect, mode); } return flush; } -EXPORT_SYMBOL(sevstep_rmap_protect); +EXPORT_SYMBOL(cachepc_rmap_protect); bool -sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot, +cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot, uint64_t gfn, int min_level, enum kvm_page_track_mode mode) { struct kvm_rmap_head *rmap_head; @@ -135,10 +135,10 @@ sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot, if (kvm_memslots_have_rmaps(kvm)) { for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { rmap_head = gfn_to_rmap(gfn, i, slot); - protected |= sevstep_rmap_protect(rmap_head, true, mode); + protected |= cachepc_rmap_protect(rmap_head, true, mode); } } else if (is_tdp_mmu_enabled(kvm)) { - protected |= sevstep_tdp_protect_gfn(kvm, + protected |= cachepc_tdp_protect_gfn(kvm, slot, gfn, min_level, mode); } else { pr_err("CachePC: Tracking unsupported!\n"); @@ -147,5 +147,5 @@ sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot, return true; //return protected; } -EXPORT_SYMBOL(sevstep_kvm_mmu_slot_gfn_protect); +EXPORT_SYMBOL(cachepc_kvm_mmu_slot_gfn_protect); diff --git a/cachepc/sevstep.c b/cachepc/tracking.c index 0f5e011..a6b89e3 100644 --- a/cachepc/sevstep.c +++ b/cachepc/tracking.c @@ -1,4 +1,4 @@ -#include "sevstep.h" +#include "tracking.h" #include "cachepc.h" #include "mmu/mmu_internal.h" @@ -45,7 +45,7 @@ struct kvm* main_vm; EXPORT_SYMBOL(main_vm); bool -sevstep_track_single(struct kvm_vcpu *vcpu, gfn_t gfn, +cachepc_track_single(struct kvm_vcpu *vcpu, gfn_t gfn, enum kvm_page_track_mode mode) { struct kvm_memory_slot *slot; @@ -66,10 +66,10 @@ sevstep_track_single(struct kvm_vcpu *vcpu, 
gfn_t gfn, return slot != NULL; } -EXPORT_SYMBOL(sevstep_track_single); +EXPORT_SYMBOL(cachepc_track_single); bool -sevstep_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn, +cachepc_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn, enum kvm_page_track_mode mode) { struct kvm_memory_slot *slot; @@ -90,10 +90,10 @@ sevstep_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn, return slot != NULL; } -EXPORT_SYMBOL(sevstep_untrack_single); +EXPORT_SYMBOL(cachepc_untrack_single); long -sevstep_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode) +cachepc_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode) { struct kvm_memory_slot *slot; struct kvm_memslots *slots; @@ -118,10 +118,10 @@ sevstep_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode) return count; } -EXPORT_SYMBOL(sevstep_track_all); +EXPORT_SYMBOL(cachepc_track_all); long -sevstep_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode) +cachepc_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode) { struct kvm_memory_slot *slot; struct kvm_memslots *slots; @@ -145,5 +145,5 @@ sevstep_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode) return count; } -EXPORT_SYMBOL(sevstep_untrack_all); +EXPORT_SYMBOL(cachepc_untrack_all); diff --git a/cachepc/sevstep.h b/cachepc/tracking.h index 666198a..d38a205 100644 --- a/cachepc/sevstep.h +++ b/cachepc/tracking.h @@ -13,20 +13,20 @@ extern struct kvm* main_vm; /* defined in mmu.c as they rely on static mmu-internal functions */ -bool sevstep_spte_protect(u64 *sptep, +bool cachepc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode); -bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head, +bool cachepc_rmap_protect(struct kvm_rmap_head *rmap_head, bool pt_protect, enum kvm_page_track_mode mode); -bool sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot, +bool cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot, uint64_t gfn, int 
min_level, enum kvm_page_track_mode mode); -bool sevstep_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, +bool cachepc_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int min_level, int mode); -bool sevstep_track_single(struct kvm_vcpu *vcpu, gfn_t gfn, +bool cachepc_track_single(struct kvm_vcpu *vcpu, gfn_t gfn, enum kvm_page_track_mode mode); -bool sevstep_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn, +bool cachepc_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn, enum kvm_page_track_mode mode); -long sevstep_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode); -long sevstep_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode); +long cachepc_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode); +long cachepc_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode); |
