Diffstat (limited to 'sevstep/sevstep.c')
-rw-r--r--  sevstep/sevstep.c  172
1 file changed, 86 insertions(+), 86 deletions(-)
diff --git a/sevstep/sevstep.c b/sevstep/sevstep.c
index ffc6c7e..d603769 100644
--- a/sevstep/sevstep.c
+++ b/sevstep/sevstep.c
@@ -44,19 +44,8 @@
struct kvm* main_vm;
EXPORT_SYMBOL(main_vm);
-void
-sevstep_setup_pmcs(void)
-{
- /* retired instructions */
- cachepc_init_pmc(0, 0xc0, 0x00, PMC_GUEST, PMC_KERNEL | PMC_USER);
-
- /* l2 data cache hit & miss */
- cachepc_init_pmc(1, 0x64, 0x70, PMC_HOST, PMC_KERNEL);
-}
-EXPORT_SYMBOL(sevstep_setup_pmcs);
-
bool
-sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+sevstep_track_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode)
{
int idx;
@@ -65,35 +54,41 @@ sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if (mode == KVM_PAGE_TRACK_ACCESS) {
- //printk("Removing gfn: %016llx from acess page track pool\n", gfn);
+ pr_warn("Adding gfn: %016llx to access page track pool\n", gfn);
}
+
if (mode == KVM_PAGE_TRACK_WRITE) {
- //printk("Removing gfn: %016llx from write page track pool\n", gfn);
+ pr_warn("Adding gfn: %016llx to write page track pool\n", gfn);
}
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
write_lock(&vcpu->kvm->mmu_lock);
- kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
+ kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
write_unlock(&vcpu->kvm->mmu_lock);
ret = true;
} else {
- printk("Failed to untrack %016llx because ", gfn);
+ pr_warn("Failed to track %016llx because ", gfn);
if (slot == NULL) {
printk(KERN_CONT "slot was null");
- } else if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
- printk(KERN_CONT "page track was not active");
+ }
+ if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+ printk(KERN_CONT "page is already tracked");
}
printk(KERN_CONT "\n");
}
+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
return ret;
}
-EXPORT_SYMBOL(sevstep_untrack_single_page);
+EXPORT_SYMBOL(sevstep_track_single_page);
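For context, sevstep_track_single_page() and sevstep_untrack_single_page() form a pair: the first arms a gfn in KVM's page-track machinery, the second releases it. A minimal caller sketch (the helper name and error handling are illustrative, not part of this patch):

	/* Hypothetical caller: arm tracking on one guest page, then
	 * disarm it again. Both helpers return false on failure. */
	static int example_observe_page(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		if (!sevstep_track_single_page(vcpu, gfn, KVM_PAGE_TRACK_ACCESS))
			return -EINVAL;

		/* ... wait for the page-track fault handler to report an access ... */

		if (!sevstep_untrack_single_page(vcpu, gfn, KVM_PAGE_TRACK_ACCESS))
			return -EINVAL;
		return 0;
	}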
bool
-sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+ enum kvm_page_track_mode mode)
{
int idx;
bool ret;
@@ -101,23 +96,38 @@ sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ if (mode == KVM_PAGE_TRACK_ACCESS) {
+ pr_warn("Removing gfn: %016llx from acess page track pool\n", gfn);
+ }
+ if (mode == KVM_PAGE_TRACK_WRITE) {
+ pr_warn("Removing gfn: %016llx from write page track pool\n", gfn);
+ }
+
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if( slot != NULL ) {
+ if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
write_lock(&vcpu->kvm->mmu_lock);
- //Vincent: The kvm mmu function now requires min_level
- //We want all pages to protected so we do PG_LEVEL_4K
- //https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
- sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm,slot,gfn,PG_LEVEL_4K,KVM_PAGE_TRACK_RESET_ACCESSED);
+ kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
write_unlock(&vcpu->kvm->mmu_lock);
ret = true;
+ } else {
+ pr_warn("Failed to untrack %016llx because ", gfn);
+ if (slot == NULL) {
+ printk(KERN_CONT "slot was null");
+ } else if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+ printk(KERN_CONT "page track was not active");
+ }
+ printk(KERN_CONT "\n");
}
+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
return ret;
}
-EXPORT_SYMBOL(sevstep_reset_accessed_on_page);
+EXPORT_SYMBOL(sevstep_untrack_single_page);
bool
-sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
int idx;
bool ret;
@@ -125,25 +135,27 @@ sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
+
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if( slot != NULL ) {
+ if (slot != NULL) {
write_lock(&vcpu->kvm->mmu_lock);
- //Vincent: The kvm mmu function now requires min_level
- //We want all pages to protected so we do PG_LEVEL_4K
- //https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
+ // Vincent: The kvm mmu function now requires min_level
+ // We want all pages to be protected, so we use PG_LEVEL_4K
+ // https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm, slot, gfn,
- PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_EXEC);
+ PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_ACCESSED);
write_unlock(&vcpu->kvm->mmu_lock);
ret = true;
}
+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
return ret;
}
-EXPORT_SYMBOL(sevstep_clear_nx_on_page);
+EXPORT_SYMBOL(sevstep_reset_accessed_on_page);
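Note that sevstep_reset_accessed_on_page() does not add or remove anything in the track pool; it only re-protects the gfn at all mapping levels via sevstep_kvm_mmu_slot_gfn_protect(), presumably so the next guest access faults again. KVM_PAGE_TRACK_RESET_ACCESSED (and KVM_PAGE_TRACK_RESET_EXEC below) are sevstep-specific mode values, not upstream KVM ones. A sketch of how a fault handler might use this to re-arm a page (hypothetical helper, illustrative only):

	/* Sketch: re-arm a page after handling its access event so a
	 * subsequent guest access traps again. */
	static void example_rearm(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		if (!sevstep_reset_accessed_on_page(vcpu, gfn))
			pr_warn("sevstep: re-arming gfn %016llx failed\n", gfn);
	}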
bool
-sevstep_track_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
- enum kvm_page_track_mode mode)
+sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
int idx;
bool ret;
@@ -151,53 +163,41 @@ sevstep_track_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (mode == KVM_PAGE_TRACK_ACCESS) {
- //printk_ratelimited("Adding gfn: %016llx to acess page track pool\n", gfn);
- //printk("Adding gfn: %016llx to acess page track pool\n", gfn);
- }
- if (mode == KVM_PAGE_TRACK_WRITE) {
- //printk_ratelimited("Adding gfn: %016llx to write page track pool\n", gfn);
- }
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm,slot, gfn, mode)) {
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ if (slot != NULL) {
write_lock(&vcpu->kvm->mmu_lock);
- kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
+ // Vincent: The kvm mmu function now requires min_level
+ // We want all pages to be protected, so we use PG_LEVEL_4K
+ // https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
+ sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm, slot, gfn,
+ PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_EXEC);
write_unlock(&vcpu->kvm->mmu_lock);
ret = true;
-
- } else {
-
- printk("Failed to track %016llx because ", gfn);
- if (slot == NULL) {
- printk(KERN_CONT "slot was null");
- }
- if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
- printk(KERN_CONT "page is already tracked");
- }
- printk(KERN_CONT "\n");
}
+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
return ret;
}
-EXPORT_SYMBOL(sevstep_track_single_page);
+EXPORT_SYMBOL(sevstep_clear_nx_on_page);
long
sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
- long count = 0;
- u64 iterator, iterat_max;
struct kvm_memory_slot *slot;
+ struct kvm_memory_slot *first_memslot;
+ struct rb_node *node;
+ u64 iterator, iterat_max;
+ long count = 0;
int idx;
- //Vincent: Memslots interface changed into a rb tree, see
- //here: https://lwn.net/Articles/856392/
- //and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
- //Thus we use instead of
- //iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
- // + vcpu->kvm->memslots[0]->memslots[0].npages;
- struct rb_node *node;
- struct kvm_memory_slot *first_memslot;
+ // Vincent: The memslots interface changed into an rb tree, see
+ // here: https://lwn.net/Articles/856392/
+ // and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
+ // Thus, instead of
+ //   iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
+ //       + vcpu->kvm->memslots[0]->memslots[0].npages;
+ // we derive iterat_max from the last node in the gfn tree.
node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
iterat_max = first_memslot->base_gfn + first_memslot->npages;
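One detail worth noting: rb_last() on the gfn-sorted tree returns the memslot with the highest base_gfn, so first_memslot actually holds the last slot, and the sum is the exclusive upper bound of guest frame numbers. The same computation in isolation (a sketch, assuming address space 0 as the code above does):

	/* Sketch: exclusive upper gfn bound, taken from the gfn-sorted
	 * rb tree that replaced the old memslot array. */
	static u64 example_max_gfn(struct kvm *kvm)
	{
		struct rb_node *node = rb_last(&kvm->memslots[0]->gfn_tree);
		struct kvm_memory_slot *last = container_of(node,
			struct kvm_memory_slot, gfn_node[0]);

		return last->base_gfn + last->npages;
	}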
@@ -221,20 +221,19 @@ EXPORT_SYMBOL(sevstep_start_tracking);
long
sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
- long count = 0;
- u64 iterator, iterat_max;
struct kvm_memory_slot *slot;
+ struct kvm_memory_slot *first_memslot;
+ struct rb_node *node;
+ u64 iterator, iterat_max;
+ long count = 0;
int idx;
-
- //Vincent: Memslots interface changed into a rb tree, see
- //here: https://lwn.net/Articles/856392/
- //and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
- //Thus we use instead of
- //iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
- // + vcpu->kvm->memslots[0]->memslots[0].npages;
- struct rb_node *node;
- struct kvm_memory_slot *first_memslot;
+ // Vincent: The memslots interface changed into an rb tree, see
+ // here: https://lwn.net/Articles/856392/
+ // and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
+ // Thus, instead of
+ //   iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
+ //       + vcpu->kvm->memslots[0]->memslots[0].npages;
+ // we derive iterat_max from the last node in the gfn tree.
node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
iterat_max = first_memslot->base_gfn + first_memslot->npages;
@@ -242,12 +241,13 @@ sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
idx = srcu_read_lock(&vcpu->kvm->srcu);
slot = kvm_vcpu_gfn_to_memslot(vcpu, iterator);
- //Vincent: I think see here https://patchwork.kernel.org/project/kvm/patch/20210924163152.289027-22-pbonzini@redhat.com/
- if ( slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, iterator, mode)) {
- write_lock(&vcpu->kvm->mmu_lock);
- kvm_slot_page_track_remove_page(vcpu->kvm, slot, iterator, mode);
- write_unlock(&vcpu->kvm->mmu_lock);
- count++;
+ // Vincent: I think the relevant change is here:
+ // https://patchwork.kernel.org/project/kvm/patch/20210924163152.289027-22-pbonzini@redhat.com/
+ if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, iterator, mode)) {
+ write_lock(&vcpu->kvm->mmu_lock);
+ kvm_slot_page_track_remove_page(vcpu->kvm, slot, iterator, mode);
+ write_unlock(&vcpu->kvm->mmu_lock);
+ count++;
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
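Taken together, sevstep_start_tracking() walks every gfn up to that bound and adds the active ones to the track pool, while sevstep_stop_tracking() removes them again. A hypothetical usage sketch (variable names illustrative):

	/* Sketch: track all guest pages for access events, run the
	 * observed workload, then remove the tracking again. */
	long tracked = sevstep_start_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);
	pr_warn("sevstep: tracking %ld pages\n", tracked);
	/* ... guest runs; page-track callbacks report accesses ... */
	sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);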