new mode    [old mode]    file               | lines changed
-rwxr-xr-x  [-rw-r--r--]  cachepc/asm.h      |  0
-rwxr-xr-x  [-rw-r--r--]  cachepc/cachepc.c  |  0
-rwxr-xr-x  [-rw-r--r--]  cachepc/cachepc.h  |  0
-rwxr-xr-x  [-rw-r--r--]  cachepc/kvm.c      | 28
-rwxr-xr-x  [-rw-r--r--]  cachepc/kvm.h      |  0
-rwxr-xr-x  [-rw-r--r--]  cachepc/mmu.c      |  0
-rwxr-xr-x  [-rw-r--r--]  cachepc/sevstep.c  | 31
-rwxr-xr-x  [-rw-r--r--]  cachepc/sevstep.h  |  0
-rwxr-xr-x  [-rw-r--r--]  cachepc/uapi.h     |  0
-rwxr-xr-x  [-rw-r--r--]  cachepc/uspt.c     |  0
-rwxr-xr-x  [-rw-r--r--]  cachepc/uspt.h     |  0
-rwxr-xr-x  [-rw-r--r--]  test/access.c      |  0
-rwxr-xr-x  [-rw-r--r--]  test/eviction.c    |  0
-rwxr-xr-x  [-rw-r--r--]  test/kvm.c         |  0
-rwxr-xr-x  [-rw-r--r--]  test/sev-es.c      |  0
-rwxr-xr-x  [-rw-r--r--]  test/sev.c         |  0
-rwxr-xr-x  [-rw-r--r--]  test/sevstep.c     |  0
17 files changed, 20 insertions, 39 deletions
diff --git a/cachepc/asm.h b/cachepc/asm.h
index 9e9385a..9e9385a 100644..100755
--- a/cachepc/asm.h
+++ b/cachepc/asm.h
diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
index 3a64609..3a64609 100644..100755
--- a/cachepc/cachepc.c
+++ b/cachepc/cachepc.c
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
index feda05e..feda05e 100644..100755
--- a/cachepc/cachepc.h
+++ b/cachepc/cachepc.h
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
index f478806..e02e0f0 100644..100755
--- a/cachepc/kvm.c
+++ b/cachepc/kvm.c
@@ -375,8 +375,6 @@ cachepc_kvm_stream_hwpf_test(void *p)
 
 	arg = p;
 
-	/* TODO: improve detection */
-
 	/* l2 data cache hit & miss */
 	cachepc_init_pmc(0, 0x64, 0xD8, PMC_HOST, PMC_KERNEL);
 
@@ -763,27 +761,23 @@ cachepc_kvm_batch_track_stop_ioctl(void __user *arg_user)
 {
 	batch_track_stop_and_get_t param;
 	page_fault_event_t* buf;
-	uint64_t buf_bytes;
+	size_t buflen;
 	void __user* inner_user_out_buf;
 	int ret;
 
 	if (!arg_user) return -EINVAL;
 
 	if (copy_from_user(&param, arg_user, sizeof(param))) {
-		pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
+		pr_warn("CachePC: BATCH_TRACK_STOP: "
 			"error copying arguments, exiting\n");
 		return -EFAULT;
 	}
 
 	inner_user_out_buf = param.out_buf;
-	buf_bytes = sizeof(page_fault_event_t)*param.len;
-	pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
-		"allocating %llu bytes for tmp buf\n", buf_bytes);
-
-	buf = vmalloc(buf_bytes);
+	buflen = sizeof(page_fault_event_t) * param.len;
+	buf = vmalloc(buflen);
 	if (buf == NULL) {
-		pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
-			"failed to alloc tmp buf\n");
+		pr_warn("CachePC: BATCH_TRACK_STOP: OOM\n");
 		return -EFAULT;
 	}
 	param.out_buf = buf;
@@ -791,21 +785,21 @@ cachepc_kvm_batch_track_stop_ioctl(void __user *arg_user)
 	ret = sevstep_uspt_batch_tracking_stop(buf, param.len,
 		&param.error_during_batch);
 	if (ret != 0) {
-		pr_warn("KVM_CPC_BATCH_TRACK_STOP: failed\n");
+		pr_warn("CachePC: BATCH_TRACK_STOP: Error\n");
 		vfree(buf);
 		return -EFAULT;
 	}
 
 	if (copy_to_user(arg_user, &param, sizeof(param))) {
-		pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
-			"error copying result to user, exiting\n");
+		pr_warn("CachePC: BATCH_TRACK_STOP: "
+			"Error copying result to user\n");
 		vfree(buf);
 		return -EFAULT;
 	}
 
-	if (copy_to_user(inner_user_out_buf, buf,buf_bytes)) {
-		pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
-			"error copying result to user, exiting\n");
+	if (copy_to_user(inner_user_out_buf, buf, buflen)) {
+		pr_warn("CachePC: BATCH_TRACK_STOP: "
+			"Error copying result to user\n");
 		vfree(buf);
 		return -EFAULT;
 	}
diff --git a/cachepc/kvm.h b/cachepc/kvm.h
index b843c1d..b843c1d 100644..100755
--- a/cachepc/kvm.h
+++ b/cachepc/kvm.h
diff --git a/cachepc/mmu.c b/cachepc/mmu.c
index c6eac85..c6eac85 100644..100755
--- a/cachepc/mmu.c
+++ b/cachepc/mmu.c
diff --git a/cachepc/sevstep.c b/cachepc/sevstep.c
index f9b0ebe..619d5c0 100644..100755
--- a/cachepc/sevstep.c
+++ b/cachepc/sevstep.c
@@ -139,9 +139,6 @@ sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	if (slot != NULL) {
 		write_lock(&vcpu->kvm->mmu_lock);
-		// Vincent: The kvm mmu function now requires min_level
-		// We want all pages to protected so we do PG_LEVEL_4K
-		// https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
 		sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm, slot,
 			gfn, PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_ACCESSED);
 		write_unlock(&vcpu->kvm->mmu_lock);
@@ -167,9 +164,6 @@ sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	if (slot != NULL) {
 		write_lock(&vcpu->kvm->mmu_lock);
-		// Vincent: The kvm mmu function now requires min_level
-		// We want all pages to protected so we do PG_LEVEL_4K
-		// https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
 		sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm, slot,
 			gfn, PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_EXEC);
 		write_unlock(&vcpu->kvm->mmu_lock);
@@ -187,6 +181,7 @@ sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_memory_slot *first_memslot;
+	//struct kvm_memory_slot *second_memslot;
 	struct rb_node *node;
 	u64 iterator, iterat_max;
 	long count = 0;
@@ -194,15 +189,15 @@ sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
 
 	pr_warn("Sevstep: Start tracking %i\n", mode);
 
-	// Vincent: Memslots interface changed into a rb tree, see
-	// here: https://lwn.net/Articles/856392/
-	// and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
-	// Thus we use instead of
-	// iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
-	//	+ vcpu->kvm->memslots[0]->memslots[0].npages;
 	node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
 	first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
-	iterat_max = first_memslot->base_gfn + first_memslot->npages;
+	//second_memslot = container_of(node, struct kvm_memory_slot, gfn_node[1]);
+	pr_warn("Sevstep: Total memslot pages %ld", vcpu->kvm->nr_memslot_pages);
+	//pr_warn("Sevstep: First memslot pages %ld base gfn 0x%llx", first_memslot->npages, first_memslot->base_gfn);
+	//pr_warn("Sevstep: Second memslot pages %ld base gfn 0x%llx", second_memslot->npages, second_memslot->base_gfn);
+	iterat_max = first_memslot->base_gfn + vcpu->kvm->nr_memslot_pages;//first_memslot->npages;
+	//VU: We retrieve the total nr of memslot pages directly from the kvm struct.
+	//VU: I think this should work, but only if my understanding of the memslots is correct
 	pr_warn("Sevstep: Page count: %llu\n", iterat_max);
 	for (iterator = 0; iterator < iterat_max; iterator++) {
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -233,20 +228,12 @@ sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
 
 	pr_warn("Sevstep: Stop tracking %i\n", mode);
 
-	// Vincent: Memslots interface changed into a rb tree, see
-	// here: https://lwn.net/Articles/856392/
-	// and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
-	// Thus we use instead of
-	// iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
-	//	+ vcpu->kvm->memslots[0]->memslots[0].npages;
 	node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
 	first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
-	iterat_max = first_memslot->base_gfn + first_memslot->npages;
+	iterat_max = first_memslot->base_gfn + vcpu->kvm->nr_memslot_pages;//first_memslot->npages;
 	for (iterator = 0; iterator < iterat_max; iterator++) {
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		slot = kvm_vcpu_gfn_to_memslot(vcpu, iterator);
-		// Vincent: I think see here
-		// https://patchwork.kernel.org/project/kvm/patch/20210924163152.289027-22-pbonzini@redhat.com/
 		if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, iterator, mode)) {
 			write_lock(&vcpu->kvm->mmu_lock);
 			kvm_slot_page_track_remove_page(vcpu->kvm, slot, iterator, mode);
diff --git a/cachepc/sevstep.h b/cachepc/sevstep.h
index 839351f..839351f 100644..100755
--- a/cachepc/sevstep.h
+++ b/cachepc/sevstep.h
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
index abece89..abece89 100644..100755
--- a/cachepc/uapi.h
+++ b/cachepc/uapi.h
diff --git a/cachepc/uspt.c b/cachepc/uspt.c
index 11a6fb9..11a6fb9 100644..100755
--- a/cachepc/uspt.c
+++ b/cachepc/uspt.c
diff --git a/cachepc/uspt.h b/cachepc/uspt.h
index 9728983..9728983 100644..100755
--- a/cachepc/uspt.h
+++ b/cachepc/uspt.h
diff --git a/test/access.c b/test/access.c
index dc704fb..dc704fb 100644..100755
--- a/test/access.c
+++ b/test/access.c
diff --git a/test/eviction.c b/test/eviction.c
index 9ad6e56..9ad6e56 100644..100755
--- a/test/eviction.c
+++ b/test/eviction.c
diff --git a/test/kvm.c b/test/kvm.c
index 9632fec..9632fec 100644..100755
--- a/test/kvm.c
+++ b/test/kvm.c
diff --git a/test/sev-es.c b/test/sev-es.c
index bf534d9..bf534d9 100644..100755
--- a/test/sev-es.c
+++ b/test/sev-es.c
diff --git a/test/sev.c b/test/sev.c
index 7d99fd1..7d99fd1 100644..100755
--- a/test/sev.c
+++ b/test/sev.c
diff --git a/test/sevstep.c b/test/sevstep.c
index b2c3533..b2c3533 100644..100755
--- a/test/sevstep.c
+++ b/test/sevstep.c
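For readers following the kvm.c hunks: after this patch, cachepc_kvm_batch_track_stop_ioctl() is a compact example of the standard kernel pattern for an ioctl that returns a variable-length result through a user-supplied buffer. The sketch below condenses that flow. The type names, field names, and the call to sevstep_uspt_batch_tracking_stop() are taken from the diff; the function name, the uapi.h include, and the surrounding scaffolding are illustrative assumptions, not the module's exact code.

    #include <linux/uaccess.h>
    #include <linux/vmalloc.h>
    #include "uapi.h" /* assumed to declare batch_track_stop_and_get_t etc. */

    static int
    batch_track_stop_sketch(void __user *arg_user)
    {
        batch_track_stop_and_get_t param;
        page_fault_event_t *buf;
        void __user *user_out;
        size_t buflen;

        if (!arg_user) return -EINVAL;

        /* 1. Pull in the argument struct; it carries a user-space result
         *    pointer that must be stashed before it is overwritten. */
        if (copy_from_user(&param, arg_user, sizeof(param)))
            return -EFAULT;
        user_out = param.out_buf;

        /* 2. Stage the events in kernel memory; vmalloc(), because
         *    param.len events may be too large for a contiguous kmalloc(). */
        buflen = sizeof(page_fault_event_t) * param.len;
        buf = vmalloc(buflen);
        if (!buf)
            return -EFAULT; /* the patch keeps -EFAULT; -ENOMEM is conventional */
        param.out_buf = buf;

        if (sevstep_uspt_batch_tracking_stop(buf, param.len,
                &param.error_during_batch)) {
            vfree(buf);
            return -EFAULT;
        }

        /* 3. Copy the updated struct and the staged events back out. */
        if (copy_to_user(arg_user, &param, sizeof(param)) ||
            copy_to_user(user_out, buf, buflen)) {
            vfree(buf);
            return -EFAULT;
        }
        vfree(buf);
        return 0;
    }

One hardening note: buflen is computed from an unchecked, user-controlled param.len, so a hostile caller can request an arbitrarily large (or overflowing) allocation; clamping param.len or using array_size() from linux/overflow.h would be a natural follow-up.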
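The sevstep.c hunks clean up after the kernel 5.16 memslot rework: struct kvm_memslots no longer exposes a flat memslots[] array, so the scan bound is now derived from the gfn-keyed rb-tree plus the VM-wide nr_memslot_pages counter. Two points are worth spelling out. First, rb_last() returns the node with the highest key, so the variable named first_memslot is really the slot with the highest base gfn. Second, that slot's base_gfn plus the total page count over-approximates the true maximum gfn whenever slots do not start at gfn 0; this is safe here because gfns that fall into holes resolve to a NULL memslot and are skipped, at the cost of a longer scan. A minimal sketch of the bound, using the fields the patch itself touches and a hypothetical helper name:

    #include <linux/kvm_host.h>
    #include <linux/rbtree.h>

    /* Upper bound for the gfn scan in sevstep_start/stop_tracking():
     * highest base gfn in the tree plus the VM-wide page count. */
    static u64 sevstep_scan_bound(struct kvm *kvm)
    {
        struct kvm_memory_slot *hi_slot;
        struct rb_node *node;

        node = rb_last(&kvm->memslots[0]->gfn_tree);
        if (!node)
            return 0; /* no memslots registered yet */
        hi_slot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
        return hi_slot->base_gfn + kvm->nr_memslot_pages;
    }

The scan itself then walks every gfn below that bound, taking srcu_read_lock(&kvm->srcu) around each kvm_vcpu_gfn_to_memslot() lookup exactly as the existing loop does, which is why the over-approximation costs time but not correctness.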
