commit d4ac8d64359fcaf25a65196c83ba0a091f645a3b
parent 49c88d32d25d4eb39ad6452cfba2ca93d60e1b81
Author: Louis Burda <quent.burda@gmail.com>
Date: Fri, 4 Nov 2022 01:16:50 +0100
Fixed page tracking and got the single-step probe somewhat working
Diffstat:
9 files changed, 241 insertions(+), 54 deletions(-)
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -267,9 +267,6 @@ cachepc_kvm_system_setup(void)
val |= 1 << 13;
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
-
- /* enable local apic */
-
}
int
@@ -568,7 +565,7 @@ cachepc_kvm_uspt_reset_ioctl(void __user *arg_user)
int
cachepc_kvm_poll_event_ioctl(void __user *arg_user)
{
- if (!sevstep_uspt_is_initialiized())
+ if (!sevstep_uspt_is_initialized())
return -EINVAL;
return sevstep_uspt_handle_poll_event(arg_user);
@@ -581,7 +578,7 @@ cachepc_kvm_uscpt_ack_event_ioctl(void __user *arg_user)
if (!arg_user) return -EINVAL;
- if (!sevstep_uspt_is_initialiized())
+ if (!sevstep_uspt_is_initialized())
return -EINVAL;
if (copy_from_user(&eventid, arg_user, sizeof(eventid)))
diff --git a/cachepc/mmu.c b/cachepc/mmu.c
@@ -12,7 +12,8 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
KVM_PAGE_TRACK_EXEC
};
bool was_tracked;
- int err, i;
+ int err;
+ int i;
pr_warn("CachePCTest: Page fault %llu\n", fault->gfn);
@@ -23,6 +24,8 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
pr_warn("CachePCTest: Page attrs %i %i %i\n",
fault->present, fault->write, fault->user);
sevstep_untrack_single(vcpu, fault->gfn, modes[i]);
+ if (!cachepc_track_single_step)
+ sevstep_track_single(vcpu, fault->gfn, modes[i]);
was_tracked = true;
}
}
@@ -47,6 +50,8 @@ sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
u64 spte;
bool flush;
+ pr_warn("Sevstep: spte_protect\n");
+
spte = *sptep;
if (!is_writable_pte(spte) && !(pt_protect && is_mmu_writable_spte(spte)))
return false;
@@ -54,7 +59,7 @@ sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
rmap_printk("spte %p %llx\n", sptep, *sptep);
if (pt_protect)
- spte &= ~EPT_SPTE_MMU_WRITABLE;
+ spte &= ~shadow_mmu_writable_mask;
flush = false;
if (mode == KVM_PAGE_TRACK_WRITE) {
@@ -80,6 +85,8 @@ sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
}
flush |= mmu_spte_update(sptep, spte);
+ pr_warn("Sevstep: spte_protect flush:%i\n", flush);
+
return flush;
}
EXPORT_SYMBOL(sevstep_spte_protect);
@@ -87,10 +94,11 @@ EXPORT_SYMBOL(sevstep_spte_protect);
bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head,
bool pt_protect, enum kvm_page_track_mode mode)
{
- u64 *sptep;
struct rmap_iterator iter;
- bool flush = false;
+ bool flush;
+ u64 *sptep;
+ flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep) {
flush |= sevstep_spte_protect(sptep, pt_protect, mode);
}
@@ -109,19 +117,21 @@ sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
protected = false;
+ pr_warn("Sevstep: mmu_slot_gfn_protect gfn:%llu\n", gfn);
+
if (kvm_memslots_have_rmaps(kvm)) {
for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
rmap_head = gfn_to_rmap(gfn, i, slot);
protected |= sevstep_rmap_protect(rmap_head, true, mode);
}
+ } else if (is_tdp_mmu_enabled(kvm)) {
+ protected |= sevstep_tdp_protect_gfn(kvm,
+ slot, gfn, min_level, mode);
+ } else {
+ pr_warn("CachePC: Tracking unsupported!\n");
}
- if (is_tdp_mmu_enabled(kvm)) {
- protected |= kvm_tdp_mmu_write_protect_gfn(kvm,
- slot, gfn, min_level);
- }
-
- return protected;
+ return true;
}
EXPORT_SYMBOL(sevstep_kvm_mmu_slot_gfn_protect);
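The rewritten dispatch above picks the rmap path for shadow paging and sevstep_tdp_protect_gfn for the TDP MMU. A minimal caller sketch, assuming the memslot is already resolved and that mmu_lock is the rwlock the lockdep assert in tdp_mmu.c implies:

	/* Sketch: protect one gfn for ACCESS tracking and flush if required. */
	static void track_gfn_sketch(struct kvm *kvm, struct kvm_memory_slot *slot,
	                             gfn_t gfn)
	{
		write_lock(&kvm->mmu_lock);
		if (sevstep_kvm_mmu_slot_gfn_protect(kvm, slot, gfn,
				PG_LEVEL_4K, KVM_PAGE_TRACK_ACCESS))
			kvm_flush_remote_tlbs(kvm); /* stale translations would skip the fault */
		write_unlock(&kvm->mmu_lock);
	}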
diff --git a/cachepc/sevstep.h b/cachepc/sevstep.h
@@ -20,6 +20,9 @@ bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head,
bool sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
uint64_t gfn, int min_level, enum kvm_page_track_mode mode);
+bool sevstep_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, int min_level, int mode);
+
bool sevstep_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode);
bool sevstep_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
diff --git a/cachepc/uspt.c b/cachepc/uspt.c
@@ -25,7 +25,7 @@ sevstep_uspt_clear(void)
}
bool
-sevstep_uspt_is_initialiized()
+sevstep_uspt_is_initialized()
{
return uspt_init;
}
@@ -37,7 +37,7 @@ sevstep_uspt_send_and_block(uint64_t fault_gfn, uint32_t error_code)
ktime_t deadline;
read_lock(&event_lock);
- if (!sevstep_uspt_is_initialiized()) {
+ if (!sevstep_uspt_is_initialized()) {
pr_warn("Sevstep: uspt_send_and_block: ctx not initialized!\n");
read_unlock(&event_lock);
return 1;
@@ -126,7 +126,7 @@ sevstep_uspt_handle_ack_event_ioctl(uint64_t eventid)
last_acked_eventid = last_sent_eventid;
} else {
err = 1;
- pr_warn("Sevstep: ack'd event does not match sent: %llu %llu\n",
+ pr_warn("Sevstep: Ackd event does not match sent: %llu %llu\n",
last_sent_eventid, eventid);
}
write_unlock(&event_lock);
diff --git a/cachepc/uspt.h b/cachepc/uspt.h
@@ -15,7 +15,7 @@ extern bool have_event;
extern bool uspt_init;
-bool sevstep_uspt_is_initialiized(void);
+bool sevstep_uspt_is_initialized(void);
void sevstep_uspt_clear(void);
int sevstep_uspt_send_and_block(uint64_t fault_gfn, uint32_t error_code);
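The uspt interface pairs a blocking kernel-side send with a userspace ack. A contract sketch built only from the declarations above (the early-out return value follows the send path in uspt.c):

	/* Sketch: report a fault and park the vCPU thread until userspace
	 * acks the event via KVM_CPC_ACK_EVENT. */
	static int report_fault_sketch(uint64_t gfn, uint32_t err_code)
	{
		if (!sevstep_uspt_is_initialized())
			return 1; /* no listener registered; do not block */
		return sevstep_uspt_send_and_block(gfn, err_code);
	}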
diff --git a/notes b/notes
@@ -0,0 +1,26 @@
+Observations:
+- sometimes get extra faults due to a race condition.. think there is
+ a race between track_page and something untracking the pages again
+- on
+
+Questions:
+- test/sevstep: why 0 then 15, aren't both accesses in the first page?
+ => first code page and stack access
+- test/sevstep: why does it seem to work with event race but not without?
+ => it doesn't (anymore)
+
+Tried:
+- invalidating TLBs (no effect, they were probably already invalidated)
+
+Next steps:
+- enable single-stepping with multiple page faults (only one ends up in
+ last_fault_gfn and others stay untracked)
+- try adjusting timer for single stepping
+- try to set up the non-baseline step without the APIC, instead remapping the
+ page such that we see the relevant page faults and the gfn increment
+- need to not retrack, allow the page to get remapped
+ (return false from sevstep_uspt_track.. caller) and enable single_step,
+ THEN retrack the page when the interrupt hits
+- test/sevstep: implement counter.. read program memory to see
+ how many instructions were executed before the APIC timer fired
+- add a warning for high values in the baseline
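For the "implement counter" item, one way the guest side could look; the counter address and loop shape are assumptions, not part of this commit:

	#include <stdint.h>

	/* Sketch: guest loop bumping a counter in a fixed page so the host can
	 * read back how far execution got before the APIC oneshot fired. */
	#define COUNTER_GUEST_ADDR 0x1000UL /* assumed; must be a readable page */

	void guest_count_loop(void)
	{
		volatile uint64_t *counter = (volatile uint64_t *)COUNTER_GUEST_ADDR;
		while (1)
			(*counter)++; /* few retired instructions per iteration */
	}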
diff --git a/patch.diff b/patch.diff
@@ -59,7 +59,7 @@ index 000000000000..9119e44af1f0
+/home/louis/kvm-prime-count/cachepc
\ No newline at end of file
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
-index d871b8dee7b3..3b7720aebbc6 100644
+index d871b8dee7b3..bfeab994420e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1152,6 +1152,8 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
@@ -136,18 +136,19 @@ index d871b8dee7b3..3b7720aebbc6 100644
}
static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
-@@ -3901,6 +3870,10 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+@@ -3901,18 +3870,25 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{
+- if (unlikely(fault->rsvd))
+- return false;
+ int active;
-+
+
+- if (!fault->present || !fault->write)
+- return false;
+ sevstep_uspt_page_fault_handle(vcpu, fault);
-+
- if (unlikely(fault->rsvd))
- return false;
-@@ -3911,8 +3884,11 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
+ /*
* guest is writing the page which is write tracked which can
* not be fixed by page fault handler.
*/
@@ -158,11 +159,17 @@ index d871b8dee7b3..3b7720aebbc6 100644
+ active |= kvm_slot_page_track_is_active(vcpu->kvm,
+ fault->slot, fault->gfn, KVM_PAGE_TRACK_ACCESS);
+ if (active) return true;
++
++ if (unlikely(fault->rsvd))
++ return false;
++
++ if (!fault->present || !fault->write)
++ return false;
return false;
}
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
-index 2e09d1b6249f..9b40e71564bf 100644
+index 2e09d1b6249f..b139ea33b0e1 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -19,6 +19,8 @@
@@ -174,7 +181,24 @@ index 2e09d1b6249f..9b40e71564bf 100644
bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
{
return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) ||
-@@ -131,9 +133,10 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
+@@ -115,7 +117,6 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ enum kvm_page_track_mode mode)
+ {
+-
+ if (WARN_ON(!page_track_mode_is_valid(mode)))
+ return;
+
+@@ -123,6 +124,8 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
+ !kvm_page_track_write_tracking_enabled(kvm)))
+ return;
+
++ pr_warn("CachePCTest: Tracking page: %llu\n", gfn);
++
+ update_gfn_track(slot, gfn, mode, 1);
+
+ /*
+@@ -131,9 +134,11 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
*/
kvm_mmu_gfn_disallow_lpage(slot, gfn);
@@ -183,26 +207,147 @@ index 2e09d1b6249f..9b40e71564bf 100644
- kvm_flush_remote_tlbs(kvm);
+ if (sevstep_kvm_mmu_slot_gfn_protect(kvm,
+ slot, gfn, PG_LEVEL_4K, mode)) {
++ pr_warn("CachePCTest: Flushing kvm TLBs\n");
+ kvm_flush_remote_tlbs(kvm);
+ }
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 7b9265d67131..749bbb2930f3 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -1810,13 +1810,8 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ zap_collapsible_spte_range(kvm, root, slot);
+ }
+
+-/*
+- * Removes write access on the last level SPTE mapping this GFN and unsets the
+- * MMU-writable bit to ensure future writes continue to be intercepted.
+- * Returns true if an SPTE was set and a TLB flush is needed.
+- */
+-static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
+- gfn_t gfn, int min_level)
++static bool sevstep_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
++ gfn_t gfn, int min_level, int mode)
+ {
+ struct tdp_iter iter;
+ u64 new_spte;
+@@ -1831,8 +1826,14 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
+ !is_last_spte(iter.old_spte, iter.level))
+ continue;
+
+- new_spte = iter.old_spte &
+- ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
++ pr_warn("Sevstep: tdp_protect_gfn\n");
++ new_spte = iter.old_spte & ~shadow_mmu_writable_mask;
++ new_spte &= ~PT_WRITABLE_MASK;
++ if (mode == KVM_PAGE_TRACK_ACCESS) {
++ new_spte &= ~PT_PRESENT_MASK;
++ new_spte &= ~PT_USER_MASK;
++ new_spte |= (0x1ULL << PT64_NX_SHIFT);
++ }
+
+ if (new_spte == iter.old_spte)
+ break;
+@@ -1846,6 +1847,58 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
+ return spte_set;
+ }
+
++bool sevstep_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
++ gfn_t gfn, int min_level, int mode)
++{
++ struct kvm_mmu_page *root;
++ bool spte_set = false;
++
++ pr_warn("Sevstep: tdp_protect_gfn\n");
++
++ lockdep_assert_held_write(&kvm->mmu_lock);
++ for_each_tdp_mmu_root(kvm, root, slot->as_id)
++ spte_set |= sevstep_protect_gfn(kvm, root, gfn, min_level, mode);
++
++ return spte_set;
++}
++EXPORT_SYMBOL(sevstep_tdp_protect_gfn);
++
++/*
++ * Removes write access on the last level SPTE mapping this GFN and unsets the
++ * MMU-writable bit to ensure future writes continue to be intercepted.
++ * Returns true if an SPTE was set and a TLB flush is needed.
++ */
++// static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
++// gfn_t gfn, int min_level)
++// {
++// struct tdp_iter iter;
++// u64 new_spte;
++// bool spte_set = false;
++//
++// BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
++//
++// rcu_read_lock();
++//
++// for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
++// if (!is_shadow_present_pte(iter.old_spte) ||
++// !is_last_spte(iter.old_spte, iter.level))
++// continue;
++//
++// new_spte = iter.old_spte & ~shadow_mmu_writable_mask;
++// new_spte &= ~PT_WRITABLE_MASK;
++//
++// if (new_spte == iter.old_spte)
++// break;
++//
++// tdp_mmu_set_spte(kvm, &iter, new_spte);
++// spte_set = true;
++// }
++//
++// rcu_read_unlock();
++//
++// return spte_set;
++// }
++
+ /*
+ * Removes write access on the last level SPTE mapping this GFN and unsets the
+ * MMU-writable bit to ensure future writes continue to be intercepted.
+@@ -1855,14 +1908,16 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ int min_level)
+ {
+- struct kvm_mmu_page *root;
+- bool spte_set = false;
++ return sevstep_tdp_protect_gfn(kvm, slot, gfn, min_level,
++ KVM_PAGE_TRACK_WRITE);
++ // struct kvm_mmu_page *root;
++ // bool spte_set = false;
+
+- lockdep_assert_held_write(&kvm->mmu_lock);
+- for_each_tdp_mmu_root(kvm, root, slot->as_id)
+- spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
++ // lockdep_assert_held_write(&kvm->mmu_lock);
++ // for_each_tdp_mmu_root(kvm, root, slot->as_id)
++ // spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
+
+- return spte_set;
++ // return spte_set;
+ }
+
+ /*
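The SPTE edit above is the crux of access tracking: write tracking only drops the writable bits, while access tracking additionally clears present/user and sets NX so reads and instruction fetches fault as well. As a standalone sketch of the same mask logic:

	/* Sketch: compute the protected SPTE for a given page-track mode,
	 * mirroring the tdp_mmu.c hunk above. */
	static u64 protect_spte(u64 spte, int mode)
	{
		spte &= ~shadow_mmu_writable_mask;       /* keep writes intercepted */
		spte &= ~PT_WRITABLE_MASK;               /* fault on writes */
		if (mode == KVM_PAGE_TRACK_ACCESS) {
			spte &= ~PT_PRESENT_MASK;        /* fault on reads */
			spte &= ~PT_USER_MASK;           /* fault on user access */
			spte |= 0x1ULL << PT64_NX_SHIFT; /* fault on fetches */
		}
		return spte;
	}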
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index cf0bf456d520..1e1667dc8f96 100644
+index cf0bf456d520..c179012ab268 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
-@@ -2,6 +2,9 @@
+@@ -2,6 +2,10 @@
#include <linux/kvm_host.h>
+#include "cachepc/cachepc.h"
+#include "cachepc/uspt.h"
++#include "cachepc/sevstep.h"
+
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
-@@ -2083,6 +2086,17 @@ static int smi_interception(struct kvm_vcpu *vcpu)
+@@ -2083,6 +2087,21 @@ static int smi_interception(struct kvm_vcpu *vcpu)
static int intr_interception(struct kvm_vcpu *vcpu)
{
@@ -215,12 +360,16 @@ index cf0bf456d520..1e1667dc8f96 100644
+ err = sevstep_uspt_send_and_block(cachepc_last_fault_gfn,
+ cachepc_last_fault_err);
+ if (err) pr_warn("Sevstep: uspt_send_and_block failed (%d)\n", err);
++
++ if (!sevstep_track_single(vcpu, cachepc_last_fault_gfn,
++ KVM_PAGE_TRACK_ACCESS))
++ pr_warn("Sevstep: Failed to retrack page afer single step\n");
+ }
+
++vcpu->stat.irq_exits;
return 1;
}
-@@ -3788,14 +3802,42 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3788,14 +3807,42 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long vmcb_pa = svm->current_vmcb->pa;
@@ -240,7 +389,7 @@ index cf0bf456d520..1e1667dc8f96 100644
+ cachepc_reset_pmc(CPC_RETINST_PMC);
+
+ if (cachepc_single_step)
-+ cachepc_apic_oneshot(10);
++ cachepc_apic_oneshot(150);
__svm_sev_es_vcpu_run(vmcb_pa);
+ cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC);
+
@@ -263,7 +412,7 @@ index cf0bf456d520..1e1667dc8f96 100644
/*
* Use a single vmcb (vmcb01 because it's always valid) for
* context switching guest state via VMLOAD/VMSAVE, that way
-@@ -3803,10 +3845,20 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3803,10 +3850,20 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
* vmcb02 when switching vmcbs for nested virtualization.
*/
vmload(svm->vmcb01.pa);
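Taken together, the svm.c hunks implement the single-step probe: arm a one-shot APIC timer right before VMRUN so only a few guest instructions retire, then report and retrack from the interrupt exit. A condensed sketch of the round trip, not the literal patched code:

	/* vcpu_enter_exit side */
	if (cachepc_single_step)
		cachepc_apic_oneshot(150);  /* expires mid-guest, forcing an exit */
	__svm_sev_es_vcpu_run(vmcb_pa);
	cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC); /* guest-retired count */

	/* intr_interception side, once the timer interrupt exits the guest */
	sevstep_uspt_send_and_block(cachepc_last_fault_gfn, cachepc_last_fault_err);
	sevstep_track_single(vcpu, cachepc_last_fault_gfn, KVM_PAGE_TRACK_ACCESS);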
diff --git a/test/eviction.c b/test/eviction.c
@@ -12,7 +12,7 @@
int
main(int argc, const char **argv)
{
- uint16_t counts[64];
+ cpc_msrmt_t counts[L1_SETS];
uint32_t arg;
int i, fd, ret;
diff --git a/test/sevstep.c b/test/sevstep.c
@@ -30,12 +30,11 @@
#define ARRLEN(x) (sizeof(x) / sizeof((x)[0]))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
-#define SAMPLE_COUNT 100
+#define SAMPLE_COUNT 20
#define TARGET_CORE 2
#define SECONDARY_CORE 3
-#define TARGET_CACHE_LINESIZE 64
#define TARGET_SET1 14
#define TARGET_SET2 15
@@ -120,9 +119,9 @@ vm_guest_with(void)
{
while (1) {
asm volatile("mov (%[v]), %%bl"
- : : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET1));
+ : : [v] "r" (L1_LINESIZE * (L1_SETS + TARGET_SET1)));
asm volatile("mov (%[v]), %%bl"
- : : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET2));
+ : : [v] "r" (L1_LINESIZE * (L1_SETS * 2 + TARGET_SET2)));
}
}
@@ -441,7 +440,6 @@ svm_dbg_rip(struct kvm *kvm)
int
monitor(struct kvm *kvm)
{
- struct cpc_track_config cfg;
struct cpc_track_event event;
cpc_msrmt_t counts[64];
int ret;
@@ -458,12 +456,6 @@ monitor(struct kvm *kvm)
if (ret == -1) err(1, "ioctl READ_COUNTS");
print_counts(counts);
- /* retrack page */
- cfg.gfn = event.fault_gfn;
- cfg.mode = KVM_PAGE_TRACK_ACCESS;
- ret = ioctl(kvm_dev, KVM_CPC_TRACK_PAGE, &cfg);
- if (ret == -1) err(1, "ioctl TRACK_PAGE");
-
ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
if (ret == -1) err(1, "ioctl ACK_EVENT");
} else if (errno != EAGAIN) {
@@ -481,6 +473,7 @@ main(int argc, const char **argv)
uint64_t track_mode;
pid_t ppid, pid;
uint32_t arg;
+ struct cpc_track_event event;
cpc_msrmt_t baseline[64];
int ret;
@@ -503,7 +496,7 @@ main(int argc, const char **argv)
ret = ioctl(kvm_dev, KVM_CPC_SETUP_PMC, NULL);
if (ret < 0) err(1, "ioctl SETUP_PMC");
- sev_kvm_init(&kvm_with_access, 64 * 64 * 8 * 2,
+ sev_kvm_init(&kvm_with_access, L1_SIZE * 2,
__start_guest_with, __stop_guest_with);
/* Page tracking init needs to happen after kvm
@@ -530,37 +523,46 @@ main(int argc, const char **argv)
pin_process(0, SECONDARY_CORE, true);
printf("PINNED\n");
- printf("Doing baseline measurement..\n");
-
arg = true;
ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
faultcnt = 0;
- while (faultcnt < 20) {
- if (monitor(&kvm_with_access)) break;
+ while (faultcnt < SAMPLE_COUNT) {
+ if (monitor(&kvm_with_access)) break;
}
+ do {
+ ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
+ if (ret == -1 && errno != EAGAIN)
+ err(1, "ioctl POLL_EVENT");
+ } while (ret == -1 && errno == EAGAIN);
+
arg = false;
ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
ret = ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline);
if (ret == -1) err(1, "ioctl READ_BASELINE");
+
printf("\n>>> BASELINE:\n");
print_counts(baseline);
+ printf("\n");
arg = true;
ret = ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &arg);
if (ret == -1) err(1, "ioctl SUB_BASELINE");
+ ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
+ if (ret == -1) err(1, "ioctl ACK_EVENT");
+
arg = true;
ret = ioctl(kvm_dev, KVM_CPC_TRACK_SINGLE_STEP, &arg);
- if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
-
+ if (ret == -1) err(1, "ioctl TRACK_SINGLE_STEP");
+
faultcnt = 0;
while (faultcnt < SAMPLE_COUNT) {
- if (monitor(&kvm_with_access)) break;
+ if (monitor(&kvm_with_access)) break;
}
kill(ppid, SIGTERM);
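With the explicit retrack removed from monitor(), the userspace protocol reduces to a poll / read / ack loop. A minimal sketch of one iteration against the ioctls used above, error handling trimmed:

	/* Sketch: one monitor() iteration over the uspt event interface. */
	struct cpc_track_event event;
	cpc_msrmt_t counts[64];

	if (ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event) == 0) {
		if (ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts) == -1)
			err(1, "ioctl READ_COUNTS");
		print_counts(counts);
		if (ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id) == -1)
			err(1, "ioctl ACK_EVENT");
	} else if (errno != EAGAIN) {
		err(1, "ioctl POLL_EVENT");
	}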