commit 0bdf830ba840ce53cb1362acbf71965cd80e9397
parent d16af2b7f9b0026e2776237dd0ea673d88531e7d
Author: Louis Burda <quent.burda@gmail.com>
Date: Fri, 25 Nov 2022 21:50:43 +0100
Implement VMSA-change-based single stepping and guest vmmcall intercept, among other things
Diffstat:
17 files changed, 372 insertions(+), 143 deletions(-)
diff --git a/Makefile b/Makefile
@@ -4,7 +4,7 @@ PWD := $(shell pwd)
TARGETS = build test/eviction test/access test/kvm test/sev test/sev-es test/sevstep
TARGETS += test/aes-detect_guest test/aes-detect_host
TARGETS += test/access-detect_guest test/access-detect_host
-TARGETS += test/readsvme
+TARGETS += test/readsvme test/debug
CFLAGS = -I . -I test -Wunused-variable -Wunknown-pragmas
@@ -34,7 +34,7 @@ freq:
update:
git -C $(LINUX) diff 0aaa1e599bee256b3b15643bbb95e80ce7aa9be5 -G. > patch.diff
-test/aes-detect_%: test/aes-detect_%.c test/aes-detect.c
+test/aes-detect_%: test/aes-detect_%.c test/aes-detect.c cachepc/uapi.h
clang -o $@ $< $(CFLAGS) -I test/libkcapi/lib -L test/libkcapi/.libs -lkcapi -static
test/%: test/%.c cachepc/uapi.h
diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -37,7 +37,7 @@ cachepc_verify_topology(void)
uint32_t sets;
if (PAGE_SIZE != L1_SETS * L1_LINESIZE)
- pr_warn("Cachepc: System pagesize does not guarentee "
+ CPC_ERR("System pagesize does not guarantee "
"virtual memory access will hit corresponding "
"physical cacheline, PAGE_SIZE != L1_SETS * L1_LINESIZE\n");
@@ -51,14 +51,14 @@ cachepc_verify_topology(void)
sets = size / (linesize * assoc);
if (size != L1_SIZE || assoc != L1_ASSOC
|| linesize != L1_LINESIZE || sets != L1_SETS) {
- pr_warn("CachePC: L1 topology is invalid!\n");
- pr_warn("CachePC: L1_SIZE (expected) %u vs. (real) %u\n",
+ CPC_ERR("L1 topology is invalid!\n");
+ CPC_ERR("L1_SIZE (expected) %u vs. (real) %u\n",
L1_SIZE, size);
- pr_warn("CachePC: L1_ASSOC (expected) %u vs. (real) %u\n",
+ CPC_ERR("L1_ASSOC (expected) %u vs. (real) %u\n",
L1_ASSOC, assoc);
- pr_warn("CachePC: L1_LINESIZE (expected) %u vs. (real) %u\n",
+ CPC_ERR("L1_LINESIZE (expected) %u vs. (real) %u\n",
L1_LINESIZE, linesize);
- pr_warn("CachePC: L1_SETS (expected) %u vs. (real) %u\n",
+ CPC_ERR("L1_SETS (expected) %u vs. (real) %u\n",
L1_SETS, sets);
return true;
}
@@ -97,20 +97,20 @@ cachepc_verify_topology(void)
assoc = size / linesize;
break;
default:
- pr_warn("CachePC: Read invalid L2 associativity: %i\n", assoc);
+ CPC_ERR("Read invalid L2 associativity: %i\n", assoc);
return true;
}
sets = size / (linesize * assoc);
if (size != L2_SIZE || assoc != L2_ASSOC
|| linesize != L2_LINESIZE || sets != L2_SETS) {
- pr_warn("CachePC: L2 topology is invalid!\n");
- pr_warn("CachePC: L2_SIZE (expected) %u vs. (real) %u\n",
+ CPC_ERR("L2 topology is invalid!\n");
+ CPC_ERR("L2_SIZE (expected) %u vs. (real) %u\n",
L2_SIZE, size);
- pr_warn("CachePC: L2_ASSOC (expected) %u vs. (real) %u\n",
+ CPC_ERR("L2_ASSOC (expected) %u vs. (real) %u\n",
L2_ASSOC, assoc);
- pr_warn("CachePC: L2_LINESIZE (expected) %u vs. (real) %u\n",
+ CPC_ERR("L2_LINESIZE (expected) %u vs. (real) %u\n",
L2_LINESIZE, linesize);
- pr_warn("CachePC: L2_SETS (expected) %u vs. (real) %u\n",
+ CPC_ERR("L2_SETS (expected) %u vs. (real) %u\n",
L2_SETS, sets);
return true;
}
@@ -293,7 +293,7 @@ cachepc_print_msrmts(cacheline *head)
curr_cl = head;
do {
if (CL_IS_FIRST(curr_cl->flags)) {
- printk(KERN_WARNING "CachePC: Count for cache set %i: %llu\n",
+ CPC_INFO("Count for cache set %i: %llu\n",
curr_cl->cache_set, curr_cl->count);
}
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
@@ -43,6 +43,12 @@
#define PMC_HOST (1 << 1)
#define PMC_GUEST (1 << 0)
+#define CPC_DBG(...) do { \
+ if (cachepc_debug) pr_info("CachePC: " __VA_ARGS__); } while (0)
+#define CPC_INFO(...) do { pr_info("CachePC: " __VA_ARGS__); } while (0)
+#define CPC_WARN(...) do { pr_warn("CachePC: " __VA_ARGS__); } while (0)
+#define CPC_ERR(...) do { pr_err("CachePC: " __VA_ARGS__); } while (0)
+
typedef struct cacheline cacheline;
typedef struct cache_ctx cache_ctx;
@@ -117,6 +123,8 @@ static inline uint64_t cachepc_read_pmc(uint64_t event);
__attribute__((always_inline))
static inline void cachepc_apic_oneshot(uint32_t interval);
+extern bool cachepc_debug;
+
extern cpc_msrmt_t *cachepc_msrmts;
extern size_t cachepc_msrmts_count;
@@ -129,6 +137,7 @@ extern uint64_t cachepc_retinst;
extern bool cachepc_single_step;
extern uint32_t cachepc_track_mode;
extern uint32_t cachepc_apic_timer;
+extern uint64_t cachepc_prev_rip;
extern uint32_t cachepc_track_state;
extern uint32_t cachepc_track_state_next;
diff --git a/cachepc/event.c b/cachepc/event.c
@@ -52,7 +52,7 @@ cachepc_send_event(struct cpc_event event)
write_unlock(&cachepc_event_lock);
/* wait for ack with timeout */
- deadline = ktime_get_ns() + 2000000000ULL; /* 2s in ns */
+ deadline = ktime_get_ns() + 60000000000ULL; /* 60s in ns */
while (!cachepc_event_is_done(cachepc_event.id)) {
if (ktime_get_ns() > deadline) {
pr_warn("CachePC: Timeout waiting for ack of event %llu\n",
@@ -65,13 +65,13 @@ cachepc_send_event(struct cpc_event event)
}
int
-cachepc_send_cpuid_event(uint8_t type, uint32_t val)
+cachepc_send_guest_event(uint64_t type, uint64_t val)
{
struct cpc_event event;
event.type = CPC_EVENT_CPUID;
- event.cpuid.type = type;
- event.cpuid.val = val;
+ event.guest.type = type;
+ event.guest.val = val;
return cachepc_send_event(event);
}
@@ -89,7 +89,7 @@ cachepc_send_track_event(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
event.track.data_fault_gfn = data_fault_gfn;
event.track.data_fault_err = data_fault_err;
event.track.timestamp_ns = ktime_get_real_ns();
- event.track.retinst = cachepc_retinst - CPC_RETINST_KERNEL;
+ event.track.retinst = cachepc_retinst;
return cachepc_send_event(event);
}
diff --git a/cachepc/event.h b/cachepc/event.h
@@ -17,7 +17,7 @@ extern bool cachepc_events_init;
void cachepc_events_reset(void);
-int cachepc_send_cpuid_event(uint8_t type, uint32_t val);
+int cachepc_send_guest_event(uint64_t type, uint64_t val);
int cachepc_send_track_event(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
uint64_t data_fault_gfn, uint32_t data_fault_err);
bool cachepc_event_is_done(uint64_t id);
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -13,6 +13,9 @@
#include <linux/sev.h>
#include <asm/uaccess.h>
+bool cachepc_debug = true;
+EXPORT_SYMBOL(cachepc_debug);
+
cpc_msrmt_t *cachepc_msrmts = NULL;
size_t cachepc_msrmts_count = 0;
EXPORT_SYMBOL(cachepc_msrmts);
@@ -31,9 +34,11 @@ EXPORT_SYMBOL(cachepc_retinst);
bool cachepc_single_step = false;
uint32_t cachepc_track_mode = false;
uint32_t cachepc_apic_timer = 0;
+uint64_t cachepc_prev_rip = 0;
EXPORT_SYMBOL(cachepc_single_step);
EXPORT_SYMBOL(cachepc_track_mode);
EXPORT_SYMBOL(cachepc_apic_timer);
+EXPORT_SYMBOL(cachepc_prev_rip);
uint32_t cachepc_track_state;
uint32_t cachepc_track_state_next;
@@ -514,6 +519,7 @@ cachepc_kvm_track_page_ioctl(void __user *arg_user)
if (cfg.mode < 0 || cfg.mode >= KVM_PAGE_TRACK_MAX)
return -EINVAL;
+ BUG_ON(xa_empty(&main_vm->vcpu_array));
vcpu = xa_load(&main_vm->vcpu_array, 0);
if (!cachepc_track_single(vcpu, cfg.gfn, cfg.mode)) {
printk("KVM_TRACK_PAGE: cachepc_track_single failed");
@@ -531,6 +537,7 @@ cachepc_kvm_vmsa_read_ioctl(void __user *arg_user)
if (!main_vm || !arg_user) return -EINVAL;
+ BUG_ON(xa_empty(&main_vm->vcpu_array));
vcpu = xa_load(&main_vm->vcpu_array, 0);
svm = to_svm(vcpu);
@@ -564,6 +571,21 @@ cachepc_kvm_svme_read_ioctl(void __user *arg_user)
}
int
+cachepc_kvm_debug_ioctl(void __user *arg_user)
+{
+ uint32_t debug;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(&debug, arg_user, sizeof(uint32_t)))
+ return -EFAULT;
+
+ cachepc_debug = debug;
+
+ return 0;
+}
+
+int
cachepc_kvm_track_all_ioctl(void __user *arg_user)
{
struct kvm_vcpu *vcpu;
@@ -580,6 +602,7 @@ cachepc_kvm_track_all_ioctl(void __user *arg_user)
if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
return -EINVAL;
+ BUG_ON(xa_empty(&main_vm->vcpu_array));
vcpu = xa_load(&main_vm->vcpu_array, 0);
if (!cachepc_track_all(vcpu, mode))
return -EFAULT;
@@ -604,6 +627,7 @@ cachepc_kvm_untrack_all_ioctl(void __user *arg_user)
if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
return -EINVAL;
+ BUG_ON(xa_empty(&main_vm->vcpu_array));
vcpu = xa_load(&main_vm->vcpu_array, 0);
if (!cachepc_untrack_all(vcpu, mode))
return -EFAULT;
@@ -617,6 +641,7 @@ cachepc_kvm_uspt_reset_ioctl(void __user *arg_user)
struct kvm_vcpu *vcpu;
cachepc_events_reset();
+ BUG_ON(xa_empty(&main_vm->vcpu_array));
vcpu = xa_load(&main_vm->vcpu_array, 0);
cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
@@ -683,6 +708,8 @@ cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
return cachepc_kvm_vmsa_read_ioctl(arg_user);
case KVM_CPC_SVME_READ:
return cachepc_kvm_svme_read_ioctl(arg_user);
+ case KVM_CPC_DEBUG:
+ return cachepc_kvm_debug_ioctl(arg_user);
case KVM_CPC_TRACK_PAGE:
return cachepc_kvm_track_page_ioctl(arg_user);
case KVM_CPC_TRACK_ALL:
@@ -737,7 +764,7 @@ cachepc_kvm_init(void)
cachepc_retinst = 0;
cachepc_single_step = false;
- cachepc_track_mode = CPC_TRACK_ACCESS;
+ cachepc_track_mode = CPC_TRACK_NONE;
cachepc_track_state = CPC_TRACK_AWAIT_INST_FAULT;
diff --git a/cachepc/mmu.c b/cachepc/mmu.c
@@ -12,11 +12,11 @@ cachepc_page_fault_handle(struct kvm_vcpu *vcpu,
fault->slot, fault->gfn, KVM_PAGE_TRACK_ACCESS))
return;
- pr_warn("CachePC: Tracked page fault (gfn:%llu err:%u)\n",
+ CPC_DBG("Tracked page fault (gfn:%llu err:%u)\n",
fault->gfn, fault->error_code);
inst_fetch = fault->error_code & PFERR_FETCH_MASK;
- pr_warn("CachePC: Tracked page fault attrs p:%i w:%i x:%i f:%i\n",
+ CPC_DBG("Tracked page fault attrs p:%i w:%i x:%i f:%i\n",
fault->present, inst_fetch, fault->write, fault->exec);
cachepc_untrack_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS);
@@ -24,7 +24,7 @@ cachepc_page_fault_handle(struct kvm_vcpu *vcpu,
if (cachepc_track_mode == CPC_TRACK_DATA_ACCESS) {
if (cachepc_track_state == CPC_TRACK_AWAIT_INST_FAULT) {
/* first fault from instruction fetch */
- pr_warn("CachePC: Got inst fault gfn:%llu err:%u\n",
+ CPC_DBG("Got inst fault gfn:%llu err:%u\n",
fault->gfn, fault->error_code);
cachepc_inst_fault_gfn = fault->gfn;
@@ -33,29 +33,29 @@ cachepc_page_fault_handle(struct kvm_vcpu *vcpu,
cachepc_data_fault_avail = false;
cachepc_single_step = true;
- cachepc_apic_timer = 100; /* starting value */
+ cachepc_apic_timer = 0;
cachepc_track_state_next = CPC_TRACK_AWAIT_DATA_FAULT;
} else if (cachepc_track_state == CPC_TRACK_AWAIT_DATA_FAULT) {
/* second fault from data access */
- pr_warn("CachePC: Got data fault gfn:%llu err:%u\n",
+ CPC_DBG("Got data fault gfn:%llu err:%u\n",
fault->gfn, fault->error_code);
if (!cachepc_inst_fault_avail)
- pr_err("CachePC: Waiting for data fault without inst\n");
+ CPC_ERR("Waiting for data fault without inst\n");
cachepc_data_fault_gfn = fault->gfn;
cachepc_data_fault_err = fault->error_code;
cachepc_data_fault_avail = true;
cachepc_single_step = true;
- cachepc_apic_timer = 100; /* reset in-case part of inst done */
+ cachepc_apic_timer = 0;
cachepc_track_state_next = CPC_TRACK_AWAIT_STEP_INTR;
} else if (cachepc_track_state == CPC_TRACK_AWAIT_STEP_INTR) {
/* unexpected extra fault before APIC interrupt */
- pr_err("CachePC: Got unexpected data fault gfn:%llu err:%u\n",
+ CPC_ERR("Got unexpected data fault gfn:%llu err:%u\n",
fault->gfn, fault->error_code);
- pr_err("CachePC: Data access step apic timer too large?\n");
+ CPC_ERR("Data access step apic timer too large?\n");
cachepc_track_single(vcpu, cachepc_inst_fault_gfn,
KVM_PAGE_TRACK_ACCESS);
@@ -76,7 +76,9 @@ cachepc_page_fault_handle(struct kvm_vcpu *vcpu,
cachepc_track_state_next = CPC_TRACK_AWAIT_INST_FAULT;
} else {
- pr_err("CachePC: Invalid tracking state: %i\n", cachepc_track_state);
+ CPC_ERR("Invalid tracking state: %i\n",
+ cachepc_track_state);
+
cachepc_track_state_next = CPC_TRACK_AWAIT_INST_FAULT;
}
} else if (cachepc_track_mode == CPC_TRACK_EXEC_PAGES) {
@@ -104,8 +106,6 @@ cachepc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
u64 spte;
bool flush;
- // pr_warn("CachePC: spte_protect\n");
-
spte = *sptep;
if (!is_writable_pte(spte) && !(pt_protect && is_mmu_writable_spte(spte)))
return false;
@@ -139,8 +139,6 @@ cachepc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
}
flush |= mmu_spte_update(sptep, spte);
- // pr_warn("CachePC: spte_protect flush:%i\n", flush);
-
return flush;
}
EXPORT_SYMBOL(cachepc_spte_protect);
@@ -171,8 +169,6 @@ cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
protected = false;
- // pr_warn("CachePC: mmu_slot_gfn_protect gfn:%llu\n", gfn);
-
if (kvm_memslots_have_rmaps(kvm)) {
for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
rmap_head = gfn_to_rmap(gfn, i, slot);
@@ -182,7 +178,7 @@ cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
protected |= cachepc_tdp_protect_gfn(kvm,
slot, gfn, min_level, mode);
} else {
- pr_err("CachePC: Tracking unsupported!\n");
+ CPC_ERR("Tracking unsupported!\n");
}
return true;
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -21,18 +21,13 @@
#define CPC_MSRMT_MAX (~((cpc_msrmt_t) 0))
-#define CPC_RETINST_KERNEL 4327
-
-#define CPC_CPUID_SIGNAL(type, val) \
- asm volatile("cpuid" : : "a" (CPC_CPUID_MAGIC(type)), "c" (val) \
- : "ebx", "edx")
-#define CPC_CPUID_MAGIC(type) (CPC_CPUID_MAGIC_VAL | (type & CPC_CPUID_TYPE_MASK))
-#define CPC_CPUID_MAGIC_VAL ((__u32) 0xC0FFEE00)
-#define CPC_CPUID_MAGIC_MASK ((__u32) 0xFFFFFF00)
-#define CPC_CPUID_TYPE_MASK ((__u32) 0x000000FF)
-
#define CPC_VMSA_MAGIC_ADDR ((void *) 0xC0FFEE)
+#define KVM_HC_CPC_VMMCALL 0xC0FFEE00
+#define CPC_DO_VMMCALL(type, val) \
+ asm volatile("vmmcall" : : "a" (KVM_HC_CPC_VMMCALL), \
+ "b"(type), "c" (val) : "rdx")
+
#define KVM_CPC_TEST_ACCESS _IOWR(KVMIO, 0x20, __u32)
#define KVM_CPC_TEST_EVICTION _IOWR(KVMIO, 0x21, __u32)
#define KVM_CPC_INIT_PMC _IOW(KVMIO, 0x22, __u32)
@@ -46,6 +41,7 @@
#define KVM_CPC_TRACK_MODE _IOWR(KVMIO, 0x2A, __u32)
#define KVM_CPC_VMSA_READ _IOR(KVMIO, 0x2B, __u64)
#define KVM_CPC_SVME_READ _IOR(KVMIO, 0x2C, __u32)
+#define KVM_CPC_DEBUG _IOW(KVMIO, 0x2D, __u32)
#define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x30, struct cpc_track_config)
#define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x31, __u64)
@@ -62,7 +58,7 @@ enum {
enum {
CPC_CPUID_START_TRACK,
- CPC_CPUID_STOP_TRACK,
+ CPC_CPUID_STOP_TRACK,
};
enum {
@@ -102,9 +98,9 @@ struct cpc_track_event {
__u64 retinst;
};
-struct cpc_cpuid_event {
- __u8 type;
- __u32 val;
+struct cpc_guest_event {
+ __u64 type;
+ __u64 val;
};
struct cpc_event {
@@ -112,7 +108,7 @@ struct cpc_event {
__u64 id;
union {
struct cpc_track_event track;
- struct cpc_cpuid_event cpuid;
+ struct cpc_guest_event guest;
};
};
diff --git a/patch.diff b/patch.diff
@@ -17,7 +17,7 @@ index eb186bc57f6a..b96e80934005 100644
/*
* The notifier represented by @kvm_page_track_notifier_node is linked into
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
-index 30f244b64523..c75819a6cd77 100644
+index 30f244b64523..a1e3c5ae2f80 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,6 @@
@@ -28,25 +28,24 @@ index 30f244b64523..c75819a6cd77 100644
ccflags-$(CONFIG_KVM_WERROR) += -Werror
ifeq ($(CONFIG_FRAME_POINTER),y)
-@@ -11,8 +11,9 @@ include $(srctree)/virt/kvm/Makefile.kvm
+@@ -11,8 +11,8 @@ include $(srctree)/virt/kvm/Makefile.kvm
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
- hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
- mmu/spte.o
+ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o mmu/spte.o \
-+ cachepc/cachepc.o cachepc/kvm.o \
-+ cachepc/tracking.o cachepc/events.o
++ cachepc/cachepc.o cachepc/kvm.o cachepc/track.o cachepc/event.o
ifdef CONFIG_HYPERV
kvm-y += kvm_onhyperv.o
-@@ -25,7 +26,8 @@ kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
+@@ -25,7 +25,8 @@ kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
-kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o \
-+ svm/avic.o svm/sev.o cachepc/cachepc.o cachepc/events.o
++ svm/avic.o svm/sev.o cachepc/cachepc.o cachepc/event.o
ifdef CONFIG_HYPERV
kvm-amd-y += svm/svm_onhyperv.o
@@ -58,8 +57,34 @@ index 000000000000..9119e44af1f0
@@ -0,0 +1 @@
+/home/louis/kvm-prime-count/cachepc
\ No newline at end of file
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index de6d44e07e34..b63672b47321 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -26,6 +26,10 @@
+ #include "trace.h"
+ #include "pmu.h"
+
++#include "cachepc/cachepc.h"
++#include "cachepc/uapi.h"
++#include "cachepc/event.h"
++
+ /*
+ * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
+ * aligned to sizeof(unsigned long) because it's not accessed via bitops.
+@@ -1445,8 +1449,8 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+ if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
+ return 1;
+
+- eax = kvm_rax_read(vcpu);
+- ecx = kvm_rcx_read(vcpu);
++ eax = kvm_rax_read(vcpu);
++ ecx = kvm_rcx_read(vcpu);
+ kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
+ kvm_rax_write(vcpu, eax);
+ kvm_rbx_write(vcpu, ebx);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
-index d871b8dee7b3..317dcb165e92 100644
+index d871b8dee7b3..c70fff62f1ab 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1152,6 +1152,8 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
@@ -146,7 +171,7 @@ index d871b8dee7b3..317dcb165e92 100644
- if (!fault->present || !fault->write)
- return false;
-+ cachepc_uspt_page_fault_handle(vcpu, fault);
++ cachepc_page_fault_handle(vcpu, fault);
/*
* guest is writing the page which is write tracked which can
@@ -169,19 +194,20 @@ index d871b8dee7b3..317dcb165e92 100644
return false;
}
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
-index 2e09d1b6249f..02821df27f51 100644
+index 2e09d1b6249f..315b2d06118c 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
-@@ -19,6 +19,8 @@
+@@ -19,6 +19,9 @@
#include "mmu.h"
#include "mmu_internal.h"
-+#include "../cachepc/tracking.h"
++#include "../cachepc/cachepc.h"
++#include "../cachepc/track.h"
+
bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
{
return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) ||
-@@ -115,7 +117,6 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
+@@ -115,7 +118,6 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode)
{
@@ -189,16 +215,16 @@ index 2e09d1b6249f..02821df27f51 100644
if (WARN_ON(!page_track_mode_is_valid(mode)))
return;
-@@ -123,6 +124,8 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
+@@ -123,6 +125,8 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
!kvm_page_track_write_tracking_enabled(kvm)))
return;
-+ pr_warn("CachePC: Tracking page: %llu\n", gfn);
++ CPC_DBG("Tracking page: %llu %i\n", gfn, mode);
+
update_gfn_track(slot, gfn, mode, 1);
/*
-@@ -131,9 +134,10 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
+@@ -131,9 +135,10 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
*/
kvm_mmu_gfn_disallow_lpage(slot, gfn);
@@ -212,11 +238,11 @@ index 2e09d1b6249f..02821df27f51 100644
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
-@@ -161,6 +165,8 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
+@@ -161,6 +166,8 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
!kvm_page_track_write_tracking_enabled(kvm)))
return;
-+ pr_warn("CachePC: Untracking page: %llu\n", gfn);
++ CPC_DBG("Untracking page: %llu %i\n", gfn, mode);
+
update_gfn_track(slot, gfn, mode, -1);
@@ -341,10 +367,19 @@ index 7b9265d67131..68b9134970da 100644
/*
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
-index a4f6d10b0ef3..a1ac048b35cf 100644
+index a4f6d10b0ef3..0c5aae1de162 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
-@@ -888,7 +888,7 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
+@@ -35,6 +35,8 @@
+ #include "trace.h"
+ #include "mmu.h"
+
++#include "cachepc/cachepc.h"
++
+ #ifndef CONFIG_KVM_AMD_SEV
+ /*
+ * When this config is not defined, SEV feature is not supported and APIs in
+@@ -888,7 +890,7 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
&data, error);
}
@@ -353,7 +388,7 @@ index a4f6d10b0ef3..a1ac048b35cf 100644
unsigned long dst_paddr, int sz, int *err)
{
int offset;
-@@ -904,6 +904,13 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
+@@ -904,12 +906,20 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}
@@ -367,17 +402,72 @@ index a4f6d10b0ef3..a1ac048b35cf 100644
static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
void __user *dst_uaddr,
unsigned long dst_paddr,
-@@ -1026,6 +1033,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+ int size, int *err)
+ {
+ struct page *tpage = NULL;
++ struct vcpu_svm *svm;
+ int ret, offset;
+
+ /* if inputs are not 16-byte then use intermediate buffer */
+@@ -923,6 +933,11 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
+ dst_paddr = __sme_page_pa(tpage);
+ }
+
++ if (dst_uaddr == CPC_VMSA_MAGIC_ADDR) {
++ svm = to_svm(xa_load(&kvm->vcpu_array, 0));
++ paddr = __pa(svm->sev_es.vmsa);
++ }
++
+ ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
+ if (ret)
+ goto e_free;
+@@ -1024,6 +1039,7 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+ struct kvm_sev_dbg debug;
+ unsigned long n;
unsigned int size;
++ bool vmsa_dec;
int ret;
-+ pr_warn("DEBUG CRYPT\n");
-+
if (!sev_guest(kvm))
- return -ENOTTY;
-
+@@ -1037,6 +1053,13 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+ if (!debug.dst_uaddr)
+ return -EINVAL;
+
++ vmsa_dec = false;
++ if (debug.src_uaddr == (uintptr_t) CPC_VMSA_MAGIC_ADDR) {
++ debug.len = PAGE_SIZE;
++ debug.src_uaddr = debug.dst_uaddr;
++ vmsa_dec = true;
++ }
++
+ vaddr = debug.src_uaddr;
+ size = debug.len;
+ vaddr_end = vaddr + size;
+@@ -1075,7 +1098,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+ if (dec)
+ ret = __sev_dbg_decrypt_user(kvm,
+ __sme_page_pa(src_p[0]) + s_off,
+- (void __user *)dst_vaddr,
++ vmsa_dec ? CPC_VMSA_MAGIC_ADDR
++ : (void __user *)dst_vaddr,
+ __sme_page_pa(dst_p[0]) + d_off,
+ len, &argp->error);
+ else
+@@ -3149,9 +3173,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm, u64 *exit_code)
+ }
+ break;
+ case SVM_EXIT_VMMCALL:
+- if (!ghcb_rax_is_valid(ghcb) ||
+- !ghcb_cpl_is_valid(ghcb))
+- goto vmgexit_err;
++ // if (!ghcb_rax_is_valid(ghcb) ||
++ // !ghcb_cpl_is_valid(ghcb))
++ // goto vmgexit_err;
+ break;
+ case SVM_EXIT_RDTSCP:
+ break;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index cf0bf456d520..4134049e6b08 100644
+index cf0bf456d520..ff84cedfefd0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2,6 +2,10 @@
@@ -385,25 +475,64 @@ index cf0bf456d520..4134049e6b08 100644
#include <linux/kvm_host.h>
+#include "cachepc/cachepc.h"
-+#include "cachepc/events.h"
-+#include "cachepc/tracking.h"
++#include "cachepc/event.h"
++#include "cachepc/track.h"
+
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
-@@ -2083,6 +2087,38 @@ static int smi_interception(struct kvm_vcpu *vcpu)
+@@ -1887,6 +1891,8 @@ static int npf_interception(struct kvm_vcpu *vcpu)
+ u64 fault_address = svm->vmcb->control.exit_info_2;
+ u64 error_code = svm->vmcb->control.exit_info_1;
+
++ cachepc_track_state_next = cachepc_track_state;
++
+ trace_kvm_page_fault(fault_address, error_code);
+ rc = kvm_mmu_page_fault(vcpu, fault_address, error_code,
+ static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
+@@ -1896,6 +1902,8 @@ static int npf_interception(struct kvm_vcpu *vcpu)
+ if (error_code & PFERR_GUEST_RMP_MASK)
+ handle_rmp_page_fault(vcpu, fault_address, error_code);
+
++ cachepc_track_state = cachepc_track_state_next;
++
+ return rc;
+ }
+
+@@ -2083,7 +2091,56 @@ static int smi_interception(struct kvm_vcpu *vcpu)
static int intr_interception(struct kvm_vcpu *vcpu)
{
++ struct vcpu_svm *svm;
++
+ ++vcpu->stat.irq_exits;
++
+ if (cachepc_track_mode == CPC_TRACK_DATA_ACCESS && cachepc_single_step) {
++ svm = to_svm(vcpu);
++
++ if (svm->sev_es.vmsa->rip == cachepc_prev_rip) {
++ cachepc_apic_timer += 1;
++ return 1;
++ }
++
+ cachepc_single_step = false;
+
-+ if (cachepc_data_fault_avail) {
-+ pr_warn("CachePC: Caught single step WITH data!\n");
++ switch (cachepc_track_state) {
++ case CPC_TRACK_AWAIT_DATA_FAULT:
++ CPC_INFO("Caught single step WITHOUT data!\n");
++
++ cachepc_track_single(vcpu, cachepc_inst_fault_gfn,
++ KVM_PAGE_TRACK_ACCESS);
++ cachepc_inst_fault_avail = false;
+
-+ cachepc_send_tracking_event(
++ cachepc_send_track_event(
+ cachepc_inst_fault_gfn, cachepc_inst_fault_err,
-+ cachepc_data_fault_gfn, cachepc_data_fault_err);
++ 0, 0);
++
++ cachepc_track_state = CPC_TRACK_AWAIT_INST_FAULT;
++ break;
++ case CPC_TRACK_AWAIT_STEP_INTR:
++ CPC_INFO("Caught single step WITH data!\n");
+
+ cachepc_track_single(vcpu, cachepc_data_fault_gfn,
+ KVM_PAGE_TRACK_ACCESS);
@@ -412,25 +541,22 @@ index cf0bf456d520..4134049e6b08 100644
+ cachepc_track_single(vcpu, cachepc_inst_fault_gfn,
+ KVM_PAGE_TRACK_ACCESS);
+ cachepc_inst_fault_avail = false;
-+ } else if (cachepc_inst_fault_avail) {
-+ pr_warn("CachePC: Caught single step WITHOUT data!\n");
+
-+ cachepc_send_tracking_event(
++ cachepc_send_track_event(
+ cachepc_inst_fault_gfn, cachepc_inst_fault_err,
-+ 0, 0);
++ cachepc_data_fault_gfn, cachepc_data_fault_err);
+
-+ cachepc_track_single(vcpu, cachepc_inst_fault_gfn,
-+ KVM_PAGE_TRACK_ACCESS);
-+ cachepc_inst_fault_avail = false;
-+ } else {
-+ pr_err("CachePC: Unexpected single step\n");
++ cachepc_track_state = CPC_TRACK_AWAIT_INST_FAULT;
++ break;
++ default:
++ CPC_ERR("Unexpected single step\n");
+ }
+ }
+
- ++vcpu->stat.irq_exits;
return 1;
}
-@@ -3269,9 +3305,23 @@ static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
+
+@@ -3269,9 +3326,25 @@ static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
{
@@ -446,15 +572,17 @@ index cf0bf456d520..4134049e6b08 100644
if (!svm_check_exit_valid(exit_code))
return svm_handle_invalid_exit(vcpu, exit_code);
-+ for (i = 0; i < sizeof(codelut) / sizeof(codelut[0]); i++) {
-+ if (codelut[i].code == exit_code)
-+ pr_warn("KVM EXIT (%s)\n", codelut[i].name);
++ if (cachepc_debug && cachepc_track_mode != CPC_TRACK_NONE) {
++ for (i = 0; i < sizeof(codelut) / sizeof(codelut[0]); i++) {
++ if (codelut[i].code == exit_code)
++ pr_warn("KVM EXIT (%s)\n", codelut[i].name);
++ }
+ }
+
#ifdef CONFIG_RETPOLINE
if (exit_code == SVM_EXIT_MSR)
return msr_interception(vcpu);
-@@ -3788,14 +3838,39 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3788,14 +3861,42 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long vmcb_pa = svm->current_vmcb->pa;
@@ -462,50 +590,53 @@ index cf0bf456d520..4134049e6b08 100644
guest_state_enter_irqoff();
++ if (cachepc_apic_timer == 0) {
++ cachepc_prev_rip = svm->sev_es.vmsa->rip;
++ cachepc_apic_timer = 100;
++ }
++
if (sev_es_guest(vcpu->kvm)) {
+ cpu = get_cpu();
-+ local_irq_disable();
++ // local_irq_disable();
+ WARN_ON(cpu != 2);
+
+ memset(cachepc_msrmts, 0,
+ cachepc_msrmts_count * sizeof(cpc_msrmt_t));
-+ cachepc_reset_pmc(CPC_L1MISS_PMC);
+
-+ cachepc_reset_pmc(CPC_RETINST_PMC);
- __svm_sev_es_vcpu_run(vmcb_pa);
+ cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC);
+ __svm_sev_es_vcpu_run(vmcb_pa);
++ cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC) - cachepc_retinst;
+
+ cachepc_save_msrmts(cachepc_ds);
+ if (cachepc_baseline_measure)
+ cachepc_update_baseline();
-+ local_irq_enable();
++ // local_irq_enable();
+ put_cpu();
} else {
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+ cpu = get_cpu();
-+ local_irq_disable();
++ // local_irq_disable();
+ WARN_ON(cpu != 2);
+
+ memset(cachepc_msrmts, 0,
+ cachepc_msrmts_count * sizeof(cpc_msrmt_t));
-+ cachepc_reset_pmc(CPC_L1MISS_PMC);
+
/*
* Use a single vmcb (vmcb01 because it's always valid) for
* context switching guest state via VMLOAD/VMSAVE, that way
-@@ -3806,7 +3881,15 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3806,7 +3907,15 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
__svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
vmsave(svm->vmcb01.pa);
-+ cachepc_reset_pmc(CPC_RETINST_PMC);
- vmload(__sme_page_pa(sd->save_area));
+ cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC);
+ vmload(__sme_page_pa(sd->save_area));
++ cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC) - cachepc_retinst;
+
+ cachepc_save_msrmts(cachepc_ds);
+ if (cachepc_baseline_measure)
+ cachepc_update_baseline();
-+ local_irq_enable();
++ // local_irq_enable();
+ put_cpu();
}
@@ -643,26 +774,53 @@ index dfaeb47fcf2a..0626f3fdddfd 100644
2: cli
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index d9adf79124f9..6ca86ab417cb 100644
+index d9adf79124f9..fc64f9b7614d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -82,6 +82,8 @@
+@@ -82,6 +82,10 @@
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>
-+#include "cachepc/sevstep.h"
++#include "cachepc/cachepc.h"
++#include "cachepc/event.h"
++#include "cachepc/track.h"
+
#define CREATE_TRACE_POINTS
#include "trace.h"
-@@ -6597,6 +6599,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
- if (!kvm_x86_ops.mem_enc_ioctl)
- goto out;
+@@ -9267,10 +9271,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+ a3 &= 0xFFFFFFFF;
+ }
+
+- if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
+- ret = -KVM_EPERM;
+- goto out;
+- }
++ // if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
++ // ret = -KVM_EPERM;
++ // goto out;
++ // }
+
+ ret = -KVM_ENOSYS;
-+ pr_warn("ENCRYPT_OP\n");
- r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp);
+@@ -9326,11 +9330,16 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+ vcpu->arch.complete_userspace_io = complete_hypercall_exit;
+ return 0;
+ }
++ case KVM_HC_CPC_VMMCALL:
+ CPC_WARN("Hypercall Run\n");
+ cachepc_send_guest_event(a0, a1);
++ ret = 0;
++ break;
+ default:
+ ret = -KVM_ENOSYS;
break;
}
+-out:
++//out:
+ if (!op_64_bit)
+ ret = (u32)ret;
+ kvm_rax_write(vcpu, ret);
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 27ab27931813..90679ec8ba79 100644
--- a/crypto/aes_generic.c
@@ -789,7 +947,7 @@ index e089fbf9017f..7899e1efe852
static int __sev_init_locked(int *error)
{
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index f2a63cb2658b..c77a29e14771 100644
+index f2a63cb2658b..869faf927e5d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -13,6 +13,7 @@
@@ -804,7 +962,7 @@ index f2a63cb2658b..c77a29e14771 100644
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
-+#include "../../arch/x86/kvm/cachepc/tracking.h"
++#include "../../arch/x86/kvm/cachepc/track.h"
#include <linux/kvm_dirty_ring.h>
diff --git a/test/.gitignore b/test/.gitignore
@@ -10,3 +10,4 @@ aes-detect_host
access-detect_guest
access-detect_host
readsvme
+debug
diff --git a/test/access-detect_guest.c b/test/access-detect_guest.c
@@ -17,10 +17,10 @@ main(int argc, const char **argv)
memset(buf, 0, L1_LINESIZE * L1_SETS);
while (1) {
- CPC_CPUID_SIGNAL(CPC_CPUID_START_TRACK, 0);
+ CPC_DO_VMMCALL(CPC_CPUID_START_TRACK, 0);
- *(uint8_t *)(buf + L1_LINESIZE * 5) += 1;
+ *(uint8_t *)(buf + L1_LINESIZE * 15) += 1;
- CPC_CPUID_SIGNAL(CPC_CPUID_START_TRACK, 0);
+ CPC_DO_VMMCALL(CPC_CPUID_STOP_TRACK, 0);
}
}
diff --git a/test/access-detect_host.c b/test/access-detect_host.c
@@ -133,6 +133,7 @@ monitor(bool baseline)
struct cpc_event event;
cpc_msrmt_t counts[64];
uint64_t track_mode;
+ uint32_t arg;
int ret, i;
/* Get page fault info */
@@ -140,15 +141,27 @@ monitor(bool baseline)
if (!ret) {
if (event.type == CPC_EVENT_CPUID) {
printf("CPUID EVENT\n");
- if (event.cpuid.type == CPC_CPUID_START_TRACK) {
+ if (event.guest.type == CPC_CPUID_START_TRACK) {
+ arg = CPC_TRACK_DATA_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg);
+ if (ret == -1) err(1, "ioctl TRACK_MODE");
+
track_mode = KVM_PAGE_TRACK_ACCESS;
ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_mode);
if (ret) err(1, "ioctl TRACK_ALL");
- } else if (event.cpuid.type == CPC_CPUID_STOP_TRACK) {
+ } else if (event.guest.type == CPC_CPUID_STOP_TRACK) {
+ arg = CPC_TRACK_NONE;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg);
+ if (ret == -1) err(1, "ioctl TRACK_MODE");
+
track_mode = KVM_PAGE_TRACK_ACCESS;
ret = ioctl(kvm_dev, KVM_CPC_UNTRACK_ALL, &track_mode);
if (ret) err(1, "ioctl UNTRACK_ALL");
}
+
+ ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
+ if (ret == -1) err(1, "ioctl ACK_EVENT");
+
return 0;
} else if (event.type != CPC_EVENT_TRACK) {
return 0;
@@ -175,7 +188,7 @@ monitor(bool baseline)
}
}
- ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.track.id);
+ ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
if (ret == -1) err(1, "ioctl ACK_EVENT");
faultcnt++;
@@ -219,11 +232,6 @@ main(int argc, const char **argv)
ret = ioctl(kvm_dev, KVM_CPC_RESET_TRACKING, NULL);
if (ret == -1) err(1, "ioctl RESET_TRACKING");
- /* Do data access stepping */
- arg = CPC_TRACK_DATA_ACCESS;
- ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg);
- if (ret == -1) err(1, "ioctl TRACK_MODE");
-
pin_process(0, SECONDARY_CORE, true);
printf("PINNED\n");
@@ -239,21 +247,29 @@ main(int argc, const char **argv)
ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &arg);
if (ret) err(1, "ioctl TRACK_ALL");
+ arg = CPC_TRACK_DATA_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg);
+ if (ret == -1) err(1, "ioctl TRACK_MODE");
+
faultcnt = 0;
while (faultcnt < 100) {
if (monitor(true)) break;
}
- arg = KVM_PAGE_TRACK_ACCESS;
- ret = ioctl(kvm_dev, KVM_CPC_UNTRACK_ALL, &arg);
- if (ret) err(1, "ioctl TRACK_ALL");
-
do {
ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
if (ret == -1 && errno != EAGAIN)
err(1, "ioctl POLL_EVENT");
} while (ret == -1 && errno == EAGAIN);
+ arg = CPC_TRACK_NONE;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg);
+ if (ret == -1) err(1, "ioctl TRACK_MODE");
+
+ arg = KVM_PAGE_TRACK_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_UNTRACK_ALL, &arg);
+	if (ret) err(1, "ioctl UNTRACK_ALL");
+
arg = false;
ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
@@ -277,7 +293,7 @@ main(int argc, const char **argv)
ret = ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &arg);
if (ret == -1) err(1, "ioctl SUB_BASELINE");
- ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.track.id);
+ ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
if (ret == -1) err(1, "ioctl ACK_EVENT");
faultcnt = 0;
diff --git a/test/aes-detect_guest b/test/aes-detect_guest
Binary files differ.
diff --git a/test/aes-detect_guest.c b/test/aes-detect_guest.c
@@ -39,11 +39,11 @@ main(int argc, const char **argv)
memset(buf, 0, L1_LINESIZE * L1_SETS);
while (1) {
- CPC_CPUID_SIGNAL(CPC_CPUID_START_TRACK, 0);
+ CPC_DO_VMMCALL(CPC_CPUID_START_TRACK, 0);
buf[L1_LINESIZE * 5] += 1;
- CPC_CPUID_SIGNAL(CPC_CPUID_START_TRACK, 0);
+		CPC_DO_VMMCALL(CPC_CPUID_STOP_TRACK, 0);
}
return 0;
diff --git a/test/aes-detect_host b/test/aes-detect_host
Binary files differ.
diff --git a/test/aes-detect_host.c b/test/aes-detect_host.c
@@ -439,14 +439,14 @@ runonce(struct kvm *kvm)
int
monitor(void)
{
- struct cpc_track_event event;
+ struct cpc_event event;
int ret;
/* Get page fault info */
ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
if (!ret) {
printf("Got page fault! %llu retired insts\n",
- event.retinst);
+ event.track.retinst);
faultcnt++;
printf("Acking event %llu\n", event.id);
diff --git a/test/debug.c b/test/debug.c
@@ -0,0 +1,26 @@
+#include "cachepc/uapi.h"
+
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <err.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int
+main(int argc, const char **argv)
+{
+ uint32_t arg;
+ int fd, ret;
+
+ fd = open("/dev/kvm", O_RDONLY);
+ if (fd < 0) err(1, "open");
+
+ arg = argc > 1 ? atoi(argv[1]) : 1;
+ ret = ioctl(fd, KVM_CPC_DEBUG, &arg);
+ if (ret == -1) err(1, "ioctl DEBUG");
+
+ close(fd);
+}