commit 82d56ef77c114ac0b375fef04cea3a50f10f1843
parent afe49b52f6b2ac52efe8e610e64ee07c7c2efef4
Author: Louis Burda <quent.burda@gmail.com>
Date: Tue, 22 Nov 2022 15:03:02 +0100
Improved single stepping and added initial cpuid events
Diffstat:
15 files changed, 444 insertions(+), 70 deletions(-)
diff --git a/Makefile b/Makefile
@@ -3,6 +3,7 @@ PWD := $(shell pwd)
TARGETS = build test/eviction test/access test/kvm test/sev test/sev-es test/sevstep
TARGETS += test/aes-detect_guest test/aes-detect_host
+TARGETS += test/access-detect_guest test/access-detect_host
CFLAGS = -I . -I test -Wunused-variable -Wunknown-pragmas
@@ -26,6 +27,8 @@ load:
freq:
sudo cpupower frequency-set -f 1.5GHz
+ sudo cpupower frequency-set -u 1.5GHz
+ sudo cpupower frequency-set -d 1.5GHz
update:
git -C $(LINUX) diff 0aaa1e599bee256b3b15643bbb95e80ce7aa9be5 -G. > patch.diff
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
@@ -290,6 +290,6 @@ void
cachepc_apic_oneshot(uint32_t interval)
{
native_apic_mem_write(APIC_LVTT, LOCAL_TIMER_VECTOR | APIC_LVT_TIMER_ONESHOT);
- native_apic_mem_write(APIC_TDCR, APIC_TDR_DIV_2);
+ native_apic_mem_write(APIC_TDCR, APIC_TDR_DIV_1);
native_apic_mem_write(APIC_TMICT, interval);
}
diff --git a/cachepc/event.c b/cachepc/event.c
@@ -1,5 +1,5 @@
-#include "events.h"
-#include "tracking.h"
+#include "event.h"
+#include "track.h"
#include "cachepc.h"
#include "uapi.h"
@@ -25,10 +25,8 @@ cachepc_events_reset(void)
}
int
-cachepc_send_tracking_event(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
- uint64_t data_fault_gfn, uint32_t data_fault_err)
+cachepc_send_event(struct cpc_event event)
{
- struct cpc_track_event event;
ktime_t deadline;
read_lock(&cachepc_event_lock);
@@ -47,15 +45,8 @@ cachepc_send_tracking_event(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
} else {
cachepc_last_event_sent++;
}
- event.id = cachepc_last_event_sent;
- event.inst_fault_gfn = inst_fault_gfn;
- event.inst_fault_err = inst_fault_err;
- event.data_fault_avail = (data_fault_err != 0);
- event.data_fault_gfn = data_fault_gfn;
- event.data_fault_err = data_fault_err;
- event.timestamp_ns = ktime_get_real_ns();
- event.retinst = cachepc_retinst;
+ event.id = cachepc_last_event_sent;
cachepc_event_avail = true;
cachepc_event = event;
write_unlock(&cachepc_event_lock);
@@ -73,6 +64,36 @@ cachepc_send_tracking_event(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
return 0;
}
+int
+cachepc_send_cpuid_event(uint8_t type, uint32_t val)
+{
+	struct cpc_event event = { 0 };
+
+ event.type = CPC_EVENT_CPUID;
+ event.cpuid.type = type;
+ event.cpuid.val = val;
+
+ return cachepc_send_event(event);
+}
+
+int
+cachepc_send_track_event(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
+ uint64_t data_fault_gfn, uint32_t data_fault_err)
+{
+	struct cpc_event event = { 0 };
+
+ event.type = CPC_EVENT_TRACK;
+ event.track.inst_fault_gfn = inst_fault_gfn;
+ event.track.inst_fault_err = inst_fault_err;
+ event.track.data_fault_avail = (data_fault_err != 0);
+ event.track.data_fault_gfn = data_fault_gfn;
+ event.track.data_fault_err = data_fault_err;
+ event.track.timestamp_ns = ktime_get_real_ns();
+ event.track.retinst = cachepc_retinst - CPC_RETINST_KERNEL;
+
+ return cachepc_send_event(event);
+}
+
bool
cachepc_event_is_done(uint64_t id)
{
@@ -86,7 +107,7 @@ cachepc_event_is_done(uint64_t id)
}
int
-cachepc_handle_poll_event_ioctl(struct cpc_track_event __user *event)
+cachepc_handle_poll_event_ioctl(struct cpc_event __user *event)
{
int err;
@@ -99,8 +120,7 @@ cachepc_handle_poll_event_ioctl(struct cpc_track_event __user *event)
write_lock(&cachepc_event_lock);
if (cachepc_event_avail) {
- err = copy_to_user(event, &cachepc_event,
- sizeof(struct cpc_track_event));
+ err = copy_to_user(event, &cachepc_event, sizeof(struct cpc_event));
cachepc_event_avail = false;
} else {
err = -EAGAIN;
diff --git a/cachepc/event.h b/cachepc/event.h
@@ -10,16 +10,17 @@ extern uint64_t cachepc_last_event_sent;
extern uint64_t cachepc_last_event_acked;
extern rwlock_t cachepc_event_lock;
-extern struct cpc_track_event cachepc_event;
+extern struct cpc_event cachepc_event;
extern bool cachepc_event_avail;
extern bool cachepc_events_init;
void cachepc_events_reset(void);
-int cachepc_send_tracking_event(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
+int cachepc_send_cpuid_event(uint8_t type, uint32_t val);
+int cachepc_send_track_event(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
uint64_t data_fault_gfn, uint32_t data_fault_err);
bool cachepc_event_is_done(uint64_t id);
-int cachepc_handle_poll_event_ioctl(struct cpc_track_event *userpace_mem);
+int cachepc_handle_poll_event_ioctl(struct cpc_event *user);
int cachepc_handle_ack_event_ioctl(uint64_t eventid);
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(cachepc_last_event_sent);
EXPORT_SYMBOL(cachepc_last_event_acked);
EXPORT_SYMBOL(cachepc_event_lock);
-struct cpc_track_event cachepc_event;
+struct cpc_event cachepc_event;
bool cachepc_event_avail;
EXPORT_SYMBOL(cachepc_event);
EXPORT_SYMBOL(cachepc_event_avail);
diff --git a/cachepc/mmu.c b/cachepc/mmu.c
@@ -26,8 +26,8 @@ cachepc_page_fault_handle(struct kvm_vcpu *vcpu,
/* first fault from instruction fetch */
pr_warn("CachePC: Got inst fault gfn:%llu err:%u\n",
fault->gfn, fault->error_code);
- if (!inst_fetch)
- pr_err("CachePC: Expected inst fault but was not on fetch\n");
+ //if (!inst_fetch)
+ // pr_err("CachePC: Expected inst fault but was not on fetch\n");
cachepc_inst_fault_gfn = fault->gfn;
cachepc_inst_fault_err = fault->error_code;
@@ -35,7 +35,7 @@ cachepc_page_fault_handle(struct kvm_vcpu *vcpu,
cachepc_data_fault_avail = false;
cachepc_single_step = true;
- cachepc_apic_timer = 390;
+ cachepc_apic_timer = 100; /* starting value */
cachepc_track_state_next = CPC_TRACK_AWAIT_DATA_FAULT;
} else if (cachepc_track_state == CPC_TRACK_AWAIT_DATA_FAULT) {
@@ -50,7 +50,7 @@ cachepc_page_fault_handle(struct kvm_vcpu *vcpu,
cachepc_data_fault_avail = true;
cachepc_single_step = true;
- cachepc_apic_timer = 390;
+ cachepc_apic_timer = 100; /* reset in-case part of inst done */
cachepc_track_state_next = CPC_TRACK_AWAIT_STEP_INTR;
} else if (cachepc_track_state == CPC_TRACK_AWAIT_STEP_INTR) {
@@ -70,7 +70,7 @@ cachepc_page_fault_handle(struct kvm_vcpu *vcpu,
/* retrack fault we just got so we can start from scratch */
cachepc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS);
- cachepc_send_tracking_event(
+ cachepc_send_track_event(
cachepc_inst_fault_gfn, cachepc_inst_fault_err,
cachepc_data_fault_gfn, cachepc_data_fault_err);
@@ -93,10 +93,10 @@ cachepc_page_fault_handle(struct kvm_vcpu *vcpu,
}
cachepc_inst_fault_gfn = fault->gfn;
cachepc_inst_fault_err = fault->error_code;
- cachepc_send_tracking_event(fault->gfn, fault->error_code, 0, 0);
+ cachepc_send_track_event(fault->gfn, fault->error_code, 0, 0);
} else if (cachepc_track_mode == CPC_TRACK_ACCESS) {
cachepc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS);
- cachepc_send_tracking_event(fault->gfn, fault->error_code, 0, 0);
+ cachepc_send_track_event(fault->gfn, fault->error_code, 0, 0);
}
}
diff --git a/cachepc/track.c b/cachepc/track.c
@@ -1,4 +1,4 @@
-#include "tracking.h"
+#include "track.h"
#include "cachepc.h"
#include "mmu/mmu_internal.h"
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -21,6 +21,16 @@
#define CPC_MSRMT_MAX (~((cpc_msrmt_t) 0))
+#define CPC_RETINST_KERNEL 4327
+
+#define CPC_CPUID_SIGNAL(type, val) \
+ asm volatile("cpuid" : : "a" (CPC_CPUID_MAGIC(type)), "c" (val) \
+ : "ebx", "edx")
+#define CPC_CPUID_MAGIC(type) (CPC_CPUID_MAGIC_VAL | ((type) & CPC_CPUID_TYPE_MASK))
+#define CPC_CPUID_MAGIC_VAL ((__u32) 0xC0FFEE00)
+#define CPC_CPUID_MAGIC_MASK ((__u32) 0xFFFFFF00)
+#define CPC_CPUID_TYPE_MASK ((__u32) 0x000000FF)
+
#define CPC_VMSA_MAGIC_ADDR ((void *) 0xC0FFEE)
#define KVM_CPC_TEST_ACCESS _IOWR(KVMIO, 0x20, __u32)
@@ -44,6 +54,18 @@
#define KVM_CPC_ACK_EVENT _IOWR(KVMIO, 0x35, __u64)
enum {
+ CPC_EVENT_NONE,
+ CPC_EVENT_TRACK,
+ CPC_EVENT_CPUID
+};
+
+enum {
+ CPC_CPUID_START_TRACK,
+ CPC_CPUID_STOP_TRACK,
+};
+
+enum {
+ CPC_TRACK_NONE,
CPC_TRACK_ACCESS,
CPC_TRACK_DATA_ACCESS,
CPC_TRACK_EXEC_PAGES
@@ -70,7 +92,6 @@ struct cpc_track_config {
};
struct cpc_track_event {
- __u64 id;
__u64 inst_fault_gfn;
__u64 inst_fault_err;
__u32 data_fault_avail;
@@ -80,4 +101,18 @@ struct cpc_track_event {
__u64 retinst;
};
+struct cpc_cpuid_event {
+ __u8 type;
+ __u32 val;
+};
+
+struct cpc_event {
+ __u32 type;
+ __u64 id;
+ union {
+ struct cpc_track_event track;
+ struct cpc_cpuid_event cpuid;
+ };
+};
+
typedef __u64 cpc_msrmt_t;
diff --git a/notes b/notes
@@ -1,28 +0,0 @@
-Observations:
-
-Questions:
-- test/sevstep: why 0 then 15, arent both accesses in the first page?
- => first code page and stack access
-- test/sevstep: why does it seem to work with event race but not without?
- => it doesnt (anymore)
-
-Next steps:
-- enable single-steping with multiple page faults (only one ends up in
- last_fault_gfn and others stay untracked)
-- try adjusting timer for single stepping
-- Try to setup the non-baseline step without apic, but remapping page
- such that we see the relevant page faults and the gfn increment
-- test/sevstep: implement counter.. read program memory to see
- how many instructions were executed on apic timer
-
-Meeting questions:
-- Why is decrypted rip sometimes off-by-one?
- =>
-- VM gets interrupted independent of our APIC (by the scheduler?) was this a problem for you?
- =>
-- Is supplementing apic with waiting for next fault viable? (seems to work well actually)
- =>
-- How does single stepping work with fixed cpu frequency when we always step less than one instruction?
- =>
-
-
diff --git a/test/.gitignore b/test/.gitignore
@@ -5,3 +5,7 @@ sev
sev-es
sev-snp
sevstep
+aes-detect_guest
+aes-detect_host
+access-detect_guest
+access-detect_host
diff --git a/test/access-detect_guest.c b/test/access-detect_guest.c
@@ -0,0 +1,26 @@
+#include "cachepc/uapi.h"
+
+#include <err.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+
+int
+main(int argc, const char **argv)
+{
+ void *buf;
+
+ buf = NULL;
+ if (posix_memalign(&buf, L1_LINESIZE * L1_SETS, L1_LINESIZE * L1_SETS))
+ err(1, "memalign");
+ memset(buf, 0, L1_LINESIZE * L1_SETS);
+
+ while (1) {
+ CPC_CPUID_SIGNAL(CPC_CPUID_START_TRACK, 0);
+
+ *(uint8_t *)(buf + L1_LINESIZE * 5) += 1;
+
+		CPC_CPUID_SIGNAL(CPC_CPUID_STOP_TRACK, 0);
+ }
+}
diff --git a/test/access-detect_host.c b/test/access-detect_host.c
@@ -0,0 +1,288 @@
+#define _GNU_SOURCE
+
+#include "cachepc/uapi.h"
+
+#include <linux/psp-sev.h>
+#include <linux/kvm.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <sys/user.h>
+#include <sys/wait.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+#include <dirent.h>
+#include <assert.h>
+#include <errno.h>
+#include <err.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#define ARRLEN(x) (sizeof(x) / sizeof((x)[0]))
+#define MIN(a,b) ((a) > (b) ? (b) : (a))
+
+#define TARGET_CORE 2
+#define SECONDARY_CORE 3
+
+#define TARGET_SET 15
+
+/* ioctl dev fds */
+static int kvm_dev;
+static int faultcnt;
+
+void
+hexdump(void *data, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (i % 16 == 0 && i)
+ printf("\n");
+ printf("%02X ", *(uint8_t *)(data + i));
+ }
+ printf("\n");
+}
+
+bool
+pin_process(pid_t pid, int cpu, bool assert)
+{
+ cpu_set_t cpuset;
+ int ret;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ ret = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset);
+ if (ret < 0) {
+ if (assert) err(1, "sched_setaffinity");
+ return false;
+ }
+
+ return true;
+}
+
+cpc_msrmt_t *
+read_counts()
+{
+ cpc_msrmt_t *counts;
+ int i, ret;
+
+ counts = malloc(L1_SETS * sizeof(cpc_msrmt_t));
+ if (!counts) err(1, "malloc");
+
+ ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
+ if (ret == -1) err(1, "ioctl READ_COUNTS");
+
+ for (i = 0; i < L1_SETS; i++) {
+ if (counts[i] > 8)
+ errx(1, "Invalid counts set %i", i);
+ }
+
+ return counts;
+}
+
+void
+print_counts(cpc_msrmt_t *counts)
+{
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (i % 16 == 0 && i)
+ printf("\n");
+ if (counts[i] == 1)
+ printf("\x1b[38;5;88m");
+ else if (counts[i] > 1)
+ printf("\x1b[38;5;196m");
+ printf("%2i ", i);
+ if (counts[i] > 0)
+ printf("\x1b[0m");
+ }
+ printf("\n");
+}
+
+void
+print_counts_raw(cpc_msrmt_t *counts)
+{
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (i % 16 == 0 && i)
+ printf("\n");
+ if (counts[i] == 1)
+ printf("\x1b[38;5;88m");
+ else if (counts[i] > 1)
+ printf("\x1b[38;5;196m");
+ printf("%02X ", (uint8_t) counts[i]);
+ if (counts[i] > 0)
+ printf("\x1b[0m");
+ }
+ printf("\n");
+}
+
+int
+monitor(bool baseline)
+{
+ struct cpc_event event;
+ cpc_msrmt_t counts[64];
+ uint64_t track_mode;
+ int ret, i;
+
+ /* Get page fault info */
+ ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
+ if (!ret) {
+ if (event.type == CPC_EVENT_CPUID) {
+ printf("CPUID EVENT\n");
+ if (event.cpuid.type == CPC_CPUID_START_TRACK) {
+ track_mode = KVM_PAGE_TRACK_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_mode);
+ if (ret) err(1, "ioctl TRACK_ALL");
+ } else if (event.cpuid.type == CPC_CPUID_STOP_TRACK) {
+ track_mode = KVM_PAGE_TRACK_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_UNTRACK_ALL, &track_mode);
+ if (ret) err(1, "ioctl UNTRACK_ALL");
+ }
+ return 0;
+ } else if (event.type != CPC_EVENT_TRACK) {
+ return 0;
+ }
+
+ printf("EVENT\n");
+
+ ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
+ if (ret == -1) err(1, "ioctl READ_COUNTS");
+
+ if (!baseline) {
+ printf("Event: inst:%llu data:%llu retired:%llu\n",
+ event.track.inst_fault_gfn,
+ event.track.data_fault_gfn,
+ event.track.retinst);
+ print_counts(counts);
+ printf("\n");
+ }
+
+ for (i = 0; i < 64; i++) {
+ if (counts[i] > 8) {
+ errx(1, "Invalid count for set %i (%llu)",
+ i, counts[i]);
+ }
+ }
+
+		ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
+ if (ret == -1) err(1, "ioctl ACK_EVENT");
+
+ faultcnt++;
+ } else if (errno != EAGAIN) {
+ perror("ioctl POLL_EVENT");
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+main(int argc, const char **argv)
+{
+ pid_t pid;
+ uint32_t arg;
+ struct cpc_event event;
+ cpc_msrmt_t baseline[64];
+ int ret, i;
+
+ if (argc <= 1 || !atoi(argv[1])) {
+ printf("Specify qemu process to pin\n");
+ return 0;
+ }
+
+ kvm_dev = open("/dev/kvm", O_RDWR);
+	if (kvm_dev < 0) err(1, "open /dev/kvm");
+
+ setvbuf(stdout, NULL, _IONBF, 0);
+
+ pid = atoi(argv[1]);
+ pin_process(pid, TARGET_CORE, true);
+
+ pin_process(0, TARGET_CORE, true);
+
+ /* Setup needed performance counters */
+ ret = ioctl(kvm_dev, KVM_CPC_SETUP_PMC, NULL);
+ if (ret < 0) err(1, "ioctl SETUP_PMC");
+
+ /* Reset previous tracking */
+ ret = ioctl(kvm_dev, KVM_CPC_RESET_TRACKING, NULL);
+ if (ret == -1) err(1, "ioctl RESET_TRACKING");
+
+ /* Do data access stepping */
+ arg = CPC_TRACK_DATA_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg);
+ if (ret == -1) err(1, "ioctl TRACK_MODE");
+
+ pin_process(0, SECONDARY_CORE, true);
+ printf("PINNED\n");
+
+ arg = false;
+ ret = ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &arg);
+ if (ret == -1) err(1, "ioctl SUB_BASELINE");
+
+ arg = true;
+ ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
+ if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
+ arg = KVM_PAGE_TRACK_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &arg);
+ if (ret) err(1, "ioctl TRACK_ALL");
+
+ faultcnt = 0;
+ while (faultcnt < 100) {
+ if (monitor(true)) break;
+ }
+
+ arg = KVM_PAGE_TRACK_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_UNTRACK_ALL, &arg);
+	if (ret) err(1, "ioctl UNTRACK_ALL");
+
+ do {
+ ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
+ if (ret == -1 && errno != EAGAIN)
+ err(1, "ioctl POLL_EVENT");
+ } while (ret == -1 && errno == EAGAIN);
+
+ arg = false;
+ ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
+ if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
+ ret = ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline);
+ if (ret == -1) err(1, "ioctl READ_BASELINE");
+
+ printf("\n>>> BASELINE:\n");
+ print_counts(baseline);
+ printf("\n");
+ print_counts_raw(baseline);
+ printf("\n");
+
+ /* Check baseline for saturated sets */
+ for (i = 0; i < 64; i++) {
+ if (baseline[i] >= 8)
+ errx(1, "!!! Baseline set %i full\n", i);
+ }
+
+ arg = true;
+ ret = ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &arg);
+ if (ret == -1) err(1, "ioctl SUB_BASELINE");
+
+	ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
+ if (ret == -1) err(1, "ioctl ACK_EVENT");
+
+ faultcnt = 0;
+ while (faultcnt < 10) {
+ if (monitor(false)) break;
+ }
+}
+
diff --git a/test/aes-detect_guest b/test/aes-detect_guest
Binary files differ.
diff --git a/test/aes-detect_guest.c b/test/aes-detect_guest.c
@@ -30,8 +30,24 @@ main(int argc, const char **argv)
{
struct kcapi_handle *kcapi;
uint8_t block[128];
+ uint8_t *buf;
size_t n;
+ buf = NULL;
+ if (posix_memalign((void *)&buf, L1_LINESIZE * L1_SETS, L1_LINESIZE * L1_SETS))
+ err(1, "memalign");
+ memset(buf, 0, L1_LINESIZE * L1_SETS);
+
+ while (1) {
+ CPC_CPUID_SIGNAL(CPC_CPUID_START_TRACK, 0);
+
+ buf[L1_LINESIZE * 5] += 1;
+
+		CPC_CPUID_SIGNAL(CPC_CPUID_STOP_TRACK, 0);
+ }
+
+ return 0;
+
kcapi = NULL;
if (kcapi_cipher_init(&kcapi, "ecb(aes)", 0))
err(1, "kcapi init");
diff --git a/test/sevstep.c b/test/sevstep.c
@@ -481,7 +481,7 @@ int
monitor(struct kvm *kvm, bool baseline)
{
static uint64_t rip_prev = 1;
- struct cpc_track_event event;
+ struct cpc_event event;
cpc_msrmt_t counts[64];
uint64_t rip;
int ret, i;
@@ -489,17 +489,24 @@ monitor(struct kvm *kvm, bool baseline)
/* Get page fault info */
ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
if (!ret) {
+ if (event.type != CPC_EVENT_TRACK)
+ return 0;
+
ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
if (ret == -1) err(1, "ioctl READ_COUNTS");
rip = sev_dbg_rip(kvm->vmfd);
if (!baseline && rip != rip_prev) {
printf("Event: inst:%llu data:%llu retired:%llu rip:%lu\n",
- event.inst_fault_gfn, event.data_fault_gfn,
- event.retinst, rip);
+ event.track.inst_fault_gfn,
+ event.track.data_fault_gfn,
+ event.track.retinst, rip);
print_counts(counts);
printf("\n");
rip_prev = rip;
+ faultcnt++;
+ } else if (baseline) {
+ faultcnt++;
}
for (i = 0; i < 64; i++) {
@@ -511,8 +518,6 @@ monitor(struct kvm *kvm, bool baseline)
ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
if (ret == -1) err(1, "ioctl ACK_EVENT");
-
- faultcnt++;
} else if (errno != EAGAIN) {
perror("ioctl POLL_EVENT");
return 1;
@@ -528,7 +533,7 @@ main(int argc, const char **argv)
uint64_t track_mode;
pid_t ppid, pid;
uint32_t arg;
- struct cpc_track_event event;
+ struct cpc_event event;
cpc_msrmt_t baseline[64];
int ret, i;
@@ -571,6 +576,14 @@ main(int argc, const char **argv)
ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_mode);
if (ret == -1) err(1, "ioctl TRACK_ALL");
+ arg = false;
+ ret = ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &arg);
+ if (ret == -1) err(1, "ioctl SUB_BASELINE");
+
+ arg = true;
+ ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
+ if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
ppid = getpid();
if ((pid = fork())) {
if (pid < 0) err(1, "fork");
@@ -584,12 +597,8 @@ main(int argc, const char **argv)
pin_process(0, SECONDARY_CORE, true);
printf("PINNED\n");
- arg = true;
- ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
- if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
-
faultcnt = 0;
- while (faultcnt < 50) {
+ while (faultcnt < 300) {
if (monitor(&kvm_with_access, true)) break;
}
@@ -626,7 +635,7 @@ main(int argc, const char **argv)
if (ret == -1) err(1, "ioctl ACK_EVENT");
faultcnt = 0;
- while (faultcnt < 50) {
+ while (faultcnt < 20) {
if (monitor(&kvm_with_access, false)) break;
}