commit 3f0a50bf0999a1aadfeae0568eb0852da01433f5
parent bb2c0277010fa5fc3010dca79685d527bd87d9fb
Author: Louis Burda <quent.burda@gmail.com>
Date: Fri, 27 Jan 2023 16:42:44 +0100
Consistent use of cpc shorthand instead of cachepc
Diffstat:
10 files changed, 428 insertions(+), 447 deletions(-)
diff --git a/cachepc/asm.S b/cachepc/asm.S
@@ -2,12 +2,12 @@
#include <linux/linkage.h>
-.global cachepc_read_pmc
-.global cachepc_prime_probe_test_asm
-.global cachepc_stream_hwpf_test_asm
-.global cachepc_single_eviction_test_asm
+.global cpc_read_pmc
+.global cpc_prime_probe_test_asm
+.global cpc_stream_hwpf_test_asm
+.global cpc_single_eviction_test_asm
-SYM_FUNC_START(cachepc_read_pmc)
+SYM_FUNC_START(cpc_read_pmc)
push %rbx
push %rcx
push %rdx
@@ -22,9 +22,9 @@ SYM_FUNC_START(cachepc_read_pmc)
pop %rbx
ret
-SYM_FUNC_END(cachepc_read_pmc)
+SYM_FUNC_END(cpc_read_pmc)
-SYM_FUNC_START(cachepc_prime_probe_test_asm)
+SYM_FUNC_START(cpc_prime_probe_test_asm)
push %rbx
push %rcx
push %rdx
@@ -36,7 +36,7 @@ SYM_FUNC_START(cachepc_prime_probe_test_asm)
wbinvd
- mov cachepc_ds, %r9
+ mov cpc_ds, %r9
prime prime_probe_test %r9 %r10 %r8
prime prime_probe_test1 %r9 %r10 %r8
prime prime_probe_test2 %r9 %r10 %r8
@@ -52,9 +52,9 @@ SYM_FUNC_START(cachepc_prime_probe_test_asm)
pop %rbx
ret
-SYM_FUNC_END(cachepc_prime_probe_test_asm)
+SYM_FUNC_END(cpc_prime_probe_test_asm)
-SYM_FUNC_START(cachepc_stream_hwpf_test_asm)
+SYM_FUNC_START(cpc_stream_hwpf_test_asm)
push %rbx
push %rcx
push %rdx
@@ -88,9 +88,9 @@ SYM_FUNC_START(cachepc_stream_hwpf_test_asm)
pop %rbx
ret
-SYM_FUNC_END(cachepc_stream_hwpf_test_asm)
+SYM_FUNC_END(cpc_stream_hwpf_test_asm)
-SYM_FUNC_START(cachepc_single_eviction_test_asm)
+SYM_FUNC_START(cpc_single_eviction_test_asm)
push %rbx
push %rcx
push %rdx
@@ -102,7 +102,7 @@ SYM_FUNC_START(cachepc_single_eviction_test_asm)
wbinvd
- mov cachepc_ds, %r9
+ mov cpc_ds, %r9
prime single_eviction_test %r9 %r10 %r8
prime single_eviction_test2 %r9 %r10 %r8
prime single_eviction_test3 %r9 %r10 %r8
@@ -119,5 +119,5 @@ SYM_FUNC_START(cachepc_single_eviction_test_asm)
pop %rbx
ret
-SYM_FUNC_END(cachepc_single_eviction_test_asm)
+SYM_FUNC_END(cpc_single_eviction_test_asm)
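For context: the diff elides the body of cpc_read_pmc, but the name and the register save/restore around it suggest it wraps the rdpmc instruction. A minimal C sketch of such a read (hypothetical, not the module's actual code):

    static inline uint64_t
    read_pmc_sketch(uint32_t index)
    {
        uint32_t lo, hi;

        /* rdpmc returns performance counter 'index' in edx:eax */
        asm volatile ("rdpmc" : "=a" (lo), "=d" (hi) : "c" (index));
        return ((uint64_t)hi << 32) | lo;
    }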
diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -11,10 +11,10 @@
#define MIN(a, b) ((a) < (b) ? (a) : (b))
-EXPORT_SYMBOL(cachepc_read_pmc);
+EXPORT_SYMBOL(cpc_read_pmc);
bool
-cachepc_verify_topology(void)
+cpc_verify_topology(void)
{
uint32_t assoc, linesize;
uint32_t size, sets;
@@ -47,10 +47,10 @@ cachepc_verify_topology(void)
return false;
}
-EXPORT_SYMBOL(cachepc_verify_topology);
+EXPORT_SYMBOL(cpc_verify_topology);
void
-cachepc_write_msr(uint64_t addr, uint64_t clear_bits, uint64_t set_bits)
+cpc_write_msr(uint64_t addr, uint64_t clear_bits, uint64_t set_bits)
{
uint64_t val, newval;
@@ -65,10 +65,10 @@ cachepc_write_msr(uint64_t addr, uint64_t clear_bits, uint64_t set_bits)
addr, val, newval);
}
}
-EXPORT_SYMBOL(cachepc_write_msr);
+EXPORT_SYMBOL(cpc_write_msr);
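The hunk shows only the edges of cpc_write_msr, but the locals (val, newval) and the diagnostic print suggest a read-modify-write with a verifying read-back. A minimal sketch of that pattern, assuming the kernel's rdmsrl/wrmsrl helpers from <asm/msr.h>:

    rdmsrl(addr, val);                    /* current MSR value        */
    val = (val & ~clear_bits) | set_bits; /* apply requested changes  */
    wrmsrl(addr, val);
    rdmsrl(addr, newval);                 /* verify the write stuck   */
    if (newval != val)
        CPC_ERR("MSR %08llx write failed: %016llx vs %016llx\n",
            addr, val, newval);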
void
-cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
+cpc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
uint8_t host_guest, uint8_t kernel_user)
{
uint64_t event;
@@ -85,30 +85,30 @@ cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
CPC_DBG("Initializing %i. PMC %02X:%02X (%016llx)\n",
index, event_no, event_mask, event);
- cachepc_write_msr(0xc0010200 + index * 2, ~0ULL, event);
+ cpc_write_msr(0xc0010200 + index * 2, ~0ULL, event);
}
-EXPORT_SYMBOL(cachepc_init_pmc);
+EXPORT_SYMBOL(cpc_init_pmc);
void
-cachepc_reset_pmc(uint8_t index)
+cpc_reset_pmc(uint8_t index)
{
WARN_ON(index >= 6);
if (index >= 6) return;
- cachepc_write_msr(0xc0010201 + index * 2, ~0ULL, 0);
+ cpc_write_msr(0xc0010201 + index * 2, ~0ULL, 0);
}
-EXPORT_SYMBOL(cachepc_reset_pmc);
+EXPORT_SYMBOL(cpc_reset_pmc);
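For reference, the MSRs written above follow the AMD core PMC pairing: PERF_CTL at 0xc0010200 + 2*index, PERF_CTR at 0xc0010201 + 2*index. A hypothetical sketch of how cpc_init_pmc's event word could be composed (the actual composition is elided by the diff; field positions per the AMD64 PPR, flag bit values assumed):

    event  = (uint64_t)event_no & 0xff;           /* EventSelect[7:0]      */
    event |= ((uint64_t)event_mask & 0xff) << 8;  /* UnitMask              */
    event |= ((uint64_t)kernel_user & 3) << 16;   /* User (16), OS (17)    */
    event |= 1ULL << 22;                          /* En: enable counter    */
    event |= ((uint64_t)host_guest & 3) << 40;    /* Guest (40), Host (41) */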
-struct cacheline *
-cachepc_ds_alloc(struct cacheline **cl_arr_out)
+struct cpc_cl *
+cpc_ds_alloc(struct cpc_cl **cl_arr_out)
{
- struct cacheline **cl_ptr_arr;
- struct cacheline *cl_arr, *ds;
+ struct cpc_cl **cl_ptr_arr;
+ struct cpc_cl *cl_arr, *ds;
size_t i, idx;
- cl_arr = cachepc_aligned_alloc(PAGE_SIZE, L1_SIZE);
+ cl_arr = cpc_aligned_alloc(PAGE_SIZE, L1_SIZE);
- cl_ptr_arr = kzalloc(L1_LINES * sizeof(struct cacheline *), GFP_KERNEL);
+ cl_ptr_arr = kzalloc(L1_LINES * sizeof(struct cpc_cl *), GFP_KERNEL);
BUG_ON(cl_ptr_arr == NULL);
/* order cachelines by set then line number */
@@ -135,10 +135,10 @@ cachepc_ds_alloc(struct cacheline **cl_arr_out)
return ds;
}
-EXPORT_SYMBOL(cachepc_ds_alloc);
+EXPORT_SYMBOL(cpc_ds_alloc);
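The allocation above backs the prime+probe data structure: the L1-sized buffer is chained into a circular doubly linked list, ordered by set then line number, so a full walk touches every set. A small sketch of the indexing this relies on (assuming L1_LINESIZE and L1_SETS are powers of two):

    /* L1 set of a virtual address: the bits just above the line offset */
    static inline uint32_t
    cl_set_sketch(void *addr)
    {
        return ((uintptr_t)addr / L1_LINESIZE) % L1_SETS;
    }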
void *
-cachepc_aligned_alloc(size_t alignment, size_t size)
+cpc_aligned_alloc(size_t alignment, size_t size)
{
void *p;
@@ -149,12 +149,12 @@ cachepc_aligned_alloc(size_t alignment, size_t size)
return p;
}
-EXPORT_SYMBOL(cachepc_aligned_alloc);
+EXPORT_SYMBOL(cpc_aligned_alloc);
void
-cachepc_save_msrmts(struct cacheline *head)
+cpc_save_msrmts(struct cpc_cl *head)
{
- struct cacheline *cl;
+ struct cpc_cl *cl;
size_t i;
cl = head;
@@ -165,7 +165,7 @@ cachepc_save_msrmts(struct cacheline *head)
CPC_ERR("Read count %llu for set %u line %u",
cl->count, cl->cache_set, cl->cache_line);
}
- cachepc_msrmts[cl->cache_set] = cl->count;
+ cpc_msrmts[cl->cache_set] = cl->count;
} else {
BUG_ON(cl->count != 0);
}
@@ -174,31 +174,31 @@ cachepc_save_msrmts(struct cacheline *head)
cl = cl->prev;
} while (cl != head);
- if (cachepc_baseline_measure) {
+ if (cpc_baseline_measure) {
for (i = 0; i < L1_SETS; i++) {
- cachepc_baseline[i] = MIN(cachepc_baseline[i],
- cachepc_msrmts[i]);
+ cpc_baseline[i] = MIN(cpc_baseline[i],
+ cpc_msrmts[i]);
}
}
- if (cachepc_baseline_active) {
+ if (cpc_baseline_active) {
for (i = 0; i < L1_SETS; i++) {
- if (cachepc_msrmts[i] < cachepc_baseline[i]) {
+ if (cpc_msrmts[i] < cpc_baseline[i]) {
CPC_ERR("Count (%u) under baseline (%u) "
"for set %u line %u",
- cachepc_msrmts[i], cachepc_baseline[i],
+ cpc_msrmts[i], cpc_baseline[i],
cl->cache_set, cl->cache_line);
}
- cachepc_msrmts[i] -= cachepc_baseline[i];
+ cpc_msrmts[i] -= cpc_baseline[i];
}
}
}
-EXPORT_SYMBOL(cachepc_save_msrmts);
+EXPORT_SYMBOL(cpc_save_msrmts);
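The baseline logic above (record per-set minima while measuring, subtract them once active) is driven from userspace through the baseline ioctls defined later in this commit. A hypothetical sequence, with fd assumed to be the kvm device fd:

    uint32_t on = 1, off = 0;

    ioctl(fd, KVM_CPC_RESET_BASELINE, NULL); /* baseline[] = 0xff      */
    ioctl(fd, KVM_CPC_CALC_BASELINE, &on);   /* track per-set minima   */
    /* ... run measurements ... */
    ioctl(fd, KVM_CPC_CALC_BASELINE, &off);
    ioctl(fd, KVM_CPC_APPLY_BASELINE, &on);  /* subtract from counts   */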
void
-cachepc_print_msrmts(struct cacheline *head)
+cpc_print_msrmts(struct cpc_cl *head)
{
- struct cacheline *cl;
+ struct cpc_cl *cl;
cl = head;
do {
@@ -210,13 +210,13 @@ cachepc_print_msrmts(struct cacheline *head)
cl = cl->prev;
} while (cl != head);
}
-EXPORT_SYMBOL(cachepc_print_msrmts);
+EXPORT_SYMBOL(cpc_print_msrmts);
void
-cachepc_apic_oneshot_run(uint32_t interval)
+cpc_apic_oneshot_run(uint32_t interval)
{
native_apic_mem_write(APIC_LVTT, LOCAL_TIMER_VECTOR | APIC_LVT_TIMER_ONESHOT);
native_apic_mem_write(APIC_TDCR, APIC_TDR_DIV_1);
native_apic_mem_write(APIC_TMICT, interval);
}
-EXPORT_SYMBOL(cachepc_apic_oneshot_run);
+EXPORT_SYMBOL(cpc_apic_oneshot_run);
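cpc_apic_oneshot_run arms the local APIC timer in one-shot mode with a divide-by-1 clock, so an interrupt fires roughly 'interval' timer ticks after the APIC_TMICT write. Conceptually (a sketch, not code from this commit), it is armed just before VMRUN to single-step the guest:

    if (cpc_singlestep)
        cpc_apic_oneshot_run(cpc_apic_timer); /* fire after ~interval */
    /* ... VMRUN: the timer interrupt forces a guest exit ... */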
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
@@ -12,14 +12,14 @@
#define PMC_GUEST (1 << 0)
#define CPC_DBG(...) do { \
- if (cachepc_debug) pr_info("CachePC: " __VA_ARGS__); } while (0)
+ if (cpc_debug) pr_info("CachePC: " __VA_ARGS__); } while (0)
#define CPC_INFO(...) do { pr_info("CachePC: " __VA_ARGS__); } while (0)
#define CPC_WARN(...) do { pr_warn("CachePC: " __VA_ARGS__); } while (0)
#define CPC_ERR(...) do { pr_err("CachePC: " __VA_ARGS__); } while (0)
-struct cacheline {
- struct cacheline *next;
- struct cacheline *prev;
+struct cpc_cl {
+ struct cpc_cl *next;
+ struct cpc_cl *prev;
uint64_t count;
uint32_t cache_set;
@@ -54,69 +54,69 @@ struct cpc_track_steps_signalled {
uint64_t target_gfn;
};
-static_assert(sizeof(struct cacheline) == L1_LINESIZE, "Bad cacheline struct");
-static_assert(CPC_CL_NEXT_OFFSET == offsetof(struct cacheline, next));
-static_assert(CPC_CL_PREV_OFFSET == offsetof(struct cacheline, prev));
-static_assert(CPC_CL_COUNT_OFFSET == offsetof(struct cacheline, count));
+static_assert(sizeof(struct cpc_cl) == L1_LINESIZE, "Bad cacheline struct");
+static_assert(CPC_CL_NEXT_OFFSET == offsetof(struct cpc_cl, next));
+static_assert(CPC_CL_PREV_OFFSET == offsetof(struct cpc_cl, prev));
+static_assert(CPC_CL_COUNT_OFFSET == offsetof(struct cpc_cl, count));
-bool cachepc_verify_topology(void);
+bool cpc_verify_topology(void);
-void cachepc_write_msr(uint64_t addr, uint64_t clear_bits, uint64_t set_bits);
+void cpc_write_msr(uint64_t addr, uint64_t clear_bits, uint64_t set_bits);
-void cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
+void cpc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
uint8_t host_guest, uint8_t kernel_user);
-void cachepc_reset_pmc(uint8_t index);
+void cpc_reset_pmc(uint8_t index);
-struct cacheline *cachepc_ds_alloc(struct cacheline **ds_ul);
+struct cpc_cl *cpc_ds_alloc(struct cpc_cl **ds_ul);
-void *cachepc_aligned_alloc(size_t alignment, size_t size);
+void *cpc_aligned_alloc(size_t alignment, size_t size);
-void cachepc_save_msrmts(struct cacheline *head);
-void cachepc_print_msrmts(struct cacheline *head);
+void cpc_save_msrmts(struct cpc_cl *head);
+void cpc_print_msrmts(struct cpc_cl *head);
-struct cacheline *cachepc_prime(struct cacheline *head);
-void cachepc_probe(struct cacheline *head);
+struct cpc_cl *cpc_prime(struct cpc_cl *head);
+void cpc_probe(struct cpc_cl *head);
-uint64_t cachepc_read_pmc(uint64_t event);
+uint64_t cpc_read_pmc(uint64_t event);
-void cachepc_apic_oneshot_run(uint32_t interval);
+void cpc_apic_oneshot_run(uint32_t interval);
-extern bool cachepc_debug;
+extern bool cpc_debug;
-extern uint8_t *cachepc_msrmts;
-extern uint8_t *cachepc_baseline;
-extern bool cachepc_baseline_measure;
-extern bool cachepc_baseline_active;
+extern uint8_t *cpc_msrmts;
+extern uint8_t *cpc_baseline;
+extern bool cpc_baseline_measure;
+extern bool cpc_baseline_active;
-extern bool cachepc_pause_vm;
+extern bool cpc_pause_vm;
-extern bool cachepc_prime_probe;
+extern bool cpc_prime_probe;
-extern bool cachepc_singlestep;
-extern bool cachepc_singlestep_reset;
-extern bool cachepc_long_step;
+extern bool cpc_singlestep;
+extern bool cpc_singlestep_reset;
+extern bool cpc_long_step;
-extern bool cachepc_apic_oneshot;
-extern uint32_t cachepc_apic_timer;
+extern bool cpc_apic_oneshot;
+extern uint32_t cpc_apic_timer;
-extern uint32_t cachepc_track_mode;
-extern uint64_t cachepc_track_start_gfn;
-extern uint64_t cachepc_track_end_gfn;
+extern uint32_t cpc_track_mode;
+extern uint64_t cpc_track_start_gfn;
+extern uint64_t cpc_track_end_gfn;
-extern uint64_t cachepc_retinst;
-extern uint64_t cachepc_retinst_prev;
+extern uint64_t cpc_retinst;
+extern uint64_t cpc_retinst_prev;
-extern uint64_t cachepc_rip;
-extern uint64_t cachepc_rip_prev;
-extern bool cachepc_rip_prev_set;
+extern uint64_t cpc_rip;
+extern uint64_t cpc_rip_prev;
+extern bool cpc_rip_prev_set;
extern struct cpc_track_pages cpc_track_pages;
extern struct cpc_track_steps cpc_track_steps;
extern struct cpc_track_steps_signalled cpc_track_steps_signalled;
-extern struct list_head cachepc_faults;
+extern struct list_head cpc_faults;
-extern struct cacheline *cachepc_ds;
+extern struct cpc_cl *cpc_ds;
-extern uint64_t cachepc_regs_tmp[16];
-extern uint64_t cachepc_regs_vm[16];
+extern uint64_t cpc_regs_tmp[16];
+extern uint64_t cpc_regs_vm[16];
diff --git a/cachepc/event.c b/cachepc/event.c
@@ -13,61 +13,61 @@
#define ARRLEN(x) (sizeof(x)/sizeof((x)[0]))
-uint64_t cachepc_last_event_sent;
-uint64_t cachepc_last_event_acked;
-rwlock_t cachepc_event_lock;
+uint64_t cpc_last_event_sent;
+uint64_t cpc_last_event_acked;
+rwlock_t cpc_event_lock;
-struct cpc_event cachepc_event;
-bool cachepc_event_avail;
+struct cpc_event cpc_event;
+bool cpc_event_avail;
-bool cachepc_events_init;
+bool cpc_events_init;
void
-cachepc_events_reset(void)
+cpc_events_reset(void)
{
- write_lock(&cachepc_event_lock);
- cachepc_events_init = true;
- cachepc_last_event_sent = 1;
- cachepc_last_event_acked = 1;
- cachepc_event_avail = false;
- write_unlock(&cachepc_event_lock);
+ write_lock(&cpc_event_lock);
+ cpc_events_init = true;
+ cpc_last_event_sent = 1;
+ cpc_last_event_acked = 1;
+ cpc_event_avail = false;
+ write_unlock(&cpc_event_lock);
}
int
-cachepc_send_event(struct cpc_event event)
+cpc_send_event(struct cpc_event event)
{
ktime_t deadline;
- read_lock(&cachepc_event_lock);
- if (!cachepc_events_init) {
+ read_lock(&cpc_event_lock);
+ if (!cpc_events_init) {
CPC_WARN("events ctx not initialized!\n");
- read_unlock(&cachepc_event_lock);
+ read_unlock(&cpc_event_lock);
return 1;
}
- read_unlock(&cachepc_event_lock);
+ read_unlock(&cpc_event_lock);
- write_lock(&cachepc_event_lock);
- if (cachepc_last_event_sent != cachepc_last_event_acked) {
+ write_lock(&cpc_event_lock);
+ if (cpc_last_event_sent != cpc_last_event_acked) {
CPC_WARN("event IDs out of sync\n");
- write_unlock(&cachepc_event_lock);
+ write_unlock(&cpc_event_lock);
return 1;
} else {
- cachepc_last_event_sent++;
+ cpc_last_event_sent++;
}
- event.id = cachepc_last_event_sent;
- cachepc_event_avail = true;
- cachepc_event = event;
+ event.id = cpc_last_event_sent;
+ cpc_event_avail = true;
+ cpc_event = event;
//CPC_DBG("Sent Event: id %llu\n", event.id);
- write_unlock(&cachepc_event_lock);
+ write_unlock(&cpc_event_lock);
/* wait for ack with timeout */
deadline = ktime_get_ns() + 10000000000ULL; /* 10s in ns */
- while (!cachepc_event_is_done()) {
+ while (!cpc_event_is_done()) {
if (ktime_get_ns() > deadline) {
CPC_WARN("Timeout waiting for ack of event %llu\n",
- cachepc_event.id);
+ cpc_event.id);
return 1;
}
}
@@ -76,7 +76,7 @@ cachepc_send_event(struct cpc_event event)
}
int
-cachepc_send_guest_event(uint64_t type, uint64_t val)
+cpc_send_guest_event(uint64_t type, uint64_t val)
{
struct cpc_event event;
@@ -84,23 +84,23 @@ cachepc_send_guest_event(uint64_t type, uint64_t val)
event.guest.type = type;
event.guest.val = val;
- return cachepc_send_event(event);
+ return cpc_send_event(event);
}
-EXPORT_SYMBOL(cachepc_send_guest_event);
+EXPORT_SYMBOL(cpc_send_guest_event);
int
-cachepc_send_pause_event(void)
+cpc_send_pause_event(void)
{
struct cpc_event event;
event.type = CPC_EVENT_PAUSE;
- return cachepc_send_event(event);
+ return cpc_send_event(event);
}
-EXPORT_SYMBOL(cachepc_send_pause_event);
+EXPORT_SYMBOL(cpc_send_pause_event);
int
-cachepc_send_track_step_event(struct list_head *list)
+cpc_send_track_step_event(struct list_head *list)
{
struct cpc_event event = { 0 };
struct cpc_fault *fault;
@@ -118,14 +118,14 @@ cachepc_send_track_step_event(struct list_head *list)
count += 1;
}
event.step.fault_count = count;
- event.step.retinst = cachepc_retinst;
+ event.step.retinst = cpc_retinst;
- return cachepc_send_event(event);
+ return cpc_send_event(event);
}
-EXPORT_SYMBOL(cachepc_send_track_step_event);
+EXPORT_SYMBOL(cpc_send_track_step_event);
int
-cachepc_send_track_page_event(uint64_t gfn_prev, uint64_t gfn, uint64_t retinst)
+cpc_send_track_page_event(uint64_t gfn_prev, uint64_t gfn, uint64_t retinst)
{
struct cpc_event event = { 0 };
@@ -134,12 +134,12 @@ cachepc_send_track_page_event(uint64_t gfn_prev, uint64_t gfn, uint64_t retinst)
event.page.inst_gfn = gfn;
event.page.retinst = retinst;
- return cachepc_send_event(event);
+ return cpc_send_event(event);
}
-EXPORT_SYMBOL(cachepc_send_track_page_event);
+EXPORT_SYMBOL(cpc_send_track_page_event);
int
-cachepc_send_track_step_event_single(uint64_t gfn, uint32_t err, uint64_t retinst)
+cpc_send_track_step_event_single(uint64_t gfn, uint32_t err, uint64_t retinst)
{
struct cpc_event event = { 0 };
@@ -150,58 +150,58 @@ cachepc_send_track_step_event_single(uint64_t gfn, uint32_t err, uint64_t retins
event.step.inst_gfn = gfn;
event.step.retinst = retinst;
- return cachepc_send_event(event);
+ return cpc_send_event(event);
}
-EXPORT_SYMBOL(cachepc_send_track_step_event_single);
+EXPORT_SYMBOL(cpc_send_track_step_event_single);
bool
-cachepc_event_is_done(void)
+cpc_event_is_done(void)
{
bool done;
- read_lock(&cachepc_event_lock);
+ read_lock(&cpc_event_lock);
//CPC_DBG("Event Send: Event not done %llu %llu\n",
- // cachepc_last_event_acked, id);
- done = cachepc_last_event_acked == cachepc_last_event_sent;
- read_unlock(&cachepc_event_lock);
+ // cpc_last_event_acked, id);
+ done = cpc_last_event_acked == cpc_last_event_sent;
+ read_unlock(&cpc_event_lock);
return done;
}
int
-cachepc_poll_event_ioctl(void __user *arg_user)
+cpc_poll_event_ioctl(void __user *arg_user)
{
int err;
- read_lock(&cachepc_event_lock);
- if (!cachepc_event_avail) {
+ read_lock(&cpc_event_lock);
+ if (!cpc_event_avail) {
//CPC_DBG("Event Poll: No event avail %llu %llu\n",
- // cachepc_last_event_sent, cachepc_last_event_acked);
- read_unlock(&cachepc_event_lock);
+ // cpc_last_event_sent, cpc_last_event_acked);
+ read_unlock(&cpc_event_lock);
return -EAGAIN;
}
- read_unlock(&cachepc_event_lock);
+ read_unlock(&cpc_event_lock);
err = 0;
- write_lock(&cachepc_event_lock);
- if (cachepc_event_avail) {
+ write_lock(&cpc_event_lock);
+ if (cpc_event_avail) {
//CPC_DBG("Event Poll: Event is avail %px %llu %llu\n", arg_user,
- // cachepc_last_event_sent, cachepc_last_event_acked);
- if (copy_to_user(arg_user, &cachepc_event, sizeof(cachepc_event)))
+ // cpc_last_event_sent, cpc_last_event_acked);
+ if (copy_to_user(arg_user, &cpc_event, sizeof(cpc_event)))
err = -EFAULT;
} else {
//CPC_DBG("Event Poll: Event was avail %llu %llu\n",
- // cachepc_last_event_sent, cachepc_last_event_acked);
+ // cpc_last_event_sent, cpc_last_event_acked);
err = -EAGAIN;
}
- if (!err) cachepc_event_avail = false;
- write_unlock(&cachepc_event_lock);
+ if (!err) cpc_event_avail = false;
+ write_unlock(&cpc_event_lock);
return err;
}
int
-cachepc_ack_event_ioctl(void __user *arg_user)
+cpc_ack_event_ioctl(void __user *arg_user)
{
uint64_t eventid;
int err;
@@ -212,17 +212,17 @@ cachepc_ack_event_ioctl(void __user *arg_user)
return -EFAULT;
err = 0;
- write_lock(&cachepc_event_lock);
- if (!eventid || eventid == cachepc_last_event_sent) {
- if (cachepc_event.type == CPC_EVENT_PAUSE)
- cachepc_pause_vm = false;
- cachepc_last_event_acked = cachepc_last_event_sent;
+ write_lock(&cpc_event_lock);
+ if (!eventid || eventid == cpc_last_event_sent) {
+ if (cpc_event.type == CPC_EVENT_PAUSE)
+ cpc_pause_vm = false;
+ cpc_last_event_acked = cpc_last_event_sent;
} else {
err = -EFAULT;
CPC_WARN("Acked event (%llu) does not match sent (%llu)\n",
- eventid, cachepc_last_event_sent);
+ eventid, cpc_last_event_sent);
}
- write_unlock(&cachepc_event_lock);
+ write_unlock(&cpc_event_lock);
return err;
}
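The counterpart to this poll/ack protocol lives in userspace. A hypothetical consumer loop (fd is assumed to be the kvm device fd; needs <sys/ioctl.h>, <errno.h>, <err.h>):

    struct cpc_event event;

    while (ioctl(fd, KVM_CPC_POLL_EVENT, &event) == -1) {
        if (errno != EAGAIN) err(1, "poll event");
    }
    /* ... handle the event ... */
    if (ioctl(fd, KVM_CPC_ACK_EVENT, &event.id) == -1)
        err(1, "ack event");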
diff --git a/cachepc/event.h b/cachepc/event.h
@@ -7,15 +7,15 @@
#include <linux/kvm_host.h>
#include <linux/types.h>
-void cachepc_events_reset(void);
+void cpc_events_reset(void);
-int cachepc_send_guest_event(uint64_t type, uint64_t val);
-int cachepc_send_pause_event(void);
-int cachepc_send_track_step_event(struct list_head *list);
-int cachepc_send_track_step_event_single(uint64_t gfn, uint32_t err, uint64_t retinst);
-int cachepc_send_track_page_event(uint64_t gfn_prev, uint64_t gfn, uint64_t retinst);
+int cpc_send_guest_event(uint64_t type, uint64_t val);
+int cpc_send_pause_event(void);
+int cpc_send_track_step_event(struct list_head *list);
+int cpc_send_track_step_event_single(uint64_t gfn, uint32_t err, uint64_t retinst);
+int cpc_send_track_page_event(uint64_t gfn_prev, uint64_t gfn, uint64_t retinst);
-bool cachepc_event_is_done(void);
+bool cpc_event_is_done(void);
-int cachepc_poll_event_ioctl(void __user *arg_user);
-int cachepc_ack_event_ioctl(void __user *arg_user);
+int cpc_poll_event_ioctl(void __user *arg_user);
+int cpc_ack_event_ioctl(void __user *arg_user);
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -18,58 +18,58 @@
#define TEST_REPEAT_MAX 1000
-bool cachepc_debug = false;
-EXPORT_SYMBOL(cachepc_debug);
-
-uint8_t *cachepc_msrmts = NULL;
-EXPORT_SYMBOL(cachepc_msrmts);
-
-uint8_t *cachepc_baseline = NULL;
-bool cachepc_baseline_measure = false;
-bool cachepc_baseline_active = false;
-EXPORT_SYMBOL(cachepc_baseline);
-EXPORT_SYMBOL(cachepc_baseline_measure);
-EXPORT_SYMBOL(cachepc_baseline_active);
-
-bool cachepc_pause_vm = false;
-EXPORT_SYMBOL(cachepc_pause_vm);
-
-bool cachepc_prime_probe = false;
-EXPORT_SYMBOL(cachepc_prime_probe);
-
-uint64_t cachepc_retinst = 0;
-uint64_t cachepc_retinst_prev = 0;
-EXPORT_SYMBOL(cachepc_retinst);
-EXPORT_SYMBOL(cachepc_retinst_prev);
-
-uint64_t cachepc_rip = 0;
-uint64_t cachepc_rip_prev = 0;
-bool cachepc_rip_prev_set = false;
-EXPORT_SYMBOL(cachepc_rip);
-EXPORT_SYMBOL(cachepc_rip_prev);
-EXPORT_SYMBOL(cachepc_rip_prev_set);
-
-bool cachepc_singlestep = false;
-bool cachepc_singlestep_reset = false;
-bool cachepc_long_step = false;
-EXPORT_SYMBOL(cachepc_singlestep);
-EXPORT_SYMBOL(cachepc_singlestep_reset);
-EXPORT_SYMBOL(cachepc_long_step);
-
-bool cachepc_apic_oneshot = false;
-uint32_t cachepc_apic_timer = 0;
-EXPORT_SYMBOL(cachepc_apic_oneshot);
-EXPORT_SYMBOL(cachepc_apic_timer);
-
-uint32_t cachepc_track_mode = false;
-uint64_t cachepc_track_start_gfn = 0;
-uint64_t cachepc_track_end_gfn = 0;
-EXPORT_SYMBOL(cachepc_track_mode);
-EXPORT_SYMBOL(cachepc_track_start_gfn);
-EXPORT_SYMBOL(cachepc_track_end_gfn);
-
-LIST_HEAD(cachepc_faults);
-EXPORT_SYMBOL(cachepc_faults);
+bool cpc_debug = false;
+EXPORT_SYMBOL(cpc_debug);
+
+uint8_t *cpc_msrmts = NULL;
+EXPORT_SYMBOL(cpc_msrmts);
+
+uint8_t *cpc_baseline = NULL;
+bool cpc_baseline_measure = false;
+bool cpc_baseline_active = false;
+EXPORT_SYMBOL(cpc_baseline);
+EXPORT_SYMBOL(cpc_baseline_measure);
+EXPORT_SYMBOL(cpc_baseline_active);
+
+bool cpc_pause_vm = false;
+EXPORT_SYMBOL(cpc_pause_vm);
+
+bool cpc_prime_probe = false;
+EXPORT_SYMBOL(cpc_prime_probe);
+
+uint64_t cpc_retinst = 0;
+uint64_t cpc_retinst_prev = 0;
+EXPORT_SYMBOL(cpc_retinst);
+EXPORT_SYMBOL(cpc_retinst_prev);
+
+uint64_t cpc_rip = 0;
+uint64_t cpc_rip_prev = 0;
+bool cpc_rip_prev_set = false;
+EXPORT_SYMBOL(cpc_rip);
+EXPORT_SYMBOL(cpc_rip_prev);
+EXPORT_SYMBOL(cpc_rip_prev_set);
+
+bool cpc_singlestep = false;
+bool cpc_singlestep_reset = false;
+bool cpc_long_step = false;
+EXPORT_SYMBOL(cpc_singlestep);
+EXPORT_SYMBOL(cpc_singlestep_reset);
+EXPORT_SYMBOL(cpc_long_step);
+
+bool cpc_apic_oneshot = false;
+uint32_t cpc_apic_timer = 0;
+EXPORT_SYMBOL(cpc_apic_oneshot);
+EXPORT_SYMBOL(cpc_apic_timer);
+
+uint32_t cpc_track_mode = false;
+uint64_t cpc_track_start_gfn = 0;
+uint64_t cpc_track_end_gfn = 0;
+EXPORT_SYMBOL(cpc_track_mode);
+EXPORT_SYMBOL(cpc_track_start_gfn);
+EXPORT_SYMBOL(cpc_track_end_gfn);
+
+LIST_HEAD(cpc_faults);
+EXPORT_SYMBOL(cpc_faults);
struct cpc_track_pages cpc_track_pages;
struct cpc_track_steps cpc_track_steps;
@@ -78,66 +78,66 @@ EXPORT_SYMBOL(cpc_track_pages);
EXPORT_SYMBOL(cpc_track_steps);
EXPORT_SYMBOL(cpc_track_steps_signalled);
-struct cacheline *cachepc_ds_ul = NULL;
-struct cacheline *cachepc_ds = NULL;
-EXPORT_SYMBOL(cachepc_ds);
+struct cpc_cl *cpc_ds_ul = NULL;
+struct cpc_cl *cpc_ds = NULL;
+EXPORT_SYMBOL(cpc_ds);
-uint64_t cachepc_regs_tmp[16];
-uint64_t cachepc_regs_vm[16];
-EXPORT_SYMBOL(cachepc_regs_tmp);
-EXPORT_SYMBOL(cachepc_regs_vm);
+uint64_t cpc_regs_tmp[16];
+uint64_t cpc_regs_vm[16];
+EXPORT_SYMBOL(cpc_regs_tmp);
+EXPORT_SYMBOL(cpc_regs_vm);
-void cachepc_prime_probe_test_asm(void);
-static noinline void cachepc_prime_probe_test(void);
+void cpc_prime_probe_test_asm(void);
+static noinline void cpc_prime_probe_test(void);
-uint64_t cachepc_stream_hwpf_test_asm(void *lines);
-static noinline void cachepc_stream_hwpf_test(void);
+uint64_t cpc_stream_hwpf_test_asm(void *lines);
+static noinline void cpc_stream_hwpf_test(void);
-void cachepc_single_eviction_test_asm(void *ptr);
-static noinline void cachepc_single_eviction_test(void *p);
+void cpc_single_eviction_test_asm(void *ptr);
+static noinline void cpc_single_eviction_test(void *p);
-static void cachepc_kvm_pmc_setup(void *p);
-static void cachepc_kvm_system_setup(void);
+static void cpc_kvm_pmc_setup(void *p);
+static void cpc_kvm_system_setup(void);
-static int cachepc_kvm_reset_ioctl(void __user *arg_user);
-static int cachepc_kvm_debug_ioctl(void __user *arg_user);
+static int cpc_kvm_reset_ioctl(void __user *arg_user);
+static int cpc_kvm_debug_ioctl(void __user *arg_user);
-static int cachepc_kvm_memory_encrypt_op_ioctl(void __user *arg_user);
+static int cpc_kvm_memory_encrypt_op_ioctl(void __user *arg_user);
-static int cachepc_kvm_test_eviction_ioctl(void __user *arg_user);
+static int cpc_kvm_test_eviction_ioctl(void __user *arg_user);
-static int cachepc_kvm_read_counts_ioctl(void __user *arg_user);
+static int cpc_kvm_read_counts_ioctl(void __user *arg_user);
-static int cachepc_kvm_reset_baseline_ioctl(void __user *arg_user);
-static int cachepc_kvm_calc_baseline_ioctl(void __user *arg_user);
-static int cachepc_kvm_read_baseline_ioctl(void __user *arg_user);
-static int cachepc_kvm_apply_baseline_ioctl(void __user *arg_user);
+static int cpc_kvm_reset_baseline_ioctl(void __user *arg_user);
+static int cpc_kvm_calc_baseline_ioctl(void __user *arg_user);
+static int cpc_kvm_read_baseline_ioctl(void __user *arg_user);
+static int cpc_kvm_apply_baseline_ioctl(void __user *arg_user);
-static int cachepc_kvm_reset_tracking_ioctl(void __user *arg_user);
-static int cachepc_kvm_track_mode_ioctl(void __user *arg_user);
-// static int cachepc_kvm_track_page_ioctl(void __user *arg_user);
-// static int cachepc_kvm_track_range_start_ioctl(void __user *arg_user);
-// static int cachepc_kvm_track_range_end_ioctl(void __user *arg_user);
-// static int cachepc_kvm_track_exec_cur_ioctl(void __user *arg_user);
+static int cpc_kvm_reset_tracking_ioctl(void __user *arg_user);
+static int cpc_kvm_track_mode_ioctl(void __user *arg_user);
+// static int cpc_kvm_track_page_ioctl(void __user *arg_user);
+// static int cpc_kvm_track_range_start_ioctl(void __user *arg_user);
+// static int cpc_kvm_track_range_end_ioctl(void __user *arg_user);
+// static int cpc_kvm_track_exec_cur_ioctl(void __user *arg_user);
-static int cachepc_kvm_req_pause_ioctl(void __user *arg_user);
+static int cpc_kvm_req_pause_ioctl(void __user *arg_user);
void
-cachepc_prime_probe_test(void)
+cpc_prime_probe_test(void)
{
int i, n, count;
/* l2 data cache hit & miss */
- cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, 0, PMC_KERNEL);
+ cpc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, 0, PMC_KERNEL);
for (n = 0; n < TEST_REPEAT_MAX; n++) {
- memset(cachepc_msrmts, 0, L1_SETS);
- cachepc_prime_probe_test_asm();
- cachepc_save_msrmts(cachepc_ds);
+ memset(cpc_msrmts, 0, L1_SETS);
+ cpc_prime_probe_test_asm();
+ cpc_save_msrmts(cpc_ds);
count = 0;
for (i = 0; i < L1_SETS; i++)
- count += cachepc_msrmts[i];
+ count += cpc_msrmts[i];
if (count != 0) {
CPC_ERR("Prime-probe %i. test failed (%u vs. %u)\n",
@@ -151,21 +151,21 @@ cachepc_prime_probe_test(void)
}
void
-cachepc_stream_hwpf_test(void)
+cpc_stream_hwpf_test(void)
{
const uint32_t max = 10;
- struct cacheline *lines;
+ struct cpc_cl *lines;
uint32_t count;
int n;
/* l2 data cache hit & miss */
- cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, 0, PMC_KERNEL);
+ cpc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, 0, PMC_KERNEL);
- lines = cachepc_aligned_alloc(L1_SIZE, L1_SIZE);
+ lines = cpc_aligned_alloc(L1_SIZE, L1_SIZE);
count = 0;
for (n = 0; n < TEST_REPEAT_MAX; n++) {
- count = cachepc_stream_hwpf_test_asm(lines);
+ count = cpc_stream_hwpf_test_asm(lines);
if (count != max) {
CPC_ERR("HWPF %i. test failed (%u vs. %u)\n",
n, count, max);
@@ -180,35 +180,35 @@ cachepc_stream_hwpf_test(void)
}
void
-cachepc_single_eviction_test(void *p)
+cpc_single_eviction_test(void *p)
{
- struct cacheline *victim_ul;
- struct cacheline *victim;
+ struct cpc_cl *victim_ul;
+ struct cpc_cl *victim;
uint32_t target, *arg;
int n, i, count;
arg = p;
/* l2 data cache hit & miss */
- cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, 0, PMC_KERNEL);
+ cpc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8, 0, PMC_KERNEL);
WARN_ON(arg && *arg >= L1_SETS);
if (arg && *arg >= L1_SETS) return;
target = arg ? *arg : 48;
- victim_ul = cachepc_aligned_alloc(PAGE_SIZE, L1_SIZE);
+ victim_ul = cpc_aligned_alloc(PAGE_SIZE, L1_SIZE);
victim = &victim_ul[target];
for (n = 0; n < TEST_REPEAT_MAX; n++) {
- memset(cachepc_msrmts, 0, L1_SETS);
- cachepc_single_eviction_test_asm(victim);
- cachepc_save_msrmts(cachepc_ds);
+ memset(cpc_msrmts, 0, L1_SETS);
+ cpc_single_eviction_test_asm(victim);
+ cpc_save_msrmts(cpc_ds);
count = 0;
for (i = 0; i < L1_SETS; i++)
- count += cachepc_msrmts[i];
+ count += cpc_msrmts[i];
- if (count != 1 || cachepc_msrmts[target] != 1) {
+ if (count != 1 || cpc_msrmts[target] != 1) {
CPC_ERR("Single eviction %i. test failed (%u vs %u)\n",
n, count, 1);
if (arg) *arg = count;
@@ -225,19 +225,19 @@ cachepc_single_eviction_test(void *p)
}
void
-cachepc_kvm_pmc_setup(void *p)
+cpc_kvm_pmc_setup(void *p)
{
/* L1 misses in host kernel */
- cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8,
+ cpc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8,
PMC_HOST, PMC_KERNEL);
/* retired instructions in guest */
- cachepc_init_pmc(CPC_RETINST_PMC, 0xC0, 0x00,
+ cpc_init_pmc(CPC_RETINST_PMC, 0xC0, 0x00,
PMC_GUEST, PMC_KERNEL | PMC_USER);
}
void
-cachepc_kvm_system_setup(void)
+cpc_kvm_system_setup(void)
{
/* NOTE: since most of these MSRs are poorly documented and some
* guessing work was involved, it is likely that one or more of
@@ -245,59 +245,59 @@ cachepc_kvm_system_setup(void)
/* REF: BKDG Family 15h Model 00h-0Fh Rev 3.14 January 23, 2013 P.38 */
/* disable streaming store */
- cachepc_write_msr(0xc0011020, 0, 1ULL << 28);
+ cpc_write_msr(0xc0011020, 0, 1ULL << 28);
/* disable data cache hw prefetcher */
- cachepc_write_msr(0xc0011022, 0, 1ULL << 13);
+ cpc_write_msr(0xc0011022, 0, 1ULL << 13);
/* REF: https://arxiv.org/pdf/2204.03290.pdf */
/* l1 and l2 prefetchers */
- cachepc_write_msr(0xc0011022, 0, 1ULL << 16);
- cachepc_write_msr(0xc001102b, 1ULL << 0, 0);
+ cpc_write_msr(0xc0011022, 0, 1ULL << 16);
+ cpc_write_msr(0xc001102b, 1ULL << 0, 0);
/* REF: https://community.amd.com/t5/archives-discussions/modifying-msr-to-disable-the-prefetcher/td-p/143443 */
- cachepc_write_msr(0xc001102b, 0, 1ULL << 18);
+ cpc_write_msr(0xc001102b, 0, 1ULL << 18);
/* REF: PPR Family 19h Model 01h Vol 1/2 Rev 0.50 May 27.2021 P.168 */
/* disable L1 and L2 prefetcher */
- cachepc_write_msr(0xC0000108, 0, 0b00101111);
+ cpc_write_msr(0xC0000108, 0, 0b00101111);
/* REF: PPR Family 19h Model 01h Vol 1/2 Rev 0.50 May 27.2021 P.111 */
/* disable speculation */
- cachepc_write_msr(0x00000048, 0, 0b10000111);
+ cpc_write_msr(0x00000048, 0, 0b10000111);
}
int
-cachepc_kvm_reset_ioctl(void __user *arg_user)
+cpc_kvm_reset_ioctl(void __user *arg_user)
{
int ret;
ret = smp_call_function_single(CPC_ISOLCPU,
- cachepc_kvm_pmc_setup, NULL, true);
+ cpc_kvm_pmc_setup, NULL, true);
if (ret) return -EFAULT;
- cachepc_events_reset();
+ cpc_events_reset();
- cachepc_kvm_reset_tracking_ioctl(NULL);
- cachepc_kvm_reset_baseline_ioctl(NULL);
+ cpc_kvm_reset_tracking_ioctl(NULL);
+ cpc_kvm_reset_baseline_ioctl(NULL);
- cachepc_pause_vm = false;
+ cpc_pause_vm = false;
- cachepc_singlestep = false;
- cachepc_singlestep_reset = false;
+ cpc_singlestep = false;
+ cpc_singlestep_reset = false;
- cachepc_apic_oneshot = false;
- cachepc_apic_timer = 0;
+ cpc_apic_oneshot = false;
+ cpc_apic_timer = 0;
- cachepc_prime_probe = false;
+ cpc_prime_probe = false;
- cachepc_retinst = 0;
- cachepc_rip_prev_set = false;
+ cpc_retinst = 0;
+ cpc_rip_prev_set = false;
return 0;
}
int
-cachepc_kvm_debug_ioctl(void __user *arg_user)
+cpc_kvm_debug_ioctl(void __user *arg_user)
{
uint32_t debug;
@@ -306,13 +306,13 @@ cachepc_kvm_debug_ioctl(void __user *arg_user)
if (copy_from_user(&debug, arg_user, sizeof(uint32_t)))
return -EFAULT;
- cachepc_debug = debug;
+ cpc_debug = debug;
return 0;
}
int
-cachepc_kvm_memory_encrypt_op_ioctl(void __user *arg_user)
+cpc_kvm_memory_encrypt_op_ioctl(void __user *arg_user)
{
if (!arg_user || !main_vm) return -EFAULT;
@@ -320,7 +320,7 @@ cachepc_kvm_memory_encrypt_op_ioctl(void __user *arg_user)
}
int
-cachepc_kvm_test_eviction_ioctl(void __user *arg_user)
+cpc_kvm_test_eviction_ioctl(void __user *arg_user)
{
uint32_t u32;
int ret;
@@ -331,7 +331,7 @@ cachepc_kvm_test_eviction_ioctl(void __user *arg_user)
return -EFAULT;
ret = smp_call_function_single(CPC_ISOLCPU,
- cachepc_single_eviction_test, &u32, true);
+ cpc_single_eviction_test, &u32, true);
WARN_ON(ret != 0);
if (copy_to_user(arg_user, &u32, sizeof(u32)))
@@ -341,30 +341,30 @@ cachepc_kvm_test_eviction_ioctl(void __user *arg_user)
}
int
-cachepc_kvm_read_counts_ioctl(void __user *arg_user)
+cpc_kvm_read_counts_ioctl(void __user *arg_user)
{
if (!arg_user) return -EINVAL;
- if (copy_to_user(arg_user, cachepc_msrmts, L1_SETS))
+ if (copy_to_user(arg_user, cpc_msrmts, L1_SETS))
return -EFAULT;
return 0;
}
int
-cachepc_kvm_reset_baseline_ioctl(void __user *arg_user)
+cpc_kvm_reset_baseline_ioctl(void __user *arg_user)
{
if (arg_user) return -EINVAL;
- cachepc_baseline_active = false;
- cachepc_baseline_measure = false;
- memset(cachepc_baseline, 0xff, L1_SETS);
+ cpc_baseline_active = false;
+ cpc_baseline_measure = false;
+ memset(cpc_baseline, 0xff, L1_SETS);
return 0;
}
int
-cachepc_kvm_calc_baseline_ioctl(void __user *arg_user)
+cpc_kvm_calc_baseline_ioctl(void __user *arg_user)
{
uint32_t state;
@@ -373,24 +373,24 @@ cachepc_kvm_calc_baseline_ioctl(void __user *arg_user)
if (copy_from_user(&state, arg_user, sizeof(state)))
return -EFAULT;
- cachepc_baseline_measure = state;
+ cpc_baseline_measure = state;
return 0;
}
int
-cachepc_kvm_read_baseline_ioctl(void __user *arg_user)
+cpc_kvm_read_baseline_ioctl(void __user *arg_user)
{
if (!arg_user) return -EINVAL;
- if (copy_to_user(arg_user, cachepc_baseline, L1_SETS))
+ if (copy_to_user(arg_user, cpc_baseline, L1_SETS))
return -EFAULT;
return 0;
}
int
-cachepc_kvm_apply_baseline_ioctl(void __user *arg_user)
+cpc_kvm_apply_baseline_ioctl(void __user *arg_user)
{
uint32_t state;
@@ -399,23 +399,23 @@ cachepc_kvm_apply_baseline_ioctl(void __user *arg_user)
if (copy_from_user(&state, arg_user, sizeof(state)))
return -EFAULT;
- cachepc_baseline_active = state;
+ cpc_baseline_active = state;
return 0;
}
int
-cachepc_kvm_long_step_ioctl(void __user *arg_user)
+cpc_kvm_long_step_ioctl(void __user *arg_user)
{
if (arg_user) return -EINVAL;
- cachepc_long_step = true;
+ cpc_long_step = true;
return 0;
}
int
-cachepc_kvm_reset_tracking_ioctl(void __user *arg_user)
+cpc_kvm_reset_tracking_ioctl(void __user *arg_user)
{
struct kvm_vcpu *vcpu;
struct cpc_fault *fault, *next;
@@ -424,21 +424,21 @@ cachepc_kvm_reset_tracking_ioctl(void __user *arg_user)
return -EFAULT;
vcpu = xa_load(&main_vm->vcpu_array, 0);
- cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
- cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
- cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE);
+ cpc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
+ cpc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
+ cpc_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE);
- cachepc_track_start_gfn = 0;
- cachepc_track_end_gfn = 0;
+ cpc_track_start_gfn = 0;
+ cpc_track_end_gfn = 0;
- cachepc_singlestep = false;
- cachepc_singlestep_reset = false;
+ cpc_singlestep = false;
+ cpc_singlestep_reset = false;
- cachepc_long_step = false;
+ cpc_long_step = false;
- cachepc_track_mode = CPC_TRACK_NONE;
+ cpc_track_mode = CPC_TRACK_NONE;
- list_for_each_entry_safe(fault, next, &cachepc_faults, list) {
+ list_for_each_entry_safe(fault, next, &cpc_faults, list) {
list_del(&fault->list);
kfree(fault);
}
@@ -447,7 +447,7 @@ cachepc_kvm_reset_tracking_ioctl(void __user *arg_user)
}
int
-cachepc_kvm_track_mode_ioctl(void __user *arg_user)
+cpc_kvm_track_mode_ioctl(void __user *arg_user)
{
struct kvm_vcpu *vcpu;
uint32_t mode;
@@ -462,38 +462,38 @@ cachepc_kvm_track_mode_ioctl(void __user *arg_user)
vcpu = xa_load(&main_vm->vcpu_array, 0);
- cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
- cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
- cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE);
+ cpc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
+ cpc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
+ cpc_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE);
- cachepc_apic_timer = 0;
- cachepc_apic_oneshot = false;
- cachepc_prime_probe = false;
- cachepc_singlestep = false;
- cachepc_singlestep_reset = false;
- cachepc_long_step = false;
+ cpc_apic_timer = 0;
+ cpc_apic_oneshot = false;
+ cpc_prime_probe = false;
+ cpc_singlestep = false;
+ cpc_singlestep_reset = false;
+ cpc_long_step = false;
switch (mode) {
case CPC_TRACK_FAULT_NO_RUN:
- cachepc_prime_probe = true;
- cachepc_track_all(vcpu, KVM_PAGE_TRACK_ACCESS);
+ cpc_prime_probe = true;
+ cpc_track_all(vcpu, KVM_PAGE_TRACK_ACCESS);
break;
case CPC_TRACK_EXIT_EVICTIONS:
- cachepc_prime_probe = true;
- cachepc_long_step = true;
+ cpc_prime_probe = true;
+ cpc_long_step = true;
break;
case CPC_TRACK_PAGES:
case CPC_TRACK_PAGES_RESOLVE:
memset(&cpc_track_pages, 0, sizeof(cpc_track_pages));
- cachepc_track_all(vcpu, KVM_PAGE_TRACK_EXEC);
+ cpc_track_all(vcpu, KVM_PAGE_TRACK_EXEC);
break;
case CPC_TRACK_STEPS:
memset(&cpc_track_steps, 0, sizeof(cpc_track_steps));
break;
case CPC_TRACK_STEPS_AND_FAULTS:
- cachepc_prime_probe = true;
- cachepc_track_all(vcpu, KVM_PAGE_TRACK_ACCESS);
- cachepc_singlestep_reset = true;
+ cpc_prime_probe = true;
+ cpc_track_all(vcpu, KVM_PAGE_TRACK_ACCESS);
+ cpc_singlestep_reset = true;
break;
case CPC_TRACK_STEPS_SIGNALLED:
memset(&cpc_track_steps_signalled, 0,
@@ -505,13 +505,13 @@ cachepc_kvm_track_mode_ioctl(void __user *arg_user)
return -EINVAL;
}
- cachepc_track_mode = mode;
+ cpc_track_mode = mode;
return 0;
}
// int
-// cachepc_kvm_track_page_ioctl(void __user *arg_user)
+// cpc_kvm_track_page_ioctl(void __user *arg_user)
// {
// struct cpc_track_config cfg;
// struct kvm_vcpu *vcpu;
@@ -526,42 +526,42 @@ cachepc_kvm_track_mode_ioctl(void __user *arg_user)
//
// BUG_ON(xa_empty(&main_vm->vcpu_array));
// vcpu = xa_load(&main_vm->vcpu_array, 0);
-// if (!cachepc_track_single(vcpu, cfg.gfn, cfg.mode))
+// if (!cpc_track_single(vcpu, cfg.gfn, cfg.mode))
// return -EFAULT;
//
// return 0;
// }
//
// int
-// cachepc_kvm_track_range_start_ioctl(void __user *arg_user)
+// cpc_kvm_track_range_start_ioctl(void __user *arg_user)
// {
// if (!arg_user) return -EINVAL;
//
-// if (copy_from_user(&cachepc_track_start_gfn, arg_user, sizeof(uint64_t)))
+// if (copy_from_user(&cpc_track_start_gfn, arg_user, sizeof(uint64_t)))
// return -EFAULT;
//
// return 0;
// }
//
// int
-// cachepc_kvm_track_range_end_ioctl(void __user *arg_user)
+// cpc_kvm_track_range_end_ioctl(void __user *arg_user)
// {
// if (!arg_user) return -EINVAL;
//
-// if (copy_from_user(&cachepc_track_end_gfn, arg_user, sizeof(uint64_t)))
+// if (copy_from_user(&cpc_track_end_gfn, arg_user, sizeof(uint64_t)))
// return -EFAULT;
//
// return 0;
// }
//
// int
-// cachepc_kvm_track_exec_cur_ioctl(void __user *arg_user)
+// cpc_kvm_track_exec_cur_ioctl(void __user *arg_user)
// {
// struct cpc_fault *fault;
//
// if (!arg_user) return -EINVAL;
//
-// fault = list_first_entry(&cachepc_faults, struct cpc_fault, list);
+// fault = list_first_entry(&cpc_faults, struct cpc_fault, list);
// if (!fault) return -EFAULT;
//
// if (copy_to_user(arg_user, &fault->gfn, sizeof(uint64_t)))
@@ -571,67 +571,67 @@ cachepc_kvm_track_mode_ioctl(void __user *arg_user)
// }
int
-cachepc_kvm_req_pause_ioctl(void __user *arg_user)
+cpc_kvm_req_pause_ioctl(void __user *arg_user)
{
if (arg_user) return -EINVAL;
- cachepc_pause_vm = true;
+ cpc_pause_vm = true;
return 0;
}
long
-cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+cpc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
void __user *arg_user;
arg_user = (void __user *)arg;
switch (ioctl) {
case KVM_CPC_RESET:
- return cachepc_kvm_reset_ioctl(arg_user);
+ return cpc_kvm_reset_ioctl(arg_user);
case KVM_CPC_DEBUG:
- return cachepc_kvm_debug_ioctl(arg_user);
+ return cpc_kvm_debug_ioctl(arg_user);
case KVM_CPC_MEMORY_ENCRYPT_OP:
- return cachepc_kvm_memory_encrypt_op_ioctl(arg_user);
+ return cpc_kvm_memory_encrypt_op_ioctl(arg_user);
case KVM_CPC_TEST_EVICTION:
- return cachepc_kvm_test_eviction_ioctl(arg_user);
+ return cpc_kvm_test_eviction_ioctl(arg_user);
case KVM_CPC_READ_COUNTS:
- return cachepc_kvm_read_counts_ioctl(arg_user);
+ return cpc_kvm_read_counts_ioctl(arg_user);
case KVM_CPC_RESET_BASELINE:
- return cachepc_kvm_reset_baseline_ioctl(arg_user);
+ return cpc_kvm_reset_baseline_ioctl(arg_user);
case KVM_CPC_READ_BASELINE:
- return cachepc_kvm_read_baseline_ioctl(arg_user);
+ return cpc_kvm_read_baseline_ioctl(arg_user);
case KVM_CPC_CALC_BASELINE:
- return cachepc_kvm_calc_baseline_ioctl(arg_user);
+ return cpc_kvm_calc_baseline_ioctl(arg_user);
case KVM_CPC_APPLY_BASELINE:
- return cachepc_kvm_apply_baseline_ioctl(arg_user);
+ return cpc_kvm_apply_baseline_ioctl(arg_user);
case KVM_CPC_LONG_STEP:
- return cachepc_kvm_long_step_ioctl(arg_user);
+ return cpc_kvm_long_step_ioctl(arg_user);
case KVM_CPC_RESET_TRACKING:
- return cachepc_kvm_reset_tracking_ioctl(arg_user);
+ return cpc_kvm_reset_tracking_ioctl(arg_user);
case KVM_CPC_TRACK_MODE:
- return cachepc_kvm_track_mode_ioctl(arg_user);
+ return cpc_kvm_track_mode_ioctl(arg_user);
case KVM_CPC_POLL_EVENT:
- return cachepc_poll_event_ioctl(arg_user);
+ return cpc_poll_event_ioctl(arg_user);
case KVM_CPC_ACK_EVENT:
- return cachepc_ack_event_ioctl(arg_user);
+ return cpc_ack_event_ioctl(arg_user);
// case KVM_CPC_TRACK_PAGE:
- // return cachepc_kvm_track_page_ioctl(arg_user);
+ // return cpc_kvm_track_page_ioctl(arg_user);
// case KVM_CPC_TRACK_RANGE_START:
- // return cachepc_kvm_track_range_start_ioctl(arg_user);
+ // return cpc_kvm_track_range_start_ioctl(arg_user);
// case KVM_CPC_TRACK_RANGE_END:
- // return cachepc_kvm_track_range_end_ioctl(arg_user);
+ // return cpc_kvm_track_range_end_ioctl(arg_user);
// case KVM_CPC_TRACK_EXEC_CUR:
- // return cachepc_kvm_track_exec_cur_ioctl(arg_user);
+ // return cpc_kvm_track_exec_cur_ioctl(arg_user);
case KVM_CPC_VM_REQ_PAUSE:
- return cachepc_kvm_req_pause_ioctl(arg_user);
+ return cpc_kvm_req_pause_ioctl(arg_user);
default:
return kvm_arch_dev_ioctl(file, ioctl, arg);
}
}
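Since unknown commands fall through to kvm_arch_dev_ioctl, these ioctls are issued on the kvm device fd itself (assumed here to be /dev/kvm). A hypothetical setup sequence:

    #include <fcntl.h>
    #include <sys/ioctl.h>

    int fd = open("/dev/kvm", O_RDONLY);
    uint32_t mode = CPC_TRACK_STEPS;

    ioctl(fd, KVM_CPC_RESET, NULL);       /* PMCs, baseline, tracking */
    ioctl(fd, KVM_CPC_TRACK_MODE, &mode); /* pick a tracking strategy */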
void
-cachepc_kvm_setup_test(void *p)
+cpc_kvm_setup_test(void *p)
{
spinlock_t lock;
int cpu;
@@ -640,17 +640,17 @@ cachepc_kvm_setup_test(void *p)
CPC_INFO("Running on core %i\n", cpu);
- if (cachepc_verify_topology())
+ if (cpc_verify_topology())
goto exit;
- cachepc_ds = cachepc_ds_alloc(&cachepc_ds_ul);
+ cpc_ds = cpc_ds_alloc(&cpc_ds_ul);
- cachepc_kvm_system_setup();
+ cpc_kvm_system_setup();
spin_lock_irq(&lock);
- cachepc_prime_probe_test();
- cachepc_stream_hwpf_test();
- cachepc_single_eviction_test(NULL);
+ cpc_prime_probe_test();
+ cpc_stream_hwpf_test();
+ cpc_single_eviction_test(NULL);
spin_unlock_irq(&lock);
exit:
@@ -658,49 +658,49 @@ exit:
}
void
-cachepc_kvm_init(void)
+cpc_kvm_init(void)
{
int ret;
- cachepc_debug = false;
+ cpc_debug = false;
- cachepc_ds = NULL;
- cachepc_ds_ul = NULL;
+ cpc_ds = NULL;
+ cpc_ds_ul = NULL;
- cachepc_retinst = 0;
- cachepc_long_step = false;
- cachepc_singlestep = false;
- cachepc_singlestep_reset = false;
- cachepc_prime_probe = false;
- cachepc_track_mode = CPC_TRACK_NONE;
+ cpc_retinst = 0;
+ cpc_long_step = false;
+ cpc_singlestep = false;
+ cpc_singlestep_reset = false;
+ cpc_prime_probe = false;
+ cpc_track_mode = CPC_TRACK_NONE;
- cachepc_apic_oneshot = false;
- cachepc_apic_timer = 0;
+ cpc_apic_oneshot = false;
+ cpc_apic_timer = 0;
- INIT_LIST_HEAD(&cachepc_faults);
+ INIT_LIST_HEAD(&cpc_faults);
- cachepc_msrmts = kzalloc(L1_SETS, GFP_KERNEL);
- BUG_ON(!cachepc_msrmts);
+ cpc_msrmts = kzalloc(L1_SETS, GFP_KERNEL);
+ BUG_ON(!cpc_msrmts);
- cachepc_baseline_active = false;
- cachepc_baseline_measure = false;
- cachepc_baseline = kzalloc(L1_SETS, GFP_KERNEL);
- BUG_ON(!cachepc_baseline);
+ cpc_baseline_active = false;
+ cpc_baseline_measure = false;
+ cpc_baseline = kzalloc(L1_SETS, GFP_KERNEL);
+ BUG_ON(!cpc_baseline);
- cachepc_events_reset();
+ cpc_events_reset();
ret = smp_call_function_single(CPC_ISOLCPU,
- cachepc_kvm_setup_test, NULL, true);
+ cpc_kvm_setup_test, NULL, true);
WARN_ON(ret != 0);
}
void
-cachepc_kvm_exit(void)
+cpc_kvm_exit(void)
{
- kfree(cachepc_msrmts);
+ kfree(cpc_msrmts);
- kfree(cachepc_baseline);
+ kfree(cpc_baseline);
- if (cachepc_ds_ul)
- kfree(cachepc_ds_ul);
+ if (cpc_ds_ul)
+ kfree(cpc_ds_ul);
}
diff --git a/cachepc/kvm.h b/cachepc/kvm.h
@@ -4,7 +4,7 @@
#include <linux/fs.h>
-long cachepc_kvm_ioctl(struct file *file, unsigned int cmd, unsigned long argp);
+long cpc_kvm_ioctl(struct file *file, unsigned int cmd, unsigned long argp);
-void cachepc_kvm_init(void);
-void cachepc_kvm_exit(void);
+void cpc_kvm_init(void);
+void cpc_kvm_exit(void);
diff --git a/cachepc/svm.c b/cachepc/svm.c
@@ -1,22 +1,3 @@
-int
-cachepc_kvm_get_rip_ioctl(void __user *arg_user)
-{
- struct kvm_regs *regs;
- struct kvm_vcpu *vcpu;
- if (!arg_user) return -EINVAL;
-
- if (!main_vm || xa_empty(&main_vm->vcpu_array))
- return -EFAULT;
-
- vcpu = xa_load(&main_vm->vcpu_array, 0);
-
- if (sev_es_guest(vcpu)) {
-
- }
- kvm_rip_read(vcpu);
-
- return 0;
-}
diff --git a/cachepc/track.c b/cachepc/track.c
@@ -45,7 +45,7 @@ struct kvm* main_vm;
EXPORT_SYMBOL(main_vm);
bool
-cachepc_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
+cpc_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode)
{
struct kvm_memory_slot *slot;
@@ -66,10 +66,10 @@ cachepc_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
return slot != NULL;
}
-EXPORT_SYMBOL(cachepc_track_single);
+EXPORT_SYMBOL(cpc_track_single);
bool
-cachepc_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
+cpc_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode)
{
struct kvm_memory_slot *slot;
@@ -90,10 +90,10 @@ cachepc_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
return slot != NULL;
}
-EXPORT_SYMBOL(cachepc_untrack_single);
+EXPORT_SYMBOL(cpc_untrack_single);
long
-cachepc_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
+cpc_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
struct kvm_memory_slot *slot;
struct kvm_memslots *slots;
@@ -118,10 +118,10 @@ cachepc_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
return count;
}
-EXPORT_SYMBOL(cachepc_track_all);
+EXPORT_SYMBOL(cpc_track_all);
long
-cachepc_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
+cpc_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
struct kvm_memory_slot *slot;
struct kvm_memslots *slots;
@@ -145,5 +145,5 @@ cachepc_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
return count;
}
-EXPORT_SYMBOL(cachepc_untrack_all);
+EXPORT_SYMBOL(cpc_untrack_all);
diff --git a/cachepc/track.h b/cachepc/track.h
@@ -13,10 +13,10 @@
extern struct kvm* main_vm;
-bool cachepc_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
+bool cpc_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode);
-bool cachepc_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
+bool cpc_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode);
-long cachepc_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
-long cachepc_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
+long cpc_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
+long cpc_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
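A sketch of how these helpers combine (mirroring cpc_kvm_track_mode_ioctl in kvm.c above): clear all existing tracking, then re-arm every gfn in the desired mode.

    cpc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
    cpc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
    cpc_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE);

    /* e.g. CPC_TRACK_FAULT_NO_RUN: fault on every guest access */
    cpc_track_all(vcpu, KVM_PAGE_TRACK_ACCESS);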