commit 49c88d32d25d4eb39ad6452cfba2ca93d60e1b81
parent 156da64fb4f36584039d06d30eab2784e4a71a5d
Author: Louis Burda <quent.burda@gmail.com>
Date: Thu, 3 Nov 2022 19:45:42 +0100
Stash progress
Diffstat:
11 files changed, 345 insertions(+), 82 deletions(-)
diff --git a/Makefile b/Makefile
@@ -15,7 +15,8 @@ $(LINUX)/arch/x86/kvm/cachepc:
ln -sf $(PWD)/cachepc $@
build: $(LINUX)/arch/x86/kvm/cachepc
- $(MAKE) -C $(LINUX) -j6 M=arch/x86/kvm M=crypto
+ $(MAKE) -C $(LINUX) -j6 M=arch/x86/kvm
+ $(MAKE) -C $(LINUX) -j6 M=crypto
load:
sudo rmmod kvm_amd || true
diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -1,6 +1,9 @@
#include "cachepc.h"
#include "uapi.h"
+#include "../../include/asm/apic.h"
+#include "../../include/asm/irq_vectors.h"
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
@@ -312,6 +315,15 @@ cachepc_update_baseline(void)
}
}
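+/* program the local APIC timer as a one-shot: LOCAL_TIMER_VECTOR fires
+ * after interval ticks of the bus clock divided by 2 (APIC_TDR_DIV_2) */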
+void
+cachepc_apic_oneshot(uint32_t interval)
+{
+ pr_warn("CachePCTest: Setting up APIC oneshot\n");
+ native_apic_mem_write(APIC_LVTT, LOCAL_TIMER_VECTOR | APIC_LVT_TIMER_ONESHOT);
+ native_apic_mem_write(APIC_TDCR, APIC_TDR_DIV_2);
+ native_apic_mem_write(APIC_TMICT, interval);
+}
+
void __attribute__((optimize(1))) // prevent instruction reordering
cachepc_prime_vcall(uintptr_t ret, cacheline *cl)
{
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
@@ -93,6 +93,8 @@ void cachepc_save_msrmts(cacheline *head);
void cachepc_print_msrmts(cacheline *head);
void cachepc_update_baseline(void);
+void cachepc_apic_oneshot(uint32_t interval);
+
void cachepc_prime_vcall(uintptr_t ret, cacheline *cl);
void cachepc_probe_vcall(uintptr_t ret, cacheline *cl);
@@ -120,6 +122,11 @@ extern bool cachepc_baseline_active;
extern uint64_t cachepc_retinst;
+extern bool cachepc_single_step;
+extern bool cachepc_track_single_step;
+extern uint64_t cachepc_last_fault_gfn;
+extern uint32_t cachepc_last_fault_err;
+
extern cache_ctx *cachepc_ctx;
extern cacheline *cachepc_ds;
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -25,6 +25,15 @@ EXPORT_SYMBOL(cachepc_baseline_active);
uint64_t cachepc_retinst = 0;
EXPORT_SYMBOL(cachepc_retinst);
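+/* single-step state: set by the page fault handler and the ioctls below,
+ * consumed in svm_vcpu_enter_exit and intr_interception */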
+bool cachepc_single_step = false;
+bool cachepc_track_single_step = false;
+uint64_t cachepc_last_fault_gfn;
+uint32_t cachepc_last_fault_err;
+EXPORT_SYMBOL(cachepc_single_step);
+EXPORT_SYMBOL(cachepc_track_single_step);
+EXPORT_SYMBOL(cachepc_last_fault_gfn);
+EXPORT_SYMBOL(cachepc_last_fault_err);
+
cache_ctx *cachepc_ctx = NULL;
cacheline *cachepc_ds = NULL;
EXPORT_SYMBOL(cachepc_ctx);
@@ -35,6 +44,21 @@ uint64_t cachepc_regs_vm[16];
EXPORT_SYMBOL(cachepc_regs_tmp);
EXPORT_SYMBOL(cachepc_regs_vm);
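+/* event state moved here from uspt.c and exported so other kvm-amd
+ * objects (e.g. svm.c) can reference it via uspt.h */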
+uint64_t last_sent_eventid;
+uint64_t last_acked_eventid;
+DEFINE_RWLOCK(event_lock);
+EXPORT_SYMBOL(last_sent_eventid);
+EXPORT_SYMBOL(last_acked_eventid);
+EXPORT_SYMBOL(event_lock);
+
+struct cpc_track_event sent_event;
+bool have_event;
+EXPORT_SYMBOL(sent_event);
+EXPORT_SYMBOL(have_event);
+
+bool uspt_init;
+EXPORT_SYMBOL(uspt_init);
+
static void cachepc_kvm_prime_probe_test(void *p);
static void cachepc_kvm_stream_hwpf_test(void *p);
static void cachepc_kvm_single_access_test(void *p);
@@ -48,6 +72,11 @@ static int cachepc_kvm_init_pmc_ioctl(void __user *arg_user);
static int cachepc_kvm_read_pmc_ioctl(void __user *arg_user);
static int cachepc_kvm_read_counts_ioctl(void __user *arg_user);
static int cachepc_kvm_setup_pmc_ioctl(void __user *arg_user);
+static int cachepc_kvm_measure_baseline_ioctl(void __user *arg_user);
+static int cachepc_kvm_read_baseline_ioctl(void __user *arg_user);
+static int cachepc_kvm_sub_baseline_ioctl(void __user *arg_user);
+static int cachepc_kvm_single_step_ioctl(void __user *arg_user);
+static int cachepc_kvm_track_single_step_ioctl(void __user *arg_user);
static int cachepc_kvm_track_page_ioctl(void __user *arg_user);
static int cachepc_kvm_track_all_ioctl(void __user *arg_user);
@@ -238,6 +267,9 @@ cachepc_kvm_system_setup(void)
val |= 1 << 13;
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
+
+ /* enable local apic */
+
}
int
@@ -384,13 +416,14 @@ cachepc_kvm_measure_baseline_ioctl(void __user *arg_user)
if (copy_from_user(&state, arg_user, sizeof(state)))
return -EFAULT;
- cachepc_baseline_measure = state;
-
if (state) {
for (i = 0; i < cachepc_msrmts_count; i++)
cachepc_baseline[i] = CPC_MSRMT_MAX;
}
+ cachepc_baseline_measure = state;
+
return 0;
}
@@ -422,6 +455,29 @@ cachepc_kvm_sub_baseline_ioctl(void __user *arg_user)
}
int
+cachepc_kvm_single_step_ioctl(void __user *arg_user)
+{
+ cachepc_single_step = true;
+
+ return 0;
+}
+
+int
+cachepc_kvm_track_single_step_ioctl(void __user *arg_user)
+{
+ uint32_t state;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(&state, arg_user, sizeof(state)))
+ return -EFAULT;
+
+ cachepc_track_single_step = state;
+
+ return 0;
+}
+
+int
cachepc_kvm_track_page_ioctl(void __user *arg_user)
{
struct cpc_track_config cfg;
@@ -435,12 +491,11 @@ cachepc_kvm_track_page_ioctl(void __user *arg_user)
if (main_vm == NULL)
return -EFAULT;
- if (cfg.track_mode < 0 || cfg.track_mode >= KVM_PAGE_TRACK_MAX)
+ if (cfg.mode < 0 || cfg.mode >= KVM_PAGE_TRACK_MAX)
return -EINVAL;
vcpu = xa_load(&main_vm->vcpu_array, 0);
- if (!sevstep_track_single(vcpu,
- cfg.gpa >> PAGE_SHIFT, cfg.track_mode)) {
+ if (!sevstep_track_single(vcpu, cfg.gfn, cfg.mode)) {
printk("KVM_TRACK_PAGE: sevstep_track_single failed");
return -EFAULT;
}
@@ -452,21 +507,21 @@ int
cachepc_kvm_track_all_ioctl(void __user *arg_user)
{
struct kvm_vcpu *vcpu;
- uint64_t track_mode;
+ uint64_t mode;
if (!arg_user) return -EINVAL;
- if (copy_from_user(&track_mode, arg_user, sizeof(track_mode)))
+ if (copy_from_user(&mode, arg_user, sizeof(mode)))
return -EFAULT;
if (main_vm == NULL)
return -EFAULT;
- if (track_mode < 0 || track_mode >= KVM_PAGE_TRACK_MAX)
+ if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
return -EINVAL;
vcpu = xa_load(&main_vm->vcpu_array, 0);
- if (!sevstep_track_all(vcpu, track_mode))
+ if (!sevstep_track_all(vcpu, mode))
return -EFAULT;
return 0;
@@ -476,21 +531,21 @@ int
cachepc_kvm_untrack_all_ioctl(void __user *arg_user)
{
struct kvm_vcpu *vcpu;
- uint64_t track_mode;
+ uint64_t mode;
if (!arg_user) return -EINVAL;
- if (copy_from_user(&track_mode, arg_user, sizeof(track_mode)))
+ if (copy_from_user(&mode, arg_user, sizeof(mode)))
return -EFAULT;
if (main_vm == NULL)
return -EFAULT;
- if (track_mode < 0 || track_mode >= KVM_PAGE_TRACK_MAX)
+ if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
return -EINVAL;
vcpu = xa_load(&main_vm->vcpu_array, 0);
- if (!sevstep_untrack_all(vcpu, track_mode))
+ if (!sevstep_untrack_all(vcpu, mode))
return -EFAULT;
return 0;
@@ -560,6 +615,10 @@ cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
return cachepc_kvm_read_baseline_ioctl(arg_user);
case KVM_CPC_SUB_BASELINE:
return cachepc_kvm_sub_baseline_ioctl(arg_user);
+ case KVM_CPC_SINGLE_STEP:
+ return cachepc_kvm_single_step_ioctl(arg_user);
+ case KVM_CPC_TRACK_SINGLE_STEP:
+ return cachepc_kvm_track_single_step_ioctl(arg_user);
case KVM_CPC_TRACK_PAGE:
return cachepc_kvm_track_page_ioctl(arg_user);
case KVM_CPC_TRACK_ALL:
@@ -613,6 +672,9 @@ cachepc_kvm_init(void)
cachepc_retinst = 0;
+ cachepc_single_step = false;
+ cachepc_track_single_step = false;
+
cachepc_msrmts_count = L1_SETS;
cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(cpc_msrmt_t), GFP_KERNEL);
BUG_ON(cachepc_msrmts == NULL);
diff --git a/cachepc/mmu.c b/cachepc/mmu.c
@@ -1,4 +1,5 @@
#include "../cachepc/sevstep.h"
+#include "../cachepc/cachepc.h"
#include "../cachepc/uspt.h"
static void
@@ -11,24 +12,31 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
KVM_PAGE_TRACK_EXEC
};
bool was_tracked;
- int i;
- int err;
+ int err, i;
+
+ pr_warn("CachePCTest: Page fault %llu\n", fault->gfn);
was_tracked = false;
for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
if (kvm_slot_page_track_is_active(vcpu->kvm,
fault->slot, fault->gfn, modes[i])) {
- //sevstep_untrack_single_page(vcpu, fault->gfn, modes[i]);
+ pr_warn("CachePCTest: Page attrs %i %i %i\n",
+ fault->present, fault->write, fault->user);
+ sevstep_untrack_single(vcpu, fault->gfn, modes[i]);
was_tracked = true;
}
}
if (was_tracked) {
pr_warn("Sevstep: Tracked page fault (gfn:%llu)", fault->gfn);
- err = sevstep_uspt_send_and_block(fault->gfn << PAGE_SHIFT,
- fault->error_code);
- if (err) {
- printk("Sevstep: uspt_send_and_block failed (%d)\n", err);
+ if (cachepc_track_single_step) {
+ cachepc_last_fault_gfn = fault->gfn;
+ cachepc_last_fault_err = fault->error_code;
+ cachepc_single_step = true;
+ } else {
+ err = sevstep_uspt_send_and_block(fault->gfn,
+ fault->error_code);
+ if (err) pr_warn("Sevstep: uspt_send_and_block failed (%d)\n", err);
}
}
}
diff --git a/cachepc/sevstep.c b/cachepc/sevstep.c
@@ -104,7 +104,6 @@ sevstep_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
pr_warn("Sevstep: Slot page count: %lu\n", slot->npages);
for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
- pr_warn("Sevstep: Tracking page: %llu\n", gfn);
write_lock(&vcpu->kvm->mmu_lock);
kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
write_unlock(&vcpu->kvm->mmu_lock);
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -30,6 +30,8 @@
#define KVM_CPC_MEASURE_BASELINE _IOW(KVMIO, 0x26, __u32)
#define KVM_CPC_READ_BASELINE _IOR(KVMIO, 0x27, __u64)
#define KVM_CPC_SUB_BASELINE _IOR(KVMIO, 0x28, __u32)
+#define KVM_CPC_SINGLE_STEP _IO(KVMIO, 0x29)
+#define KVM_CPC_TRACK_SINGLE_STEP _IOWR(KVMIO, 0x2A, __u32)
#define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x30, struct cpc_track_config)
#define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x31, __u64)
@@ -48,14 +50,14 @@ enum kvm_page_track_mode {
};
struct cpc_track_config {
- __u64 gpa;
- __s32 track_mode;
+ __u64 gfn;
+ __s32 mode;
};
struct cpc_track_event {
__u64 id; /* filled automatically */
- __u64 faulted_gpa;
- __u32 error_code;
+ __u64 fault_gfn;
+ __u32 fault_err;
__u64 timestamp_ns;
__u64 retinst;
};
diff --git a/cachepc/uspt.c b/cachepc/uspt.c
@@ -13,15 +13,6 @@
#define ARRLEN(x) (sizeof(x)/sizeof((x)[0]))
-static uint64_t last_sent_eventid;
-static uint64_t last_acked_eventid;
-DEFINE_RWLOCK(event_lock);
-
-static struct cpc_track_event sent_event;
-static bool have_event;
-
-static bool uspt_init = false;
-
void
sevstep_uspt_clear(void)
{
@@ -40,7 +31,7 @@ sevstep_uspt_is_initialiized()
}
int
-sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code)
+sevstep_uspt_send_and_block(uint64_t fault_gfn, uint32_t error_code)
{
struct cpc_track_event event;
ktime_t deadline;
@@ -63,8 +54,8 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code)
last_sent_eventid++;
}
event.id = last_sent_eventid;
- event.faulted_gpa = faulted_gpa;
- event.error_code = error_code;
+ event.fault_gfn = fault_gfn;
+ event.fault_err = error_code;
event.timestamp_ns = ktime_get_real_ns();
event.retinst = cachepc_retinst;
@@ -74,7 +65,7 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code)
/* wait for ack with timeout */
pr_warn("Sevstep: uspt_send_and_block: Begin wait for event ack");
- deadline = ktime_get_ns() + 1000000000ULL; /* 1s in ns */
+ deadline = ktime_get_ns() + 2000000000ULL; /* 2s in ns */
while (!sevstep_uspt_is_event_done(sent_event.id)) {
if (ktime_get_ns() > deadline) {
pr_warn("Sevstep: uspt_send_and_block: "
diff --git a/cachepc/uspt.h b/cachepc/uspt.h
@@ -6,11 +6,19 @@
#include <linux/kvm_host.h>
#include <linux/types.h>
+extern uint64_t last_sent_eventid;
+extern uint64_t last_acked_eventid;
+extern rwlock_t event_lock;
+
+extern struct cpc_track_event sent_event;
+extern bool have_event;
+
+extern bool uspt_init;
bool sevstep_uspt_is_initialiized(void);
void sevstep_uspt_clear(void);
-int sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code);
+int sevstep_uspt_send_and_block(uint64_t fault_gfn, uint32_t error_code);
int sevstep_uspt_is_event_done(uint64_t id);
int sevstep_uspt_handle_poll_event(struct cpc_track_event *userspace_mem);
diff --git a/patch.diff b/patch.diff
@@ -17,7 +17,7 @@ index eb186bc57f6a..b96e80934005 100644
/*
* The notifier represented by @kvm_page_track_notifier_node is linked into
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
-index 30f244b64523..e0eeffd340e8 100644
+index 30f244b64523..568cc761f0e5 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,6 @@
@@ -46,7 +46,7 @@ index 30f244b64523..e0eeffd340e8 100644
-kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o \
-+ svm/avic.o svm/sev.o cachepc/cachepc.o
++ svm/avic.o svm/sev.o cachepc/cachepc.o cachepc/uspt.o
ifdef CONFIG_HYPERV
kvm-amd-y += svm/svm_onhyperv.o
@@ -189,19 +189,38 @@ index 2e09d1b6249f..9b40e71564bf 100644
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index cf0bf456d520..d6a4002fa550 100644
+index cf0bf456d520..1e1667dc8f96 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
-@@ -2,6 +2,8 @@
+@@ -2,6 +2,9 @@
#include <linux/kvm_host.h>
+#include "cachepc/cachepc.h"
++#include "cachepc/uspt.h"
+
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
-@@ -3788,14 +3790,37 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -2083,6 +2086,17 @@ static int smi_interception(struct kvm_vcpu *vcpu)
+
+ static int intr_interception(struct kvm_vcpu *vcpu)
+ {
++ int err;
++
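++ /* the one-shot APIC timer armed before VMRUN has fired; report the deferred fault */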
++ if (cachepc_track_single_step && cachepc_single_step) {
++ pr_warn("CachePC: Caught single step interrupt\n");
++ cachepc_single_step = false;
++
++ err = sevstep_uspt_send_and_block(cachepc_last_fault_gfn,
++ cachepc_last_fault_err);
++ if (err) pr_warn("Sevstep: uspt_send_and_block failed (%d)\n", err);
++ }
++
+ ++vcpu->stat.irq_exits;
+ return 1;
+ }
+@@ -3788,14 +3802,42 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long vmcb_pa = svm->current_vmcb->pa;
@@ -210,15 +229,20 @@ index cf0bf456d520..d6a4002fa550 100644
guest_state_enter_irqoff();
if (sev_es_guest(vcpu->kvm)) {
++ cpu = get_cpu();
++ local_irq_disable();
++ WARN_ON(cpu != 2);
++
+ memset(cachepc_msrmts, 0,
+ cachepc_msrmts_count * sizeof(cpc_msrmt_t));
+ cachepc_reset_pmc(CPC_L1MISS_PMC);
+
-+ cpu = get_cpu();
-+ local_irq_disable();
-+ WARN_ON(cpu != 2);
++ cachepc_reset_pmc(CPC_RETINST_PMC);
+
++ if (cachepc_single_step)
++ cachepc_apic_oneshot(10);
__svm_sev_es_vcpu_run(vmcb_pa);
++ cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC);
+
+ cachepc_save_msrmts(cachepc_ds);
+ if (cachepc_baseline_measure)
@@ -228,21 +252,29 @@ index cf0bf456d520..d6a4002fa550 100644
} else {
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
-+ memset(cachepc_msrmts, 0,
-+ cachepc_msrmts_count * sizeof(cpc_msrmt_t));
-+ cachepc_reset_pmc(CPC_L1MISS_PMC);
-+
+ cpu = get_cpu();
+ local_irq_disable();
+ WARN_ON(cpu != 2);
+
++ memset(cachepc_msrmts, 0,
++ cachepc_msrmts_count * sizeof(cpc_msrmt_t));
++ cachepc_reset_pmc(CPC_L1MISS_PMC);
++
/*
* Use a single vmcb (vmcb01 because it's always valid) for
* context switching guest state via VMLOAD/VMSAVE, that way
-@@ -3807,6 +3832,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3803,10 +3845,20 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+ * vmcb02 when switching vmcbs for nested virtualization.
+ */
+ vmload(svm->vmcb01.pa);
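++ /* same single-step mechanism for non-SEV guests, with a longer (presumably re-tuned) interval */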
++ if (cachepc_single_step)
++ cachepc_apic_oneshot(100);
+ __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
vmsave(svm->vmcb01.pa);
++ cachepc_reset_pmc(CPC_RETINST_PMC);
vmload(__sme_page_pa(sd->save_area));
++ cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC);
+
+ cachepc_save_msrmts(cachepc_ds);
+ if (cachepc_baseline_measure)
@@ -397,6 +429,99 @@ index d9adf79124f9..3e5c55f9bef0 100644
#define CREATE_TRACE_POINTS
#include "trace.h"
+diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
+index 27ab27931813..90679ec8ba79 100644
+--- a/crypto/aes_generic.c
++++ b/crypto/aes_generic.c
+@@ -1173,8 +1173,78 @@ EXPORT_SYMBOL_GPL(crypto_aes_set_key);
+ f_rl(bo, bi, 3, k); \
+ } while (0)
+
++#define L1_ASSOC 8
++#define L1_LINESIZE 64
++#define L1_SETS 64
++#define L1_SIZE (L1_SETS * L1_ASSOC * L1_LINESIZE)
++
++#define ACCESS_LINE(n) \
++ asm volatile ("mov (%0), %%rbx" \
++ : : "r"(((uint8_t*) L1) + n * L1_LINESIZE) : "rbx");
++
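++/* touch a fixed pseudo-random selection of 45 of the 64 L1 sets so the
++ * encryption leaves a recognizable footprint in the per-set counts */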
++#define DO_ACCESS_PATTERN() \
++ ACCESS_LINE(60) \
++ ACCESS_LINE(13) \
++ ACCESS_LINE(24) \
++ ACCESS_LINE(19) \
++ ACCESS_LINE(38) \
++ ACCESS_LINE(17) \
++ ACCESS_LINE( 2) \
++ ACCESS_LINE(12) \
++ ACCESS_LINE(22) \
++ ACCESS_LINE(46) \
++ ACCESS_LINE( 4) \
++ ACCESS_LINE(61) \
++ ACCESS_LINE( 5) \
++ ACCESS_LINE(14) \
++ ACCESS_LINE(11) \
++ ACCESS_LINE(35) \
++ ACCESS_LINE(45) \
++ ACCESS_LINE(10) \
++ ACCESS_LINE(49) \
++ ACCESS_LINE(56) \
++ ACCESS_LINE(27) \
++ ACCESS_LINE(37) \
++ ACCESS_LINE(63) \
++ ACCESS_LINE(54) \
++ ACCESS_LINE(55) \
++ ACCESS_LINE(29) \
++ ACCESS_LINE(48) \
++ ACCESS_LINE( 9) \
++ ACCESS_LINE(16) \
++ ACCESS_LINE(39) \
++ ACCESS_LINE(20) \
++ ACCESS_LINE(21) \
++ ACCESS_LINE(62) \
++ ACCESS_LINE( 0) \
++ ACCESS_LINE(34) \
++ ACCESS_LINE( 8) \
++ ACCESS_LINE(53) \
++ ACCESS_LINE(42) \
++ ACCESS_LINE(51) \
++ ACCESS_LINE(50) \
++ ACCESS_LINE(57) \
++ ACCESS_LINE( 7) \
++ ACCESS_LINE( 6) \
++ ACCESS_LINE(33) \
++ ACCESS_LINE(26) \
++
++uint8_t *L1 = NULL;
++
+ static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
++ int cpu;
++
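++ /* one cache line per L1 set; the BUG_ON below checks that the buffer is aligned to the 4 KiB set stride */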
++ if (L1 == NULL) {
++ L1 = kzalloc(L1_SETS * L1_LINESIZE, GFP_KERNEL);
++ BUG_ON(((uintptr_t)L1) % (L1_SETS * L1_LINESIZE) != 0);
++ }
++
++ pr_warn("CachePC-TEST: Running AES-Generic!");
++
++ cpu = get_cpu();
++ DO_ACCESS_PATTERN()
++
+ const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 b0[4], b1[4];
+ const u32 *kp = ctx->key_enc + 4;
+@@ -1210,6 +1280,9 @@ static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ put_unaligned_le32(b0[1], out + 4);
+ put_unaligned_le32(b0[2], out + 8);
+ put_unaligned_le32(b0[3], out + 12);
++
++ DO_ACCESS_PATTERN();
++ put_cpu();
+ }
+
+ /* decrypt a block of text */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
old mode 100644
new mode 100755
diff --git a/test/sevstep.c b/test/sevstep.c
@@ -36,7 +36,8 @@
#define SECONDARY_CORE 3
#define TARGET_CACHE_LINESIZE 64
-#define TARGET_SET 15
+#define TARGET_SET1 14
+#define TARGET_SET2 15
struct kvm {
int vmfd, vcpufd;
@@ -117,10 +118,11 @@ hexdump(void *data, int len)
__attribute__((section("guest_with"))) void
vm_guest_with(void)
{
- asm volatile("hlt");
while (1) {
asm volatile("mov (%[v]), %%bl"
- : : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET));
+ : : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET1));
+ asm volatile("mov (%[v]), %%bl"
+ : : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET2));
}
}
@@ -341,6 +343,7 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
/* Generate encryption keys and set policy */
memset(&start, 0, sizeof(start));
start.handle = 0;
+ // start.policy = 1 << 0; /* disallow debug */
start.policy = 1 << 2; /* require ES */
ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
if (ret < 0) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
@@ -380,13 +383,13 @@ sev_kvm_deinit(struct kvm *kvm)
munmap(kvm->mem, kvm->memsize);
}
-uint16_t *
+cpc_msrmt_t *
read_counts()
{
- uint16_t *counts;
+ cpc_msrmt_t *counts;
int ret;
- counts = malloc(64 * sizeof(uint16_t));
+ counts = malloc(64 * sizeof(cpc_msrmt_t));
if (!counts) err(1, "malloc");
ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
if (ret == -1) err(1, "ioctl READ_COUNTS");
@@ -395,7 +398,7 @@ read_counts()
}
void
-print_counts(uint16_t *counts)
+print_counts(cpc_msrmt_t *counts)
{
int i;
@@ -410,46 +413,57 @@ print_counts(uint16_t *counts)
if (counts[i] > 0)
printf("\x1b[0m");
}
- printf("\n Target Set %i Count: %hu\n", TARGET_SET, counts[TARGET_SET]);
+ printf("\n");
+ printf(" Target Set 1 %i Count: %llu\n",
+ TARGET_SET1, counts[TARGET_SET1]);
+ printf(" Target Set 2 %i Count: %llu\n",
+ TARGET_SET2, counts[TARGET_SET2]);
printf("\n");
}
void
runonce(struct kvm *kvm)
{
- struct kvm_regs regs;
int ret;
ret = ioctl(kvm->vcpufd, KVM_RUN, NULL);
if (ret < 0) err(1, "KVM_RUN");
- printf("VMEXIT\n");
-
- if (kvm->run->exit_reason == KVM_EXIT_MMIO) {
- memset(&regs, 0, sizeof(regs));
- ret = ioctl(kvm->vcpufd, KVM_GET_REGS, &regs);
- if (ret < 0) err(1, "KVM_GET_REGS");
- errx(1, "KVM_EXTI_MMIO: Victim %s at 0x%08llx: rip=0x%08llx\n",
- kvm->run->mmio.is_write ? "write" : "read",
- kvm->run->mmio.phys_addr, regs.rip);
- } else if (kvm->run->exit_reason != KVM_EXIT_HLT) {
- errx(1, "KVM died: %i\n", kvm->run->exit_reason);
- }
+}
+
+uint64_t
+svm_dbg_rip(struct kvm *kvm)
+{
+ /* TODO: decrypt vmsa */
+
+ return 0;
}
int
-monitor(void)
+monitor(struct kvm *kvm)
{
+ struct cpc_track_config cfg;
struct cpc_track_event event;
+ cpc_msrmt_t counts[64];
int ret;
/* Get page fault info */
ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
if (!ret) {
- printf("Got page fault! %llu retired insts\n",
- event.retinst);
+ printf("Event: gpa:%llu retinst:%llu err:%i rip:%lu\n",
+ event.fault_gfn, event.retinst,
+ event.fault_err, svm_dbg_rip(kvm));
faultcnt++;
- printf("Acking event %llu\n", event.id);
+ ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
+ if (ret == -1) err(1, "ioctl READ_COUNTS");
+ print_counts(counts);
+
+ /* retrack page */
+ cfg.gfn = event.fault_gfn;
+ cfg.mode = KVM_PAGE_TRACK_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_PAGE, &cfg);
+ if (ret == -1) err(1, "ioctl TRACK_PAGE");
+
ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
if (ret == -1) err(1, "ioctl ACK_EVENT");
} else if (errno != EAGAIN) {
@@ -466,6 +480,8 @@ main(int argc, const char **argv)
struct kvm kvm_with_access;
uint64_t track_mode;
pid_t ppid, pid;
+ uint32_t arg;
+ cpc_msrmt_t baseline[64];
int ret;
setvbuf(stdout, NULL, _IONBF, 0);
@@ -490,9 +506,6 @@ main(int argc, const char **argv)
sev_kvm_init(&kvm_with_access, 64 * 64 * 8 * 2,
__start_guest_with, __stop_guest_with);
- /* One run to skip stack setup */
- ioctl(kvm_with_access.vcpufd, KVM_RUN, NULL);
-
/* Page tracking init needs to happen after kvm
* init so main_kvm is set.. */
@@ -508,13 +521,48 @@ main(int argc, const char **argv)
ppid = getpid();
if ((pid = fork())) {
if (pid < 0) err(1, "fork");
+
+ sleep(1); /* give time for child to pin other core */
+
+ printf("VMRUN\n");
runonce(&kvm_with_access);
} else {
pin_process(0, SECONDARY_CORE, true);
+ printf("PINNED\n");
+
+ printf("Doing baseline measurement..\n");
+
+ arg = true;
+ ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
+ if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
+ faultcnt = 0;
+ while (faultcnt < 20) {
+ if (monitor(&kvm_with_access)) break;
+ }
+
+ arg = false;
+ ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
+ if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
+ ret = ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline);
+ if (ret == -1) err(1, "ioctl READ_BASELINE");
+ printf("\n>>> BASELINE:\n");
+ print_counts(baseline);
+
+ arg = true;
+ ret = ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &arg);
+ if (ret == -1) err(1, "ioctl SUB_BASELINE");
+
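+ /* enable single-step tracking: from now on faults arm the APIC one-shot instead of blocking immediately */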
+ arg = true;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_SINGLE_STEP, &arg);
+ if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
faultcnt = 0;
while (faultcnt < SAMPLE_COUNT) {
- if (monitor()) break;
+ if (monitor(&kvm_with_access)) break;
}
+
kill(ppid, SIGTERM);
exit(0);
}