commit de0075e226fe615c48681b2dfa3ab5624762c76d
parent c308b0d3af8c052cd3dbe22143435917da6e7988
Author: Louis Burda <quent.burda@gmail.com>
Date: Thu, 10 Nov 2022 18:16:17 +0100
Add fault error codes to event + sevstep debugging
Diffstat:
10 files changed, 187 insertions(+), 97 deletions(-)
diff --git a/Makefile b/Makefile
@@ -24,13 +24,16 @@ load:
sudo insmod $(LINUX)/arch/x86/kvm/kvm.ko
sudo insmod $(LINUX)/arch/x86/kvm/kvm-amd.ko
+freq:
+ sudo cpupower frequency-set -f 2.60GHz
+
+update:
+ git -C $(LINUX) diff 0aaa1e599bee256b3b15643bbb95e80ce7aa9be5 -G. > patch.diff
+
test/aes-detect_%: test/aes-detect_%.c test/aes-detect.c
clang -o $@ $< $(CFLAGS) -I test/libkcapi/lib -L test/libkcapi/.libs -lkcapi -static
test/%: test/%.c cachepc/uapi.h
clang -o $@ $< $(CFLAGS) -fsanitize=address
-update:
- git -C $(LINUX) diff 0aaa1e599bee256b3b15643bbb95e80ce7aa9be5 -G. > patch.diff
-
-.PHONY: all clean build load update
+.PHONY: all clean build load freq update
diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -315,7 +315,7 @@ cachepc_update_baseline(void)
void __attribute__((optimize(1))) // prevent instruction reordering
cachepc_prime_vcall(uintptr_t ret, cacheline *cl)
{
- cachepc_apic_oneshot(150);
+ cachepc_apic_oneshot(cachepc_apic_timer);
cachepc_prime(cl);
asm volatile ("mov %0, %%rax; jmp *%%rax" : : "r"(ret) : "rax");
}
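
The hard-coded one-shot value (150) becomes the tunable global cachepc_apic_timer, so later code can pick per-phase intervals. As a rough sketch of what an APIC one-shot helper like cachepc_apic_oneshot plausibly does (the vector and divider here are assumptions, not the module's actual values):

	/* arm the local APIC timer in one-shot mode so it fires after
	 * roughly `ticks` cycles, kicking the vcpu out of guest mode;
	 * vector and divider are illustrative */
	static inline void apic_oneshot_sketch(uint32_t ticks)
	{
		native_apic_mem_write(APIC_LVTT, LOCAL_TIMER_VECTOR);
		native_apic_mem_write(APIC_TDCR, APIC_TDR_DIV_1);
		native_apic_mem_write(APIC_TMICT, ticks); /* starts the countdown */
	}

The fault handler in mmu.c below uses the new knob, arming 130 ticks for a bare instruction fetch and 170 once a data fault is pending.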
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
@@ -128,10 +128,13 @@ extern uint64_t cachepc_retinst;
extern bool cachepc_single_step;
extern bool cachepc_track_single_step;
+extern uint32_t cachepc_apic_timer;
extern bool cachepc_inst_fault_avail;
extern uint64_t cachepc_inst_fault_gfn;
+extern uint32_t cachepc_inst_fault_err;
extern bool cachepc_data_fault_avail;
extern uint64_t cachepc_data_fault_gfn;
+extern uint32_t cachepc_data_fault_err;
extern cache_ctx *cachepc_ctx;
extern cacheline *cachepc_ds;
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -27,16 +27,24 @@ EXPORT_SYMBOL(cachepc_retinst);
bool cachepc_single_step = false;
bool cachepc_track_single_step = false;
-bool cachepc_inst_fault_avail = false;
-uint64_t cachepc_inst_fault_gfn = 0;
-bool cachepc_data_fault_avail = false;
-uint64_t cachepc_data_fault_gfn = 0;
+uint32_t cachepc_apic_timer = 0;
EXPORT_SYMBOL(cachepc_single_step);
EXPORT_SYMBOL(cachepc_track_single_step);
+EXPORT_SYMBOL(cachepc_apic_timer);
+
+bool cachepc_inst_fault_avail = false;
+uint64_t cachepc_inst_fault_gfn = 0;
+uint32_t cachepc_inst_fault_err = 0;
EXPORT_SYMBOL(cachepc_inst_fault_avail);
EXPORT_SYMBOL(cachepc_inst_fault_gfn);
+EXPORT_SYMBOL(cachepc_inst_fault_err);
+
+bool cachepc_data_fault_avail = false;
+uint64_t cachepc_data_fault_gfn = 0;
+uint32_t cachepc_data_fault_err = 0;
EXPORT_SYMBOL(cachepc_data_fault_avail);
EXPORT_SYMBOL(cachepc_data_fault_gfn);
+EXPORT_SYMBOL(cachepc_data_fault_err);
cache_ctx *cachepc_ctx = NULL;
cacheline *cachepc_ds = NULL;
@@ -254,7 +262,8 @@ cachepc_kvm_system_setup(void)
val = (uint64_t) lo | ((uint64_t) hi << 32);
val |= 1 << 13;
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
- printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
+ printk("CachePC: Disabling streaming store (MSR %08llX: %016llX)\n",
+ reg_addr, val);
/* disable speculative data cache tlb reloads */
reg_addr = 0xc0011022;
@@ -262,7 +271,8 @@ cachepc_kvm_system_setup(void)
val = (uint64_t) lo | ((uint64_t) hi << 32);
val |= 1 << 4;
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
- printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
+ printk("CachePC: Disabling speculative reloads (MSR %08llX: %016llX)\n",
+ reg_addr, val);
/* disable data cache hardware prefetcher */
reg_addr = 0xc0011022;
@@ -270,7 +280,8 @@ cachepc_kvm_system_setup(void)
val = (uint64_t) lo | ((uint64_t) hi << 32);
val |= 1 << 13;
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
- printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
+ printk("CachePC: Disabling HWPF (MSR %08llX: %016llX)\n",
+ reg_addr, val);
}
int
@@ -675,10 +686,10 @@ cachepc_kvm_init(void)
cachepc_single_step = false;
cachepc_track_single_step = false;
+ cachepc_apic_timer = 200;
+
cachepc_data_fault_avail = false;
- cachepc_data_fault_gfn = 0;
cachepc_inst_fault_avail = false;
- cachepc_inst_fault_gfn = 0;
cachepc_msrmts_count = L1_SETS;
cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(cpc_msrmt_t), GFP_KERNEL);
diff --git a/cachepc/mmu.c b/cachepc/mmu.c
@@ -19,24 +19,31 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
if (cachepc_track_single_step) {
if (cachepc_single_step && cachepc_inst_fault_avail) {
- /* faulted during single step => data address */
- pr_warn("Sevstep: Got data fault gfn:%llu\n", fault->gfn);
+ /* second fault from data access */
+ pr_warn("Sevstep: Got data fault gfn:%llu err:%u\n",
+ fault->gfn, fault->error_code);
- BUG_ON(!cachepc_inst_fault_avail);
cachepc_data_fault_gfn = fault->gfn;
+ cachepc_data_fault_err = fault->error_code;
cachepc_data_fault_avail = true;
+
+ cachepc_apic_timer = 170;
} else {
/* first fault from instruction fetch */
- pr_warn("Sevstep: Got inst fault gfn:%llu\n", fault->gfn);
+ pr_warn("Sevstep: Got inst fault gfn:%llu err:%u\n",
+ fault->gfn, fault->error_code);
cachepc_inst_fault_gfn = fault->gfn;
+ cachepc_inst_fault_err = fault->error_code;
cachepc_inst_fault_avail = true;
cachepc_data_fault_avail = false;
- cachepc_single_step = true;
+
+ cachepc_single_step = true; /* TODO try inverse */
+ cachepc_apic_timer = 130;
}
} else {
sevstep_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS);
- if (sevstep_uspt_send_and_block(fault->gfn, 0))
+ if (sevstep_uspt_send_and_block(fault->gfn, fault->error_code, 0, 0))
pr_warn("Sevstep: uspt_send_and_block failed (%d)\n", err);
}
}
@@ -129,7 +136,8 @@ sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
pr_err("CachePC: Tracking unsupported!\n");
}
- return protected;
+ return true;
+ //return protected;
}
EXPORT_SYMBOL(sevstep_kvm_mmu_slot_gfn_protect);
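
Taken together, the handler above implements a small two-fault state machine per single-stepped instruction. A hedged restatement with illustrative names (not module code):

	enum cpc_step_state { CPC_IDLE, CPC_GOT_INST, CPC_GOT_DATA };

	static void on_tracked_fault(enum cpc_step_state *s)
	{
		switch (*s) {
		case CPC_IDLE:     /* fault 1: instruction fetch page */
			/* record inst gfn/err, arm single step, apic timer 130 */
			*s = CPC_GOT_INST;
			break;
		case CPC_GOT_INST: /* fault 2: data access while stepping */
			/* record data gfn/err, apic timer 170 */
			*s = CPC_GOT_DATA;
			break;
		case CPC_GOT_DATA: /* cleared by intr_interception (svm.c) */
			break;
		}
	}

The APIC interrupt handler patched into svm.c further below consumes this state: it sends the combined event, retracks the faulted pages, and clears both avail flags.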
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -55,9 +55,12 @@ struct cpc_track_config {
};
struct cpc_track_event {
- __u64 id; /* filled automatically */
- __u64 fault_gfn;
- __u32 fault_err;
+ __u64 id;
+ __u64 inst_fault_gfn;
+ __u64 inst_fault_err;
+ __u32 data_fault_avail;
+ __u64 data_fault_gfn;
+ __u32 data_fault_err;
__u64 timestamp_ns;
__u64 retinst;
};
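
Userspace now receives the instruction and data fault in a single event. A hedged sketch of a consumer, mirroring the KVM_CPC_POLL_EVENT / KVM_CPC_ACK_EVENT sequence used in test/sevstep.c below (error handling trimmed):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include "cachepc/uapi.h"

	static int poll_one_event(int kvm_dev)
	{
		struct cpc_track_event event;

		if (ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event))
			return -1; /* nothing pending */

		printf("inst gfn:%llu err:%llu retinst:%llu\n",
			event.inst_fault_gfn, event.inst_fault_err, event.retinst);
		if (event.data_fault_avail)
			printf("data gfn:%llu err:%u\n",
				event.data_fault_gfn, event.data_fault_err);

		/* acking unblocks sevstep_uspt_send_and_block() in the kernel */
		return ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
	}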
diff --git a/cachepc/uspt.c b/cachepc/uspt.c
@@ -31,14 +31,15 @@ sevstep_uspt_is_initialized()
}
int
-sevstep_uspt_send_and_block(uint64_t fault_gfn, uint32_t error_code)
+sevstep_uspt_send_and_block(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
+ uint64_t data_fault_gfn, uint32_t data_fault_err)
{
struct cpc_track_event event;
ktime_t deadline;
read_lock(&event_lock);
if (!sevstep_uspt_is_initialized()) {
- pr_warn("Sevstep: uspt_send_and_block: ctx not initialized!\n");
+ pr_warn("Sevstep: ctx not initialized!\n");
read_unlock(&event_lock);
return 1;
}
@@ -46,16 +47,18 @@ sevstep_uspt_send_and_block(uint64_t fault_gfn, uint32_t error_code)
write_lock(&event_lock);
if (last_sent_eventid != last_acked_eventid) {
- pr_warn("Sevstep: uspt_send_and_block: "
- "event id_s out of sync, aborting. Fix this later\n");
+ pr_warn("Sevstep: event id_s out of sync, aborting\n");
write_unlock(&event_lock);
return 1;
} else {
last_sent_eventid++;
}
event.id = last_sent_eventid;
- event.fault_gfn = fault_gfn;
- event.fault_err = error_code;
+ event.inst_fault_gfn = inst_fault_gfn;
+ event.inst_fault_err = inst_fault_err;
+ event.data_fault_avail = (data_fault_err != 0);
+ event.data_fault_gfn = data_fault_gfn;
+ event.data_fault_err = data_fault_err;
event.timestamp_ns = ktime_get_real_ns();
event.retinst = cachepc_retinst;
@@ -64,12 +67,11 @@ sevstep_uspt_send_and_block(uint64_t fault_gfn, uint32_t error_code)
write_unlock(&event_lock);
/* wait for ack with timeout */
- pr_warn("Sevstep: uspt_send_and_block: Begin wait for event ack");
+ // pr_warn("Sevstep: uspt_send_and_block: Begin wait for event ack");
deadline = ktime_get_ns() + 2000000000ULL; /* 2s in ns */
while (!sevstep_uspt_is_event_done(sent_event.id)) {
if (ktime_get_ns() > deadline) {
- pr_warn("Sevstep: uspt_send_and_block: "
- "Waiting for ack of event %llu timed out",
+			pr_warn("Sevstep: Timeout waiting for ack of event %llu\n",
sent_event.id);
return 3;
}
diff --git a/cachepc/uspt.h b/cachepc/uspt.h
@@ -18,7 +18,8 @@ extern bool uspt_init;
bool sevstep_uspt_is_initialized(void);
void sevstep_uspt_clear(void);
-int sevstep_uspt_send_and_block(uint64_t fault_gfn, uint32_t error_code);
+int sevstep_uspt_send_and_block(uint64_t inst_fault_gfn, uint32_t inst_fault_err,
+ uint64_t data_fault_gfn, uint32_t data_fault_err);
int sevstep_uspt_is_event_done(uint64_t id);
int sevstep_uspt_handle_poll_event(struct cpc_track_event *userspace_mem);
diff --git a/patch.diff b/patch.diff
@@ -169,7 +169,7 @@ index d871b8dee7b3..bfeab994420e 100644
return false;
}
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
-index 2e09d1b6249f..b139ea33b0e1 100644
+index 2e09d1b6249f..fb17064b5d53 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -19,6 +19,8 @@
@@ -193,12 +193,12 @@ index 2e09d1b6249f..b139ea33b0e1 100644
!kvm_page_track_write_tracking_enabled(kvm)))
return;
-+ pr_warn("CachePCTest: Tracking page: %llu\n", gfn);
++ //pr_warn("CachePCTest: Tracking page: %llu\n", gfn);
+
update_gfn_track(slot, gfn, mode, 1);
/*
-@@ -131,9 +134,11 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
+@@ -131,9 +134,10 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
*/
kvm_mmu_gfn_disallow_lpage(slot, gfn);
@@ -207,14 +207,13 @@ index 2e09d1b6249f..b139ea33b0e1 100644
- kvm_flush_remote_tlbs(kvm);
+ if (sevstep_kvm_mmu_slot_gfn_protect(kvm,
+ slot, gfn, PG_LEVEL_4K, mode)) {
-+ pr_warn("CachePCTest: Flushing kvm TLBs\n");
+ kvm_flush_remote_tlbs(kvm);
+ }
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
-index 7b9265d67131..749bbb2930f3 100644
+index 7b9265d67131..ba7af6bcc33e 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1810,13 +1810,8 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
@@ -239,7 +238,7 @@ index 7b9265d67131..749bbb2930f3 100644
- new_spte = iter.old_spte &
- ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
-+ pr_warn("Sevstep: tdp_protect_gfn\n");
++ //pr_warn("Sevstep: tdp_protect_gfn\n");
+ new_spte = iter.old_spte & ~shadow_mmu_writable_mask;
+ new_spte &= ~PT_WRITABLE_MASK;
+ if (mode == KVM_PAGE_TRACK_ACCESS) {
@@ -260,7 +259,7 @@ index 7b9265d67131..749bbb2930f3 100644
+ struct kvm_mmu_page *root;
+ bool spte_set = false;
+
-+ pr_warn("Sevstep: tdp_protect_gfn\n");
++ // pr_warn("Sevstep: tdp_protect_gfn\n");
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ for_each_tdp_mmu_root(kvm, root, slot->as_id)
@@ -333,7 +332,7 @@ index 7b9265d67131..749bbb2930f3 100644
/*
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index cf0bf456d520..c179012ab268 100644
+index cf0bf456d520..dc6f2e6e52d1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2,6 +2,10 @@
@@ -347,29 +346,42 @@ index cf0bf456d520..c179012ab268 100644
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
-@@ -2083,6 +2087,21 @@ static int smi_interception(struct kvm_vcpu *vcpu)
+@@ -2083,6 +2087,34 @@ static int smi_interception(struct kvm_vcpu *vcpu)
static int intr_interception(struct kvm_vcpu *vcpu)
{
-+ int err;
-+
+ if (cachepc_track_single_step && cachepc_single_step) {
-+ pr_warn("CachePC: Caught single step interrupt\n");
+ cachepc_single_step = false;
+
-+ err = sevstep_uspt_send_and_block(cachepc_last_fault_gfn,
-+ cachepc_last_fault_err);
-+ if (err) pr_warn("Sevstep: uspt_send_and_block failed (%d)\n", err);
++ if (cachepc_data_fault_avail) {
++ pr_warn("CachePC: Caught single step WITH data!\n");
++
++ sevstep_uspt_send_and_block(
++ cachepc_inst_fault_gfn, cachepc_inst_fault_err,
++ cachepc_data_fault_gfn, cachepc_data_fault_err);
++
++ sevstep_track_single(vcpu, cachepc_data_fault_gfn,
++ KVM_PAGE_TRACK_ACCESS);
++ cachepc_data_fault_avail = false;
+
-+ if (!sevstep_track_single(vcpu, cachepc_last_fault_gfn,
-+ KVM_PAGE_TRACK_ACCESS))
-+ pr_warn("Sevstep: Failed to retrack page afer single step\n");
++ sevstep_track_single(vcpu, cachepc_inst_fault_gfn,
++ KVM_PAGE_TRACK_ACCESS);
++ cachepc_inst_fault_avail = false;
++ } else if (cachepc_inst_fault_avail) {
++ pr_warn("CachePC: Caught single step WITHOUT data!\n");
++
++ sevstep_track_single(vcpu, cachepc_inst_fault_gfn,
++ KVM_PAGE_TRACK_ACCESS);
++ cachepc_inst_fault_avail = false;
++ } else {
++ pr_warn("CachePC: Unexpected single step\n");
++ }
+ }
+
++vcpu->stat.irq_exits;
return 1;
}
-@@ -3788,14 +3807,42 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3788,14 +3820,39 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long vmcb_pa = svm->current_vmcb->pa;
@@ -387,9 +399,6 @@ index cf0bf456d520..c179012ab268 100644
+ cachepc_reset_pmc(CPC_L1MISS_PMC);
+
+ cachepc_reset_pmc(CPC_RETINST_PMC);
-+
-+ if (cachepc_single_step)
-+ cachepc_apic_oneshot(150);
__svm_sev_es_vcpu_run(vmcb_pa);
+ cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC);
+
@@ -412,12 +421,7 @@ index cf0bf456d520..c179012ab268 100644
/*
* Use a single vmcb (vmcb01 because it's always valid) for
* context switching guest state via VMLOAD/VMSAVE, that way
-@@ -3803,10 +3850,20 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
- * vmcb02 when switching vmcbs for nested virtualization.
- */
- vmload(svm->vmcb01.pa);
-+ if (cachepc_single_step)
-+ cachepc_apic_oneshot(100);
+@@ -3806,7 +3863,15 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
__svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
vmsave(svm->vmcb01.pa);
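
The retinst and L1-miss readings bracketing __svm_sev_es_vcpu_run above come from performance counters. A hedged sketch of a reader along the lines of cachepc_read_pmc (the module's actual helper may differ):

	/* rdpmc loads counter `idx` into edx:eax; Zen counters are 48 bits
	 * wide, so callers may mask the result accordingly */
	static inline uint64_t read_pmc_sketch(uint32_t idx)
	{
		uint32_t lo, hi;

		asm volatile ("rdpmc" : "=a"(lo), "=d"(hi) : "c"(idx));
		return ((uint64_t) hi << 32) | lo;
	}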
diff --git a/test/sevstep.c b/test/sevstep.c
@@ -33,8 +33,7 @@
#define TARGET_CORE 2
#define SECONDARY_CORE 3
-#define TARGET_SET1 14
-#define TARGET_SET2 15
+#define TARGET_SET 15
struct kvm {
int vmfd, vcpufd;
@@ -115,12 +114,36 @@ hexdump(void *data, int len)
__attribute__((section("guest_with"))) void
vm_guest_with(void)
{
+ /* counter starts at 10 */
+ // asm volatile("mov $10, %%ebx" : : : "ebx");
+ asm volatile("mov (%0), %%al" : :
+ "r"(L1_LINESIZE * L1_SETS * 3) : "al");
+ asm volatile("mov (%0), %%al" : :
+ "r"(L1_LINESIZE * L1_SETS * 3) : "al");
+
while (1) {
- asm volatile("mov (%[v]), %%bl"
- : : [v] "r" (L1_LINESIZE * (L1_SETS + TARGET_SET1)));
- asm volatile("mov (%[v]), %%bl"
- : : [v] "r" (L1_LINESIZE * (L1_SETS * 2 + TARGET_SET2)));
+ /* read from n'th page */
+ // asm volatile("mov %0, %%ecx" : : "r" (L1_LINESIZE * L1_SETS) : "ecx");
+ // asm volatile("mov %%ebx, %%eax" : : : "ebx", "eax");
+ // asm volatile("imul %%ecx" : : : "ecx");
+ // asm volatile("mov (%%eax), %%al" : : : "rax");
+
+ /* increment counter (n) */
+ // asm volatile("inc %%ebx" : : : "ebx");
+
+ /* modulo 16 */
+ // asm volatile("xor %%edx, %%edx" : : : "edx");
+ // asm volatile("mov %%ebx, %%eax" : : : "ebx", "eax");
+ // asm volatile("mov $16, %%ecx" : : : "ecx");
+ // asm volatile("idiv %%ecx" : : : "ecx");
+ // asm volatile("mov %%edx, %%ebx" : : : "ebx", "edx");
+
+ /* L1_LINESIZE * (L1_SETS * 2 + TARGET_SET) = 0x23c0 */
+ //asm volatile("movq $0x23c0, %%rcx; mov %%eax, (%%rcx); inc %%eax"
+ // : : : "eax", "ebx", "rcx");
}
+
+ asm volatile("hlt");
}
bool
@@ -212,15 +235,19 @@ sev_get_measure(int vmfd)
memset(&msrmt, 0, sizeof(msrmt));
ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
- if (ret < 0 && fwerr != SEV_RET_INVALID_LEN)
- errx(1, "LAUNCH_MEASURE: (%s) %s", strerror(errno), sev_fwerr_str(fwerr));
+ if (ret < 0 && fwerr != SEV_RET_INVALID_LEN) {
+ errx(1, "LAUNCH_MEASURE: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+ }
data = malloc(msrmt.len);
msrmt.uaddr = (uintptr_t) data;
ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
- if (ret < 0)
- errx(1, "LAUNCH_MEASURE: (%s) %s", strerror(errno), sev_fwerr_str(fwerr));
+ if (ret < 0) {
+ errx(1, "LAUNCH_MEASURE: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+ }
return data;
}
@@ -247,9 +274,11 @@ sev_dbg_encrypt(int vmfd, void *dst, void *src, size_t size)
struct kvm_sev_dbg enc;
int ret, fwerr;
+ memset(&enc, 0, sizeof(struct kvm_sev_dbg));
enc.src_uaddr = (uintptr_t) src;
enc.dst_uaddr = (uintptr_t) dst;
enc.len = size;
+
ret = sev_ioctl(vmfd, KVM_SEV_DBG_ENCRYPT, &enc, &fwerr);
if (ret < 0) errx(1, "KVM_SEV_DBG_ENCRYPT: (%s) %s",
strerror(errno), sev_fwerr_str(fwerr));
@@ -261,9 +290,11 @@ sev_dbg_decrypt(int vmfd, void *dst, void *src, size_t size)
struct kvm_sev_dbg enc;
int ret, fwerr;
+ memset(&enc, 0, sizeof(struct kvm_sev_dbg));
enc.src_uaddr = (uintptr_t) src;
enc.dst_uaddr = (uintptr_t) dst;
enc.len = size;
+
ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr);
if (ret < 0) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s",
strerror(errno), sev_fwerr_str(fwerr));
@@ -319,8 +350,8 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
MAP_SHARED, kvm->vcpufd, 0);
if (!kvm->run) err(1, "mmap vcpu");
-
- /* Initialize segment regs */
+
+ /* Initialize segment regs */
memset(&sregs, 0, sizeof(sregs));
ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
if (ret < 0) err(1, "KVM_GET_SREGS");
@@ -328,12 +359,12 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
sregs.cs.selector = 0;
ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
if (ret < 0) err(1, "KVM_SET_SREGS");
-
- /* Initialize rest of registers */
+
+ /* Initialize rest of registers */
memset(&regs, 0, sizeof(regs));
regs.rip = 0;
- regs.rsp = kvm->memsize - 8;
- regs.rbp = kvm->memsize - 8;
+ regs.rsp = kvm->memsize - L1_SETS * L1_LINESIZE - 8;
+ regs.rbp = kvm->memsize - L1_SETS * L1_LINESIZE - 8;
ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
if (ret < 0) err(1, "KVM_SET_REGS");
@@ -345,7 +376,7 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
if (ret < 0) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
strerror(errno), sev_fwerr_str(fwerr));
-
+
/* Prepare the vm memory (by encrypting it) */
memset(&update, 0, sizeof(update));
update.uaddr = (uintptr_t) kvm->mem;
@@ -353,7 +384,7 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr);
if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s",
strerror(errno), sev_fwerr_str(fwerr));
-
+
/* Prepare the vm save area */
ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fwerr);
if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s",
@@ -366,7 +397,7 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
/* Finalize launch process */
ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr);
if (ret < 0) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
+ strerror(errno), sev_fwerr_str(fwerr));
ret = sev_guest_state(kvm->vmfd, start.handle);
if (ret != GSTATE_RUNNING)
errx(1, "Bad guest state: %s", sev_gstate_str(fwerr));
@@ -411,10 +442,8 @@ print_counts(cpc_msrmt_t *counts)
printf("\x1b[0m");
}
printf("\n");
- printf(" Target Set 1 %i Count: %llu\n",
- TARGET_SET1, counts[TARGET_SET1]);
- printf(" Target Set 2 %i Count: %llu\n",
- TARGET_SET2, counts[TARGET_SET2]);
+ printf(" Target Set %i Count: %llu\n",
+ TARGET_SET, counts[TARGET_SET]);
printf("\n");
}
@@ -436,23 +465,42 @@ svm_dbg_rip(struct kvm *kvm)
}
int
-monitor(struct kvm *kvm)
+monitor(struct kvm *kvm, bool baseline)
{
struct cpc_track_event event;
cpc_msrmt_t counts[64];
- int ret;
+ uint64_t counter_addr;
+ uint64_t counter;
+ int ret, i;
/* Get page fault info */
ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
if (!ret) {
- printf("Event: gpa:%llu retinst:%llu err:%i rip:%lu\n",
- event.fault_gfn, event.retinst,
- event.fault_err, svm_dbg_rip(kvm));
+ if (baseline && event.data_fault_avail)
+			errx(1, "Baseline measurement has data fault");
+
+ if (!baseline) {
+ counter_addr = L1_LINESIZE * (L1_SETS + TARGET_SET);
+ memcpy(&counter, kvm->mem + counter_addr, 8);
+ //sev_dbg_decrypt(kvm->vmfd, &counter, &counter_enc, 8);
+ printf("Event: inst:%llu data:%llu retired:%llu cnt:%16llX\n",
+ event.inst_fault_gfn, event.data_fault_gfn,
+ event.retinst, counter);
+ }
faultcnt++;
ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
if (ret == -1) err(1, "ioctl READ_COUNTS");
- print_counts(counts);
+
+ if (!baseline)
+ print_counts(counts);
+
+ for (i = 0; i < 64; i++) {
+ if (counts[i] > 8) {
+ errx(1, "Invalid count for set %i (%llu)",
+ i, counts[i]);
+ }
+ }
ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
if (ret == -1) err(1, "ioctl ACK_EVENT");
@@ -473,8 +521,8 @@ main(int argc, const char **argv)
uint32_t arg;
struct cpc_track_event event;
cpc_msrmt_t baseline[64];
- int ret;
-
+ int ret, i;
+
setvbuf(stdout, NULL, _IONBF, 0);
pin_process(0, TARGET_CORE, true);
@@ -517,6 +565,7 @@ main(int argc, const char **argv)
printf("VMRUN\n");
runonce(&kvm_with_access);
+ printf("VMRUN DONE\n");
} else {
pin_process(0, SECONDARY_CORE, true);
printf("PINNED\n");
@@ -526,8 +575,8 @@ main(int argc, const char **argv)
if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
faultcnt = 0;
- while (faultcnt < 20) {
- if (monitor(&kvm_with_access)) break;
+ while (faultcnt < 100) {
+ if (monitor(&kvm_with_access, true)) break;
}
do {
@@ -547,6 +596,12 @@ main(int argc, const char **argv)
print_counts(baseline);
printf("\n");
+ /* check baseline for saturated sets */
+ for (i = 0; i < 64; i++) {
+ if (baseline[i] >= 8)
+ printf("!!! Baseline set %i full\n", i);
+ }
+
arg = true;
ret = ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &arg);
if (ret == -1) err(1, "ioctl SUB_BASELINE");
@@ -559,8 +614,8 @@ main(int argc, const char **argv)
if (ret == -1) err(1, "ioctl TRACK_SINGLE_STEP");
faultcnt = 0;
- while (faultcnt < 100) {
- if (monitor(&kvm_with_access)) break;
+ while (faultcnt < 10) {
+ if (monitor(&kvm_with_access, false)) break;
}
kill(ppid, SIGTERM);