commit f2ea010b8180b4160d85c92e312971d0cd8a34d4
parent 89785aa3c8d5d4007f856b14543a9b8aef31d661
Author: Louis Burda <quent.burda@gmail.com>
Date: Tue, 17 Jan 2023 16:30:33 +0100
Fixup kvm-eviction and refactor kvm-step
Diffstat:
18 files changed, 1096 insertions(+), 1535 deletions(-)
diff --git a/Makefile b/Makefile
@@ -5,13 +5,14 @@ JOBS ?= $(CORES)
PWD := $(shell pwd)
-BINS = test/eviction test/kvm-eviction # test/kvm-execstep
+BINS = test/eviction test/kvm-eviction
+BINS += test/kvm-step #test/kvm-execstep
# BINS += test/qemu-eviction_guest test/qemu-eviction_host
# BINS += test/qemu-aes_guest test/qemu-aes_host
BINS += util/svme util/debug util/reset
-CFLAGS = -I . -I linux/usr/include -I test
-CFLAGS += -g -Wunused-variable -Wunknown-pragmas
+CFLAGS = -I . -I linux/usr/include
+CFLAGS += -g -Wunused-variable -Wunknown-pragmas -Wunused-function
CFLAGS += -fsanitize=address
CACHEPC_UAPI = cachepc/uapi.h cachepc/const.h
@@ -19,9 +20,9 @@ CACHEPC_UAPI = cachepc/uapi.h cachepc/const.h
all: build $(BINS)
clean:
- $(MAKE) -C $(LINUX) clean M=arch/x86/kvm
- $(MAKE) -C $(LINUX) clean M=crypto
- rm $(BINS)
+ $(MAKE) -C $(LINUX) clean M=arch/x86/kvm
+ $(MAKE) -C $(LINUX) clean M=crypto
+ rm -f $(BINS)
$(LINUX)/arch/x86/kvm/cachepc:
ln -sf $(PWD)/cachepc $@
@@ -49,18 +50,17 @@ load:
freq:
sudo cpupower frequency-set -f 3.7GHz
- sudo cpupower frequency-set -u 3.7GHz
- sudo cpupower frequency-set -d 3.7GHz
-
-update:
- git -C $(LINUX) diff 0aaa1e599bee256b3b15643bbb95e80ce7aa9be5 -G. > patch.diff
util/%: util/%.c $(CACHEPC_UAPI)
test/%: test/%.c $(CACHEPC_UAPI)
-test/kvm-eviction: test/kvm-eviction.c test/kvm-eviction_guest.S \
- test/kvm-eviction.h $(CACHEPC_UAPI)
- $(CC) -o $@ test/kvm-eviction.c test/kvm-eviction_guest.S $(CFLAGS)
+test/kvm-eviction: test/kvm-eviction.c test/kvm-eviction_guest.S test/util.c \
+ test/util.h test/kvm.c test/kvm.h test/kvm-eviction.h $(CACHEPC_UAPI)
+ $(CC) -o $@ $(filter %.c,$^) $(filter %.S,$^) $(CFLAGS)
+
+test/kvm-step: test/kvm-step.c test/kvm-step_guest.S \
+ test/util.c test/util.h test/kvm.c test/kvm.h $(CACHEPC_UAPI)
+ $(CC) -o $@ $(filter %.c,$^) $(filter %.S,$^) $(CFLAGS)
-.PHONY: all clean host build load freq update
+.PHONY: all clean host build load freq
diff --git a/README b/README
@@ -43,7 +43,21 @@ test/qemu-poc:
Demonstrate that AES encryption keys can be leaked from an
unmodified qemu-based linux guest.
-Testing was done on a bare-metal AMD EPYC 72F3 (Family 0x19, Model 0x01).
+Testing was done on a bare-metal AMD EPYC 72F3 (Family 0x19, Model 0x01)
+CPU and Supermicro H12SSL-i V1.01 motherboard. The following BIOS settings
+differ from the defaults:
+
+Advanced > CPU Configuration > Local APIC Mode = xAPIC
+Advanced > CPU Configuration > L1 Stream HW Prefetcher = Disabled
+Advanced > CPU Configuration > L2 Stream HW Prefetcher = Disabled
+Advanced > CPU Configuration > SMEE = Enabled
+Advanced > CPU Configuration > SEV ASID Count = 509
+Advanced > CPU Configuration > SEV ASID Space Limit Control = Manual
+Advanced > CPU Configuration > SEV ASID Space Limit = 110
+Advanced > CPU Configuration > SNP Memory (RMP Table) Coverage = Enabled
+Advanced > North Bridge Configuration > SEV-SNP Support = Enabled
+Advanced > North Bridge Configuration > Memory Configuration > TSME = Disabled
+Advanced > PCI Devices Common Settings > Memory Configuration > TSME = Disabled
To successfully build and load the kvm.ko and kvm-amd.ko modules, ensure
that a host kernel debian package was built using `make host`.
diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -243,6 +243,7 @@ cachepc_save_msrmts(cacheline *head)
do {
if (CL_IS_FIRST(curr_cl->flags)) {
BUG_ON(curr_cl->cache_set >= L1_SETS);
+ WARN_ON(curr_cl->count > L1_ASSOC);
cachepc_msrmts[curr_cl->cache_set] = curr_cl->count;
}
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
@@ -46,28 +46,28 @@ typedef struct cacheline cacheline;
typedef struct cache_ctx cache_ctx;
struct cache_ctx {
- int cache_level;
+ int cache_level;
- uint32_t sets;
- uint32_t associativity;
- uint32_t nr_of_cachelines;
- uint32_t set_size;
- uint32_t cache_size;
+ uint32_t sets;
+ uint32_t associativity;
+ uint32_t nr_of_cachelines;
+ uint32_t set_size;
+ uint32_t cache_size;
};
struct cacheline {
- /* Doubly linked cache lines inside same cache set */
- cacheline *next;
- cacheline *prev;
+ /* Doubly linked cache lines inside same cache set */
+ cacheline *next;
+ cacheline *prev;
- uint32_t cache_set;
- uint32_t cache_line;
- uint32_t flags;
+ uint32_t cache_set;
+ uint32_t cache_line;
+ uint32_t flags;
- uint64_t count;
+ uint64_t count;
- /* padding to fill cache line */
- char padding[24];
+ /* padding to fill cache line */
+ char padding[24];
};
struct cpc_fault {
@@ -127,6 +127,8 @@ extern uint8_t *cachepc_baseline;
extern bool cachepc_baseline_measure;
extern bool cachepc_baseline_active;
+extern bool cachepc_pause_vm;
+
extern bool cachepc_single_step;
extern uint32_t cachepc_track_mode;
extern uint32_t cachepc_apic_timer;
@@ -139,6 +141,7 @@ extern uint64_t cachepc_retinst_prev;
extern uint64_t cachepc_rip;
extern uint64_t cachepc_rip_prev;
+extern bool cachepc_rip_prev_set;
extern uint64_t cachepc_inst_fault_gfn;
extern uint32_t cachepc_inst_fault_err;
diff --git a/cachepc/event.c b/cachepc/event.c
@@ -57,7 +57,7 @@ cachepc_send_event(struct cpc_event event)
if (ktime_get_ns() > deadline) {
CPC_WARN("Timeout waiting for ack of event %llu\n",
cachepc_event.id);
- return 3;
+ return 1;
}
}
@@ -77,6 +77,16 @@ cachepc_send_guest_event(uint64_t type, uint64_t val)
}
int
+cachepc_send_pause_event(void)
+{
+ struct cpc_event event;
+
+ event.type = CPC_EVENT_PAUSE;
+
+ return cachepc_send_event(event);
+}
+
+int
cachepc_send_track_step_event(struct list_head *list)
{
struct cpc_event event = { 0 };
diff --git a/cachepc/event.h b/cachepc/event.h
@@ -18,9 +18,11 @@ extern bool cachepc_events_init;
void cachepc_events_reset(void);
int cachepc_send_guest_event(uint64_t type, uint64_t val);
+int cachepc_send_pause_event(void);
int cachepc_send_track_step_event(struct list_head *list);
int cachepc_send_track_step_event_single(uint64_t gfn, uint32_t err, uint64_t retinst);
int cachepc_send_track_page_event(uint64_t gfn_prev, uint64_t gfn, uint64_t retinst);
+
bool cachepc_event_is_done(uint64_t id);
int cachepc_handle_poll_event_ioctl(struct cpc_event *user);
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -29,6 +29,9 @@ EXPORT_SYMBOL(cachepc_baseline);
EXPORT_SYMBOL(cachepc_baseline_measure);
EXPORT_SYMBOL(cachepc_baseline_active);
+bool cachepc_pause_vm = false;
+EXPORT_SYMBOL(cachepc_pause_vm);
+
uint64_t cachepc_retinst = 0;
uint64_t cachepc_retinst_prev = 0;
EXPORT_SYMBOL(cachepc_retinst);
@@ -36,8 +39,10 @@ EXPORT_SYMBOL(cachepc_retinst_prev);
uint64_t cachepc_rip = 0;
uint64_t cachepc_rip_prev = 0;
+bool cachepc_rip_prev_set = false;
EXPORT_SYMBOL(cachepc_rip);
EXPORT_SYMBOL(cachepc_rip_prev);
+EXPORT_SYMBOL(cachepc_rip_prev_set);
bool cachepc_single_step = false;
uint32_t cachepc_track_mode = false;
@@ -104,19 +109,22 @@ static int cachepc_kvm_calc_baseline_ioctl(void __user *arg_user);
static int cachepc_kvm_read_baseline_ioctl(void __user *arg_user);
static int cachepc_kvm_apply_baseline_ioctl(void __user *arg_user);
-static int cachepc_kvm_single_step_ioctl(void __user *arg_user);
+//static int cachepc_kvm_single_step_ioctl(void __user *arg_user);
static int cachepc_kvm_vmsa_read_ioctl(void __user *arg_user);
static int cachepc_kvm_svme_read_ioctl(void __user *arg_user);
static int cachepc_kvm_track_mode_ioctl(void __user *arg_user);
-static int cachepc_kvm_track_page_ioctl(void __user *arg_user);
-static int cachepc_kvm_track_all_ioctl(void __user *arg_user);
-static int cachepc_kvm_untrack_all_ioctl(void __user *arg_user);
+// static int cachepc_kvm_track_page_ioctl(void __user *arg_user);
+// static int cachepc_kvm_track_all_ioctl(void __user *arg_user);
+// static int cachepc_kvm_untrack_all_ioctl(void __user *arg_user);
static int cachepc_kvm_reset_tracking_ioctl(void __user *arg_user);
-static int cachepc_kvm_track_range_start_ioctl(void __user *arg_user);
-static int cachepc_kvm_track_range_end_ioctl(void __user *arg_user);
-static int cachepc_kvm_track_exec_cur_ioctl(void __user *arg_user);
+// static int cachepc_kvm_track_range_start_ioctl(void __user *arg_user);
+// static int cachepc_kvm_track_range_end_ioctl(void __user *arg_user);
+// static int cachepc_kvm_track_exec_cur_ioctl(void __user *arg_user);
+
+static int cachepc_kvm_vm_pause_ioctl(void __user *arg_user);
+static int cachepc_kvm_vm_resume_ioctl(void __user *arg_user);
static int cachepc_kvm_poll_event_ioctl(void __user *arg_user);
static int cachepc_kvm_ack_event_ioctl(void __user *arg_user);
@@ -340,6 +348,9 @@ cachepc_kvm_reset_ioctl(void __user *arg_user)
put_cpu();
+ cachepc_kvm_reset_tracking_ioctl(NULL);
+ cachepc_kvm_reset_baseline_ioctl(NULL);
+
return 0;
}
@@ -395,6 +406,8 @@ cachepc_kvm_reset_baseline_ioctl(void __user *arg_user)
{
if (arg_user) return -EINVAL;
+ cachepc_baseline_active = false;
+ cachepc_baseline_measure = false;
memset(cachepc_baseline, 0xff, L1_SETS);
return 0;
@@ -452,6 +465,7 @@ cachepc_kvm_single_step_ioctl(void __user *arg_user)
int
cachepc_kvm_track_mode_ioctl(void __user *arg_user)
{
+ struct kvm_vcpu *vcpu;
uint32_t mode;
if (!arg_user) return -EINVAL;
@@ -462,6 +476,27 @@ cachepc_kvm_track_mode_ioctl(void __user *arg_user)
cachepc_single_step = false;
cachepc_track_mode = mode;
+ BUG_ON(!main_vm || xa_empty(&main_vm->vcpu_array));
+ vcpu = xa_load(&main_vm->vcpu_array, 0);
+
+ cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
+ cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
+ cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE);
+
+ switch (mode) {
+ case CPC_TRACK_FULL:
+ cachepc_track_all(vcpu, KVM_PAGE_TRACK_ACCESS);
+ mode = CPC_TRACK_FULL;
+ break;
+ case CPC_TRACK_EXEC:
+ cachepc_track_all(vcpu, KVM_PAGE_TRACK_EXEC);
+ mode = CPC_TRACK_EXEC;
+ break;
+ default:
+ mode = CPC_TRACK_NONE;
+ break;
+ }
+
return 0;
}
@@ -528,49 +563,49 @@ cachepc_kvm_svme_read_ioctl(void __user *arg_user)
return 0;
}
-int
-cachepc_kvm_track_all_ioctl(void __user *arg_user)
-{
- struct kvm_vcpu *vcpu;
- uint32_t mode;
-
- if (!main_vm || !arg_user) return -EINVAL;
-
- if (copy_from_user(&mode, arg_user, sizeof(mode)))
- return -EFAULT;
-
- if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
- return -EINVAL;
-
- BUG_ON(xa_empty(&main_vm->vcpu_array));
- vcpu = xa_load(&main_vm->vcpu_array, 0);
- if (!cachepc_track_all(vcpu, mode))
- return -EFAULT;
-
- return 0;
-}
-
-int
-cachepc_kvm_untrack_all_ioctl(void __user *arg_user)
-{
- struct kvm_vcpu *vcpu;
- uint32_t mode;
-
- if (!main_vm || !arg_user) return -EINVAL;
-
- if (copy_from_user(&mode, arg_user, sizeof(mode)))
- return -EFAULT;
-
- if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
- return -EINVAL;
-
- BUG_ON(xa_empty(&main_vm->vcpu_array));
- vcpu = xa_load(&main_vm->vcpu_array, 0);
- if (!cachepc_untrack_all(vcpu, mode))
- return -EFAULT;
-
- return 0;
-}
+// int
+// cachepc_kvm_track_all_ioctl(void __user *arg_user)
+// {
+// struct kvm_vcpu *vcpu;
+// uint32_t mode;
+//
+// if (!main_vm || !arg_user) return -EINVAL;
+//
+// if (copy_from_user(&mode, arg_user, sizeof(mode)))
+// return -EFAULT;
+//
+// if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
+// return -EINVAL;
+//
+// BUG_ON(xa_empty(&main_vm->vcpu_array));
+// vcpu = xa_load(&main_vm->vcpu_array, 0);
+// if (!cachepc_track_all(vcpu, mode))
+// return -EFAULT;
+//
+// return 0;
+// }
+//
+// int
+// cachepc_kvm_untrack_all_ioctl(void __user *arg_user)
+// {
+// struct kvm_vcpu *vcpu;
+// uint32_t mode;
+//
+// if (!main_vm || !arg_user) return -EINVAL;
+//
+// if (copy_from_user(&mode, arg_user, sizeof(mode)))
+// return -EFAULT;
+//
+// if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
+// return -EINVAL;
+//
+// BUG_ON(xa_empty(&main_vm->vcpu_array));
+// vcpu = xa_load(&main_vm->vcpu_array, 0);
+// if (!cachepc_untrack_all(vcpu, mode))
+// return -EFAULT;
+//
+// return 0;
+// }
int
cachepc_kvm_reset_tracking_ioctl(void __user *arg_user)
@@ -578,7 +613,7 @@ cachepc_kvm_reset_tracking_ioctl(void __user *arg_user)
struct kvm_vcpu *vcpu;
struct cpc_fault *fault, *next;
- BUG_ON(xa_empty(&main_vm->vcpu_array));
+ BUG_ON(!main_vm || xa_empty(&main_vm->vcpu_array));
vcpu = xa_load(&main_vm->vcpu_array, 0);
cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
@@ -627,42 +662,94 @@ cachepc_kvm_ack_event_ioctl(void __user *arg_user)
return cachepc_handle_ack_event_ioctl(eventid);
}
-int
-cachepc_kvm_track_range_start_ioctl(void __user *arg_user)
-{
- if (!arg_user) return -EINVAL;
-
- if (copy_from_user(&cachepc_track_start_gfn, arg_user, sizeof(uint64_t)))
- return -EFAULT;
-
- return 0;
-}
+// int
+// cachepc_kvm_track_range_start_ioctl(void __user *arg_user)
+// {
+// if (!arg_user) return -EINVAL;
+//
+// if (copy_from_user(&cachepc_track_start_gfn, arg_user, sizeof(uint64_t)))
+// return -EFAULT;
+//
+// return 0;
+// }
+//
+// int
+// cachepc_kvm_track_range_end_ioctl(void __user *arg_user)
+// {
+// if (!arg_user) return -EINVAL;
+//
+// if (copy_from_user(&cachepc_track_end_gfn, arg_user, sizeof(uint64_t)))
+// return -EFAULT;
+//
+// return 0;
+// }
+//
+// int
+// cachepc_kvm_track_exec_cur_ioctl(void __user *arg_user)
+// {
+// struct cpc_fault *fault;
+//
+// if (!arg_user) return -EINVAL;
+//
+// fault = list_first_entry(&cachepc_faults, struct cpc_fault, list);
+// if (!fault) return -EFAULT;
+//
+// if (copy_to_user(arg_user, &fault->gfn, sizeof(uint64_t)))
+// return -EFAULT;
+//
+// return 0;
+// }
int
-cachepc_kvm_track_range_end_ioctl(void __user *arg_user)
+cachepc_kvm_vm_pause_ioctl(void __user *arg_user)
{
+ uint64_t deadline;
+ int err;
+
if (!arg_user) return -EINVAL;
- if (copy_from_user(&cachepc_track_end_gfn, arg_user, sizeof(uint64_t)))
- return -EFAULT;
+ if (!cachepc_events_init)
+ return -EINVAL;
- return 0;
+ cachepc_pause_vm = true;
+
+ deadline = ktime_get_ns() + 20000000000ULL; /* 20s in ns */
+ while (true) {
+ write_lock(&cachepc_event_lock);
+ if (cachepc_event_avail) {
+ err = copy_to_user(arg_user, &cachepc_event,
+ sizeof(struct cpc_event));
+ cachepc_event_avail = false;
+ write_unlock(&cachepc_event_lock);
+ return 0;
+ }
+ write_unlock(&cachepc_event_lock);
+ if (ktime_get_ns() > deadline) {
+ CPC_WARN("Timeout waiting for pause event\n");
+ cachepc_pause_vm = false;
+ return -EFAULT;
+ }
+ }
+
+ return err;
}
int
-cachepc_kvm_track_exec_cur_ioctl(void __user *arg_user)
+cachepc_kvm_vm_resume_ioctl(void __user *arg_user)
{
- struct cpc_fault *fault;
+ uint64_t eventid;
if (!arg_user) return -EINVAL;
- fault = list_first_entry(&cachepc_faults, struct cpc_fault, list);
- if (!fault) return -EFAULT;
+ if (!cachepc_events_init)
+ return -EINVAL;
- if (copy_to_user(arg_user, &fault->gfn, sizeof(uint64_t)))
+ if (copy_from_user(&eventid, arg_user, sizeof(eventid)))
return -EFAULT;
- return 0;
+ cachepc_pause_vm = false;
+
+ return cachepc_handle_ack_event_ioctl(eventid);
}
long
@@ -688,32 +775,36 @@ cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
return cachepc_kvm_calc_baseline_ioctl(arg_user);
case KVM_CPC_APPLY_BASELINE:
return cachepc_kvm_apply_baseline_ioctl(arg_user);
- case KVM_CPC_SINGLE_STEP:
- return cachepc_kvm_single_step_ioctl(arg_user);
+ // case KVM_CPC_SINGLE_STEP:
+ // return cachepc_kvm_single_step_ioctl(arg_user);
case KVM_CPC_VMSA_READ:
return cachepc_kvm_vmsa_read_ioctl(arg_user);
case KVM_CPC_SVME_READ:
return cachepc_kvm_svme_read_ioctl(arg_user);
case KVM_CPC_TRACK_MODE:
return cachepc_kvm_track_mode_ioctl(arg_user);
- case KVM_CPC_TRACK_PAGE:
- return cachepc_kvm_track_page_ioctl(arg_user);
- case KVM_CPC_TRACK_ALL:
- return cachepc_kvm_track_all_ioctl(arg_user);
- case KVM_CPC_UNTRACK_ALL:
- return cachepc_kvm_untrack_all_ioctl(arg_user);
+ // case KVM_CPC_TRACK_PAGE:
+ // return cachepc_kvm_track_page_ioctl(arg_user);
+ // case KVM_CPC_TRACK_ALL:
+ // return cachepc_kvm_track_all_ioctl(arg_user);
+ // case KVM_CPC_UNTRACK_ALL:
+ // return cachepc_kvm_untrack_all_ioctl(arg_user);
case KVM_CPC_RESET_TRACKING:
return cachepc_kvm_reset_tracking_ioctl(arg_user);
case KVM_CPC_POLL_EVENT:
return cachepc_kvm_poll_event_ioctl(arg_user);
case KVM_CPC_ACK_EVENT:
return cachepc_kvm_ack_event_ioctl(arg_user);
- case KVM_CPC_TRACK_RANGE_START:
- return cachepc_kvm_track_range_start_ioctl(arg_user);
- case KVM_CPC_TRACK_RANGE_END:
- return cachepc_kvm_track_range_end_ioctl(arg_user);
- case KVM_CPC_TRACK_EXEC_CUR:
- return cachepc_kvm_track_exec_cur_ioctl(arg_user);
+ // case KVM_CPC_TRACK_RANGE_START:
+ // return cachepc_kvm_track_range_start_ioctl(arg_user);
+ // case KVM_CPC_TRACK_RANGE_END:
+ // return cachepc_kvm_track_range_end_ioctl(arg_user);
+ // case KVM_CPC_TRACK_EXEC_CUR:
+ // return cachepc_kvm_track_exec_cur_ioctl(arg_user);
+ case KVM_CPC_VM_PAUSE:
+ return cachepc_kvm_vm_pause_ioctl(arg_user);
+ case KVM_CPC_VM_RESUME:
+ return cachepc_kvm_vm_resume_ioctl(arg_user);
default:
return kvm_arch_dev_ioctl(file, ioctl, arg);
}
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -28,22 +28,19 @@
#define KVM_CPC_SVME_READ _IOR(KVMIO, 0x2D, __u32)
#define KVM_CPC_TRACK_MODE _IOWR(KVMIO, 0x40, __u32)
-#define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x41, struct cpc_track_config)
-#define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x42, __u32)
-#define KVM_CPC_UNTRACK_ALL _IOWR(KVMIO, 0x43, __u32)
#define KVM_CPC_RESET_TRACKING _IO(KVMIO, 0x44)
-#define KVM_CPC_TRACK_RANGE_START _IOWR(KVMIO, 0x45, __u64)
-#define KVM_CPC_TRACK_RANGE_END _IOWR(KVMIO, 0x46, __u64)
-#define KVM_CPC_TRACK_EXEC_CUR _IOWR(KVMIO, 0x47, __u64)
#define KVM_CPC_POLL_EVENT _IOWR(KVMIO, 0x48, struct cpc_event)
#define KVM_CPC_ACK_EVENT _IOWR(KVMIO, 0x49, __u64)
+#define KVM_CPC_VM_PAUSE _IO(KVMIO, 0x50)
+#define KVM_CPC_VM_RESUME _IO(KVMIO, 0x51)
enum {
CPC_EVENT_NONE,
CPC_EVENT_TRACK_STEP,
CPC_EVENT_TRACK_PAGE,
+ CPC_EVENT_PAUSE,
CPC_EVENT_CPUID,
};
@@ -54,7 +51,6 @@ enum {
enum {
CPC_TRACK_NONE,
- CPC_TRACK_STUB,
CPC_TRACK_EXEC,
CPC_TRACK_FULL,
};
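
The two ioctls added above pair with the existing baseline ioctls. Below is a minimal userspace sketch of the intended flow, modeled on the monitor child in test/kvm-step.c further down; it is illustrative only (not part of this commit) and assumes the cachepc module is loaded, a vCPU of the target VM is running in another process, and baseline measurement was switched on earlier via KVM_CPC_CALC_BASELINE.

#include "cachepc/uapi.h"

#include <sys/ioctl.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <err.h>

int
main(void)
{
        struct cpc_event event;
        uint8_t baseline[L1_SETS];
        uint32_t arg;
        int kvm_dev, ret;

        kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        if (kvm_dev < 0) err(1, "open /dev/kvm");

        /* pause the vCPU: blocks until the next exit, which is then
         * held back until we ack it via KVM_CPC_VM_RESUME */
        ret = ioctl(kvm_dev, KVM_CPC_VM_PAUSE, &event);
        if (ret) err(1, "ioctl KVM_CPC_VM_PAUSE");

        /* stop baseline measurement (assumed started earlier) and
         * read out the per-set baseline */
        arg = false;
        ret = ioctl(kvm_dev, KVM_CPC_CALC_BASELINE, &arg);
        if (ret) err(1, "ioctl KVM_CPC_CALC_BASELINE");
        ret = ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline);
        if (ret) err(1, "ioctl KVM_CPC_READ_BASELINE");

        /* subtract the baseline from all subsequent measurements */
        arg = true;
        ret = ioctl(kvm_dev, KVM_CPC_APPLY_BASELINE, &arg);
        if (ret) err(1, "ioctl KVM_CPC_APPLY_BASELINE");

        /* ack the held-back pause event so the vCPU may continue */
        ret = ioctl(kvm_dev, KVM_CPC_VM_RESUME, &event.id);
        if (ret) err(1, "ioctl KVM_CPC_VM_RESUME");

        close(kvm_dev);

        return 0;
}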
diff --git a/test/kvm-eviction.c b/test/kvm-eviction.c
@@ -1,551 +1,56 @@
-#define _GNU_SOURCE
-
-#include "kvm-eviction.h"
+#include "test/kvm-eviction.h"
+#include "test/kvm.h"
+#include "test/util.h"
#include "cachepc/uapi.h"
-#include <linux/psp-sev.h>
-#include <linux/kvm.h>
-#include <sys/syscall.h>
-#include <sys/ioctl.h>
-#include <sys/user.h>
-#include <sys/wait.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
#include <unistd.h>
-#include <signal.h>
-#include <dirent.h>
-#include <assert.h>
-#include <errno.h>
-#include <err.h>
#include <fcntl.h>
-#include <sched.h>
+#include <err.h>
#include <string.h>
#include <stdbool.h>
-#include <stdlib.h>
-#include <stdint.h>
#include <stdio.h>
-#include <stdarg.h>
-
-#define ARRLEN(x) (sizeof(x) / sizeof((x)[0]))
-#define MIN(a,b) ((a) > (b) ? (b) : (a))
+#include <stdlib.h>
#define SAMPLE_COUNT 64
#define TARGET_CORE 2
#define SECONDARY_CORE 3
-enum {
- WITH,
- WITHOUT
-};
-
-struct kvm {
- int fd, vmfd, vcpufd;
- void *mem;
- size_t memsize;
- struct kvm_run *run;
-};
-
-/* start and end for guest assembly */
-extern uint8_t start_guest_with[];
-extern uint8_t stop_guest_with[];
-extern uint8_t start_guest_without[];
-extern uint8_t stop_guest_without[];
+extern uint8_t guest_with_start[];
+extern uint8_t guest_with_stop[];
+extern uint8_t guest_without_start[];
+extern uint8_t guest_without_stop[];
static const char *vmtype;
-static int kvm_dev, sev_dev;
-
-enum {
- GSTATE_UNINIT,
- GSTATE_LUPDATE,
- GSTATE_LSECRET,
- GSTATE_RUNNING,
- GSTATE_SUPDATE,
- GSTATE_RUPDATE,
- GSTATE_SENT
-};
-
-const char *sev_fwerr_strs[] = {
- "Success",
- "Platform state is invalid",
- "Guest state is invalid",
- "Platform configuration is invalid",
- "Buffer too small",
- "Platform is already owned",
- "Certificate is invalid",
- "Policy is not allowed",
- "Guest is not active",
- "Invalid address",
- "Bad signature",
- "Bad measurement",
- "Asid is already owned",
- "Invalid ASID",
- "WBINVD is required",
- "DF_FLUSH is required",
- "Guest handle is invalid",
- "Invalid command",
- "Guest is active",
- "Hardware error",
- "Hardware unsafe",
- "Feature not supported",
- "Invalid parameter",
- "Out of resources",
- "Integrity checks failed"
-};
-
-const char *sev_gstate_strs[] = {
- "UNINIT",
- "LUPDATE",
- "LSECRET",
- "RUNNING",
- "SUPDATE",
- "RUPDATE",
- "SEND"
-};
-
-void
-hexdump(void *data, int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (i % 16 == 0 && i)
- printf("\n");
- printf("%02X ", *(uint8_t *)(data + i));
- }
- printf("\n");
-}
-
-bool
-pin_process(pid_t pid, int cpu, bool assert)
-{
- cpu_set_t cpuset;
- int ret;
-
- CPU_ZERO(&cpuset);
- CPU_SET(cpu, &cpuset);
- ret = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset);
- if (ret == -1) {
- if (assert) err(1, "sched_setaffinity");
- return false;
- }
-
- return true;
-}
-
-int
-read_stat_core(pid_t pid)
-{
- char path[256];
- char line[2048];
- FILE *file;
- char *p;
- int i, cpu;
-
- snprintf(path, sizeof(path), "/proc/%u/stat", pid);
- file = fopen(path, "r");
- if (!file) return -1;
-
- if (!fgets(line, sizeof(line), file))
- err(1, "read stat");
-
- p = line;
- for (i = 0; i < 38 && (p = strchr(p, ' ')); i++)
- p += 1;
-
- if (!p) errx(1, "stat format");
- cpu = atoi(p);
-
- fclose(file);
-
- return cpu;
-}
-
-const char *
-sev_fwerr_str(int code)
-{
- if (code < 0 || code >= ARRLEN(sev_fwerr_strs))
- return "Unknown error";
-
- return sev_fwerr_strs[code];
-}
-
-const char *
-sev_gstate_str(int code)
-{
- if (code < 0 || code >= ARRLEN(sev_gstate_strs))
- return "Unknown gstate";
-
- return sev_gstate_strs[code];
-}
-
-int
-sev_ioctl(int vmfd, int cmd, void *data, int *error)
-{
- struct kvm_sev_cmd input;
- int ret;
-
- memset(&input, 0, sizeof(input));
- input.id = cmd;
- input.sev_fd = sev_dev;
- input.data = (uintptr_t) data;
-
- ret = ioctl(vmfd, KVM_MEMORY_ENCRYPT_OP, &input);
- if (error) *error = input.error;
-
- return ret;
-}
-
-void
-sev_get_measure(int vmfd)
+uint64_t
+vm_get_rip(struct kvm *kvm)
{
- struct kvm_sev_launch_measure msrmt;
- int ret, fwerr;
- uint8_t *data;
-
- memset(&msrmt, 0, sizeof(msrmt));
- ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
- if (ret == -1 && fwerr != SEV_RET_INVALID_LEN)
- errx(1, "KVM_SEV_LAUNCH_MEASURE: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- data = malloc(msrmt.len);
- msrmt.uaddr = (uintptr_t) data;
-
- ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_LAUNCH_MEASURE: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- free(data);
-}
-
-uint8_t
-sev_guest_state(int vmfd, uint32_t handle)
-{
- struct kvm_sev_guest_status status;
- int ret, fwerr;
-
- status.handle = handle;
- ret = sev_ioctl(vmfd, KVM_SEV_GUEST_STATUS, &status, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_GUEST_STATUS: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- return status.state;
-}
-
-void
-sev_debug_encrypt(int vmfd, void *src, void *dst, size_t size)
-{
- struct kvm_sev_dbg enc;
- int ret, fwerr;
-
- enc.src_uaddr = (uintptr_t) src;
- enc.dst_uaddr = (uintptr_t) dst;
- enc.len = size;
- ret = sev_ioctl(vmfd, KVM_SEV_DBG_ENCRYPT, &enc, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_DBG_ENCRYPT: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-}
-
-void
-sev_debug_decrypt(int vmfd, void *src, void *dst, size_t size)
-{
- struct kvm_sev_dbg enc;
- int ret, fwerr;
-
- enc.src_uaddr = (uintptr_t) src;
- enc.dst_uaddr = (uintptr_t) dst;
- enc.len = size;
- ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-}
-
-void
-kvm_init(struct kvm *kvm, size_t ramsize,
- void *code_start, void *code_stop)
-{
- struct kvm_userspace_memory_region region;
struct kvm_regs regs;
- struct kvm_sregs sregs;
+ uint64_t rip;
int ret;
- /* Create a kvm instance */
- kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
- if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
-
- /* Allocate guest memory */
- kvm->memsize = ramsize;
- kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (!kvm->mem) err(1, "Allocating guest memory");
- assert(code_stop - code_start <= kvm->memsize);
- memcpy(kvm->mem, code_start, code_stop - code_start);
-
- /* Map it into the vm */
- memset(&region, 0, sizeof(region));
- region.slot = 0;
- region.memory_size = kvm->memsize;
- region.guest_phys_addr = 0x0000;
- region.userspace_addr = (uintptr_t) kvm->mem;
- ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region);
- if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
-
- /* Create virtual cpu core */
- kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
- if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
-
- /* Map the shared kvm_run structure and following data */
- ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
- if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
- if (ret < sizeof(struct kvm_run))
- errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
- kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
- MAP_SHARED, kvm->vcpufd, 0);
- if (!kvm->run) err(1, "mmap vcpu");
-
- /* Initialize segment regs */
- memset(&sregs, 0, sizeof(sregs));
- ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
- if (ret == -1) err(1, "KVM_GET_SREGS");
- sregs.cs.base = 0;
- sregs.cs.selector = 0;
- ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
- if (ret == -1) err(1, "KVM_SET_SREGS");
-
- /* Initialize rest of registers */
- memset(&regs, 0, sizeof(regs));
- regs.rip = 0;
- regs.rsp = kvm->memsize - 8;
- regs.rbp = kvm->memsize - 8;
- regs.rflags = 0x2;
- ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
- if (ret == -1) err(1, "KVM_SET_REGS");
-}
-
-void
-sev_kvm_init(struct kvm *kvm, size_t ramsize,
- void *code_start, void *code_stop)
-{
- struct kvm_userspace_memory_region region;
- struct kvm_sev_launch_update_data update;
- struct kvm_sev_launch_start start;
- struct kvm_regs regs;
- struct kvm_sregs sregs;
- int ret, fwerr;
-
- /* Create a kvm instance */
- kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
- if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
-
- /* Allocate guest memory */
- kvm->memsize = ramsize;
- kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (!kvm->mem) err(1, "Allocating guest memory");
- assert(code_stop - code_start <= kvm->memsize);
- memcpy(kvm->mem, code_start, code_stop - code_start);
-
- /* Map it into the vm */
- memset(&region, 0, sizeof(region));
- region.slot = 0;
- region.memory_size = kvm->memsize;
- region.guest_phys_addr = 0;
- region.userspace_addr = (uintptr_t) kvm->mem;
- ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region);
- if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
-
- /* Enable SEV for vm */
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_INIT, NULL, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_INIT: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Create virtual cpu core */
- kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
- if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
-
- /* Map the shared kvm_run structure and following data */
- ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
- if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
- if (ret < sizeof(struct kvm_run))
- errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
- kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
- MAP_SHARED, kvm->vcpufd, 0);
- if (!kvm->run) err(1, "mmap vcpu");
-
- /* Initialize segment regs */
- memset(&sregs, 0, sizeof(sregs));
- ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
- if (ret == -1) err(1, "KVM_GET_SREGS");
- sregs.cs.base = 0;
- sregs.cs.selector = 0;
- ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
- if (ret == -1) err(1, "KVM_SET_SREGS");
-
- /* Initialize rest of registers */
- memset(&regs, 0, sizeof(regs));
- regs.rip = 0;
- regs.rsp = kvm->memsize - 8;
- regs.rbp = kvm->memsize - 8;
- regs.rflags = 0x2;
- ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
- if (ret == -1) err(1, "KVM_SET_REGS");
-
- /* Generate encryption keys and set policy */
- memset(&start, 0, sizeof(start));
- start.handle = 0;
- start.policy = 0;
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Prepare the vm memory (by encrypting it) */
- memset(&update, 0, sizeof(update));
- update.uaddr = (uintptr_t) kvm->mem;
- update.len = ramsize;
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Collect a measurement (necessary) */
- sev_get_measure(kvm->vmfd);
-
- /* Finalize launch process */
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- ret = sev_guest_state(kvm->vmfd, start.handle);
- if (ret != GSTATE_RUNNING)
- errx(1, "Bad guest state: %s", sev_gstate_str(fwerr));
-}
+ if (!strcmp(vmtype, "sev-snp")) {
+ rip = snp_dbg_decrypt_rip(kvm->vmfd);
+ } else if (!strcmp(vmtype, "sev-es")) {
+ rip = sev_dbg_decrypt_rip(kvm->vmfd);
+ } else {
+ ret = ioctl(kvm->vcpufd, KVM_GET_REGS, &regs);
+ if (ret == -1) err(1, "KVM_GET_REGS");
+ rip = regs.rip;
+ }
-void
-sev_es_kvm_init(struct kvm *kvm, size_t ramsize,
- void *code_start, void *code_stop)
-{
- struct kvm_sev_launch_update_data update;
- struct kvm_sev_launch_start start;
- struct kvm_userspace_memory_region region;
- struct kvm_sev_dbg dec;
- struct kvm_regs regs;
- struct kvm_sregs sregs;
- int ret, fwerr;
- void *buf;
-
- /* Create a kvm instance */
- kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
- if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
-
- /* Allocate guest memory */
- kvm->memsize = ramsize;
- kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (!kvm->mem) err(1, "Allocating guest memory");
- assert(code_stop - code_start <= kvm->memsize);
- memcpy(kvm->mem, code_start, code_stop - code_start);
-
- /* Map it into the vm */
- memset(&region, 0, sizeof(region));
- region.slot = 0;
- region.memory_size = kvm->memsize;
- region.guest_phys_addr = 0;
- region.userspace_addr = (uintptr_t) kvm->mem;
- ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region);
- if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
-
- /* Enable SEV-ES for vm */
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_ES_INIT, NULL, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_ES_INIT: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Create virtual cpu */
- kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
- if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
-
- /* Map the shared kvm_run structure and following data */
- ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
- if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
- if (ret < sizeof(struct kvm_run))
- errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
- kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
- MAP_SHARED, kvm->vcpufd, 0);
- if (!kvm->run) err(1, "mmap vcpu");
-
- /* Initialize segment regs */
- memset(&sregs, 0, sizeof(sregs));
- ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
- if (ret == -1) err(1, "KVM_GET_SREGS");
- sregs.cs.base = 0;
- sregs.cs.selector = 0;
- ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
- if (ret == -1) err(1, "KVM_SET_SREGS");
-
- /* Initialize rest of registers */
- memset(&regs, 0, sizeof(regs));
- regs.rip = 0;
- regs.rsp = kvm->memsize - 8;
- regs.rbp = kvm->memsize - 8;
- ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
- if (ret == -1) err(1, "KVM_SET_REGS");
-
- /* Generate encryption keys and set policy */
- memset(&start, 0, sizeof(start));
- start.handle = 0;
- start.policy = 1 << 2; /* require ES */
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Prepare the vm memory (by encrypting it) */
- memset(&update, 0, sizeof(update));
- update.uaddr = (uintptr_t) kvm->mem;
- update.len = ramsize;
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Prepare the vm save area */
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Collect a measurement (necessary) */
- sev_get_measure(kvm->vmfd);
-
- /* Finalize launch process */
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
- ret = sev_guest_state(kvm->vmfd, start.handle);
- if (ret != GSTATE_RUNNING)
- errx(1, "Bad guest state: %s", sev_gstate_str(fwerr));
-
- /* Validate code was encrypted correctly */
- buf = malloc(ramsize);
- dec.src_uaddr = (uint64_t) kvm->mem;
- dec.dst_uaddr = (uint64_t) buf;
- dec.len = ramsize;
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_DBG_DECRYPT, &dec, &fwerr);
- if (ret == -1) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
- if (memcmp(buf, code_start, code_stop - code_start))
- errx(1, "VM ram not encrypted correctly");
+ return rip;
}
void
-sev_snp_kvm_init(struct kvm *kvm, size_t ramsize,
- void *code_start, void *code_stop)
+vm_init(struct kvm *kvm, void *code_start, void *code_end)
{
- errx(1, "Not implemented");
-}
+ size_t ramsize;
-void
-vm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_end)
-{
+ ramsize = L1_SIZE * 2;
if (!strcmp(vmtype, "kvm")) {
kvm_init(kvm, ramsize, code_start, code_end);
} else if (!strcmp(vmtype, "sev")) {
@@ -562,51 +67,21 @@ vm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_end)
void
vm_deinit(struct kvm *kvm)
{
- close(kvm->vmfd);
- close(kvm->vcpufd);
- munmap(kvm->mem, kvm->memsize);
-}
-
-void
-print_counts(uint8_t *counts)
-{
- int i;
-
- for (i = 0; i < 64; i++) {
- if (i % 16 == 0 && i)
- printf("\n");
- if (counts[i] == 1)
- printf("\x1b[38;5;88m");
- else if (counts[i] > 1)
- printf("\x1b[38;5;196m");
- printf("%2i ", i);
- if (counts[i] > 0)
- printf("\x1b[0m");
- }
-
- printf("\nTarget Set %i Count: %u\n\n",
- TARGET_SET, counts[TARGET_SET]);
+ kvm_deinit(kvm);
}
void
collect(struct kvm *kvm, uint8_t *counts)
{
- struct kvm_regs regs;
int ret;
- /* run vm twice, use count without initial stack setup */
ret = ioctl(kvm->vcpufd, KVM_RUN, NULL);
if (ret == -1) err(1, "KVM_RUN");
+ // warnx("rip:%lu code:%i", vm_get_rip(kvm), kvm->run->exit_reason);
- if (kvm->run->exit_reason == KVM_EXIT_MMIO) {
- memset(&regs, 0, sizeof(regs));
- ret = ioctl(kvm->vcpufd, KVM_GET_REGS, &regs);
- if (ret == -1) err(1, "KVM_GET_REGS");
- errx(1, "Victim access OOB: %llu %08llx => %02X\n",
- kvm->run->mmio.phys_addr, regs.rip,
- ((uint8_t *)kvm->mem)[regs.rip]);
- } else if (kvm->run->exit_reason != KVM_EXIT_HYPERCALL) {
- errx(1, "KVM died: %i\n", kvm->run->exit_reason);
+ if (kvm->run->exit_reason != KVM_EXIT_HLT) {
+ errx(1, "KVM died! rip:%lu code:%i",
+ vm_get_rip(kvm), kvm->run->exit_reason);
}
ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
@@ -632,31 +107,28 @@ main(int argc, const char **argv)
pin_process(0, TARGET_CORE, true);
- kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC);
- if (kvm_dev < 0) err(1, "open /dev/kvm");
-
- sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC);
- if (sev_dev < 0) err(1, "open /dev/sev");
+ kvm_setup_init();
- /* Make sure we have the stable version of the API */
- ret = ioctl(kvm_dev, KVM_GET_API_VERSION, NULL);
- if (ret == -1) err(1, "KVM_GET_API_VERSION");
- if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
+ vm_init(&vms[WITH], guest_with_start, guest_with_stop);
+ vm_init(&vms[WITHOUT], guest_without_start, guest_without_stop);
- /* Reset kernel module state */
+ /* reset kernel module state */
ret = ioctl(kvm_dev, KVM_CPC_RESET);
if (ret == -1) err(1, "ioctl KVM_CPC_RESET");
- vm_init(&vms[WITH], 2 * L1_SIZE,
- start_guest_with, stop_guest_with);
- vm_init(&vms[WITHOUT], 2 * L1_SIZE,
- start_guest_without, stop_guest_without);
+ /* resolve page faults in advance (code only covers 1 page)..
+ * we want the read counts to apply between KVM_RUN and KVM_EXIT_HLT,
+ * any exits through PFs in between will influence our measurement */
+ collect(&vms[WITH], counts[WITH][0]);
+ collect(&vms[WITHOUT], counts[WITHOUT][0]);
+ /* collect samples */
for (i = 0; i < SAMPLE_COUNT; i++) {
collect(&vms[WITH], counts[WITH][i]);
collect(&vms[WITHOUT], counts[WITHOUT][i]);
}
+ /* calculate measurement baseline */
memset(baseline, 0xff, L1_SETS);
for (i = 0; i < SAMPLE_COUNT; i++) {
for (k = 0; k < L1_SETS; k++) {
@@ -667,28 +139,34 @@ main(int argc, const char **argv)
}
}
+ /* apply baseline and output samples */
for (i = 0; i < SAMPLE_COUNT; i++) {
for (k = 0; k < L1_SETS; k++) {
counts[WITH][i][k] -= baseline[k];
counts[WITHOUT][i][k] -= baseline[k];
}
- printf("=== Sample %2i ===\n\n", i);
+ printf("\n=== Sample %2i ===\n", i);
- printf("With eviction:\n");
+ printf("\nWith eviction:\n\n");
print_counts(counts[WITH][i]);
+ printf("\n");
+ print_counts_raw(counts[WITH][i]);
- printf("Without eviction:\n");
+ printf("\nWithout eviction:\n\n");
print_counts(counts[WITHOUT][i]);
+ printf("\n");
+ print_counts_raw(counts[WITHOUT][i]);
}
+ /* check for measurement errors */
for (i = 0; i < SAMPLE_COUNT; i++) {
for (k = 0; k < L1_SETS; k++) {
- if (counts[WITH][i][k] + baseline[k] >= L1_ASSOC)
+ if (counts[WITH][i][k] + baseline[k] > L1_ASSOC)
warnx("sample %i: With count OOB for set %i (=%i)",
i, k, counts[WITH][i][k] + baseline[k]);
- if (counts[WITHOUT][i][k] + baseline[k] >= L1_ASSOC)
+ if (counts[WITHOUT][i][k] + baseline[k] > L1_ASSOC)
warnx("sample %i: Without count OOB for set %i (=%i)",
i, k, counts[WITHOUT][i][k] + baseline[k]);
}
@@ -702,7 +180,6 @@ main(int argc, const char **argv)
vm_deinit(&vms[WITH]);
vm_deinit(&vms[WITHOUT]);
- close(kvm_dev);
- close(sev_dev);
+ kvm_setup_deinit();
}
diff --git a/test/kvm-eviction_guest.S b/test/kvm-eviction_guest.S
@@ -1,24 +1,19 @@
-#include "kvm-eviction.h"
+#include "test/kvm-eviction.h"
#include "cachepc/const.h"
-#define TARGET_SET 15
+.global guest_with_start
+.global guest_with_stop
-.global start_guest_with
-.global stop_guest_with
+.global guest_without_start
+.global guest_without_stop
-.global start_guest_without
-.global stop_guest_without
+guest_with_start:
+ movq (L1_LINESIZE * (TARGET_SET + L1_SETS)), %rbx
+ hlt
+ jmp guest_with_start
+guest_with_stop:
-start_guest_with:
- mov $(L1_LINESIZE * TARGET_SET), %rbx
- mov (%rbx), %bl
- mov $KVM_HC_CPC_VMMCALL_EXIT, %rax
- vmmcall
- jmp start_guest_with
-stop_guest_with:
-
-start_guest_without:
- mov $KVM_HC_CPC_VMMCALL_EXIT, %rax
- vmmcall
- jmp start_guest_without
-stop_guest_without:
+guest_without_start:
+ hlt
+ jmp guest_without_start
+guest_without_stop:
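
The load in guest_with_start reads from L1_LINESIZE * (TARGET_SET + L1_SETS), i.e. one way-size stride past offset L1_LINESIZE * TARGET_SET, so it maps to the same L1 set as TARGET_SET while sitting on a different guest page than the code at offset 0. A standalone sketch of that index arithmetic follows (illustrative only; the 64/64 values are assumptions matching a 32 KiB, 8-way, 64-byte-line L1D, the real constants come from cachepc/const.h):

#include <stdio.h>

#define L1_LINESIZE 64   /* assumed line size in bytes */
#define L1_SETS     64   /* assumed number of L1D sets */
#define TARGET_SET  15

int
main(void)
{
        unsigned long addr = L1_LINESIZE * (TARGET_SET + L1_SETS);

        /* set index = (byte address / line size) mod number of sets */
        printf("addr 0x%lx maps to L1 set %lu\n",
                addr, (addr / L1_LINESIZE) % L1_SETS);

        /* with the assumed values: 0x13c0 -> set 15, same set as
         * offset L1_LINESIZE * TARGET_SET but on the next 4 KiB page */
        return 0;
}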
diff --git a/test/kvm-pagestep.c b/test/kvm-pagestep.c
@@ -212,25 +212,6 @@ snp_guest_state(int vmfd)
return status.state;
}
-
-void
-snp_dbg_encrypt(int vmfd, void *dst, void *src, size_t size)
-{
- struct kvm_sev_dbg enc;
- int ret, fwerr;
-
- assert(false); /* ioctl not implemented yet */
-
- memset(&enc, 0, sizeof(struct kvm_sev_dbg));
- enc.src_uaddr = (uintptr_t) src;
- enc.dst_uaddr = (uintptr_t) dst;
- enc.len = size;
-
- ret = sev_ioctl(vmfd, KVM_SEV_DBG_ENCRYPT, &enc, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_DBG_ENCRYPT: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-}
-
void
snp_dbg_decrypt(int vmfd, void *dst, void *src, size_t size)
{
@@ -264,7 +245,7 @@ snp_dbg_decrypt_rip(int vmfd)
}
void
-snp_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
+sev_snp_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
{
// REF: https://www.amd.com/system/files/TechDocs/55766_SEV-KM_API_Specification.pdf
struct kvm_sev_snp_launch_update update;
@@ -373,20 +354,20 @@ snp_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
}
void
-snp_kvm_deinit(struct kvm *kvm)
+sev_snp_kvm_deinit(struct kvm *kvm)
{
close(kvm->vmfd);
close(kvm->vcpufd);
munmap(kvm->mem, kvm->memsize);
}
-cpc_msrmt_t *
+uint8_t *
read_counts()
{
- cpc_msrmt_t *counts;
+ uint8_t *counts;
int i, ret;
- counts = malloc(L1_SETS * sizeof(cpc_msrmt_t));
+ counts = malloc(L1_SETS * sizeof(uint8_t));
if (!counts) err(1, "malloc");
ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
@@ -401,7 +382,7 @@ read_counts()
}
void
-print_counts(cpc_msrmt_t *counts)
+print_counts(uint8_t *counts)
{
int i;
@@ -419,24 +400,7 @@ print_counts(cpc_msrmt_t *counts)
printf("\n");
}
-void
-print_counts_raw(cpc_msrmt_t *counts)
-{
- int i;
- for (i = 0; i < 64; i++) {
- if (i % 16 == 0 && i)
- printf("\n");
- if (counts[i] == 1)
- printf("\x1b[38;5;88m");
- else if (counts[i] > 1)
- printf("\x1b[38;5;196m");
- printf("%02X ", (uint8_t) counts[i]);
- if (counts[i] > 0)
- printf("\x1b[0m");
- }
- printf("\n");
-}
void
runonce(struct kvm *kvm)
@@ -451,7 +415,7 @@ int
monitor(struct kvm *kvm, bool baseline)
{
struct cpc_event event;
- cpc_msrmt_t counts[64];
+ uint8_t counts[64];
int ret, i;
/* Get page fault info */
@@ -508,7 +472,7 @@ main(int argc, const char **argv)
pid_t ppid, pid;
uint32_t arg;
struct cpc_event event;
- cpc_msrmt_t baseline[64];
+ uint8_t baseline[64];
int ret, i;
setvbuf(stdout, NULL, _IONBF, 0);
@@ -521,23 +485,16 @@ main(int argc, const char **argv)
kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC);
if (kvm_dev < 0) err(1, "open /dev/kvm");
- /* Make sure we have the stable version of the API */
+ /* ensure we have the stable version of the api */
ret = ioctl(kvm_dev, KVM_GET_API_VERSION, NULL);
if (ret < 0) err(1, "KVM_GET_API_VERSION");
if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
- /* Setup needed performance counters */
- ret = ioctl(kvm_dev, KVM_CPC_SETUP_PMC, NULL);
- if (ret < 0) err(1, "ioctl SETUP_PMC");
-
- snp_kvm_init(&kvm_with_access, L1_SIZE * 2,
+ sev_snp_kvm_init(&kvm_with_access, L1_SIZE * 2,
__start_guest_with, __stop_guest_with);
- /* Page tracking init needs to happen after kvm
- * init so main_kvm is set.. */
-
- /* Reset previous tracking */
- ret = ioctl(kvm_dev, KVM_CPC_RESET_TRACKING, NULL);
+ /* reset kernel module state */
+ ret = ioctl(kvm_dev, KVM_CPC_RESET, NULL);
if (ret) err(1, "ioctl RESET_TRACKING");
/* Do data access stepping */
@@ -624,7 +581,7 @@ main(int argc, const char **argv)
exit(0);
}
- snp_kvm_deinit(&kvm_with_access);
+ sev_snp_kvm_deinit(&kvm_with_access);
close(kvm_dev);
close(sev_dev);
diff --git a/test/kvm-step.c b/test/kvm-step.c
@@ -1,5 +1,7 @@
#define _GNU_SOURCE
+#include "test/kvm.h"
+#include "test/util.h"
#include "cachepc/uapi.h"
#include <linux/psp-sev.h>
@@ -27,532 +29,80 @@
#include <stdio.h>
#include <stdarg.h>
-#define ARRLEN(x) (sizeof(x) / sizeof((x)[0]))
-#define MIN(a,b) ((a) > (b) ? (b) : (a))
-
#define TARGET_CORE 2
#define SECONDARY_CORE 3
-#define TARGET_SET 15
-
-struct kvm {
- int vmfd, vcpufd;
- void *mem;
- size_t memsize;
- struct kvm_run *run;
-};
-
-/* start and end for guest assembly */
-extern uint8_t __start_guest_with[];
-extern uint8_t __stop_guest_with[];
-
-/* ioctl dev fds */
-static int kvm_dev, sev_dev, kvm_dev;
-static int faultcnt;
-
-enum {
- GSTATE_INIT,
- GSTATE_LAUNCH,
- GSTATE_RUNNING,
-};
-
-const char *sev_fwerr_strs[] = {
- [0x00] = "Success",
- [0x01] = "Platform state is invalid",
- [0x02] = "Guest state is invalid",
- [0x03] = "Platform configuration is invalid",
- [0x04] = "Buffer too small",
- [0x05] = "Platform is already owned",
- [0x06] = "Certificate is invalid",
- [0x07] = "Request not allowed by policy",
- [0x08] = "Guest is inactive",
- [0x09] = "Invalid address",
- [0x0A] = "Bad signature",
- [0x0B] = "Bad measurement",
- [0x0C] = "Asid is already owned",
- [0x0D] = "Invalid ASID",
- [0x0E] = "WBINVD is required",
- [0x0F] = "DF_FLUSH is required",
- [0x10] = "Guest handle is invalid",
- [0x11] = "Invalid command",
- [0x12] = "Guest is active",
- [0x13] = "Hardware error",
- [0x14] = "Hardware unsafe",
- [0x15] = "Feature not supported",
- [0x16] = "Invalid parameter",
- [0x17] = "Out of resources",
- [0x18] = "Integrity checks failed",
- [0x19] = "RMP page size is incorrect",
- [0x1A] = "RMP page state is incorrect",
-};
-
-const char *sev_gstate_strs[] = {
- "INIT",
- "LAUNCH",
- "RUNNING",
-};
-
-void
-hexdump(void *data, int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (i % 16 == 0 && i)
- printf("\n");
- printf("%02X ", *(uint8_t *)(data + i));
- }
- printf("\n");
-}
-
-__attribute__((section("guest_with"))) void
-vm_guest_with(void)
-{
- asm volatile ("mov %rbp, %rsp; pop %rbp; \
- movq $4096, %rcx; movq $0, %rdx; cmp %rcx, %rdx; \
- cmovne %rdx, %rcx; jmp *%rcx");
-}
-
-bool
-pin_process(pid_t pid, int cpu, bool assert)
-{
- cpu_set_t cpuset;
- int ret;
-
- CPU_ZERO(&cpuset);
- CPU_SET(cpu, &cpuset);
- ret = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset);
- if (ret < 0) {
- if (assert) err(1, "sched_setaffinity");
- return false;
- }
-
- return true;
-}
-
-int
-read_stat_core(pid_t pid)
-{
- char path[256];
- char line[2048];
- FILE *file;
- char *p;
- int i, cpu;
-
- snprintf(path, sizeof(path), "/proc/%u/stat", pid);
- file = fopen(path, "r");
- if (!file) return -1;
-
- if (!fgets(line, sizeof(line), file))
- err(1, "read stat");
-
- p = line;
- for (i = 0; i < 38 && (p = strchr(p, ' ')); i++)
- p += 1;
-
- if (!p) errx(1, "stat format");
- cpu = atoi(p);
-
- fclose(file);
-
- return cpu;
-}
-
-const char *
-sev_fwerr_str(int code)
-{
- if (code < 0 || code >= ARRLEN(sev_fwerr_strs)) {
- warnx("Unknown firmware error %i", code);
- return "Unknown error";
- }
-
- return sev_fwerr_strs[code];
-}
-
-const char *
-sev_gstate_str(int code)
-{
- if (code < 0 || code >= ARRLEN(sev_gstate_strs)) {
- warnx("Unknown guest state %i", code);
- return "Unknown gstate";
- }
-
- return sev_gstate_strs[code];
-}
-
-int
-sev_ioctl(int vmfd, int cmd, void *data, int *error)
-{
- struct kvm_sev_cmd input;
- int ret;
-
- memset(&input, 0, sizeof(input));
- input.id = cmd;
- input.sev_fd = sev_dev;
- input.data = (uintptr_t) data;
-
- ret = ioctl(vmfd, KVM_MEMORY_ENCRYPT_OP, &input);
- if (error) *error = input.error;
-
- return ret;
-}
-
-uint8_t
-snp_guest_state(int vmfd)
-{
- struct kvm_sev_guest_status status;
- int ret, fwerr;
-
- assert(false); /* ioctl not implemented yet */
-
- ret = sev_ioctl(vmfd, KVM_SEV_GUEST_STATUS, &status, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_GUEST_STATUS: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- return status.state;
-}
-
-
-void
-snp_dbg_encrypt(int vmfd, void *dst, void *src, size_t size)
-{
- struct kvm_sev_dbg enc;
- int ret, fwerr;
-
- assert(false); /* ioctl not implemented yet */
-
- memset(&enc, 0, sizeof(struct kvm_sev_dbg));
- enc.src_uaddr = (uintptr_t) src;
- enc.dst_uaddr = (uintptr_t) dst;
- enc.len = size;
-
- ret = sev_ioctl(vmfd, KVM_SEV_DBG_ENCRYPT, &enc, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_DBG_ENCRYPT: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-}
-
-void
-snp_dbg_decrypt(int vmfd, void *dst, void *src, size_t size)
-{
- struct kvm_sev_dbg enc;
- int ret, fwerr;
-
- // assert(false); /* ioctl not implemented yet */
-
- memset(&enc, 0, sizeof(struct kvm_sev_dbg));
- enc.src_uaddr = (uintptr_t) src;
- enc.dst_uaddr = (uintptr_t) dst;
- enc.len = size;
-
- ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-}
-
-uint64_t
-snp_dbg_decrypt_rip(int vmfd)
-{
- uint8_t vmsa[PAGE_SIZE];
- uint64_t rip;
-
- memset(vmsa, 0, PAGE_SIZE);
- snp_dbg_decrypt(vmfd, vmsa, CPC_VMSA_MAGIC_ADDR, PAGE_SIZE);
-
- rip = *(uint64_t *)(vmsa + 0x178);
-
- return rip;
-}
-
-void
-snp_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
-{
- // REF: https://www.amd.com/system/files/TechDocs/55766_SEV-KM_API_Specification.pdf
- struct kvm_sev_snp_launch_update update;
- struct kvm_sev_snp_launch_start start;
- struct kvm_sev_snp_launch_finish finish;
- struct kvm_snp_init init;
- struct kvm_userspace_memory_region region;
- struct kvm_enc_region enc_region;
- struct kvm_regs regs;
- struct kvm_sregs sregs;
- int ret, fwerr;
-
- /* Create a kvm instance */
- kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
- if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
-
- /* Allocate guest memory */
- kvm->memsize = ramsize;
- kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (!kvm->mem) err(1, "Allocating guest memory");
- assert(code_stop - code_start <= kvm->memsize);
-
- /* Fill memory with nops and put jump code a bit from start
- * such that we access multiple different pages while running */
- memset(kvm->mem, 0x90, kvm->memsize);
- memcpy(kvm->mem + L1_SIZE, // - (code_stop - code_start),
- code_start, code_stop - code_start);
-
- /* Map it into the vm */
- memset(&region, 0, sizeof(region));
- region.slot = 0;
- region.memory_size = kvm->memsize;
- region.guest_phys_addr = 0;
- region.userspace_addr = (uintptr_t) kvm->mem;
- ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region);
- if (ret < 0) err(1, "KVM_SET_USER_MEMORY_REGION");
-
- /* Enable SEV for vm */
- memset(&init, 0, sizeof(init));
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_INIT, &init, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_SNP_INIT: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Register memory region */
- memset(&enc_region, 0, sizeof(enc_region));
- enc_region.addr = (uintptr_t) kvm->mem;
- enc_region.size = kvm->memsize;
- ret = ioctl(kvm->vmfd, KVM_MEMORY_ENCRYPT_REG_REGION, &enc_region);
- if (ret < 0) err(1, "KVM_MEMORY_ENCRYPT_REG_REGION");
-
- /* Create virtual cpu */
- kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
- if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
-
- /* Map the shared kvm_run structure and following data */
- ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
- if (ret < 0) err(1, "KVM_GET_VCPU_MMAP_SIZE");
- if (ret < sizeof(struct kvm_run))
- errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
- kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
- MAP_SHARED, kvm->vcpufd, 0);
- if (!kvm->run) err(1, "mmap vcpu");
-
- /* Initialize segment regs */
- memset(&sregs, 0, sizeof(sregs));
- ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
- if (ret < 0) err(1, "KVM_GET_SREGS");
- sregs.cs.base = 0;
- sregs.cs.selector = 0;
- ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
- if (ret < 0) err(1, "KVM_SET_SREGS");
-
- /* Initialize rest of registers */
- memset(&regs, 0, sizeof(regs));
- regs.rip = 0;
- regs.rsp = kvm->memsize - L1_SETS * L1_LINESIZE - 8;
- regs.rbp = kvm->memsize - L1_SETS * L1_LINESIZE - 8;
- ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
- if (ret < 0) err(1, "KVM_SET_REGS");
-
- /* Generate encryption keys and set policy */
- memset(&start, 0, sizeof(start));
- start.policy = 1 << 17; /* must be set */
- start.policy |= 1 << 19; /* allow debug */
- start.policy |= 1 << 16; /* allow simultaneous multi-threading */
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_START, &start, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_SNP_LAUNCH_START: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Prepare the vm memory */
- memset(&update, 0, sizeof(update));
- update.uaddr = (uintptr_t) kvm->mem;
- update.len = ramsize;
- update.start_gfn = 0;
- update.page_type = KVM_SEV_SNP_PAGE_TYPE_NORMAL;
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_UPDATE, &update, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_SNP_LAUNCH_UPDATE: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-
- /* Finalize launch process */
- memset(&finish, 0, sizeof(finish));
- ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_FINISH, &finish, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_SNP_LAUNCH_FINISH: (%s) %s",
- strerror(errno), sev_fwerr_str(fwerr));
-}
-
-void
-snp_kvm_deinit(struct kvm *kvm)
-{
- close(kvm->vmfd);
- close(kvm->vcpufd);
- munmap(kvm->mem, kvm->memsize);
-}
+extern uint8_t guest_start[];
+extern uint8_t guest_stop[];
-cpc_msrmt_t *
+uint8_t *
read_counts()
{
- cpc_msrmt_t *counts;
- int i, ret;
+ uint8_t *counts;
+ int ret;
- counts = malloc(L1_SETS * sizeof(cpc_msrmt_t));
+ counts = malloc(L1_SETS * sizeof(uint8_t));
if (!counts) err(1, "malloc");
ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
- if (ret) err(1, "ioctl READ_COUNTS");
-
- for (i = 0; i < L1_SETS; i++) {
- if (counts[i] > 8)
- errx(1, "Invalid counts set %i", i);
- }
+ if (ret) err(1, "ioctl KVM_CPC_READ_COUNTS");
return counts;
}
-void
-print_counts(cpc_msrmt_t *counts)
-{
- int i;
-
- for (i = 0; i < 64; i++) {
- if (i % 16 == 0 && i)
- printf("\n");
- if (counts[i] == 1)
- printf("\x1b[38;5;88m");
- else if (counts[i] > 1)
- printf("\x1b[38;5;196m");
- printf("%2i ", i);
- if (counts[i] > 0)
- printf("\x1b[0m");
- }
- printf("\n");
-}
-
-void
-print_counts_raw(cpc_msrmt_t *counts)
-{
- int i;
-
- for (i = 0; i < 64; i++) {
- if (i % 16 == 0 && i)
- printf("\n");
- if (counts[i] == 1)
- printf("\x1b[38;5;88m");
- else if (counts[i] > 1)
- printf("\x1b[38;5;196m");
- printf("%02X ", (uint8_t) counts[i]);
- if (counts[i] > 0)
- printf("\x1b[0m");
- }
- printf("\n");
-}
-
-void
-runonce(struct kvm *kvm)
-{
- int ret;
-
- ret = ioctl(kvm->vcpufd, KVM_RUN, NULL);
- if (ret < 0) err(1, "KVM_RUN");
-}
-
-int
+uint64_t
monitor(struct kvm *kvm, bool baseline)
{
struct cpc_event event;
- cpc_msrmt_t counts[64];
- int ret, i;
+ uint8_t counts[64];
+ int ret;
/* Get page fault info */
ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
- if (ret) {
- if (errno == EAGAIN)
- return 0;
- warn("ioctl POLL_EVENT");
- return 1;
- }
-
- if (event.type == CPC_EVENT_TRACK_STEP) {
- ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
- if (ret) err(1, "ioctl READ_COUNTS");
-
- if (!baseline) {
- printf("Event: cnt:%llu rip:%lu, inst:%llu data:%llu retired:%llu\n",
- event.step.fault_count,
- 0, // snp_dbg_decrypt_rip(kvm->vmfd),
- event.step.fault_gfns[0],
- event.step.fault_gfns[1],
- event.step.retinst);
- print_counts(counts);
- printf("\n");
- }
+ if (ret && errno == EAGAIN) return 0;
+ if (ret) err(1, "ioctl KVM_CPC_POLL_EVENT");
- for (i = 0; i < 64; i++) {
- if (counts[i] > 8) {
- warnx("Invalid count for set %i (%llu)",
- i, counts[i]);
- }
- }
+ if (event.type != CPC_EVENT_TRACK_STEP)
+ errx(1, "unexpected event type %i", event.type);
- if (baseline) faultcnt++;
- } else if (event.type == CPC_EVENT_TRACK_PAGE) {
- printf("Event: inst page from:%llu to:%llu rip:%lu\n\n",
- event.page.inst_gfn_prev, event.page.inst_gfn,
- 0); //snp_dbg_decrypt_rip(kvm->vmfd));
+ ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
+ if (ret) err(1, "ioctl KVM_CPC_READ_COUNTS");
- if (!baseline) faultcnt++;
- }
+ printf("Event: cnt:%llu rip:%lu inst:%llu data:%llu retired:%llu\n",
+ event.step.fault_count, snp_dbg_decrypt_rip(kvm->vmfd),
+ event.step.fault_gfns[0], event.step.fault_gfns[1],
+ event.step.retinst);
+ print_counts(counts);
+ printf("\n");
ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
- if (ret) err(1, "ioctl ACK_EVENT");
+ if (ret) err(1, "ioctl KVM_CPC_ACK_EVENT");
- return 0;
+ return 1;
}
int
main(int argc, const char **argv)
{
- struct kvm kvm_with_access;
- uint64_t track_mode;
+ struct kvm kvm;
+ uint8_t baseline[L1_SETS];
+ struct cpc_event event;
+ uint64_t eventcnt;
pid_t ppid, pid;
uint32_t arg;
- struct cpc_event event;
- cpc_msrmt_t baseline[64];
- int ret, i;
+ int ret;
setvbuf(stdout, NULL, _IONBF, 0);
pin_process(0, TARGET_CORE, true);
- sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC);
- if (sev_dev < 0) err(1, "open /dev/sev");
-
- kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC);
- if (kvm_dev < 0) err(1, "open /dev/kvm");
-
- /* Make sure we have the stable version of the API */
- ret = ioctl(kvm_dev, KVM_GET_API_VERSION, NULL);
- if (ret < 0) err(1, "KVM_GET_API_VERSION");
- if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
-
- /* Setup needed performance counters */
- ret = ioctl(kvm_dev, KVM_CPC_SETUP_PMC, NULL);
- if (ret < 0) err(1, "ioctl SETUP_PMC");
-
- snp_kvm_init(&kvm_with_access, L1_SIZE * 2,
- __start_guest_with, __stop_guest_with);
-
- /* Page tracking init needs to happen after kvm
- * init so main_kvm is set.. */
-
- /* Reset previous tracking */
- ret = ioctl(kvm_dev, KVM_CPC_RESET_TRACKING, NULL);
- if (ret) err(1, "ioctl RESET_TRACKING");
+ kvm_setup_init();
- /* Do data access stepping */
- arg = CPC_TRACK_STUB;
- ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg);
- if (ret) err(1, "ioctl TRACK_MODE");
+ sev_snp_kvm_init(&kvm, L1_SIZE * 2, guest_start, guest_stop);
- /* Init page tracking */
- track_mode = KVM_PAGE_TRACK_EXEC;
- ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_mode);
- if (ret) err(1, "ioctl TRACK_ALL");
-
- arg = true;
- ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
- if (ret) err(1, "ioctl MEASURE_BASELINE");
+ /* reset kernel module state */
+ ret = ioctl(kvm_dev, KVM_CPC_RESET, NULL);
+ if (ret < 0) err(1, "ioctl KVM_CPC_RESET");
ppid = getpid();
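+	/* parent runs the vm on TARGET_CORE, the child monitors from SECONDARY_CORE */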
if ((pid = fork())) {
@@ -560,73 +110,67 @@ main(int argc, const char **argv)
sleep(1); /* give time for child to pin other core */
- printf("VMRUN\n");
- runonce(&kvm_with_access);
- printf("VMRUN DONE\n");
+ printf("VM start\n");
+
+ ret = ioctl(kvm.vcpufd, KVM_RUN, NULL);
+ if (ret < 0) err(1, "KVM_RUN");
+
+ printf("VM exit\n");
} else {
pin_process(0, SECONDARY_CORE, true);
- printf("PINNED\n");
- faultcnt = 0;
- while (faultcnt < 300) {
- if (monitor(&kvm_with_access, true)) break;
+ /* single step and log all accessed pages */
+ arg = CPC_TRACK_FULL;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg);
+ if (ret) err(1, "ioctl KVM_CPC_TRACK_MODE");
+
+ /* calculate baseline while running vm */
+ arg = true;
+ ret = ioctl(kvm_dev, KVM_CPC_CALC_BASELINE, &arg);
+ if (ret) err(1, "ioctl KVM_CPC_CALC_BASELINE");
+
+ printf("Monitor ready\n");
+
+ /* run vm while baseline is calculated */
+ eventcnt = 0;
+ while (eventcnt < 30) {
+ eventcnt += monitor(&kvm, true);
}
- do {
- ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
- if (ret && errno != EAGAIN)
- err(1, "ioctl POLL_EVENT");
- } while (ret && errno == EAGAIN);
+ ret = ioctl(kvm_dev, KVM_CPC_VM_PAUSE, &event);
+ if (ret) err(1, "ioctl KVM_CPC_VM_PAUSE");
arg = false;
- ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &arg);
- if (ret) err(1, "ioctl MEASURE_BASELINE");
+ ret = ioctl(kvm_dev, KVM_CPC_CALC_BASELINE, &arg);
+ if (ret) err(1, "ioctl KVM_CPC_CALC_BASELINE");
ret = ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline);
- if (ret) err(1, "ioctl READ_BASELINE");
+ if (ret) err(1, "ioctl KVM_CPC_READ_BASELINE");
- printf("\n>>> BASELINE:\n");
+ printf("\nBaseline:\n");
print_counts(baseline);
printf("\n");
print_counts_raw(baseline);
printf("\n");
- /* Check baseline for saturated sets */
- for (i = 0; i < 64; i++) {
- if (baseline[i] >= 8)
- warnx("!!! Baseline set %i full\n", i);
- }
-
arg = true;
- ret = ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &arg);
- if (ret) err(1, "ioctl SUB_BASELINE");
+ ret = ioctl(kvm_dev, KVM_CPC_APPLY_BASELINE, &arg);
+	if (ret) err(1, "ioctl KVM_CPC_APPLY_BASELINE");
- ret = ioctl(kvm_dev, KVM_CPC_RESET_TRACKING, NULL);
- if (ret) err(1, "ioctl RESET_TRACKING");
-
- arg = CPC_TRACK_EXEC;
- ret = ioctl(kvm_dev, KVM_CPC_TRACK_MODE, &arg);
- if (ret) err(1, "ioctl TRACK_MODE");
+ ret = ioctl(kvm_dev, KVM_CPC_VM_RESUME, &event.id);
+ if (ret) err(1, "ioctl KVM_CPC_VM_RESUME");
- track_mode = KVM_PAGE_TRACK_EXEC;
- ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_mode);
- if (ret) err(1, "ioctl TRACK_ALL");
-
- ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
- if (ret) err(1, "ioctl ACK_EVENT");
-
- faultcnt = 0;
- while (faultcnt < 20) {
- if (monitor(&kvm_with_access, false)) break;
+ eventcnt = 0;
+ while (eventcnt < 30) {
+ eventcnt += monitor(&kvm, false);
}
- kill(ppid, SIGTERM);
+ kill(ppid, SIGINT);
exit(0);
}
- snp_kvm_deinit(&kvm_with_access);
-
- close(kvm_dev);
- close(sev_dev);
+ kvm_deinit(&kvm);
+
+ kvm_setup_deinit();
}
diff --git a/test/kvm-step_guest.S b/test/kvm-step_guest.S
@@ -0,0 +1,18 @@
+#include "cachepc/const.h"
+
+#define TARGET_SET 15
+
+.global guest_start
+.global guest_stop
+
+guest_start:
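+	/* a loop of identical immediate loads gives the stepping test a
+	 * predictable instruction stream; the constant is the guest address
+	 * aliasing L1 set TARGET_SET, but it is only loaded, never accessed */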
+ mov $(L1_LINESIZE * (L1_SETS + TARGET_SET)), %rbx
+ mov $(L1_LINESIZE * (L1_SETS + TARGET_SET)), %rbx
+ mov $(L1_LINESIZE * (L1_SETS + TARGET_SET)), %rbx
+ mov $(L1_LINESIZE * (L1_SETS + TARGET_SET)), %rbx
+ mov $(L1_LINESIZE * (L1_SETS + TARGET_SET)), %rbx
+ mov $(L1_LINESIZE * (L1_SETS + TARGET_SET)), %rbx
+ mov $(L1_LINESIZE * (L1_SETS + TARGET_SET)), %rbx
+ jmp guest_start
+guest_stop:
+
diff --git a/test/kvm.c b/test/kvm.c
@@ -1,7 +1,10 @@
#define _GNU_SOURCE
+#include "test/kvm.h"
+#include "test/util.h"
#include "cachepc/uapi.h"
+#include <linux/psp-sev.h>
#include <linux/kvm.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
@@ -21,309 +24,578 @@
#include <sched.h>
#include <string.h>
#include <stdbool.h>
-#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
+#include <stdlib.h>
-#define ARRLEN(x) (sizeof(x) / sizeof((x)[0]))
-#define MIN(a,b) ((a) > (b) ? (b) : (a))
+int kvm_dev, sev_dev;
+
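+/* human-readable strings for SEV firmware status codes */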
+const char *sev_fwerr_strs[] = {
+ [0x00] = "Success",
+ [0x01] = "Platform state is invalid",
+ [0x02] = "Guest state is invalid",
+ [0x03] = "Platform configuration is invalid",
+ [0x04] = "Buffer too small",
+ [0x05] = "Platform is already owned",
+ [0x06] = "Certificate is invalid",
+ [0x07] = "Request not allowed by policy",
+ [0x08] = "Guest is inactive",
+ [0x09] = "Invalid address",
+ [0x0A] = "Bad signature",
+ [0x0B] = "Bad measurement",
+ [0x0C] = "Asid is already owned",
+ [0x0D] = "Invalid ASID",
+ [0x0E] = "WBINVD is required",
+ [0x0F] = "DF_FLUSH is required",
+ [0x10] = "Guest handle is invalid",
+ [0x11] = "Invalid command",
+ [0x12] = "Guest is active",
+ [0x13] = "Hardware error",
+ [0x14] = "Hardware unsafe",
+ [0x15] = "Feature not supported",
+ [0x16] = "Invalid parameter",
+ [0x17] = "Out of resources",
+ [0x18] = "Integrity checks failed",
+ [0x19] = "RMP page size is incorrect",
+ [0x1A] = "RMP page state is incorrect",
+};
-#define SAMPLE_COUNT 100
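+/* SEV guest launch states, indexed by the GSTATE_* enum in test/kvm.h */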
+const char *sev_gstate_strs[] = {
+ "UNINIT",
+ "LUPDATE",
+ "LSECRET",
+ "RUNNING",
+ "SUPDATE",
+ "RUPDATE",
+	"SENT"
+};
-#define TARGET_CORE 2
-#define SECONDARY_CORE 3
+const char *
+sev_fwerr_str(int code)
+{
+ if (code < 0 || code >= ARRLEN(sev_fwerr_strs))
+ return "Unknown error";
-struct kvm {
- int fd;
- int vmfd;
- int vcpufd;
- void *mem;
-};
+ return sev_fwerr_strs[code];
+}
-/* start and end for guest assembly */
-extern uint8_t __start_guest_with[];
-extern uint8_t __stop_guest_with[];
-extern uint8_t __start_guest_without[];
-extern uint8_t __stop_guest_without[];
+const char *
+sev_gstate_str(int code)
+{
+ if (code < 0 || code >= ARRLEN(sev_gstate_strs))
+ return "Unknown gstate";
-static struct kvm kvm;
-static struct kvm_run *kvm_run;
+ return sev_gstate_strs[code];
+}
-static int kvm_fd;
+int
+sev_ioctl(int vmfd, int cmd, void *data, int *error)
+{
+ struct kvm_sev_cmd input;
+ int ret;
+
+ memset(&input, 0, sizeof(input));
+ input.id = cmd;
+ input.sev_fd = sev_dev;
+ input.data = (uintptr_t) data;
-#define TARGET_CACHE_LINESIZE 64
-#define TARGET_SET 15
+ ret = ioctl(vmfd, KVM_MEMORY_ENCRYPT_OP, &input);
+ if (error) *error = input.error;
-__attribute__((section("guest_with"))) void
-vm_guest_with(void)
+ return ret;
+}
+
+void
+sev_get_measure(int vmfd)
{
- while (1) {
- asm volatile("mov (%[v]), %%bl"
- : : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET));
- asm volatile("out %%al, (%%dx)" : : );
- }
+ struct kvm_sev_launch_measure msrmt;
+ int ret, fwerr;
+ uint8_t *data;
+
+ memset(&msrmt, 0, sizeof(msrmt));
+ ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
+ if (ret == -1 && fwerr != SEV_RET_INVALID_LEN)
+ errx(1, "KVM_SEV_LAUNCH_MEASURE: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+	data = malloc(msrmt.len);
+	if (!data) err(1, "malloc");
+	msrmt.uaddr = (uintptr_t) data;
+
+ ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_LAUNCH_MEASURE: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ free(data);
}
-__attribute__((section("guest_without"))) void
-vm_guest_without(void)
+uint8_t
+sev_guest_state(int vmfd, uint32_t handle)
{
- while (1) {
- asm volatile("out %%al, (%%dx)" : : );
- }
+ struct kvm_sev_guest_status status;
+ int ret, fwerr;
+
+ status.handle = handle;
+ ret = sev_ioctl(vmfd, KVM_SEV_GUEST_STATUS, &status, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_GUEST_STATUS: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ return status.state;
}
-bool
-pin_process(pid_t pid, int cpu, bool assert)
+void
+sev_dbg_decrypt(int vmfd, void *src, void *dst, size_t size)
{
- cpu_set_t cpuset;
- int status;
-
- CPU_ZERO(&cpuset);
- CPU_SET(cpu, &cpuset);
- status = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset);
- if (status < 0) {
- if (assert) err(1, "sched_setaffinity");
- return false;
- }
-
- return true;
+ struct kvm_sev_dbg enc;
+ int ret, fwerr;
+
+ enc.src_uaddr = (uintptr_t) src;
+ enc.dst_uaddr = (uintptr_t) dst;
+ enc.len = size;
+ ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
}
-int
-read_stat_core(pid_t pid)
+uint64_t
+sev_dbg_decrypt_rip(int vmfd)
{
- char path[256];
- char line[2048];
- FILE *file;
- char *p;
- int i, cpu;
+ uint8_t vmsa[PAGE_SIZE];
+ uint64_t rip;
- snprintf(path, sizeof(path), "/proc/%u/stat", pid);
- file = fopen(path, "r");
- if (!file) return -1;
+ memset(vmsa, 0, PAGE_SIZE);
+	sev_dbg_decrypt(vmfd, CPC_VMSA_MAGIC_ADDR, vmsa, PAGE_SIZE);
- if (!fgets(line, sizeof(line), file))
- err(1, "read stat");
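+	/* RIP is stored at offset 0x178 of the decrypted VMSA */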
+ rip = *(uint64_t *)(vmsa + 0x178);
- p = line;
- for (i = 0; i < 38 && (p = strchr(p, ' ')); i++)
- p += 1;
+ return rip;
+}
- if (!p) errx(1, "stat format");
- cpu = atoi(p);
+void
+snp_dbg_decrypt(int vmfd, void *src, void *dst, size_t size)
+{
+ struct kvm_sev_dbg enc;
+ int ret, fwerr;
- fclose(file);
+ assert(src == CPC_VMSA_MAGIC_ADDR);
- return cpu;
+ memset(&enc, 0, sizeof(struct kvm_sev_dbg));
+ enc.src_uaddr = (uintptr_t) src;
+ enc.dst_uaddr = (uintptr_t) dst;
+ enc.len = size;
+
+ ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr);
+ if (ret < 0) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+}
+
+uint64_t
+snp_dbg_decrypt_rip(int vmfd)
+{
+ uint8_t vmsa[PAGE_SIZE];
+ uint64_t rip;
+
+ memset(vmsa, 0, PAGE_SIZE);
+ snp_dbg_decrypt(vmfd, CPC_VMSA_MAGIC_ADDR, vmsa, PAGE_SIZE);
+
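+	/* RIP is stored at offset 0x178 of the decrypted VMSA */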
+ rip = *(uint64_t *)(vmsa + 0x178);
+
+ return rip;
}
void
-kvm_init(size_t ramsize, void *code_start, void *code_stop)
+kvm_init(struct kvm *kvm, size_t ramsize,
+ void *code_start, void *code_stop)
{
struct kvm_userspace_memory_region region;
struct kvm_regs regs;
struct kvm_sregs sregs;
int ret;
- kvm.fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
- if (kvm.fd < 0) err(1, "/dev/kvm");
+ /* Create a kvm instance */
+ kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
+ if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
- /* Make sure we have the stable version of the API */
- ret = ioctl(kvm.fd, KVM_GET_API_VERSION, NULL);
- if (ret == -1) err(1, "KVM_GET_API_VERSION");
- if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
-
- kvm.vmfd = ioctl(kvm.fd, KVM_CREATE_VM, 0);
- if (kvm.vmfd < 0) err(1, "KVM_CREATE_VM");
-
- /* Allocate one aligned page of guest memory to hold the code. */
- kvm.mem = mmap(NULL, ramsize, PROT_READ | PROT_WRITE,
+ /* Allocate guest memory */
+ kvm->memsize = ramsize;
+ kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (!kvm.mem) err(1, "allocating guest memory");
- assert(code_stop - code_start <= ramsize);
- memcpy(kvm.mem, code_start, code_stop - code_start);
+	if (kvm->mem == MAP_FAILED) err(1, "Allocating guest memory");
+ assert(code_stop - code_start <= kvm->memsize);
+ memcpy(kvm->mem, code_start, code_stop - code_start);
- /* Map it into vm memory */
+ /* Map it into the vm */
memset(®ion, 0, sizeof(region));
region.slot = 0;
- region.memory_size = ramsize;
+ region.memory_size = kvm->memsize;
region.guest_phys_addr = 0x0000;
- region.userspace_addr = (uint64_t) kvm.mem;
-
- ret = ioctl(kvm.vmfd, KVM_SET_USER_MEMORY_REGION, ®ion);
- if (ret < 0) err(1, "KVM_SET_USER_MEMORY_REGION");
-
- kvm.vcpufd = ioctl(kvm.vmfd, KVM_CREATE_VCPU, 0);
- if (kvm.vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+ region.userspace_addr = (uintptr_t) kvm->mem;
+ ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, ®ion);
+ if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
- /* Map the shared kvm_run structure and following data. */
- ret = ioctl(kvm.fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
- if (ret < 0) err(1, "KVM_GET_VCPU_MMAP_SIZE");
+ /* Create virtual cpu core */
+ kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
+ if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+ /* Map the shared kvm_run structure and following data */
+ ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
+ if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
if (ret < sizeof(struct kvm_run))
errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
- kvm_run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
- MAP_SHARED, kvm.vcpufd, 0);
- if (!kvm_run) err(1, "mmap vcpu");
+ kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
+ MAP_SHARED, kvm->vcpufd, 0);
+	if (kvm->run == MAP_FAILED) err(1, "mmap vcpu");
- /* Initialize CS to point at 0, via a read-modify-write of sregs. */
+ /* Initialize segment regs */
memset(&sregs, 0, sizeof(sregs));
- ret = ioctl(kvm.vcpufd, KVM_GET_SREGS, &sregs);
- if (ret < 0) err(1, "KVM_GET_SREGS");
+ ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
+ if (ret == -1) err(1, "KVM_GET_SREGS");
sregs.cs.base = 0;
sregs.cs.selector = 0;
- ret = ioctl(kvm.vcpufd, KVM_SET_SREGS, &sregs);
- if (ret < 0) err(1, "KVM_SET_SREGS");
+ ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
+ if (ret == -1) err(1, "KVM_SET_SREGS");
- /* Initialize registers: instruction pointer for our code, addends, and
- * initial flags required by x86 architecture. */
+ /* Initialize rest of registers */
memset(®s, 0, sizeof(regs));
- regs.rip = 0x0;
- regs.rsp = ramsize - 1;
- regs.rbp = ramsize - 1;
- regs.rax = 0;
- regs.rdx = 0;
+ regs.rip = 0;
+ regs.rsp = kvm->memsize - 8;
+ regs.rbp = kvm->memsize - 8;
regs.rflags = 0x2;
- ret = ioctl(kvm.vcpufd, KVM_SET_REGS, ®s);
- if (ret < 0) err(1, "KVM_SET_REGS");
+ ret = ioctl(kvm->vcpufd, KVM_SET_REGS, ®s);
+ if (ret == -1) err(1, "KVM_SET_REGS");
}
-cpc_msrmt_t *
-read_counts()
+void
+sev_kvm_init(struct kvm *kvm, size_t ramsize,
+ void *code_start, void *code_stop)
{
- cpc_msrmt_t *counts;
- int ret;
+ struct kvm_userspace_memory_region region;
+ struct kvm_sev_launch_update_data update;
+ struct kvm_sev_launch_start start;
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ int ret, fwerr;
+
+ /* Create a kvm instance */
+ kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
+ if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
- counts = malloc(64 * sizeof(cpc_msrmt_t));
- if (!counts) err(1, "malloc");
- ret = ioctl(kvm_fd, KVM_CPC_READ_COUNTS, counts);
- if (ret == -1) err(1, "ioctl READ_COUNTS");
+ /* Allocate guest memory */
+ kvm->memsize = ramsize;
+ kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (kvm->mem == MAP_FAILED) err(1, "Allocating guest memory");
+ assert(code_stop - code_start <= kvm->memsize);
+ memcpy(kvm->mem, code_start, code_stop - code_start);
- return counts;
+ /* Map it into the vm */
+ memset(®ion, 0, sizeof(region));
+ region.slot = 0;
+ region.memory_size = kvm->memsize;
+ region.guest_phys_addr = 0;
+ region.userspace_addr = (uintptr_t) kvm->mem;
+ ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, ®ion);
+ if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
+
+ /* Enable SEV for vm */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_INIT, NULL, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_INIT: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Create virtual cpu core */
+ kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
+ if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+
+ /* Map the shared kvm_run structure and following data */
+ ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
+ if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
+ if (ret < sizeof(struct kvm_run))
+ errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
+ kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
+ MAP_SHARED, kvm->vcpufd, 0);
+	if (kvm->run == MAP_FAILED) err(1, "mmap vcpu");
+
+ /* Initialize segment regs */
+ memset(&sregs, 0, sizeof(sregs));
+ ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
+ if (ret == -1) err(1, "KVM_GET_SREGS");
+ sregs.cs.base = 0;
+ sregs.cs.selector = 0;
+ ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
+ if (ret == -1) err(1, "KVM_SET_SREGS");
+
+ /* Initialize rest of registers */
+ memset(®s, 0, sizeof(regs));
+ regs.rip = 0;
+ regs.rsp = kvm->memsize - 8;
+ regs.rbp = kvm->memsize - 8;
+ regs.rflags = 0x2;
+ ret = ioctl(kvm->vcpufd, KVM_SET_REGS, ®s);
+ if (ret == -1) err(1, "KVM_SET_REGS");
+
+ /* Generate encryption keys and set policy */
+ memset(&start, 0, sizeof(start));
+ start.handle = 0;
+ start.policy = 0;
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Prepare the vm memory (by encrypting it) */
+ memset(&update, 0, sizeof(update));
+ update.uaddr = (uintptr_t) kvm->mem;
+ update.len = ramsize;
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Collect a measurement (necessary) */
+ sev_get_measure(kvm->vmfd);
+
+ /* Finalize launch process */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ ret = sev_guest_state(kvm->vmfd, start.handle);
+ if (ret != GSTATE_RUNNING)
+		errx(1, "Bad guest state: %s", sev_gstate_str(ret));
}
void
-print_counts(cpc_msrmt_t *counts)
+sev_es_kvm_init(struct kvm *kvm, size_t ramsize,
+ void *code_start, void *code_stop)
{
- int i;
-
- for (i = 0; i < 64; i++) {
- if (i % 16 == 0 && i)
- printf("\n");
- if (counts[i] == 1)
- printf("\x1b[38;5;88m");
- else if (counts[i] > 1)
- printf("\x1b[38;5;196m");
- printf("%2i ", i);
- if (counts[i] > 0)
- printf("\x1b[0m");
- }
- printf("\n Target Set Count: %d %llu \n", TARGET_SET, counts[TARGET_SET]);
- printf("\n");
+ struct kvm_userspace_memory_region region;
+ struct kvm_sev_launch_update_data update;
+ struct kvm_sev_launch_start start;
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ int ret, fwerr;
+
+ /* Create a kvm instance */
+ kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
+ if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
+
+ /* Allocate guest memory */
+ kvm->memsize = ramsize;
+ kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (kvm->mem == MAP_FAILED) err(1, "Allocating guest memory");
+ assert(code_stop - code_start <= kvm->memsize);
+ memcpy(kvm->mem, code_start, code_stop - code_start);
+
+ /* Map it into the vm */
+ memset(®ion, 0, sizeof(region));
+ region.slot = 0;
+ region.memory_size = kvm->memsize;
+ region.guest_phys_addr = 0;
+ region.userspace_addr = (uintptr_t) kvm->mem;
+ ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, ®ion);
+ if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
+
+ /* Enable SEV for vm */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_ES_INIT, NULL, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_ES_INIT: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Create virtual cpu core */
+ kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
+ if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+
+ /* Map the shared kvm_run structure and following data */
+ ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
+ if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
+ if (ret < sizeof(struct kvm_run))
+ errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
+ kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
+ MAP_SHARED, kvm->vcpufd, 0);
+	if (kvm->run == MAP_FAILED) err(1, "mmap vcpu");
+
+ /* Initialize segment regs */
+ memset(&sregs, 0, sizeof(sregs));
+ ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
+ if (ret == -1) err(1, "KVM_GET_SREGS");
+ sregs.cs.base = 0;
+ sregs.cs.selector = 0;
+ ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
+ if (ret == -1) err(1, "KVM_SET_SREGS");
+
+ /* Initialize rest of registers */
+ memset(®s, 0, sizeof(regs));
+ regs.rip = 0;
+ regs.rsp = kvm->memsize - 8;
+ regs.rbp = kvm->memsize - 8;
+ regs.rflags = 0x2;
+ ret = ioctl(kvm->vcpufd, KVM_SET_REGS, ®s);
+ if (ret == -1) err(1, "KVM_SET_REGS");
+
+ /* Generate encryption keys and set policy */
+ memset(&start, 0, sizeof(start));
+ start.handle = 0;
+ start.policy = 1 << 2; /* require SEV-ES */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Prepare the vm memory (by encrypting it) */
+ memset(&update, 0, sizeof(update));
+ update.uaddr = (uintptr_t) kvm->mem;
+ update.len = ramsize;
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Prepare the vm save area */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Collect a measurement (necessary) */
+ sev_get_measure(kvm->vmfd);
+
+ /* Finalize launch process */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ ret = sev_guest_state(kvm->vmfd, start.handle);
+ if (ret != GSTATE_RUNNING)
+		errx(1, "Bad guest state: %s", sev_gstate_str(ret));
}
-cpc_msrmt_t *
-collect(const char *prefix, void *code_start, void *code_stop)
+void
+sev_snp_kvm_init(struct kvm *kvm, size_t ramsize,
+ void *code_start, void *code_stop)
{
+ struct kvm_sev_snp_launch_update update;
+ struct kvm_sev_snp_launch_start start;
+ struct kvm_sev_snp_launch_finish finish;
+ struct kvm_snp_init init;
+ struct kvm_userspace_memory_region region;
+ struct kvm_enc_region enc_region;
+ struct kvm_sregs sregs;
struct kvm_regs regs;
- cpc_msrmt_t *counts;
- int ret;
-
- /* using cache size for alignment of kvm memory access */
- kvm_init(64 * 64 * 8 * 2, code_start, code_stop);
+ int ret, fwerr;
- ret = 0;
- kvm_run->exit_reason = 0;
+ /* Create a kvm instance */
+ kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
+ if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
- /* run vm twice, use count without initial stack setup */
- ret = ioctl(kvm.vcpufd, KVM_RUN, NULL);
- ret = ioctl(kvm.vcpufd, KVM_RUN, NULL);
- if (ret == -1) err(1, "KVM_RUN");
+ /* Allocate guest memory */
+ kvm->memsize = ramsize;
+ kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (kvm->mem == MAP_FAILED) err(1, "Allocating guest memory");
+ assert(code_stop - code_start <= kvm->memsize);
+ memcpy(kvm->mem, code_start, code_stop - code_start);
- if (kvm_run->exit_reason == KVM_EXIT_MMIO || kvm_run->exit_reason == KVM_EXIT_HLT) {
- memset(®s, 0, sizeof(regs));
- ret = ioctl(kvm.vcpufd, KVM_GET_REGS, ®s);
- if (ret < 0) err(1, "KVM_GET_REGS");
- errx(1, "Victim access OOB: %llu %08llx => %02X\n",
- kvm_run->mmio.phys_addr, regs.rip,
- ((uint8_t *)kvm.mem)[regs.rip]);
- } else if (kvm_run->exit_reason != KVM_EXIT_IO) {
- errx(1, "KVM died: %i\n", kvm_run->exit_reason);
- }
+ /* Map it into the vm */
+ memset(®ion, 0, sizeof(region));
+ region.slot = 0;
+ region.memory_size = kvm->memsize;
+ region.guest_phys_addr = 0;
+ region.userspace_addr = (uintptr_t) kvm->mem;
+ ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, ®ion);
+ if (ret == -1) err(1, "KVM_SET_USER_MEMORY_REGION");
+
+ /* Enable SEV for vm */
+ memset(&init, 0, sizeof(init));
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_INIT, &init, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_SNP_INIT: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Register memory region */
+ memset(&enc_region, 0, sizeof(enc_region));
+ enc_region.addr = (uintptr_t) kvm->mem;
+ enc_region.size = kvm->memsize;
+ ret = ioctl(kvm->vmfd, KVM_MEMORY_ENCRYPT_REG_REGION, &enc_region);
+ if (ret == -1) err(1, "KVM_MEMORY_ENCRYPT_REG_REGION");
+
+ /* Create virtual cpu */
+ kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
+ if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+
+ /* Map the shared kvm_run structure and following data */
+ ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
+ if (ret == -1) err(1, "KVM_GET_VCPU_MMAP_SIZE");
+ if (ret < sizeof(struct kvm_run))
+ errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
+ kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
+ MAP_SHARED, kvm->vcpufd, 0);
+	if (kvm->run == MAP_FAILED) err(1, "mmap vcpu");
- counts = read_counts();
+ /* Initialize segment regs */
+ memset(&sregs, 0, sizeof(sregs));
+ ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
+ if (ret == -1) err(1, "KVM_GET_SREGS");
+ sregs.cs.base = 0;
+ sregs.cs.selector = 0;
+ ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
+ if (ret == -1) err(1, "KVM_SET_SREGS");
- close(kvm.fd);
- close(kvm.vmfd);
- close(kvm.vcpufd);
+ /* Initialize rest of registers */
+ memset(®s, 0, sizeof(regs));
+ regs.rip = 0;
+ regs.rsp = kvm->memsize - 8 - L1_LINESIZE * L1_SETS;
+ regs.rbp = kvm->memsize - 8 - L1_LINESIZE * L1_SETS;
+ ret = ioctl(kvm->vcpufd, KVM_SET_REGS, ®s);
+ if (ret == -1) err(1, "KVM_SET_REGS");
+
+ /* Generate encryption keys and set policy */
+ memset(&start, 0, sizeof(start));
+ start.policy = 1 << 17; /* must be set */
+ start.policy |= 1 << 19; /* allow debug */
+ start.policy |= 1 << 16; /* allow simultaneous multi-threading */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_START, &start, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_SNP_LAUNCH_START: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Prepare the vm memory */
+ memset(&update, 0, sizeof(update));
+ update.uaddr = (uintptr_t) kvm->mem;
+ update.len = ramsize;
+ update.start_gfn = 0;
+ update.page_type = KVM_SEV_SNP_PAGE_TYPE_NORMAL;
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_UPDATE, &update, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_SNP_LAUNCH_UPDATE: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Finalize launch process */
+ memset(&finish, 0, sizeof(finish));
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_SNP_LAUNCH_FINISH, &finish, &fwerr);
+ if (ret == -1) errx(1, "KVM_SEV_SNP_LAUNCH_FINISH: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+}
- return counts;
+void
+kvm_deinit(struct kvm *kvm)
+{
+ close(kvm->vmfd);
+ close(kvm->vcpufd);
+ munmap(kvm->mem, kvm->memsize);
}
-int
-main(int argc, const char **argv)
+void
+kvm_setup_init(void)
{
- cpc_msrmt_t without_access[SAMPLE_COUNT][64];
- cpc_msrmt_t with_access[SAMPLE_COUNT][64];
- cpc_msrmt_t *counts, *baseline;
- uint32_t arg, measure;
- int i, k, ret;
-
- setvbuf(stdout, NULL, _IONBF, 0);
-
- pin_process(0, TARGET_CORE, true);
-
- kvm_fd = open("/dev/kvm", O_RDONLY);
- if (kvm_fd < 0) err(1, "open");
-
- /* init L1 miss counter for host kernel */
- arg = 0x002264D8;
- ret = ioctl(kvm_fd, KVM_CPC_INIT_PMC, &arg);
- if (ret == -1) err(1, "ioctl INIT_PMC");
-
- baseline = calloc(sizeof(cpc_msrmt_t), 64);
- if (!baseline) err(1, "calloc");
-
- measure = true;
- ret = ioctl(kvm_fd, KVM_CPC_MEASURE_BASELINE, &measure);
- if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
-
- for (i = 0; i < SAMPLE_COUNT; i++) {
- counts = collect("without", __start_guest_without, __stop_guest_without);
- memcpy(without_access[i], counts, 64 * sizeof(cpc_msrmt_t));
- free(counts);
-
- counts = collect("with", __start_guest_with, __stop_guest_with);
- memcpy(with_access[i], counts, 64 * sizeof(cpc_msrmt_t));
- free(counts);
- }
-
- measure = false;
- ret = ioctl(kvm_fd, KVM_CPC_MEASURE_BASELINE, &measure);
- if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
-
- ret = ioctl(kvm_fd, KVM_CPC_READ_BASELINE, baseline);
- if (ret == -1) err(1, "ioctl READ_BASELINE");
-
- for (i = 0; i < SAMPLE_COUNT; i++) {
- for (k = 0; k < 64; k++) {
- with_access[i][k] -= baseline[k];
- without_access[i][k] -= baseline[k];
- }
-
- printf("Evictions with access:\n");
- print_counts(with_access[i]);
-
- printf("Evictions without access:\n");
- print_counts(without_access[i]);
- }
-
- for (i = 0; i < SAMPLE_COUNT; i++) {
- assert(with_access[i][TARGET_SET] > 0);
- //assert(without_access[i][TARGET_SET] == 0);
- }
-
- free(baseline);
- close(kvm_fd);
+ int ret;
+
+ kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC);
+ if (kvm_dev < 0) err(1, "open /dev/kvm");
+
+ sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC);
+ if (sev_dev < 0) err(1, "open /dev/sev");
+
+ /* ensure we have the stable version of the api */
+ ret = ioctl(kvm_dev, KVM_GET_API_VERSION, NULL);
+ if (ret == -1) err(1, "KVM_GET_API_VERSION");
+ if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
}
+void
+kvm_setup_deinit(void)
+{
+ close(kvm_dev);
+ close(sev_dev);
+}
diff --git a/test/kvm.h b/test/kvm.h
@@ -0,0 +1,50 @@
+#pragma once
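+
+/*
+ * Helpers for creating plain, SEV, SEV-ES and SEV-SNP test guests.
+ * Typical usage (see test/kvm-step.c):
+ *
+ *   kvm_setup_init();
+ *   sev_snp_kvm_init(&kvm, ramsize, guest_start, guest_stop);
+ *   ioctl(kvm.vcpufd, KVM_RUN, NULL);
+ *   kvm_deinit(&kvm);
+ *   kvm_setup_deinit();
+ */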
+
+#include <stdint.h>
+#include <stdlib.h>
+
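+/* guest variants for the eviction tests: with or without the target access */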
+enum { WITH, WITHOUT };
+
+enum {
+ GSTATE_UNINIT,
+ GSTATE_LUPDATE,
+ GSTATE_LSECRET,
+ GSTATE_RUNNING,
+ GSTATE_SUPDATE,
+ GSTATE_RUPDATE,
+ GSTATE_SENT
+};
+
+struct kvm {
+ int fd, vmfd, vcpufd;
+ void *mem;
+ size_t memsize;
+ struct kvm_run *run;
+};
+
+const char *sev_fwerr_str(int code);
+const char *sev_gstate_str(int code);
+
+int sev_ioctl(int vmfd, int cmd, void *data, int *error);
+void sev_get_measure(int vmfd);
+uint8_t sev_guest_state(int vmfd, uint32_t handle);
+void sev_dbg_decrypt(int vmfd, void *src, void *dst, size_t size);
+uint64_t sev_dbg_decrypt_rip(int vmfd);
+void snp_dbg_decrypt(int vmfd, void *src, void *dst, size_t size);
+uint64_t snp_dbg_decrypt_rip(int vmfd);
+
+void kvm_init(struct kvm *kvm, size_t ramsize,
+ void *code_start, void *code_stop);
+void sev_kvm_init(struct kvm *kvm, size_t ramsize,
+ void *code_start, void *code_stop);
+void sev_es_kvm_init(struct kvm *kvm, size_t ramsize,
+ void *code_start, void *code_stop);
+void sev_snp_kvm_init(struct kvm *kvm, size_t ramsize,
+ void *code_start, void *code_stop);
+void kvm_deinit(struct kvm *kvm);
+
+void kvm_setup_init(void);
+void kvm_setup_deinit(void);
+
+extern int kvm_dev, sev_dev;
+
diff --git a/test/util.c b/test/util.c
@@ -0,0 +1,106 @@
+#define _GNU_SOURCE
+
+#include "test/util.h"
+
+#include <err.h>
+#include <sched.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+void
+hexdump(void *data, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (i % 16 == 0 && i)
+ printf("\n");
+ printf("%02X ", *(uint8_t *)(data + i));
+ }
+ printf("\n");
+}
+
+bool
+pin_process(pid_t pid, int cpu, bool assert)
+{
+ cpu_set_t cpuset;
+ int ret;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ ret = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset);
+ if (ret == -1) {
+ if (assert) err(1, "sched_setaffinity");
+ return false;
+ }
+
+ return true;
+}
+
+int
+read_stat_core(pid_t pid)
+{
+ char path[256];
+ char line[2048];
+ FILE *file;
+ char *p;
+ int i, cpu;
+
+ snprintf(path, sizeof(path), "/proc/%u/stat", pid);
+ file = fopen(path, "r");
+ if (!file) return -1;
+
+ if (!fgets(line, sizeof(line), file))
+ err(1, "read stat");
+
+ p = line;
+ for (i = 0; i < 38 && (p = strchr(p, ' ')); i++)
+ p += 1;
+
+ if (!p) errx(1, "stat format");
+ cpu = atoi(p);
+
+ fclose(file);
+
+ return cpu;
+}
+
+void
+print_counts(uint8_t *counts)
+{
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (i % 16 == 0 && i)
+ printf("\n");
+ if (counts[i] == 1)
+ printf("\x1b[38;5;88m");
+ else if (counts[i] > 1)
+ printf("\x1b[38;5;196m");
+ printf("%2i ", i);
+ if (counts[i] > 0)
+ printf("\x1b[0m");
+ }
+ printf("\n");
+}
+
+void
+print_counts_raw(uint8_t *counts)
+{
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (i % 16 == 0 && i)
+ printf("\n");
+ if (counts[i] == 1)
+ printf("\x1b[38;5;88m");
+ else if (counts[i] > 1)
+ printf("\x1b[38;5;196m");
+ printf("%02X ", (uint8_t) counts[i]);
+ if (counts[i] > 0)
+ printf("\x1b[0m");
+ }
+ printf("\n");
+}
diff --git a/test/util.h b/test/util.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <sys/types.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#define ARRLEN(x) (sizeof(x) / sizeof((x)[0]))
+#define MIN(a,b) ((a) > (b) ? (b) : (a))
+
+void hexdump(void *data, int len);
+
+bool pin_process(pid_t pid, int cpu, bool assert);
+
+int read_stat_core(pid_t pid);
+
+void print_counts(uint8_t *counts);
+void print_counts_raw(uint8_t *counts);
diff --git a/util/disasm b/util/disasm
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+if [ $# -lt 2 ]; then
+	echo "Usage: $0 FILE FUNC"
+ exit 1
+fi
+
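+# e.g.: util/disasm test/kvm-step main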
+gdb --batch -ex "disassemble $2" "$1"