commit 1c198ee120f69d435289dbb4510058b536d091d9
parent 2f5b1570df4ad2409d648b27bdaf445a29261a80
Author: Louis Burda <quent.burda@gmail.com>
Date: Thu, 20 Oct 2022 17:47:42 +0200
Fix sevstep page tracking example
Diffstat:
12 files changed, 83 insertions(+), 84 deletions(-)
diff --git a/cachepc/asm.h b/cachepc/asm.h
diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
diff --git a/cachepc/kvm.h b/cachepc/kvm.h
diff --git a/cachepc/mmu.c b/cachepc/mmu.c
@@ -15,11 +15,13 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
int have_rip, i;
int send_err;
+ pr_warn("Sevstep: Got page fault (gfn:%llu)", fault->gfn);
+
was_tracked = false;
for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
if (kvm_slot_page_track_is_active(vcpu->kvm,
fault->slot, fault->gfn, modes[i])) {
- sevstep_untrack_single_page(vcpu, fault->gfn, modes[i]);
+ //sevstep_untrack_single_page(vcpu, fault->gfn, modes[i]);
was_tracked = true;
}
}
@@ -32,9 +34,8 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
send_err = sevstep_uspt_batch_tracking_save(fault->gfn << PAGE_SHIFT,
fault->error_code, have_rip, current_rip);
if (send_err) {
- printk_ratelimited(
- "sevstep_uspt_batch_tracking_save failed with %d\n"
- "##########################\n", send_err);
+ pr_warn("Sevstep: uspt_batch_tracking_save failed with %d\n",
+ send_err);
}
sevstep_uspt_batch_tracking_handle_retrack(vcpu, fault->gfn);
sevstep_uspt_batch_tracking_inc_event_idx();
@@ -42,8 +43,8 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
send_err = sevstep_uspt_send_and_block(fault->gfn << PAGE_SHIFT,
fault->error_code, have_rip, current_rip);
if (send_err) {
- printk("sevstep_uspt_send_and_block failed with %d\n"
- "##########################\n", send_err);
+ printk("Sevstep: uspt_send_and_block failed with %d\n",
+ send_err);
}
}
}
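
Note: the handler above now only records whether the faulting gfn was tracked in any mode; the inline untrack call is commented out and left to the batch retrack path. A minimal sketch of that mode check pulled into a helper, using the same KVM page-track call as above (the helper name is illustrative, not part of the patch):

/* Illustrative helper (not part of the patch): returns true if the gfn is
 * currently tracked in any of the given page-track modes. */
static bool
sevstep_gfn_tracked_in_any_mode(struct kvm_vcpu *vcpu,
	struct kvm_memory_slot *slot, gfn_t gfn,
	const enum kvm_page_track_mode *modes, int nmodes)
{
	int i;

	for (i = 0; i < nmodes; i++) {
		if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, modes[i]))
			return true;
	}

	return false;
}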
diff --git a/cachepc/sevstep.c b/cachepc/sevstep.c
@@ -98,9 +98,8 @@ sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (mode == KVM_PAGE_TRACK_ACCESS) {
- pr_warn("Removing gfn: %016llx from acess page track pool\n", gfn);
- }
- if (mode == KVM_PAGE_TRACK_WRITE) {
+ pr_warn("Removing gfn: %016llx from access page track pool\n", gfn);
+ } else if (mode == KVM_PAGE_TRACK_WRITE) {
pr_warn("Removing gfn: %016llx from write page track pool\n", gfn);
}
@@ -180,36 +179,25 @@ long
sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
struct kvm_memory_slot *slot;
- struct kvm_memory_slot *first_memslot;
- //struct kvm_memory_slot *second_memslot;
- struct rb_node *node;
- u64 iterator, iterat_max;
+ struct kvm_memslots *slots;
long count = 0;
- int idx;
+ int bkt;
+ u64 gfn;
pr_warn("Sevstep: Start tracking %i\n", mode);
- node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
- first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
- //second_memslot = container_of(node, struct kvm_memory_slot, gfn_node[1]);
- pr_warn("Sevstep: Total memslot pages %ld", vcpu->kvm->nr_memslot_pages);
- //pr_warn("Sevstep: First memslot pages %ld base gfn 0x%llx", first_memslot->npages, //first_memslot->base_gfn);
- //pr_warn("Sevstep: Second memslot pages %ld base gfn 0x%llx",second_memslot->npages, //second_memslot->base_gfn);
- iterat_max = first_memslot->base_gfn + vcpu->kvm->nr_memslot_pages;//first_memslot->npages;
- //VU: We retrieve the total nr of memslot pages directly from the kvm struct.
- //VU: I think this should work, but only if my understanding of the memslots is correct
- pr_warn("Sevstep: Page count: %llu\n", iterat_max);
- for (iterator = 0; iterator < iterat_max; iterator++) {
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- slot = kvm_vcpu_gfn_to_memslot(vcpu, iterator);
- if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm, slot, iterator, mode)) {
- pr_warn("Sevstep: Tracking page: %llu\n", iterator);
- write_lock(&vcpu->kvm->mmu_lock);
- kvm_slot_page_track_add_page(vcpu->kvm, slot, iterator, mode);
- write_unlock(&vcpu->kvm->mmu_lock);
- count++;
+ slots = kvm_vcpu_memslots(vcpu);
+ kvm_for_each_memslot(slot, bkt, slots) {
+ pr_warn("Sevstep: Slot page count: %lu\n", slot->npages);
+ for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
+ if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+ pr_warn("Sevstep: Tracking page: %llu\n", gfn);
+ write_lock(&vcpu->kvm->mmu_lock);
+ kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
+ write_unlock(&vcpu->kvm->mmu_lock);
+ count++;
+ }
}
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
return count;
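
Note: the rewritten loop walks every memslot via kvm_for_each_memslot() and tracks each gfn in [base_gfn, base_gfn + npages), instead of guessing a flat gfn range from nr_memslot_pages. A symmetric untrack pass would presumably mirror it; the sketch below assumes kvm_slot_page_track_remove_page() is the matching KVM call and is illustrative only, not the project's actual sevstep_stop_tracking:

/* Sketch only: untrack all pages in all memslots, mirroring the tracking
 * loop above under the same locking assumptions. */
long
sevstep_stop_tracking_sketch(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	long count = 0;
	int bkt;
	u64 gfn;

	slots = kvm_vcpu_memslots(vcpu);
	kvm_for_each_memslot(slot, bkt, slots) {
		for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
			if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode))
				continue;
			write_lock(&vcpu->kvm->mmu_lock);
			kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
			write_unlock(&vcpu->kvm->mmu_lock);
			count++;
		}
	}

	return count;
}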
diff --git a/cachepc/sevstep.h b/cachepc/sevstep.h
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
diff --git a/cachepc/uspt.c b/cachepc/uspt.c
@@ -94,7 +94,7 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
read_lock(&event_lock);
if (!sevstep_uspt_is_initialiized()) {
- pr_warn("sevstep_uspt_send_and_block: ctx not initialized!\n");
+ pr_warn("Sevstep: uspt_send_and_block: ctx not initialized!\n");
read_unlock(&event_lock);
return 1;
}
@@ -102,7 +102,7 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
write_lock(&event_lock);
if (last_sent_event_id != last_acked_event_id) {
- pr_warn("sevstep_uspt_send_and_block: "
+ pr_warn("Sevstep: uspt_send_and_block: "
"event id_s out of sync, aborting. Fix this later\n");
write_unlock(&event_lock);
return 1;
@@ -118,21 +118,19 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
message_for_user.ns_timestamp = ktime_get_real_ns();
message_for_user.have_retired_instructions = false;
- // for poll based system;
have_event = 1;
sent_event = message_for_user;
- // printk("sevstep_uspt_send_and_block sending event %llu\n", sent_event.id);
write_unlock(&event_lock);
- // wait for ack, but with timeout. Otherwise small bugs in userland
- // easily lead to a kernel hang
- abort_after = ktime_get() + 1000000000ULL; // 1 sec in nanosecond
+ /* wait for ack with timeout */
+ pr_warn("Sevstep: uspt_send_and_block: Begin wait for event ack");
+ abort_after = ktime_get_ns() + 1000000000ULL; /* 1s in ns */
while (!sevstep_uspt_is_event_done(sent_event.id)) {
- if (ktime_get() > abort_after) {
- pr_warn("sevstep_uspt_send_and_block: "
- "Waiting for ack of event %llu timed out, "
- "continuing\n",sent_event.id);
+ if (ktime_get_ns() > abort_after) {
+ pr_warn("Sevstep: uspt_send_and_block: "
+ "Waiting for ack of event %llu timed out",
+ sent_event.id);
return 3;
}
}
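
Note: the wait now uses ktime_get_ns() for both the deadline and the comparison (the old code compared a raw ktime_t against a nanosecond value). The loop still spins at full speed while waiting for the ack; if yielding is acceptable in this calling context (an assumption, not something the patch establishes), the same bounded wait could be written as:

/* Sketch only: same 1s deadline, but yield between polls so the waiting
 * CPU is not pegged. Whether yielding is safe here is an assumption. */
abort_after = ktime_get_ns() + 1000000000ULL; /* 1s in ns */
while (!sevstep_uspt_is_event_done(sent_event.id)) {
	if (ktime_get_ns() > abort_after) {
		pr_warn("Sevstep: uspt_send_and_block: ack timeout\n");
		return 3;
	}
	cond_resched();
}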
@@ -143,13 +141,13 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
int
sevstep_uspt_is_event_done(uint64_t id)
{
- int res;
+ bool done;
read_lock(&event_lock);
- res = last_acked_event_id >= id;
+ done = last_acked_event_id >= id;
read_unlock(&event_lock);
- return res;
+ return done;
}
int
@@ -183,6 +181,7 @@ sevstep_uspt_handle_ack_event_ioctl(ack_event_t event)
{
int err = 0;
+ pr_warn("Sevstep: uspt_handle_ack_event_ioctl: acking event %llu", event.id);
write_lock(&event_lock);
if (event.id == last_sent_event_id) {
last_acked_event_id = last_sent_event_id;
@@ -371,11 +370,10 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
int cpu;
cpu = get_cpu();
-
spin_lock(&batch_track_state_lock);
if (!batch_track_state.is_active) {
- pr_warn("sevstep_uspt_batch_tracking_save: "
+ pr_warn("Sevstep: uspt_batch_tracking_save: "
"got save but batch tracking is not active!\n");
batch_track_state.error_occured = true;
spin_unlock(&batch_track_state_lock);
@@ -384,7 +382,7 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
}
if (batch_track_state.event_next_idx >= batch_track_state.events_size) {
- pr_warn("sevstep_uspt_batch_tracking_save: events buffer is full!\n");
+ pr_warn("Sevstep: uspt_batch_tracking_save: events buffer is full!\n");
batch_track_state.error_occured = true;
spin_unlock(&batch_track_state_lock);
put_cpu();
@@ -392,7 +390,7 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
}
if (cpu != batch_track_state.perf_cpu) {
- pr_warn("sevstep_uspt_batch_tracking_save: perf was "
+ pr_warn("Sevstep: uspt_batch_tracking_save: perf was "
"programmed on logical cpu %d but handler was called "
"on %d. Did you forget to pin the vcpu thread?\n",
batch_track_state.perf_cpu, cpu);
@@ -401,7 +399,7 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
if (batch_track_state.events == NULL) {
- pr_warn("sevstep_uspt_batch_tracking_save: events buf was "
+ pr_warn("Sevstep: uspt_batch_tracking_save: events buf was "
"NULL but \"is_active\" was set! This should never happen!!!\n");
spin_unlock(&batch_track_state_lock);
return 1;
@@ -467,7 +465,7 @@ sevstep_uspt_batch_tracking_stop(page_fault_event_t* results,
}
uint64_t
-sevstep_uspt_batch_tracking_get_events_count()
+sevstep_uspt_batch_tracking_get_events_count(void)
{
uint64_t buf;
@@ -479,7 +477,8 @@ sevstep_uspt_batch_tracking_get_events_count()
}
bool
-sevstep_uspt_batch_tracking_in_progress()
+sevstep_uspt_batch_tracking_in_progress(void)
{
return batch_track_state.is_active;
}
+
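
Note: the () to (void) change on the two prototypes above is not cosmetic in C. An empty parameter list leaves the parameters unspecified, so the compiler cannot reject a call made with stray arguments, while (void) makes such a call a constraint violation. A minimal illustration (not project code):

/* Illustration only, not project code. */
uint64_t get_count_old();      /* unspecified parameters: get_count_old(42) still compiles */
uint64_t get_count_new(void);  /* no parameters: get_count_new(42) is rejected */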
diff --git a/cachepc/uspt.h b/cachepc/uspt.h
diff --git a/test/sevstep.c b/test/sevstep.c
@@ -50,7 +50,8 @@ extern uint8_t __start_guest_with[];
extern uint8_t __stop_guest_with[];
/* ioctl dev fds */
-int kvm_dev, sev_dev, kvm_dev;
+static int kvm_dev, sev_dev, kvm_dev;
+static int faultcnt;
enum {
GSTATE_UNINIT,
@@ -113,17 +114,13 @@ hexdump(void *data, int len)
printf("\n");
}
-// REF: https://events19.linuxfoundation.org/wp-content/uploads/2017/12/Extending-Secure-Encrypted-Virtualization-with-SEV-ES-Thomas-Lendacky-AMD.pdf
-// REF: https://www.spinics.net/lists/linux-kselftest/msg27206.html
__attribute__((section("guest_with"))) void
vm_guest_with(void)
{
+ asm volatile("hlt");
while (1) {
asm volatile("mov (%[v]), %%bl"
: : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET));
- //asm volatile("out %%al, (%%dx)" : : );
- asm volatile("hlt");
- //asm volatile("rep; vmmcall\n\r");
}
}
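
Note: moving the hlt in front of the access loop means the very first KVM_RUN returns with KVM_EXIT_HLT once the guest's stack setup is done, and every later run only performs the tracked memory access. The patch simply ignores the result of that first run; a sketch of a host-side check, reusing the fields already used by runonce() below (the explicit check is an assumption, not part of the patch):

/* Sketch (assumption): consume the guest's initial hlt explicitly instead
 * of ignoring the first KVM_RUN result. */
ret = ioctl(kvm->vcpufd, KVM_RUN, NULL);
if (ret < 0) err(1, "KVM_RUN");
if (kvm->run->exit_reason != KVM_EXIT_HLT)
	errx(1, "expected initial KVM_EXIT_HLT, got %i", kvm->run->exit_reason);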
@@ -359,7 +356,8 @@ sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
/* Prepare the vm save area */
ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fwerr);
- if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s",strerror(errno), sev_fwerr_str(fwerr));
+ if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
/* Collect a measurement (necessary) */
msrmt = sev_get_measure(kvm->vmfd);
@@ -416,16 +414,15 @@ print_counts(uint16_t *counts)
printf("\n");
}
-uint16_t *
-collect(struct kvm *kvm)
+void
+runonce(struct kvm *kvm)
{
struct kvm_regs regs;
- page_fault_event_t event;
- ack_event_t ack;
int ret;
ret = ioctl(kvm->vcpufd, KVM_RUN, NULL);
if (ret < 0) err(1, "KVM_RUN");
+ printf("VMEXIT\n");
if (kvm->run->exit_reason == KVM_EXIT_MMIO) {
memset(&regs, 0, sizeof(regs));
@@ -437,31 +434,40 @@ collect(struct kvm *kvm)
} else if (kvm->run->exit_reason != KVM_EXIT_HLT) {
errx(1, "KVM died: %i\n", kvm->run->exit_reason);
}
+}
+
+int
+monitor(void)
+{
+ page_fault_event_t event;
+ ack_event_t ack;
+ int ret;
/* Get page fault info */
ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
if (!ret) {
- printf("Got page fault: %llu insts\n",
+ printf("Got page fault! %llu retired insts\n",
event.retired_instructions);
+ faultcnt++;
ack.id = event.id;
- printf("Acking event %d \n", ack.id);
+ printf("Acking event %llu\n", ack.id);
ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &ack);
if (ret == -1) err(1, "ioctl ACK_EVENT");
} else if (ret != CPC_USPT_POLL_EVENT_NO_EVENT) {
- err(1, "ioctl POLL_EVENT");
+ perror("ioctl POLL_EVENT");
+ return 1;
}
- return read_counts();
+ return 0;
}
int
main(int argc, const char **argv)
{
- uint16_t with_access[SAMPLE_COUNT][64];
struct kvm kvm_with_access;
track_all_pages_t track_all;
- uint16_t *counts;
+ pid_t ppid, pid;
int i, ret;
setvbuf(stdout, NULL, _IONBF, 0);
@@ -486,9 +492,12 @@ main(int argc, const char **argv)
sev_kvm_init(&kvm_with_access, 64 * 64 * 8 * 2,
__start_guest_with, __stop_guest_with);
- /* One run to get into while loop (after stack setup) */
+ /* One run to skip stack setup */
ioctl(kvm_with_access.vcpufd, KVM_RUN, NULL);
+ /* Page tracking init needs to happen after kvm
+ * init so main_kvm is set. */
+
/* Reset previous tracking */
ret = ioctl(kvm_dev, KVM_CPC_RESET_TRACKING, NULL);
if (ret == -1) err(1, "ioctl RESET_TRACKING");
@@ -498,20 +507,22 @@ main(int argc, const char **argv)
ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_all);
if (ret == -1) err(1, "ioctl TRACK_ALL");
- for (i = 0; i < SAMPLE_COUNT; i++) {
- counts = collect(&kvm_with_access);
- memcpy(with_access[i], counts, 64 * sizeof(uint16_t));
- free(counts);
+ ppid = getpid();
+ if ((pid = fork())) {
+ if (pid < 0) err(1, "fork");
+ runonce(&kvm_with_access);
+ } else {
+ pin_process(0, SECONDARY_CORE, true);
+ faultcnt = 0;
+ while (faultcnt < SAMPLE_COUNT) {
+ if (monitor()) break;
+ }
+ kill(ppid, SIGTERM);
+ exit(0);
}
-
- // for (i = 0; i < SAMPLE_COUNT; i++) {
- // printf("Evictions with access:\n");
- // print_counts(with_access[i]);
- // }
- printf("done.\n");
sev_kvm_deinit(&kvm_with_access);
-
+
close(kvm_dev);
close(sev_dev);
}
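
Note: the child pins itself to SECONDARY_CORE, polls and acks page-fault events until SAMPLE_COUNT faults were seen, then signals the parent with SIGTERM. With SIGTERM's default disposition the parent is killed outright; if the parent should instead fall through to sev_kvm_deinit(), a no-op handler would turn the signal into an EINTR on the blocking KVM_RUN. A sketch under that assumption (not part of the patch):

/* Sketch (assumption, not in the patch): let SIGTERM interrupt the parent's
 * blocking KVM_RUN instead of killing it, so cleanup code still runs. */
#include <signal.h>

static void
term_handler(int sig)
{
	(void) sig; /* no-op: only interrupt the blocked ioctl */
}

/* in main(), before fork(): */
struct sigaction sa = { 0 };
sa.sa_handler = term_handler;
if (sigaction(SIGTERM, &sa, NULL) == -1)
	err(1, "sigaction");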