commit 0e89d3b1b7c45ff9a3916b01ab56f177d4b64f8c
parent dd1e29c4828b01b477a003679234097434f800d4
Author: Louis Burda <quent.burda@gmail.com>
Date: Wed, 5 Oct 2022 18:28:34 +0200
Add ioctl handling for sevstep
Diffstat:
4 files changed, 621 insertions(+), 7 deletions(-)
diff --git a/patch.diff b/patch.diff
@@ -427,7 +427,7 @@ index e089fbf9017f..7899e1efe852
static int __sev_init_locked(int *error)
{
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index f2a63cb2658b..bfe4a57bcc10 100644
+index f2a63cb2658b..bd26b7a29c9e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -70,6 +70,10 @@
@@ -441,7 +441,620 @@ index f2a63cb2658b..bfe4a57bcc10 100644
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-@@ -5792,6 +5796,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -159,6 +163,267 @@ static unsigned long long kvm_active_vms;
+
+ static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
+
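++/*
++ * Wrapper around get_user_pages_remote() that manages the mmap_lock
++ * itself: the wrapped call expects the caller to hold the lock and may
++ * drop it (reported via @locked), so it is only released here if it is
++ * still held on return.
++ */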
++static long
++get_user_pages_remote_unlocked(struct mm_struct *mm,
++ unsigned long start, unsigned long nr_pages,
++ unsigned int gup_flags, struct page **pages)
++{
++ struct vm_area_struct **vmas = NULL;
++ int locked = 1;
++ long ret;
++
++ down_read(&mm->mmap_lock);
++	ret = get_user_pages_remote(mm, start, nr_pages,
++		gup_flags, pages, vmas, &locked);
++	if (locked)
++		up_read(&mm->mmap_lock);
++
++ return ret;
++}
++
++// static int
++// get_hpa_for_gpa(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
++// {
++// int ec;
++// unsigned long hva;
++// struct page *page = NULL;
++//
++// ec = 0;
++//
++// hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
++// if (kvm_is_error_hva(hva)) {
++// pr_warn("in %s line %d get_hpa_for_gpa: translation to hva failed\n",
++// __FILE__, __LINE__);
++// ec = -100;
++// goto out;
++// }
++// if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, &page) != 1) {
++// pr_warn("in %s line %d get_hpa_for_gpa: failed to get page struct from mm",
++// __FILE__, __LINE__);
++// ec = -KVM_EINVAL;
++// goto out;
++// }
++//
++// (*hpa) = (page_to_pfn(page) << 12) + (gpa & 0xfff);
++//
++// out:
++// put_page(page);
++//
++// return ec;
++// }
++
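++/*
++ * Copy @size bytes from guest-physical address @gpa into @buff. The
++ * access must not cross a page boundary. With @decrypt_at_host the page
++ * is mapped with the encryption bit set, so with SME the content is
++ * transparently decrypted with the host key; otherwise the raw
++ * ciphertext is read.
++ */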
++int
++read_physical(struct kvm *kvm, u64 gpa, void *buff, u64 size,
++ bool decrypt_at_host)
++{
++ unsigned long hva;
++ struct page *page = NULL;
++ void *ptr_page = NULL;
++ uint64_t offset;
++ int ec;
++
++ offset = (gpa & 0xFFF);
++
++	if ((offset + size - 1) > 0xFFF) {
++		pr_warn("read_physical: trying to read "
++			"beyond page (offset+size=%016llx)\n",
++			offset + size);
++		return -EINVAL;
++	}
++
++ ec = 0;
++
++ hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
++
++ // TODO: test change
++ /*
++ if (kvm_is_error_hva(hva)) {
++ printk(KERN_CRIT "Luca: read_physical: translation to hva failed( gpa was "
++ "%016llx hva is %016lx\n",
++ gpa, hva);
++ ec = -100;
++ goto out;
++ }
++ */
++
++ if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, &page) != 1) {
++ pr_warn("read_physical: failed to get page struct from mm\n");
++ // ec = -KVM_EINVAL;
++ ec = -100;
++ goto out;
++ }
++
++	if (decrypt_at_host) {
++		// map with the encryption bit set: content is decrypted with
++		// the host key. If SEV is disabled but SME is enabled, this
++		// allows reading the plaintext.
++		ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
++	} else {
++		// map without the encryption bit to read ciphertext
++		ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
++	}
++	if (ptr_page == NULL) {
++		pr_warn("read_physical: vmap failed\n");
++		ec = -100;
++		goto out;
++	}
++
++	memcpy(buff, ptr_page + offset, size);
++
++out:
++ if (ptr_page)
++ vunmap(ptr_page);
++ if (page)
++ put_page(page);
++
++ return ec;
++}
++
++int
++print_physical(struct kvm *kvm, u64 gpa, u64 size, bool decrypt_at_host)
++{
++ u8 *buffer;
++ int i, err;
++
++	buffer = kmalloc(size, GFP_ATOMIC);
++	if (buffer == NULL)
++		return -ENOMEM;
++
++ err = read_physical(kvm, gpa, buffer, size, decrypt_at_host);
++ if (err != 0) {
++ pr_warn("at %s line %d: read_physical "
++ "failed with: %d\n", __FILE__, __LINE__, err);
++ }
++ for (i = 0; i < size; i++) {
++ // print bytewise with line break every 16 bytes
++ if (i % 16 == 0) {
++ printk("%02x ", buffer[i]);
++ } else {
++ printk(KERN_CONT " %02x ", buffer[i]);
++ }
++ }
++ printk("\n");
++
++ kfree(buffer);
++
++ return err;
++}
++
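++/*
++ * Pin and map the guest page containing @gpa so it can be accessed
++ * repeatedly via read_mapped()/write_mapped() without paying the
++ * pin+vmap cost on every access. Release with unmap_physical().
++ */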
++int
++map_physical(struct kvm *kvm, u64 gpa, bool decrypt_at_host,
++ void **mapping, struct page **page)
++{
++	int ec;
++	unsigned long hva;
++
++	ec = 0;
++
++	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
++
++ if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, page) != 1) {
++ pr_warn("map_physical: failed to get page struct from mm");
++ // ec = -KVM_EINVAL;
++ ec = -100;
++ return ec;
++ }
++
++	if (decrypt_at_host) {
++		// map with the encryption bit set: content is decrypted with
++		// the host key. If SEV is disabled but SME is enabled, this
++		// allows reading the plaintext.
++		(*mapping) = vmap(page, 1, 0, PAGE_KERNEL);
++	} else {
++		// map without the encryption bit to read ciphertext
++		(*mapping) = vmap(page, 1, 0, __pgprot(__PAGE_KERNEL));
++	}
++	if (*mapping == NULL) {
++		pr_warn("map_physical: vmap failed\n");
++		put_page(*page);
++		ec = -100;
++	}
++
++	return ec;
++}
++
++void
++unmap_physical(void **mapping, struct page **page)
++{
++ if (*mapping)
++ vunmap(*mapping);
++ if (*page)
++ put_page(*page);
++}
++
++int
++read_mapped(u64 gpa, void *buff, u64 size, void *mapping)
++{
++ uint64_t offset;
++ offset = (gpa & 0xFFF);
++
++ if ((offset + size - 1) > 0xFFF) {
++ pr_warn("read_mapped: trying to read "
++ "beyond page (offset+size=%016llx)\n",
++ offset + size);
++ return -EINVAL;
++ }
++ memcpy(buff, mapping + offset, size);
++
++ return 0;
++}
++
++int
++write_mapped(u64 gpa, u64 size, const void *buf, void *mapping)
++{
++ uint64_t offset;
++
++ offset = (gpa & 0xFFF);
++
++	if ((offset + size - 1) > 0xFFF) {
++		pr_warn("write_mapped: trying to write "
++			"beyond page (offset+size=%016llx)\n",
++			offset + size);
++		return -EINVAL;
++	}
++ memcpy(mapping + offset, buf, size);
++
++ return 0;
++}
++
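++/*
++ * Counterpart to read_physical(): write @size bytes to @gpa within a
++ * single page. The page is pinned with FOLL_WRITE; with
++ * @write_plaintexts it is mapped with the encryption bit set so SME
++ * transparently encrypts the stored data.
++ */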
++int
++write_physical(struct kvm *kvm, u64 gpa, u64 size,
++ const void *buf, bool write_plaintexts)
++{
++ int ec;
++ unsigned long hva;
++ struct page *page;
++ void *ptr_page;
++ uint64_t offset;
++
++ offset = (gpa & 0xFFF);
++
++ if ((offset + size - 1) > 0xFFF) {
++ pr_warn("write_physical: trying to write "
++		"beyond page (offset+size=%016llx)\n",
++ offset + size);
++ return -EINVAL;
++ }
++
++ ec = 0;
++ hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
++
++ if (kvm_is_error_hva(hva))
++ return -KVM_EINVAL;
++
++ if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, FOLL_WRITE, &page) != 1)
++ return -KVM_EINVAL;
++
++	if (write_plaintexts) {
++		// map with the encryption bit to apply host encryption. Useful
++		// if SEV is disabled but SME is enabled and we want to write a
++		// certain value into a page.
++		ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
++	} else {
++		// map without the encryption bit to write ciphertext
++		ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
++	}
++	if (ptr_page == NULL) {
++		put_page(page);
++		return -KVM_EINVAL;
++	}
++
++	memcpy(ptr_page + offset, buf, size);
++
++ vunmap(ptr_page);
++ put_page(page);
++ return ec;
++}
++
+ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end)
+ {
+@@ -1261,6 +1526,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
+ hardware_disable_all();
+ mmdrop(mm);
+ module_put(kvm_chardev_ops.owner);
++
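++	/* drop the global sevstep target if it is the VM being destroyed */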
++ if (main_vm == kvm)
++ main_vm = NULL;
+ }
+
+ void kvm_get_kvm(struct kvm *kvm)
+@@ -1360,7 +1628,7 @@ static void kvm_insert_gfn_node(struct kvm_memslots *slots,
+ int idx = slots->node_idx;
+
+ parent = NULL;
+- for (node = &gfn_tree->rb_node; *node; ) {
++ for (node = &gfn_tree->rb_node; *node;) {
+ struct kvm_memory_slot *tmp;
+
+ tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
+@@ -4823,6 +5091,9 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
+ kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
+
+ fd_install(r, file);
++
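++	/* remember the most recently created VM as the sevstep target */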
++ main_vm = kvm;
++
+ return r;
+
+ put_kvm:
+@@ -4836,6 +5107,315 @@ static long kvm_dev_ioctl(struct file *filp,
+ long r = -EINVAL;
+
+ switch (ioctl) {
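++	/*
++	 * sevstep ioctls: these operate on main_vm, the most recently
++	 * created VM, and use only its first vcpu where one is needed.
++	 */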
++ case KVM_TRACK_PAGE: {
++ track_page_param_t param;
++ void __user* argp = (void __user *)arg;
++ struct kvm_vcpu *vcpu;
++
++		if (copy_from_user(&param, argp, sizeof(param))) {
++ pr_warn("KVM_TRACK_PAGE: error copying arguments, exiting\n");
++ return -EFAULT;
++ }
++
++ if (main_vm == NULL) {
++ pr_warn("KVM_TRACK_PAGE: main_vm is not initialized, aborting!\n");
++ return -EFAULT;
++ }
++
++		if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
++			pr_warn("KVM_TRACK_PAGE: track_mode %d invalid, "
++				"must be in range [%d,%d]\n", param.track_mode,
++				0, KVM_PAGE_TRACK_MAX - 1);
++			return -EFAULT;
++		}
++
++ vcpu = xa_load(&main_vm->vcpu_array, 0);
++ if (!sevstep_track_single_page(vcpu,
++ param.gpa >> PAGE_SHIFT, param.track_mode)) {
++			pr_warn("KVM_TRACK_PAGE: sevstep_track_single_page failed\n");
++ }
++ r = 0;
++ }
++ break;
++ case KVM_USPT_BATCH_TRACK_START: {
++ batch_track_config_t param;
++ void __user* argp = (void __user *)arg;
++
++		if (copy_from_user(&param, argp, sizeof(param))) {
++ pr_warn("KVM_USPT_BATCH_TRACK_START: "
++ "error copying arguments, exiting\n");
++ return -EFAULT;
++ }
++
++ r = sevstep_uspt_batch_tracking_start(param.tracking_type,
++ param.expected_events, param.perf_cpu, param.retrack);
++ if (r != 0) {
++ pr_warn("KVM_USPT_BATCH_TRACK_START: failed\n");
++ return r;
++ }
++ }
++ break;
++ case KVM_USPT_BATCH_TRACK_EVENT_COUNT: {
++ batch_track_event_count_t result;
++ void __user* argp = (void __user *)arg;
++
++ result.event_count = sevstep_uspt_batch_tracking_get_events_count();
++
++ if (copy_to_user(argp, &result, sizeof(result))) {
++ pr_warn("KVM_USPT_BATCH_TRACK_EVENT_COUNT: "
++ "error copying result to user, exiting\n");
++ return -EFAULT;
++ }
++
++ r = 0;
++ }
++ break;
++ case KVM_USPT_BATCH_TRACK_STOP: {
++ batch_track_stop_and_get_t param;
++ page_fault_event_t* buf;
++ uint64_t buf_bytes;
++ void __user* argp = (void __user *)arg;
++ void __user* inner_user_out_buf;
++
++		if (copy_from_user(&param, argp, sizeof(param))) {
++ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
++ "error copying arguments, exiting\n");
++ return -EFAULT;
++ }
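++		/*
++		 * param.out_buf is the userspace destination; events are
++		 * staged in a temporary kernel buffer and copied out once
++		 * tracking has been stopped.
++		 */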
++		inner_user_out_buf = param.out_buf;
++
++		buf_bytes = sizeof(page_fault_event_t) * param.len;
++		pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
++			"allocating %llu bytes for tmp buf\n", buf_bytes);
++
++		buf = vmalloc(buf_bytes);
++		if (buf == NULL) {
++			pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
++				"failed to alloc tmp buf\n");
++			return -ENOMEM;
++		}
++
++		r = sevstep_uspt_batch_tracking_stop(buf, param.len,
++			&param.error_during_batch);
++ if (r != 0) {
++ pr_warn("KVM_USPT_BATCH_TRACK_STOP: failed\n");
++ vfree(buf);
++ return -EFAULT;
++ }
++
++		if (copy_to_user(argp, &param, sizeof(param))) {
++ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
++ "error copying result to user, exiting\n");
++ vfree(buf);
++ return -EFAULT;
++ }
++
++		if (copy_to_user(inner_user_out_buf, buf, buf_bytes)) {
++ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
++ "error copying result to user, exiting\n");
++ vfree(buf);
++ return -EFAULT;
++ }
++
++ vfree(buf);
++ }
++ break;
++ case KVM_USPT_TRACK_ALL: {
++ track_all_pages_t param;
++ void __user* argp = (void __user *)arg;
++ struct kvm_vcpu *vcpu;
++ long tracked_pages;
++
++		if (copy_from_user(&param, argp, sizeof(param))) {
++ pr_warn("KVM_USPT_TRACK_ALL: error copying arguments, exiting\n");
++ return -EFAULT;
++ }
++
++ if (main_vm == NULL) {
++ pr_warn("KVM_USPT_TRACK_ALL: main_vm is not initialized, aborting!\n");
++ return -EFAULT;
++ }
++
++		if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
++			pr_warn("KVM_USPT_TRACK_ALL: "
++				"track_mode %d invalid, must be in range [%d,%d]\n",
++				param.track_mode, 0, KVM_PAGE_TRACK_MAX - 1);
++			return -EFAULT;
++		}
++
++		vcpu = xa_load(&main_vm->vcpu_array, 0);
++		tracked_pages = sevstep_start_tracking(vcpu, param.track_mode);
++		pr_debug("KVM_USPT_TRACK_ALL: tracking %ld pages\n", tracked_pages);
++		r = 0;
++ }
++ break;
++ case KVM_USPT_UNTRACK_ALL: {
++ track_all_pages_t param;
++ void __user* argp = (void __user *)arg;
++ struct kvm_vcpu *vcpu;
++ long untrack_count;
++
++		if (copy_from_user(&param, argp, sizeof(param))) {
++			pr_warn("KVM_USPT_UNTRACK_ALL: error copying arguments, exiting\n");
++			return -EFAULT;
++		}
++
++		if (main_vm == NULL) {
++			pr_warn("KVM_USPT_UNTRACK_ALL: main_vm is not initialized, aborting!\n");
++			return -EFAULT;
++		}
++
++		if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
++			pr_warn("KVM_USPT_UNTRACK_ALL: "
++				"track_mode %d invalid, must be in range [%d,%d]\n",
++				param.track_mode, 0, KVM_PAGE_TRACK_MAX - 1);
++			return -EFAULT;
++		}
++
++		vcpu = xa_load(&main_vm->vcpu_array, 0);
++		untrack_count = sevstep_stop_tracking(vcpu, param.track_mode);
++		pr_debug("KVM_USPT_UNTRACK_ALL: untracked %ld pages\n", untrack_count);
++		r = 0;
++ }
++ break;
++ case KVM_USPT_SETUP_RETINSTR_PERF: {
++ retired_instr_perf_config_t config;
++ void __user* argp = (void __user *)arg;
++
++		pr_info("Received KVM_USPT_SETUP_RETINSTR_PERF ioctl!\n");
++		if (copy_from_user(&config, argp, sizeof(config))) {
++			pr_warn("KVM_USPT_SETUP_RETINSTR_PERF: copy from user failed\n");
++			return -EFAULT;
++		}
++
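++		/* program PMC 0 to count retired instructions (AMD event
++		 * 0xc0, umask 0x00) in guest mode only */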
++ cachepc_init_pmc(0, 0xc0, 0x00, PMC_GUEST, PMC_KERNEL | PMC_USER);
++
++ r = 0;
++ }
++ break;
++ case KVM_USPT_READ_RETINSTR_PERF: {
++ retired_instr_perf_t request;
++ void __user* argp = (void __user *)arg;
++
++		if (copy_from_user(&request, argp, sizeof(request))) {
++			pr_warn("KVM_USPT_READ_RETINSTR_PERF: copy from user failed\n");
++			return -EFAULT;
++		}
++
++		request.retired_instruction_count = cachepc_read_pmc(0);
++		if (copy_to_user(argp, &request, sizeof(request))) {
++			pr_warn("KVM_USPT_READ_RETINSTR_PERF: copy to user failed\n");
++			return -EFAULT;
++		}
++ r = 0;
++ }
++ break;
++ case KVM_READ_GUEST_MEMORY: {
++ read_guest_memory_t param;
++ int res;
++ void * buf;
++ void __user* argp = (void __user *)arg;
++
++		if (copy_from_user(&param, argp, sizeof(read_guest_memory_t))) {
++			pr_warn("KVM_READ_GUEST_MEMORY: error copying arguments, exiting\n");
++			return -EFAULT;
++		}
++
++		if (main_vm == NULL) {
++			pr_warn("KVM_READ_GUEST_MEMORY: main_vm is not initialized, aborting!\n");
++			return -EFAULT;
++		}
++
++		if (param.len > PAGE_SIZE) {
++			pr_warn("KVM_READ_GUEST_MEMORY: len may be at most page size\n");
++			return -EINVAL;
++		}
++
++ buf = kmalloc(param.len, GFP_KERNEL);
++ if (buf == NULL) {
++			pr_warn("KVM_READ_GUEST_MEMORY: failed to alloc memory\n");
++ return -ENOMEM;
++ }
++
++ if (param.wbinvd_cpu >= 0) {
++ wbinvd_on_cpu(param.wbinvd_cpu);
++ }
++ wbinvd_on_all_cpus();
++
++		res = read_physical(main_vm, param.gpa, buf,
++			param.len, param.decrypt_with_host_key);
++		if (res) {
++			pr_warn("KVM_READ_GUEST_MEMORY: read_physical failed with %d\n", res);
++			kfree(buf);
++			return -EINVAL;
++		}
++
++		if (copy_to_user(param.output_buffer, buf, param.len)) {
++			pr_warn("KVM_READ_GUEST_MEMORY: failed to copy buf to userspace\n");
++			kfree(buf);
++			return -EFAULT;
++		}
++
++		kfree(buf);
++		return 0;
++ }
++ break;
++ case KVM_USPT_RESET: {
++ struct kvm_vcpu *vcpu;
++
++		pr_info("Received KVM_USPT_RESET ioctl!\n");
++
++		if (main_vm == NULL) {
++			pr_warn("KVM_USPT_RESET: main_vm is not initialized, aborting!\n");
++			return -EFAULT;
++		}
++
++		sevstep_uspt_clear();
++		vcpu = xa_load(&main_vm->vcpu_array, 0);
++ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_EXEC);
++ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);
++ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_WRITE);
++ r = 0;
++ }
++ break;
++ case KVM_USPT_REGISTER_PID: {
++ userspace_ctx_t ctx;
++ void __user* argp = (void __user *)arg;
++ struct kvm_vcpu *vcpu;
++
++		pr_info("Received KVM_USPT_REGISTER_PID ioctl!\n");
++		if (copy_from_user(&ctx, argp, sizeof(userspace_ctx_t))) {
++			pr_warn("KVM_USPT_REGISTER_PID: copy from user failed\n");
++			return -EFAULT;
++		}
++
++		if (main_vm == NULL) {
++			pr_warn("KVM_USPT_REGISTER_PID: main_vm is not initialized, aborting!\n");
++			return -EFAULT;
++		}
++
++ sevstep_uspt_clear();
++ sevstep_uspt_initialize(ctx.pid, ctx.get_rip);
++
++		pr_info("KVM_USPT_REGISTER_PID: resetting page tracking\n");
++ vcpu = xa_load(&main_vm->vcpu_array, 0);
++ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_EXEC);
++ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);
++ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_WRITE);
++
++ return 0;
++ }
++ break;
++ case KVM_USPT_POLL_EVENT: {
++ void __user* argp = (void __user *)arg;
++ if (!sevstep_uspt_is_initialiized()) {
++			pr_warn("KVM_USPT_POLL_EVENT: userspace context not initialized, call KVM_USPT_REGISTER_PID\n");
++ return -EINVAL;
++ }
++ return sevstep_uspt_handle_poll_event(argp);
++ }
++ break;
++ case KVM_USPT_ACK_EVENT: {
++ ack_event_t ack_event;
++ void __user* argp = (void __user *)arg;
++
++ if (!sevstep_uspt_is_initialiized()) {
++			pr_warn("KVM_USPT_ACK_EVENT: userspace context not initialized, call KVM_USPT_REGISTER_PID\n");
++ return -EINVAL;
++ }
++ if (copy_from_user(&ack_event, argp, sizeof(ack_event_t))) {
++			pr_warn("KVM_USPT_ACK_EVENT: failed to copy args\n");
++			return -EFAULT;
++ }
++
++ return sevstep_uspt_handle_ack_event_ioctl(ack_event);
++ }
++ break;
+ case KVM_GET_API_VERSION:
+ if (arg)
+ goto out;
+@@ -5792,6 +6372,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
r = kvm_vfio_ops_init();
WARN_ON(r);
@@ -450,7 +1063,7 @@ index f2a63cb2658b..bfe4a57bcc10 100644
return 0;
out_unreg:
-@@ -5821,6 +5827,8 @@ void kvm_exit(void)
+@@ -5821,6 +6403,8 @@ void kvm_exit(void)
{
int cpu;
diff --git a/sevstep/uapi.h b/sevstep/uapi.h
@@ -72,7 +72,7 @@ typedef struct {
__u64 len;
__u8 decrypt_with_host_key;
__s32 wbinvd_cpu; // -1: do not flush; else logical cpu on which we flush
- __u64 output_buffer;
+ void *output_buffer;
} read_guest_memory_t;
typedef struct {
diff --git a/sevstep/uspt.c b/sevstep/uspt.c
@@ -440,7 +440,8 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
}
int
-sevstep_uspt_batch_tracking_stop(page_fault_event_t* results, uint64_t len, bool* error_occured)
+sevstep_uspt_batch_tracking_stop(page_fault_event_t* results,
+ uint64_t len, __u8* error_occured)
{
spin_lock(&batch_track_state_lock);
if (!batch_track_state.is_active) {
diff --git a/sevstep/uspt.h b/sevstep/uspt.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
-int sevstep_uspt_initialize(int pid,bool should_get_rip);
+int sevstep_uspt_initialize(int pid, bool should_get_rip);
int sevstep_uspt_is_initialiized(void);
void sevstep_uspt_clear(void);
@@ -43,7 +43,7 @@ uint64_t sevstep_uspt_batch_tracking_get_events_count(void);
 * error_occured is set (there should also be a dmesg, but this allows programmatic access);
* Caller can use sevstep_uspt_batch_tracking_get_events_count() to determine the amount
* of memory they should allocate for @results */
-int sevstep_uspt_batch_tracking_stop(page_fault_event_t *results, uint64_t len, bool *error_occured);
+int sevstep_uspt_batch_tracking_stop(page_fault_event_t *results, uint64_t len, __u8 *error_occured);
void sevstep_uspt_batch_tracking_handle_retrack(struct kvm_vcpu *vcpu, uint64_t current_fault_gfn);
void sevstep_uspt_batch_tracking_get_retrack_gfns(uint64_t **gfns, uint64_t *len, int *tracking_type);
bool sevstep_uspt_batch_tracking_in_progress(void);
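
Usage sketch (illustrative only, not part of this commit): a minimal userspace
client for the new KVM_READ_GUEST_MEMORY ioctl added to kvm_dev_ioctl() above.
The ioctl number and the read_guest_memory_t layout are assumed to come from
sevstep/uapi.h as patched in this commit; the program itself is hypothetical.

/* read 16 bytes of guest memory at gpa 0x1000 through /dev/kvm */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "sevstep/uapi.h"

int main(void)
{
	uint8_t buf[16];
	read_guest_memory_t param = {
		.gpa = 0x1000,
		.len = sizeof(buf),
		.decrypt_with_host_key = 1,
		.wbinvd_cpu = -1, /* do not flush */
		.output_buffer = buf,
	};
	int kvm_fd, i;

	kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* the handler reads from main_vm, i.e. the last-created VM */
	if (ioctl(kvm_fd, KVM_READ_GUEST_MEMORY, &param) != 0) {
		perror("KVM_READ_GUEST_MEMORY");
		close(kvm_fd);
		return 1;
	}

	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");

	close(kvm_fd);
	return 0;
}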