commit 22e4bb34a2022458a594738a846d6bc2f8607862
parent 4804320a2e3c89664afc93214d3ad81b3a36c670
Author: Louis Burda <quent.burda@gmail.com>
Date: Tue, 25 Oct 2022 14:29:44 +0200
Further prune and refactor sevstep
Diffstat:
10 files changed, 135 insertions(+), 1114 deletions(-)
diff --git a/.gitignore b/.gitignore
@@ -6,3 +6,4 @@ push.sh
*.o.d
*.out
*.swp
+compile_commands.json
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -25,23 +25,6 @@ uint64_t cachepc_regs_vm[16];
EXPORT_SYMBOL(cachepc_regs_tmp);
EXPORT_SYMBOL(cachepc_regs_vm);
-static long get_user_pages_remote_unlocked(struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages);
-
-static int read_physical(struct kvm *kvm, u64 gpa,
- void *buf, u64 size, bool decrypt_at_host);
-// static int write_physical(struct kvm *kvm, u64 gpa, u64 size,
-// const void *buf, bool write_plaintexts);
-// static int print_physical(struct kvm *kvm, u64 gpa,
-// u64 size, bool decrypt_at_host);
-// static int map_physical(struct kvm *kvm, u64 gpa,
-// bool decrypt_at_host, void **mapping, struct page **page);
-// static void unmap_physical(void **mapping, struct page **page);
-
-// static int read_mapped(u64 gpa, void *buff, u64 size, void *mapping);
-// static int write_mapped(u64 gpa, u64 size, const void *buf, void *mapping);
-
static void cachepc_kvm_prime_probe_test(void *p);
static void cachepc_kvm_stream_hwpf_test(void *p);
static void cachepc_kvm_single_access_test(void *p);
@@ -55,273 +38,14 @@ static int cachepc_kvm_init_pmc_ioctl(void __user *arg_user);
static int cachepc_kvm_read_pmc_ioctl(void __user *arg_user);
static int cachepc_kvm_read_counts_ioctl(void __user *arg_user);
static int cachepc_kvm_setup_pmc_ioctl(void __user *arg_user);
-static int cachepc_kvm_read_guest_memory_ioctl(void __user *arg_user);
static int cachepc_kvm_track_page_ioctl(void __user *arg_user);
-static int cachepc_kvm_batch_track_start_ioctl(void __user *arg_user);
-static int cachepc_kvm_batch_track_count_ioctl(void __user *arg_user);
-static int cachepc_kvm_batch_track_stop_ioctl(void __user *arg_user);
static int cachepc_kvm_track_all_ioctl(void __user *arg_user);
static int cachepc_kvm_untrack_all_ioctl(void __user *arg_user);
static int cachepc_kvm_uspt_reset_ioctl(void __user *arg_user);
static int cachepc_kvm_poll_event_ioctl(void __user *arg_user);
static int cachepc_kvm_uscpt_ack_event_ioctl(void __user *arg_user);
-long
-get_user_pages_remote_unlocked(struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages)
-{
- struct vm_area_struct **vmas = NULL;
- int locked = 1;
- long ret;
-
- down_read(&mm->mmap_lock);
- ret = get_user_pages_remote( mm, start, nr_pages,
- gup_flags, pages, vmas, &locked);
- if (locked) up_read(&mm->mmap_lock);
-
- return ret;
-}
-
-// static int
-// get_hpa_for_gpa(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
-// {
-// int ec;
-// unsigned long hva;
-// struct page *page = NULL;
-//
-// ec = 0;
-//
-// hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-// if (kvm_is_error_hva(hva)) {
-// pr_warn("in %s line %d get_hpa_for_gpa: translation to hva failed\n",
-// __FILE__, __LINE__);
-// ec = -100;
-// goto out;
-// }
-// if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, &page) != 1) {
-// pr_warn("in %s line %d get_hpa_for_gpa: failed to get page struct from mm",
-// __FILE__, __LINE__);
-// ec = -KVM_EINVAL;
-// goto out;
-// }
-//
-// (*hpa) = (page_to_pfn(page) << 12) + (gpa & 0xfff);
-//
-// out:
-// put_page(page);
-//
-// return ec;
-// }
-
-int
-read_physical(struct kvm *kvm, u64 gpa, void *buf, u64 size,
- bool decrypt_at_host)
-{
- struct page *page = NULL;
- void *ptr_page = NULL;
- unsigned long hva;
- uint64_t offset;
- int ec;
-
- offset = (gpa & 0xFFF);
-
- if ((offset + size - 1) > 0xFFF) {
- printk("read_phyiscal: trying to read "
- "beyond page (offset+size=%016llx)\n",
- offset + size);
- return -EINVAL;
- }
-
- ec = 0;
-
- hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-
- if (kvm_is_error_hva(hva)) {
- pr_warn("read_physical: translation to hva failed( gpa was "
- "%016llx hva is %016lx\n", gpa, hva);
- ec = -100;
- goto out;
- }
-
- if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, &page) != 1) {
- pr_warn("read_physical: failed to get page struct from mm\n");
- ec = -100;
- goto out;
- }
-
- if (decrypt_at_host) {
- // map with encryption bit. Content is decrypted with host key. If sev is
- // disabled but sme is enable this allows to read the plaintext.
- ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
- } else {
- // map without encryption bit to read ciphertexts
- ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
- }
-
- /*printk("value of buf ptr = %p\t value of ptr_page=%p\n", buf,
- ptr_page + offset);*/
- memcpy(buf, ptr_page + offset, size);
-
-out:
- if (ptr_page)
- vunmap(ptr_page);
- if (page)
- put_page(page);
-
- return ec;
-}
-
-// int
-// write_physical(struct kvm *kvm, u64 gpa, u64 size,
-// const void *buf, bool write_plaintexts)
-// {
-// int ec;
-// unsigned long hva;
-// struct page *page;
-// void *ptr_page;
-// uint64_t offset;
-//
-// offset = (gpa & 0xFFF);
-//
-// if ((offset + size - 1) > 0xFFF) {
-// pr_warn("write_physical: trying to write "
-// "beyond page(offset+size=%016llx)\n",
-// offset + size);
-// return -EINVAL;
-// }
-//
-// ec = 0;
-// hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-//
-// if (kvm_is_error_hva(hva))
-// return -KVM_EINVAL;
-//
-// if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, FOLL_WRITE, &page) != 1)
-// return -KVM_EINVAL;
-//
-// if (write_plaintexts) {
-// // map with encrytpion bit to aplly host encryption. Usefull if sev is
-// // disabled but sme is enabled and we want to write a certain value into a
-// // page
-// ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
-// } else {
-// // map without encryption bit to write ciphertexts
-// ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
-// }
-//
-// memcpy(ptr_page + offset, buf, size);
-//
-// vunmap(ptr_page);
-// put_page(page);
-// return ec;
-// }
-
-// int
-// print_physical(struct kvm *kvm, u64 gpa, u64 size, bool decrypt_at_host)
-// {
-// u8 *buf;
-// int i, err;
-//
-// buf = kmalloc(size, GFP_ATOMIC);
-//
-// err = read_physical(kvm, gpa, buf, size, decrypt_at_host);
-// if (err != 0) {
-// pr_warn("at %s line %d: read_physical "
-// "failed with: %d\n", __FILE__, __LINE__, err);
-// }
-// for (i = 0; i < size; i++) {
-// // print bytewise with line break every 16 bytes
-// if (i % 16 == 0) {
-// printk("%02x ", buf[i]);
-// } else {
-// printk(KERN_CONT " %02x ", buf[i]);
-// }
-// }
-// printk("\n");
-//
-// kfree(buf);
-//
-// return err;
-// }
-
-// int
-// map_physical(struct kvm *kvm, u64 gpa, bool decrypt_at_host,
-// void **mapping, struct page **page)
-// {
-//
-// unsigned long hva;
-// uint64_t offset;
-// int err;
-//
-// offset = (gpa & 0xFFF);
-//
-// err = 0;
-//
-// hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-//
-// if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, page) != 1) {
-// pr_warn("map_physical: failed to get page struct from mm");
-// err = -100;
-// return err;
-// }
-//
-// if (decrypt_at_host) {
-// // map with encryption bit. Content is decrypted with host key. If sev is
-// // disabled but sme is enable this allows to read the plaintext.
-// (*mapping) = vmap(page, 1, 0, PAGE_KERNEL);
-// } else {
-// // map without encryption bit to read ciphertexts
-// (*mapping) = vmap(page, 1, 0, __pgprot(__PAGE_KERNEL));
-// }
-//
-// return err;
-// }
-
-// void
-// unmap_physical(void **mapping, struct page **page)
-// {
-// if (*mapping)
-// vunmap(*mapping);
-// if (*page)
-// put_page(*page);
-// }
-
-// int
-// read_mapped(u64 gpa, void *buff, u64 size, void *mapping)
-// {
-// uint64_t offset;
-// offset = (gpa & 0xFFF);
-//
-// if ((offset + size - 1) > 0xFFF) {
-// pr_warn("read_mapped: trying to read "
-// "beyond page (offset+size=%016llx)\n",
-// offset + size);
-// return -EINVAL;
-// }
-// memcpy(buff, mapping + offset, size);
-//
-// return 0;
-// }
-
-// int
-// write_mapped(u64 gpa, u64 size, const void *buf, void *mapping)
-// {
-// uint64_t offset;
-//
-// offset = (gpa & 0xFFF);
-//
-// if ((offset + size - 1) > 0xFFF) {
-// printk("write_physical: trying to write beyond page(offset+size=%016llx)\n",
-// offset + size);
-// return -EINVAL;
-// }
-// memcpy(mapping + offset, buf, size);
-//
-// return 0;
-// }
-
void
cachepc_kvm_prime_probe_test(void *p)
{
@@ -372,6 +96,7 @@ cachepc_kvm_stream_hwpf_test(void *p)
uint32_t count;
uint32_t *arg;
uint32_t i, max;
+ bool pass;
arg = p;
@@ -638,174 +363,28 @@ cachepc_kvm_setup_pmc_ioctl(void __user *arg_user)
}
int
-cachepc_kvm_read_guest_memory_ioctl(void __user *arg_user)
-{
- read_guest_memory_t param;
- void * buf;
- int res;
-
- if (!arg_user) return -EINVAL;
-
-	if (copy_from_user(&param, arg_user, sizeof(read_guest_memory_t))) {
- printk(KERN_CRIT
- "KVM_READ_GUEST_MEMORY: error copying arguments, exiting\n");
- return -EFAULT;
- }
-
- if (param.len > PAGE_SIZE) {
- printk("KVM_READ_GUEST_MEMORY len may be at most page size");
- }
-
- buf = kmalloc(param.len, GFP_KERNEL);
- if (buf == NULL) {
- printk("KVM_READ_GUEST_MEMORY: failed to alloc memory");
- return -ENOMEM;
- }
-
- if (param.wbinvd_cpu >= 0) {
- wbinvd_on_cpu(param.wbinvd_cpu);
- }
- wbinvd_on_all_cpus();
-
- res = read_physical(main_vm, param.gpa, buf,
- param.len, param.decrypt_with_host_key);
- if (res) {
- printk("KVM_READ_GUEST_MEMORY: read_physical failed with %d\n", res);
- return -EINVAL;
- }
-
- if (copy_to_user(param.output_buffer, buf, param.len)) {
- printk("KVM_READ_GUEST_MEMORY: failed to copy buf to userspace");
- }
-
- return 0;
-}
-
-int
cachepc_kvm_track_page_ioctl(void __user *arg_user)
{
- track_page_param_t param;
+ struct cpc_track_config cfg;
struct kvm_vcpu *vcpu;
if (!arg_user) return -EINVAL;
-	if (copy_from_user(&param, arg_user, sizeof(param))) {
- pr_warn("KVM_TRACK_PAGE: error copying arguments, exiting\n");
+ if (copy_from_user(&cfg, arg_user, sizeof(cfg)))
return -EFAULT;
- }
- if (main_vm == NULL) {
- pr_warn("KVM_TRACK_PAGE: main_vm is not initialized, aborting!\n");
+ if (main_vm == NULL)
return -EFAULT;
- }
- if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
- pr_warn("KVM_TRACK_PAGE track_mode %d invalid, "
- "must be in range [%d,%d]", param.track_mode,
- 0, KVM_PAGE_TRACK_MAX);
- return -EFAULT;
- }
+ if (cfg.track_mode < 0 || cfg.track_mode >= KVM_PAGE_TRACK_MAX)
+ return -EINVAL;
vcpu = xa_load(&main_vm->vcpu_array, 0);
- if (!sevstep_track_single_page(vcpu,
- param.gpa >> PAGE_SHIFT, param.track_mode)) {
- printk("KVM_TRACK_PAGE: sevstep_track_single_page failed");
- }
-
- return 0;
-}
-
-int
-cachepc_kvm_batch_track_start_ioctl(void __user *arg_user)
-{
- batch_track_config_t param;
- int ret;
-
- if (!arg_user) return -EINVAL;
-
-	if (copy_from_user(&param, arg_user, sizeof(param))) {
- pr_warn("KVM_CPC_BATCH_TRACK_START: "
- "error copying arguments, exiting\n");
- return -EFAULT;
- }
-
- ret = sevstep_uspt_batch_tracking_start(param.tracking_type,
- param.expected_events, param.perf_cpu, param.retrack);
- if (ret != 0) {
- pr_warn("KVM_CPC_BATCH_TRACK_START: failed\n");
- return ret;
- }
-
- return 0;
-}
-
-int
-cachepc_kvm_batch_track_count_ioctl(void __user *arg_user)
-{
- batch_track_event_count_t result;
-
- if (!arg_user) return -EINVAL;
-
- result.event_count = sevstep_uspt_batch_tracking_get_events_count();
-
- if (copy_to_user(arg_user, &result, sizeof(result))) {
- pr_warn("KVM_CPC_BATCH_TRACK_EVENT_COUNT: "
- "error copying result to user, exiting\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-int
-cachepc_kvm_batch_track_stop_ioctl(void __user *arg_user)
-{
- batch_track_stop_and_get_t param;
- page_fault_event_t* buf;
- size_t buflen;
- void __user* inner_user_out_buf;
- int ret;
-
- if (!arg_user) return -EINVAL;
-
-	if (copy_from_user(&param, arg_user, sizeof(param))) {
- pr_warn("CachePC: BATCH_TRACK_STOP: "
- "error copying arguments, exiting\n");
- return -EFAULT;
- }
- inner_user_out_buf = param.out_buf;
-
- buflen = sizeof(page_fault_event_t) * param.len;
- buf = vmalloc(buflen);
- if (buf == NULL) {
- pr_warn("CachePC: BATCH_TRACK_STOP: OOM\n");
+ if (!sevstep_track_single(vcpu,
+ cfg.gpa >> PAGE_SHIFT, cfg.track_mode)) {
+ printk("KVM_TRACK_PAGE: sevstep_track_single failed");
return -EFAULT;
}
- param.out_buf = buf;
-
- ret = sevstep_uspt_batch_tracking_stop(buf, param.len,
-		&param.error_during_batch);
- if (ret != 0) {
- pr_warn("CachePC: BATCH_TRACK_STOP: Error\n");
- vfree(buf);
- return -EFAULT;
- }
-
-	if (copy_to_user(arg_user, &param, sizeof(param))) {
- pr_warn("CachePC: BATCH_TRACK_STOP: "
- "Error copying result to user\n");
- vfree(buf);
- return -EFAULT;
- }
-
- if (copy_to_user(inner_user_out_buf, buf, buflen)) {
- pr_warn("CachePC: BATCH_TRACK_STOP: "
- "Error copying result to user\n");
- vfree(buf);
- return -EFAULT;
- }
-
- vfree(buf);
return 0;
}
@@ -813,31 +392,23 @@ cachepc_kvm_batch_track_stop_ioctl(void __user *arg_user)
int
cachepc_kvm_track_all_ioctl(void __user *arg_user)
{
- track_all_pages_t param;
struct kvm_vcpu *vcpu;
- long tracked_pages;
+ uint64_t track_mode;
if (!arg_user) return -EINVAL;
-	if (copy_from_user(&param, arg_user, sizeof(param))) {
- pr_warn("KVM_CPC_TRACK_ALL: error copying arguments, exiting\n");
+ if (copy_from_user(&track_mode, arg_user, sizeof(track_mode)))
return -EFAULT;
- }
- if (main_vm == NULL) {
- pr_warn("KVM_CPC_TRACK_ALL: main_vm is not initialized, aborting!\n");
+ if (main_vm == NULL)
return -EFAULT;
- }
- if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
- pr_warn("KVM_CPC_TRACK_ALL: "
- "track_mode %d invalid, must be in range [%d,%d]\n",
- param.track_mode, 0, KVM_PAGE_TRACK_MAX);
- return -EFAULT;
- }
+ if (track_mode < 0 || track_mode >= KVM_PAGE_TRACK_MAX)
+ return -EINVAL;
vcpu = xa_load(&main_vm->vcpu_array, 0);
- tracked_pages = sevstep_start_tracking(vcpu, param.track_mode);
+ if (!sevstep_track_all(vcpu, track_mode))
+ return -EFAULT;
return 0;
}
@@ -845,31 +416,23 @@ cachepc_kvm_track_all_ioctl(void __user *arg_user)
int
cachepc_kvm_untrack_all_ioctl(void __user *arg_user)
{
- track_all_pages_t param;
struct kvm_vcpu *vcpu;
- long untrack_count;
+ uint64_t track_mode;
if (!arg_user) return -EINVAL;
-	if (copy_from_user(&param, arg_user, sizeof(param))) {
- printk(KERN_CRIT "KVM_CPC_UNTRACK_ALL: error copying arguments, exiting\n");
+ if (copy_from_user(&track_mode, arg_user, sizeof(track_mode)))
return -EFAULT;
- }
- if (main_vm == NULL) {
- printk("KVM_CPC_UNTRACK_ALL: main_vm is not initialized, aborting!\n");
+ if (main_vm == NULL)
return -EFAULT;
- }
- if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
- printk("KVM_CPC_UNTRACK_ALL: track_mode %d invalid, "
- "must be in range [%d,%d]", param.track_mode,
- 0, KVM_PAGE_TRACK_MAX);
- return -EFAULT;
- }
+ if (track_mode < 0 || track_mode >= KVM_PAGE_TRACK_MAX)
+ return -EINVAL;
vcpu = xa_load(&main_vm->vcpu_array, 0);
- untrack_count = sevstep_stop_tracking(vcpu, param.track_mode);
+ if (!sevstep_untrack_all(vcpu, track_mode))
+ return -EFAULT;
return 0;
}
@@ -881,9 +444,9 @@ cachepc_kvm_uspt_reset_ioctl(void __user *arg_user)
sevstep_uspt_clear();
vcpu = xa_load(&main_vm->vcpu_array, 0);
- sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_EXEC);
- sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);
- sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_WRITE);
+ sevstep_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
+ sevstep_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
+ sevstep_untrack_all(vcpu, KVM_PAGE_TRACK_WRITE);
return 0;
}
@@ -891,10 +454,8 @@ cachepc_kvm_uspt_reset_ioctl(void __user *arg_user)
int
cachepc_kvm_poll_event_ioctl(void __user *arg_user)
{
- if (!sevstep_uspt_is_initialiized()) {
- pr_warn("CachePC: userspace context uninitialized\n");
+ if (!sevstep_uspt_is_initialiized())
return -EINVAL;
- }
return sevstep_uspt_handle_poll_event(arg_user);
}
@@ -902,19 +463,17 @@ cachepc_kvm_poll_event_ioctl(void __user *arg_user)
int
cachepc_kvm_uscpt_ack_event_ioctl(void __user *arg_user)
{
- ack_event_t ack_event;
+ uint64_t eventid;
if (!arg_user) return -EINVAL;
if (!sevstep_uspt_is_initialiized())
return -EINVAL;
- if (copy_from_user(&ack_event, arg_user, sizeof(ack_event_t))) {
- printk("ACK_EVENT failed to copy args");
+ if (copy_from_user(&eventid, arg_user, sizeof(eventid)))
return -EINVAL;
- }
- return sevstep_uspt_handle_ack_event_ioctl(ack_event);
+ return sevstep_uspt_handle_ack_event_ioctl(eventid);
}
long
@@ -938,24 +497,15 @@ cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
return cachepc_kvm_setup_pmc_ioctl(arg_user);
case KVM_CPC_TRACK_PAGE:
return cachepc_kvm_track_page_ioctl(arg_user);
- case KVM_CPC_BATCH_TRACK_START:
- return cachepc_kvm_batch_track_start_ioctl(arg_user);
- case KVM_CPC_BATCH_TRACK_EVENT_COUNT:
- return cachepc_kvm_batch_track_count_ioctl(arg_user);
- case KVM_CPC_BATCH_TRACK_STOP:
- return cachepc_kvm_batch_track_stop_ioctl(arg_user);
case KVM_CPC_TRACK_ALL:
return cachepc_kvm_track_all_ioctl(arg_user);
case KVM_CPC_UNTRACK_ALL:
return cachepc_kvm_untrack_all_ioctl(arg_user);
- case KVM_CPC_READ_GUEST_MEMORY:
- return cachepc_kvm_read_guest_memory_ioctl(arg_user);
case KVM_CPC_RESET_TRACKING:
return cachepc_kvm_uspt_reset_ioctl(arg_user);
case KVM_CPC_POLL_EVENT:
return cachepc_kvm_poll_event_ioctl(arg_user);
case KVM_CPC_ACK_EVENT:
- pr_warn("Cachepc: CPC_ACK_EVENT called");
return cachepc_kvm_uscpt_ack_event_ioctl(arg_user);
default:
return kvm_arch_dev_ioctl(file, ioctl, arg);
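With the batch-track and guest-memory handlers removed, the dispatch above is the complete ioctl surface. A minimal userspace sketch of the pruned interface (assuming kvm_dev is an open fd for the KVM device, as in test/sevstep.c further down; request names and argument types per the uapi.h hunk below):

	uint64_t track_mode;
	int ret;

	/* track every guest page for access, run, then untrack */
	track_mode = KVM_PAGE_TRACK_ACCESS;
	ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_mode);
	if (ret == -1) err(1, "ioctl TRACK_ALL");
	/* ... run guest, consume events ... */
	ret = ioctl(kvm_dev, KVM_CPC_UNTRACK_ALL, &track_mode);
	if (ret == -1) err(1, "ioctl UNTRACK_ALL");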
diff --git a/cachepc/mmu.c b/cachepc/mmu.c
@@ -28,24 +28,11 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
if (was_tracked) {
have_rip = false;
- if (sevstep_uspt_should_get_rip())
-		have_rip = sevstep_get_rip_kvm_vcpu(vcpu, &current_rip) == 0;
- if (sevstep_uspt_batch_tracking_in_progress()) {
- send_err = sevstep_uspt_batch_tracking_save(fault->gfn << PAGE_SHIFT,
- fault->error_code, have_rip, current_rip);
- if (send_err) {
- pr_warn("Sevstep: uspt_batch_tracking_save failed with %d\n",
- send_err);
- }
- sevstep_uspt_batch_tracking_handle_retrack(vcpu, fault->gfn);
- sevstep_uspt_batch_tracking_inc_event_idx();
- } else {
- send_err = sevstep_uspt_send_and_block(fault->gfn << PAGE_SHIFT,
- fault->error_code, have_rip, current_rip);
- if (send_err) {
- printk("Sevstep: uspt_send_and_block failed with %d\n",
- send_err);
- }
+ send_err = sevstep_uspt_send_and_block(fault->gfn << PAGE_SHIFT,
+ fault->error_code, have_rip, current_rip);
+ if (send_err) {
+ printk("Sevstep: uspt_send_and_block failed with %d\n",
+ send_err);
}
}
}
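Since the diff interleaving obscures it, the post-image of this hunk is the single synchronous path below (reconstructed from the context and added lines above); have_rip now stays false because sevstep_get_rip_kvm_vcpu() was dropped:

	if (was_tracked) {
		have_rip = false;
		send_err = sevstep_uspt_send_and_block(fault->gfn << PAGE_SHIFT,
			fault->error_code, have_rip, current_rip);
		if (send_err) {
			printk("Sevstep: uspt_send_and_block failed with %d\n",
				send_err);
		}
	}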
diff --git a/cachepc/sevstep.c b/cachepc/sevstep.c
@@ -45,138 +45,51 @@ struct kvm* main_vm;
EXPORT_SYMBOL(main_vm);
bool
-sevstep_track_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+sevstep_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode)
{
- int idx;
- bool ret;
struct kvm_memory_slot *slot;
+ int idx;
- ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
-
- if (mode == KVM_PAGE_TRACK_ACCESS) {
- pr_warn("Adding gfn: %016llx to access page track pool\n", gfn);
- }
-
- if (mode == KVM_PAGE_TRACK_WRITE) {
- pr_warn("Adding gfn: %016llx to write page track pool\n", gfn);
- }
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm,slot, gfn, mode)) {
+ if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
write_lock(&vcpu->kvm->mmu_lock);
kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
write_unlock(&vcpu->kvm->mmu_lock);
- ret = true;
- } else {
- pr_warn("Failed to track %016llx because ", gfn);
- if (slot == NULL) {
- printk(KERN_CONT "slot was null");
- }
- if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
- printk(KERN_CONT "page is already tracked");
- }
- printk(KERN_CONT "\n");
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
- return ret;
+ return slot != NULL;
}
-EXPORT_SYMBOL(sevstep_track_single_page);
+EXPORT_SYMBOL(sevstep_track_single);
bool
-sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+sevstep_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode)
{
- int idx;
- bool ret;
struct kvm_memory_slot *slot;
+ int idx;
- ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (mode == KVM_PAGE_TRACK_ACCESS) {
- pr_warn("Removing gfn: %016llx from access page track pool\n", gfn);
- } else if (mode == KVM_PAGE_TRACK_WRITE) {
- pr_warn("Removing gfn: %016llx from write page track pool\n", gfn);
- }
-
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
write_lock(&vcpu->kvm->mmu_lock);
kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
write_unlock(&vcpu->kvm->mmu_lock);
- ret = true;
- } else {
- pr_warn("Failed to untrack %016llx because ", gfn);
- if (slot == NULL) {
- printk(KERN_CONT "slot was null");
- } else if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
- printk(KERN_CONT "page track was not active");
- }
- printk(KERN_CONT "\n");
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
- return ret;
+ return slot != NULL;
}
-EXPORT_SYMBOL(sevstep_untrack_single_page);
-
-bool
-sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
- int idx;
- bool ret;
- struct kvm_memory_slot *slot;
-
- ret = false;
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (slot != NULL) {
- write_lock(&vcpu->kvm->mmu_lock);
- sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm, slot, gfn,
- PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_ACCESSED);
- write_unlock(&vcpu->kvm->mmu_lock);
- ret = true;
- }
-
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
- return ret;
-}
-EXPORT_SYMBOL(sevstep_reset_accessed_on_page);
-
-bool
-sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
- int idx;
- bool ret;
- struct kvm_memory_slot *slot;
-
- ret = false;
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (slot != NULL) {
- write_lock(&vcpu->kvm->mmu_lock);
- sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm, slot, gfn,
- PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_EXEC);
- write_unlock(&vcpu->kvm->mmu_lock);
- ret = true;
- }
-
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
- return ret;
-}
-EXPORT_SYMBOL(sevstep_clear_nx_on_page);
+EXPORT_SYMBOL(sevstep_untrack_single);
long
-sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
+sevstep_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
struct kvm_memory_slot *slot;
struct kvm_memslots *slots;
@@ -184,7 +97,7 @@ sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
int bkt;
u64 gfn;
- pr_warn("Sevstep: Start tracking %i\n", mode);
+ pr_warn("Sevstep: Start tracking (mode:%i)\n", mode);
slots = kvm_vcpu_memslots(vcpu);
kvm_for_each_memslot(slot, bkt, slots) {
@@ -202,41 +115,32 @@ sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
return count;
}
-EXPORT_SYMBOL(sevstep_start_tracking);
+EXPORT_SYMBOL(sevstep_track_all);
long
-sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
+sevstep_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
struct kvm_memory_slot *slot;
- struct kvm_memory_slot *first_memslot;
- struct rb_node *node;
- u64 iterator, iterat_max;
+ struct kvm_memslots *slots;
long count = 0;
- int idx;
+ int bkt;
+ u64 gfn;
- pr_warn("Sevstep: Stop tracking %i\n", mode);
-
- node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
- first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
- iterat_max = first_memslot->base_gfn + + vcpu->kvm->nr_memslot_pages;//first_memslot->npages;
- for (iterator = 0; iterator < iterat_max; iterator++) {
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- slot = kvm_vcpu_gfn_to_memslot(vcpu, iterator);
- if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, iterator, mode)) {
- write_lock(&vcpu->kvm->mmu_lock);
- kvm_slot_page_track_remove_page(vcpu->kvm, slot, iterator, mode);
- write_unlock(&vcpu->kvm->mmu_lock);
- count++;
+ pr_warn("Sevstep: Stop tracking (mode:%i)\n", mode);
+
+ slots = kvm_vcpu_memslots(vcpu);
+ kvm_for_each_memslot(slot, bkt, slots) {
+ for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
+ if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+ write_lock(&vcpu->kvm->mmu_lock);
+ kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
+ write_unlock(&vcpu->kvm->mmu_lock);
+ count++;
+ }
}
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
return count;
}
-EXPORT_SYMBOL(sevstep_stop_tracking);
+EXPORT_SYMBOL(sevstep_untrack_all);
-int
-sevstep_get_rip_kvm_vcpu(struct kvm_vcpu *vcpu, uint64_t *rip)
-{
- return 0;
-}
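sevstep_track_all() and sevstep_untrack_all() now share the same memslot walk and differ only in the active-check and the add/remove call. A possible further refactor, sketched here only (not part of this commit), would factor the walk into one helper:

	/* sketch: shared memslot walk, parameterized on track vs. untrack */
	static long
	sevstep_set_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode,
		bool track)
	{
		struct kvm_memory_slot *slot;
		struct kvm_memslots *slots;
		long count = 0;
		int bkt;
		u64 gfn;

		slots = kvm_vcpu_memslots(vcpu);
		kvm_for_each_memslot(slot, bkt, slots) {
			for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++) {
				/* skip pages already in the desired state */
				if (kvm_slot_page_track_is_active(vcpu->kvm,
						slot, gfn, mode) == track)
					continue;
				write_lock(&vcpu->kvm->mmu_lock);
				if (track)
					kvm_slot_page_track_add_page(vcpu->kvm,
						slot, gfn, mode);
				else
					kvm_slot_page_track_remove_page(vcpu->kvm,
						slot, gfn, mode);
				write_unlock(&vcpu->kvm->mmu_lock);
				count++;
			}
		}

		return count;
	}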
diff --git a/cachepc/sevstep.h b/cachepc/sevstep.h
@@ -12,6 +12,7 @@
extern struct kvm* main_vm;
+/* defined in mmu.c as they rely on static mmu-internal functions */
bool sevstep_spte_protect(u64 *sptep,
bool pt_protect, enum kvm_page_track_mode mode);
bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head,
@@ -19,14 +20,10 @@ bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head,
bool sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
uint64_t gfn, int min_level, enum kvm_page_track_mode mode);
-bool sevstep_track_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+bool sevstep_track_single(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode);
-bool sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+bool sevstep_untrack_single(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode);
-bool sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn);
-bool sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn);
-long sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
-long sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
-
-int sevstep_get_rip_kvm_vcpu(struct kvm_vcpu *vcpu, uint64_t *rip);
+long sevstep_track_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
+long sevstep_untrack_all(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -15,17 +15,13 @@
#define KVM_CPC_READ_PMC _IOWR(KVMIO, 0x23, __u32)
#define KVM_CPC_READ_COUNTS _IOR(KVMIO, 0x24, __u64)
#define KVM_CPC_SETUP_PMC _IO(KVMIO, 0x25)
-#define KVM_CPC_READ_GUEST_MEMORY _IOWR(KVMIO, 0x26, read_guest_memory_t)
-#define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x30, track_page_param_t)
-#define KVM_CPC_POLL_EVENT _IOWR(KVMIO, 0x31, page_fault_event_t)
-#define KVM_CPC_ACK_EVENT _IOWR(KVMIO, 0x32, ack_event_t)
+#define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x30, struct cpc_track_config)
+#define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x31, __u64)
+#define KVM_CPC_UNTRACK_ALL _IOWR(KVMIO, 0x32, __u64)
#define KVM_CPC_RESET_TRACKING _IO(KVMIO, 0x33)
-#define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x34, track_all_pages_t)
-#define KVM_CPC_UNTRACK_ALL _IOWR(KVMIO, 0x35, track_all_pages_t)
-#define KVM_CPC_BATCH_TRACK_START _IOWR(KVMIO, 0x36, batch_track_config_t)
-#define KVM_CPC_BATCH_TRACK_STOP _IOWR(KVMIO, 0x37, batch_track_stop_and_get_t)
-#define KVM_CPC_BATCH_TRACK_EVENT_COUNT _IOWR(KVMIO, 0x38, batch_track_event_count_t)
+#define KVM_CPC_POLL_EVENT _IOWR(KVMIO, 0x34, struct cpc_track_event)
+#define KVM_CPC_ACK_EVENT _IOWR(KVMIO, 0x35, __u64)
#define CPC_USPT_POLL_EVENT_NO_EVENT 1000
#define CPC_USPT_POLL_EVENT_GOT_EVENT 0
@@ -39,8 +35,13 @@ enum kvm_page_track_mode {
KVM_PAGE_TRACK_MAX,
};
-typedef struct {
- __u64 id; // filled automatically
+struct cpc_track_config {
+ __u64 gpa;
+ __s32 track_mode;
+};
+
+struct cpc_track_event {
+ __u64 id; /* filled automatically */
__u64 faulted_gpa;
__u32 error_code;
__u8 have_rip_info;
@@ -48,57 +49,4 @@ typedef struct {
__u64 ns_timestamp;
__u8 have_retired_instructions;
__u64 retired_instructions;
-} page_fault_event_t;
-
-typedef struct {
- __s32 tracking_type;
- __u64 expected_events;
- __s32 perf_cpu;
- __u8 retrack;
-} batch_track_config_t;
-
-typedef struct {
- __u64 event_count;
-} batch_track_event_count_t;
-
-typedef struct {
- page_fault_event_t* out_buf;
- __u64 len;
- __u8 error_during_batch;
-} batch_track_stop_and_get_t;
-
-typedef struct {
- __s32 cpu; // cpu on which we want to read the counter
- __u64 retired_instruction_count; // result param
-} retired_instr_perf_t;
-
-typedef struct {
- __s32 cpu; // cpu on which counter should be programmed
-} retired_instr_perf_config_t;
-
-typedef struct {
- __u64 gpa;
- __u64 len;
- __u8 decrypt_with_host_key;
- __s32 wbinvd_cpu; // -1: do not flush; else logical cpu on which we flush
- void *output_buffer;
-} read_guest_memory_t;
-
-typedef struct {
- __s32 pid;
- __u8 get_rip;
-} userspace_ctx_t;
-
-typedef struct {
- __u64 id;
-} ack_event_t;
-
-typedef struct {
- __u64 gpa;
- __s32 track_mode;
-} track_page_param_t;
-
-typedef struct {
- __s32 track_mode;
-} track_all_pages_t;
-
+};
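With the wrapper typedefs gone, callers pass the new structs and scalars directly. A minimal sketch of single-page tracking against the renumbered interface (hypothetical gpa value; definitions as above):

	struct cpc_track_config cfg;
	int ret;

	cfg.gpa = 0x1000; /* hypothetical guest-physical address */
	cfg.track_mode = KVM_PAGE_TRACK_WRITE;
	ret = ioctl(kvm_dev, KVM_CPC_TRACK_PAGE, &cfg);
	if (ret == -1) err(1, "ioctl TRACK_PAGE");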
diff --git a/cachepc/uspt.c b/cachepc/uspt.c
@@ -1,6 +1,7 @@
#include "uspt.h"
#include "sevstep.h"
#include "cachepc.h"
+#include "uapi.h"
#include <linux/kvm.h>
#include <linux/timekeeping.h>
@@ -12,85 +13,38 @@
#define ARRLEN(x) (sizeof(x)/sizeof((x)[0]))
-typedef struct {
- bool is_active;
- int tracking_type;
- bool retrack;
-
- int perf_cpu;
-
- uint64_t gfn_retrack_backlog[10];
- int gfn_retrack_backlog_next_idx;
-
- page_fault_event_t * events;
- uint64_t event_next_idx;
- uint64_t events_size;
-
- bool error_occured;
-} batch_track_state_t;
-
-typedef struct {
- uint64_t idx_for_last_perf_reading;
- uint64_t last_perf_reading;
- uint64_t delta_valid_idx;
- uint64_t delta;
-} perf_state_t;
-
-// crude sync mechanism. don't know a good way to act on errors yet.
-static uint64_t last_sent_event_id = 1;
-static uint64_t last_acked_event_id = 1;
+static uint64_t last_sent_eventid;
+static uint64_t last_acked_eventid;
DEFINE_RWLOCK(event_lock);
-static page_fault_event_t sent_event;
-static int have_event = 0;
-
-static bool get_rip = true;
-
-static int was_init = 0;
+static struct cpc_track_event sent_event;
+static bool have_event;
-DEFINE_SPINLOCK(batch_track_state_lock);
-static batch_track_state_t batch_track_state;
-
-static perf_state_t perf_state;
-
-static uint64_t perf_state_update_and_get_delta(uint64_t current_event_idx);
+static bool uspt_init = false;
void
sevstep_uspt_clear(void)
{
write_lock(&event_lock);
- was_init = 1;
- last_sent_event_id = 1;
- last_acked_event_id = 1;
- have_event = 0;
- get_rip = false;
+ uspt_init = true;
+ last_sent_eventid = 1;
+ last_acked_eventid = 1;
+ have_event = false;
write_unlock(&event_lock);
}
-int
-sevstep_uspt_is_initialiized()
-{
- return was_init;
-}
-
bool
-sevstep_uspt_should_get_rip()
+sevstep_uspt_is_initialiized()
{
- bool tmp;
-
- read_lock(&event_lock);
- tmp = get_rip;
- read_unlock(&event_lock);
-
- return tmp;
+ return uspt_init;
}
int
sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
bool have_rip, uint64_t rip)
{
- ktime_t abort_after;
- page_fault_event_t message_for_user;
+ struct cpc_track_event event;
+ ktime_t deadline;
read_lock(&event_lock);
if (!sevstep_uspt_is_initialiized()) {
@@ -101,36 +55,34 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
read_unlock(&event_lock);
write_lock(&event_lock);
- if (last_sent_event_id != last_acked_event_id) {
+ if (last_sent_eventid != last_acked_eventid) {
pr_warn("Sevstep: uspt_send_and_block: "
"event id_s out of sync, aborting. Fix this later\n");
write_unlock(&event_lock);
return 1;
} else {
- // TODO: handle overflow
- last_sent_event_id++;
- }
- message_for_user.id = last_sent_event_id;
- message_for_user.faulted_gpa = faulted_gpa;
- message_for_user.error_code = error_code;
- message_for_user.have_rip_info = have_rip;
- message_for_user.rip = rip;
- message_for_user.ns_timestamp = ktime_get_real_ns();
- message_for_user.have_retired_instructions = false;
-
- have_event = 1;
- sent_event = message_for_user;
-
+ last_sent_eventid++;
+ }
+ event.id = last_sent_eventid;
+ event.faulted_gpa = faulted_gpa;
+ event.error_code = error_code;
+ event.have_rip_info = have_rip;
+ event.rip = rip;
+ event.ns_timestamp = ktime_get_real_ns();
+ event.have_retired_instructions = false;
+
+ have_event = true;
+ sent_event = event;
write_unlock(&event_lock);
/* wait for ack with timeout */
pr_warn("Sevstep: uspt_send_and_block: Begin wait for event ack");
- abort_after = ktime_get_ns() + 1000000000ULL; /* 1s in ns */
+ deadline = ktime_get_ns() + 1000000000ULL; /* 1s in ns */
while (!sevstep_uspt_is_event_done(sent_event.id)) {
- if (ktime_get_ns() > abort_after) {
+ if (ktime_get_ns() > deadline) {
pr_warn("Sevstep: uspt_send_and_block: "
"Waiting for ack of event %llu timed out",
- sent_event.id);
+ sent_event.id);
return 3;
}
}
@@ -144,18 +96,17 @@ sevstep_uspt_is_event_done(uint64_t id)
bool done;
read_lock(&event_lock);
- done = last_acked_event_id >= id;
+ done = last_acked_eventid >= id;
read_unlock(&event_lock);
return done;
}
int
-sevstep_uspt_handle_poll_event(page_fault_event_t* userpace_mem)
+sevstep_uspt_handle_poll_event(struct cpc_track_event __user *event)
{
int err;
- // most of the time we won't have an event
read_lock(&event_lock);
if (!have_event) {
read_unlock(&event_lock);
@@ -165,9 +116,9 @@ sevstep_uspt_handle_poll_event(page_fault_event_t* userpace_mem)
write_lock(&event_lock);
if (have_event) {
- err = copy_to_user(userpace_mem,
- &sent_event, sizeof(page_fault_event_t));
- have_event = 0;
+ err = copy_to_user(event, &sent_event,
+ sizeof(struct cpc_track_event));
+ have_event = false;
} else {
err = CPC_USPT_POLL_EVENT_NO_EVENT;
}
@@ -177,308 +128,21 @@ sevstep_uspt_handle_poll_event(page_fault_event_t* userpace_mem)
}
int
-sevstep_uspt_handle_ack_event_ioctl(ack_event_t event)
+sevstep_uspt_handle_ack_event_ioctl(uint64_t eventid)
{
- int err = 0;
+ int err;
- pr_warn("Sevstep: uspt_handle_ack_event_ioctl: acking event %llu", event.id);
write_lock(&event_lock);
- if (event.id == last_sent_event_id) {
- last_acked_event_id = last_sent_event_id;
+ if (eventid == last_sent_eventid) {
+ err = 0;
+ last_acked_eventid = last_sent_eventid;
} else {
err = 1;
- pr_warn("sevstep_uspt_handle_ack_event_ioctl: "
- "last sent event id is %llu but received ack for %llu\n",
- last_sent_event_id, event.id);
+ pr_warn("Sevstep: ack'd event does not match sent: %llu %llu\n",
+ last_sent_eventid, eventid);
}
write_unlock(&event_lock);
return err;
}
-// get retired instructions between current_event_idx-1 and current_event_idx
-// value is cached for multiple calls to the same current_event_idx
-uint64_t
-perf_state_update_and_get_delta(uint64_t current_event_idx)
-{
- uint64_t current_value;
-
- /* check if value is "cached" */
- if (perf_state.delta_valid_idx == current_event_idx) {
- if (current_event_idx == 0) {
- perf_state.idx_for_last_perf_reading = current_event_idx;
- perf_state.last_perf_reading = cachepc_read_pmc(CPC_RETINST_PMC);
- }
- return perf_state.delta;
- }
-
- /* otherwise update, but logic is only valid for two consecutive events */
- if (current_event_idx != perf_state.idx_for_last_perf_reading+1) {
- pr_warn("perf_state_update_and_get_delta: "
- "last reading was for idx %llu but was queried for %llu\n",
- perf_state.idx_for_last_perf_reading, current_event_idx);
- }
-
- current_value = cachepc_read_pmc(CPC_RETINST_PMC);
- perf_state.delta = (current_value - perf_state.last_perf_reading);
- perf_state.delta_valid_idx = current_event_idx;
-
- perf_state.idx_for_last_perf_reading = current_event_idx;
- perf_state.last_perf_reading = current_value;
-
- return perf_state.delta;
-}
-
-void
-sevstep_uspt_batch_tracking_inc_event_idx(void)
-{
- spin_lock(&batch_track_state_lock);
- batch_track_state.event_next_idx++;
- spin_unlock(&batch_track_state_lock);
-}
-
-int
-sevstep_uspt_batch_tracking_start(int tracking_type,uint64_t expected_events,
- int perf_cpu, bool retrack)
-{
- page_fault_event_t* events;
- uint64_t buffer_size, i;
-
- spin_lock(&batch_track_state_lock);
- if (batch_track_state.is_active) {
- pr_warn("sevstep_uspt_batch_tracking_start: "
- "overwriting active batch track config!\n");
- if (batch_track_state.events != NULL ) {
- vfree(batch_track_state.events);
- }
- }
- batch_track_state.is_active = false;
- spin_unlock(&batch_track_state_lock);
-
- buffer_size = expected_events * sizeof(page_fault_event_t);
- pr_warn("sevstep_uspt_batch_tracking_start: "
- "trying to alloc %llu bytes buffer for events\n",
- buffer_size);
- events = vmalloc(buffer_size);
- if (events == NULL) {
- pr_warn("sevstep_uspt_batch_tracking_start: "
- "faperf_cpuiled to alloc %llu bytes for event buffer\n",
- buffer_size);
- return 1; // note: lock not held here
- }
-
- // access each element once to force them into memory, improving performance
- // during tracking
- for (i = 0; i < expected_events * sizeof(page_fault_event_t); i++) {
- ((volatile uint8_t*)events)[i] = 0;
- }
-
- perf_state.idx_for_last_perf_reading = 0;
- perf_state.last_perf_reading = 0;
- perf_state.delta_valid_idx = 0;
- perf_state.delta = 0;
- cachepc_init_pmc(0, 0xc0, 0x00, PMC_GUEST, PMC_KERNEL | PMC_USER);
-
- spin_lock(&batch_track_state_lock);
-
- batch_track_state.perf_cpu = perf_cpu;
- batch_track_state.retrack = retrack;
-
- batch_track_state.events = events;
- batch_track_state.event_next_idx = 0;
- batch_track_state.events_size = expected_events;
-
- batch_track_state.gfn_retrack_backlog_next_idx = 0;
- batch_track_state.tracking_type = tracking_type;
- batch_track_state.error_occured = false;
-
- batch_track_state.is_active = true;
-
- spin_unlock(&batch_track_state_lock);
-
- return 0;
-}
-
-void
-sevstep_uspt_batch_tracking_handle_retrack(struct kvm_vcpu* vcpu,
- uint64_t current_fault_gfn)
-{
- uint64_t ret_instr_delta;
- int i, next_idx;
- int cpu;
-
- cpu = get_cpu();
-
- spin_lock(&batch_track_state_lock);
-
- if (!batch_track_state.retrack) {
- spin_unlock(&batch_track_state_lock);
- put_cpu();
- return;
- }
-
- if (cpu != batch_track_state.perf_cpu) {
- pr_warn("sevstep_uspt_batch_tracking_handle_retrack: perf was "
- "programmed on logical cpu %d but handler was called "
- "on %d. Did you forget to pin the vcpu thread?\n",
- batch_track_state.perf_cpu, cpu);
- }
- ret_instr_delta = perf_state_update_and_get_delta(batch_track_state.event_next_idx);
-
- // faulting instructions is probably the same as on last fault
- // try to add current fault to retrack log and return
- // for first event idx we do not have a valid ret_instr_delta.
- // Retracking for the frist time is fine, if we loop, we end up here
- // again but with a valid delta on one of the next event
- if ((ret_instr_delta < 2) && ( batch_track_state.event_next_idx != 0)) {
- next_idx = batch_track_state.gfn_retrack_backlog_next_idx;
- if (next_idx >= ARRLEN(batch_track_state.gfn_retrack_backlog)) {
- pr_warn("sevstep_uspt_batch_tracking_handle_retrack: "
- "retrack backlog full, dropping retrack for fault "
- "at 0x%llx\n", current_fault_gfn);
- } else {
- batch_track_state.gfn_retrack_backlog[next_idx] = current_fault_gfn;
- batch_track_state.gfn_retrack_backlog_next_idx++;
- }
-
- spin_unlock(&batch_track_state_lock);
- put_cpu();
- return;
- }
-
- /* made progress, retrack everything in backlog and reset idx */
- for (i = 0; i < batch_track_state.gfn_retrack_backlog_next_idx; i++) {
- sevstep_track_single_page(vcpu,
- batch_track_state.gfn_retrack_backlog[i],
- batch_track_state.tracking_type);
- }
-
- /* add current fault to list */
- batch_track_state.gfn_retrack_backlog[0] = current_fault_gfn;
- batch_track_state.gfn_retrack_backlog_next_idx = 1;
-
- spin_unlock(&batch_track_state_lock);
- put_cpu();
-}
-
-int
-sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
- bool have_rip, uint64_t rip)
-{
- uint64_t ret_instr_delta;
- page_fault_event_t* event;
- int cpu;
-
- cpu = get_cpu();
- spin_lock(&batch_track_state_lock);
-
- if (!batch_track_state.is_active) {
- pr_warn("Sevstep: uspt_batch_tracking_save: "
- "got save but batch tracking is not active!\n");
- batch_track_state.error_occured = true;
- spin_unlock(&batch_track_state_lock);
- put_cpu();
- return 1;
- }
-
- if (batch_track_state.event_next_idx >= batch_track_state.events_size) {
- pr_warn("Sevstep: uspt_batch_tracking_save: events buffer is full!\n");
- batch_track_state.error_occured = true;
- spin_unlock(&batch_track_state_lock);
- put_cpu();
- return 1;
- }
-
- if (cpu != batch_track_state.perf_cpu) {
- pr_warn("Sevstep: uspt_batch_tracking_save: perf was "
- "programmed on logical cpu %d but handler was called "
- "on %d. Did you forget to pin the vcpu thread?\n",
- batch_track_state.perf_cpu, cpu);
- }
- ret_instr_delta = perf_state_update_and_get_delta(batch_track_state.event_next_idx);
-
-
- if (batch_track_state.events == NULL) {
- pr_warn("Sevstep: uspt_batch_tracking_save: events buf was "
- "NULL but \"is_active\" was set! This should never happen!!!\n");
- spin_unlock(&batch_track_state_lock);
- return 1;
- }
-
- event = &batch_track_state.events[batch_track_state.event_next_idx];
- event->id = batch_track_state.event_next_idx;
- event->faulted_gpa = faulted_gpa;
- event->error_code = error_code;
- event->have_rip_info = have_rip;
- event->rip = rip;
- event->ns_timestamp = ktime_get_real_ns();
- event->have_retired_instructions = true;
- event->retired_instructions = ret_instr_delta;
-
- // old inc was here
-
- if (batch_track_state.gfn_retrack_backlog_next_idx
- > ARRLEN(batch_track_state.gfn_retrack_backlog)) {
- pr_warn("sevstep_uspt_batch_tracking_save: "
- "gfn retrack backlog overflow!\n");
- batch_track_state.error_occured = true;
- spin_unlock(&batch_track_state_lock);
- put_cpu();
- return 1;
- }
-
- spin_unlock(&batch_track_state_lock);
- put_cpu();
-
- return 0;
-}
-
-int
-sevstep_uspt_batch_tracking_stop(page_fault_event_t* results,
- uint64_t len, __u8* error_occured)
-{
- spin_lock(&batch_track_state_lock);
- if (!batch_track_state.is_active) {
- pr_warn("sevstep_uspt: batch tracking not active\n");
- spin_unlock(&batch_track_state_lock);
- return 1;
-
- }
- batch_track_state.is_active = false;
-
- if (len > batch_track_state.event_next_idx) {
- pr_warn("sevstep_uspt_batch_tracking_stop: "
- "requested %llu events but got only %llu\n",
- len, batch_track_state.event_next_idx);
- spin_unlock(&batch_track_state_lock);
- return 1;
- }
-
- memcpy(results,batch_track_state.events, len*sizeof(page_fault_event_t));
- vfree(batch_track_state.events);
-
- *error_occured = batch_track_state.error_occured;
-
- spin_unlock(&batch_track_state_lock);
-
- return 0;
-}
-
-uint64_t
-sevstep_uspt_batch_tracking_get_events_count(void)
-{
- uint64_t buf;
-
- spin_lock(&batch_track_state_lock);
- buf = batch_track_state.event_next_idx;
- spin_unlock(&batch_track_state_lock);
-
- return buf;
-}
-
-bool
-sevstep_uspt_batch_tracking_in_progress(void)
-{
- return batch_track_state.is_active;
-}
-
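What remains of uspt.c is a single-slot handshake: sevstep_uspt_send_and_block() publishes one event, then spins for up to 1 s waiting for userspace to ack its id. The matching consumer side, sketched along the lines of monitor() in test/sevstep.c below:

	struct cpc_track_event event;
	int ret;

	ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
	if (ret == CPC_USPT_POLL_EVENT_GOT_EVENT) {
		/* ... inspect event.faulted_gpa, event.error_code ... */
		ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
		if (ret == -1) err(1, "ioctl ACK_EVENT");
	} else if (ret != CPC_USPT_POLL_EVENT_NO_EVENT) {
		err(1, "ioctl POLL_EVENT");
	}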
diff --git a/cachepc/uspt.h b/cachepc/uspt.h
@@ -7,42 +7,15 @@
#include <linux/types.h>
-int sevstep_uspt_is_initialiized(void);
+bool sevstep_uspt_is_initialiized(void);
void sevstep_uspt_clear(void);
bool sevstep_uspt_should_get_rip(void);
int sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
bool have_rip, uint64_t rip);
-
int sevstep_uspt_is_event_done(uint64_t id);
-/* prepare next event based on faulted_gpa and error_code. Notify process
- * behind pid_number. Event must be polled id is result param with the id
- * used for the event. Can be used to call sevstep_uspt_is_event_done */
-int sevstep_uspt_send_notification(int pid_number, uint64_t faulted_gpa,
- uint32_t error_code, uint64_t *id);
-
-/* copy next event to userpace_mem */
-int sevstep_uspt_handle_poll_event(page_fault_event_t* userpace_mem);
-
-/* acknowledge receival of event to event handling logic */
-int sevstep_uspt_handle_ack_event_ioctl(ack_event_t event);
-
-/* should be called after "sevstep_uspt_batch_tracking_save",
- * "sevstep_uspt_batch_tracking_handle_retrack" and any future custom logic
- * for an event is processed */
-void sevstep_uspt_batch_tracking_inc_event_idx(void);
-int sevstep_uspt_batch_tracking_start(int tracking_type, uint64_t expected_events, int perf_cpu, bool retrack);
-int sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code, bool have_rip, uint64_t rip);
-uint64_t sevstep_uspt_batch_tracking_get_events_count(void);
-
-/* Stops batch tracking on copies the first @len events into @result.
- * If an error occured at some point during the batch tracking,
- * error_occured is set(there should also be a dmesg, but this allows programatic access);
- * Caller can use sevstep_uspt_batch_tracking_get_events_count() to determine the amount
- * of memory they should allocate for @results */
-int sevstep_uspt_batch_tracking_stop(page_fault_event_t *results, uint64_t len, __u8 *error_occured);
-void sevstep_uspt_batch_tracking_handle_retrack(struct kvm_vcpu *vcpu, uint64_t current_fault_gfn);
-void sevstep_uspt_batch_tracking_get_retrack_gfns(uint64_t **gfns, uint64_t *len, int *tracking_type);
-bool sevstep_uspt_batch_tracking_in_progress(void);
+int sevstep_uspt_handle_poll_event(struct cpc_track_event *event);
+
+int sevstep_uspt_handle_ack_event_ioctl(uint64_t eventid);
diff --git a/compile_commands.json b/compile_commands.json
@@ -1 +0,0 @@
-[]
diff --git a/test/sevstep.c b/test/sevstep.c
@@ -439,8 +439,7 @@ runonce(struct kvm *kvm)
int
monitor(void)
{
- page_fault_event_t event;
- ack_event_t ack;
+ struct cpc_track_event event;
int ret;
/* Get page fault info */
@@ -450,9 +449,8 @@ monitor(void)
event.retired_instructions);
faultcnt++;
- ack.id = event.id;
- printf("Acking event %llu\n", ack.id);
- ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &ack);
+ printf("Acking event %llu\n", event.id);
+ ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &event.id);
if (ret == -1) err(1, "ioctl ACK_EVENT");
} else if (ret != CPC_USPT_POLL_EVENT_NO_EVENT) {
perror("ioctl POLL_EVENT");
@@ -466,9 +464,9 @@ int
main(int argc, const char **argv)
{
struct kvm kvm_with_access;
- track_all_pages_t track_all;
+ uint64_t track_mode;
pid_t ppid, pid;
- int i, ret;
+ int ret;
setvbuf(stdout, NULL, _IONBF, 0);
@@ -503,8 +501,8 @@ main(int argc, const char **argv)
if (ret == -1) err(1, "ioctl RESET_TRACKING");
/* Init page tracking */
- track_all.track_mode = KVM_PAGE_TRACK_ACCESS;
- ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_all);
+ track_mode = KVM_PAGE_TRACK_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_mode);
if (ret == -1) err(1, "ioctl TRACK_ALL");
ppid = getpid();