commit 572985d705e4575d53ec5d312a484a96a01bce9f
parent d505f8bebab8214981a7b4ad63e2595fa497074c
Author: Louis Burda <quent.burda@gmail.com>
Date: Thu, 6 Oct 2022 12:02:32 +0200
Move cachepc ioctl into kvm device and remove old procfs endpoint
Diffstat:
14 files changed, 698 insertions(+), 749 deletions(-)
diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -26,7 +26,7 @@ static bool is_in_arr(uint32_t elem, uint32_t *arr, uint32_t arr_len);
void
cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
- int host_guest, int kernel_user)
+ uint8_t host_guest, uint8_t kernel_user)
{
uint64_t event;
uint64_t reg_addr;
@@ -45,8 +45,8 @@ cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
event |= (1ULL << 22); /* enable performance counter */
event |= ((kernel_user & 0b11) * 1ULL) << 16;
event |= ((host_guest & 0b11) * 1ULL) << 40;
- printk(KERN_WARNING "CachePC: Initialized %i. PMC %02X:%02X\n",
- index, event_no, event_mask);
+ printk(KERN_WARNING "CachePC: Initialized %i. PMC %02X:%02X (%016llx)\n",
+ index, event_no, event_mask, event);
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
}
@@ -162,8 +162,6 @@ cachepc_save_msrmts(cacheline *head)
{
cacheline *curr_cl;
- // printk(KERN_WARNING "CachePC: Updating /proc/cachepc\n");
-
curr_cl = head;
do {
if (CL_IS_FIRST(curr_cl->flags)) {
@@ -173,6 +171,8 @@ cachepc_save_msrmts(cacheline *head)
curr_cl = curr_cl->prev;
} while (curr_cl != head);
+
+ cachepc_print_msrmts(head);
}
void
@@ -250,7 +250,8 @@ prepare_cache_set_ds(cache_ctx *ctx, uint32_t *sets, uint32_t sets_len)
}
if (ctx->addressing == PHYSICAL_ADDRESSING && !is_in_arr(
- curr_cl->cache_set / CACHE_GROUP_SIZE, cache_groups, cache_groups_len))
+ curr_cl->cache_set / CACHE_GROUP_SIZE,
+ cache_groups, cache_groups_len))
{
// Already free all unused blocks of the cache ds for physical
// addressing, because we loose their refs
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
@@ -86,7 +86,7 @@ static_assert(sizeof(struct cacheline) == CACHELINE_SIZE, "Bad cache line struct
static_assert(CL_NEXT_OFFSET == 0 && CL_PREV_OFFSET == 8);
void cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
- int host_guest, int kernel_user);
+ uint8_t host_guest, uint8_t kernel_user);
cache_ctx *cachepc_get_ctx(int cache_level);
void cachepc_release_ctx(cache_ctx *ctx);
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -1,20 +1,22 @@
#include "kvm.h"
+#include "uspt.h"
+#include "cachepc.h"
+#include "sevstep.h"
+#include "uapi.h"
+#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
#include <linux/init.h>
#include <asm/uaccess.h>
-struct proc_ops cachepc_proc_ops;
-
-uint16_t *cachepc_msrmts;
-size_t cachepc_msrmts_count;
+uint16_t *cachepc_msrmts = NULL;
+size_t cachepc_msrmts_count = 0;
EXPORT_SYMBOL(cachepc_msrmts);
EXPORT_SYMBOL(cachepc_msrmts_count);
-cache_ctx *cachepc_ctx;
-cacheline *cachepc_ds;
+cache_ctx *cachepc_ctx = NULL;
+cacheline *cachepc_ds = NULL;
EXPORT_SYMBOL(cachepc_ctx);
EXPORT_SYMBOL(cachepc_ds);
@@ -23,72 +25,268 @@ uint64_t cachepc_regs_vm[16];
EXPORT_SYMBOL(cachepc_regs_tmp);
EXPORT_SYMBOL(cachepc_regs_vm);
+static long
+get_user_pages_remote_unlocked(struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages)
+{
+ struct vm_area_struct **vmas = NULL;
+ int locked = 1;
+ long ret;
+
+ down_read(&mm->mmap_lock);
+ ret = get_user_pages_remote( mm, start, nr_pages,
+ gup_flags, pages, vmas, &locked);
+ if (locked) up_read(&mm->mmap_lock);
+
+ return ret;
+}
+
+// static int
+// get_hpa_for_gpa(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
+// {
+// int ec;
+// unsigned long hva;
+// struct page *page = NULL;
+//
+// ec = 0;
+//
+// hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+// if (kvm_is_error_hva(hva)) {
+// pr_warn("in %s line %d get_hpa_for_gpa: translation to hva failed\n",
+// __FILE__, __LINE__);
+// ec = -100;
+// goto out;
+// }
+// if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, &page) != 1) {
+// pr_warn("in %s line %d get_hpa_for_gpa: failed to get page struct from mm",
+// __FILE__, __LINE__);
+// ec = -KVM_EINVAL;
+// goto out;
+// }
+//
+// (*hpa) = (page_to_pfn(page) << 12) + (gpa & 0xfff);
+//
+// out:
+// put_page(page);
+//
+// return ec;
+// }
+
int
-cachepc_kvm_proc_open(struct inode *inode, struct file *file)
+read_physical(struct kvm *kvm, u64 gpa, void *buff, u64 size,
+ bool decrypt_at_host)
{
- try_module_get(THIS_MODULE);
+ unsigned long hva;
+ struct page *page = NULL;
+ void *ptr_page = NULL;
+ uint64_t offset;
+ int ec;
+
+ offset = (gpa & 0xFFF);
+
+ if ((offset + size - 1) > 0xFFF) {
+ printk("read_phyiscal: trying to read "
+ "beyond page (offset+size=%016llx)\n",
+ offset + size);
+ return -EINVAL;
+ }
- return 0;
+ ec = 0;
+
+ hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+
+ // TODO: test change
+ /*
+ if (kvm_is_error_hva(hva)) {
+ printk(KERN_CRIT "Luca: read_physical: translation to hva failed( gpa was "
+ "%016llx hva is %016lx\n",
+ gpa, hva);
+ ec = -100;
+ goto out;
+ }
+ */
+
+ if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, &page) != 1) {
+ pr_warn("read_physical: failed to get page struct from mm\n");
+ // ec = -KVM_EINVAL;
+ ec = -100;
+ goto out;
+ }
+
+ if (decrypt_at_host) {
+ // map with encryption bit. Content is decrypted with host key. If sev is
+ // disabled but sme is enabled this allows to read the plaintext.
+ ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
+ } else {
+ // map without encryption bit to read ciphertexts
+ ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
+ }
+
+ /*printk("value of buff ptr = %p\t value of ptr_page=%p\n", buff,
+ ptr_page + offset);*/
+ memcpy(buff, ptr_page + offset, size);
+
+out:
+ if (ptr_page)
+ vunmap(ptr_page);
+ if (page)
+ put_page(page);
+
+ return ec;
}
int
-cachepc_kvm_proc_close(struct inode *inode, struct file *file)
+print_physical(struct kvm *kvm, u64 gpa, u64 size, bool decrypt_at_host)
{
- module_put(THIS_MODULE);
+ u8 *buffer;
+ int i, err;
- return 0;
+ buffer = kmalloc(size, GFP_ATOMIC);
+
+ err = read_physical(kvm, gpa, buffer, size, decrypt_at_host);
+ if (err != 0) {
+ pr_warn("at %s line %d: read_physical "
+ "failed with: %d\n", __FILE__, __LINE__, err);
+ }
+ for (i = 0; i < size; i++) {
+ // print bytewise with line break every 16 bytes
+ if (i % 16 == 0) {
+ printk("%02x ", buffer[i]);
+ } else {
+ printk(KERN_CONT " %02x ", buffer[i]);
+ }
+ }
+ printk("\n");
+
+ kfree(buffer);
+
+ return err;
}
-ssize_t
-cachepc_kvm_proc_read(struct file *file, char *buf, size_t buflen, loff_t *off)
+int
+map_physical(struct kvm *kvm, u64 gpa, bool decrypt_at_host,
+ void **mapping, struct page **page)
{
- size_t len, left;
- size_t size;
- printk(KERN_WARNING "CachePC: Reading entries (%lu:%lli)\n",
- buflen, off ? *off : 0);
+ int ec;
+ unsigned long hva;
+ uint64_t offset;
+
+ offset = (gpa & 0xFFF);
- size = cachepc_msrmts_count * sizeof(uint16_t);
- if (!off || *off >= size || *off < 0)
- return 0;
+ ec = 0;
- len = size - *off;
- if (len > buflen) len = buflen;
+ hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
- left = copy_to_user(buf, (uint8_t *) cachepc_msrmts + *off, len);
+ if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, page) != 1) {
+ pr_warn("map_physical: failed to get page struct from mm");
+ // ec = -KVM_EINVAL;
+ ec = -100;
+ return ec;
+ }
- len -= left;
- *off += len;
+ if (decrypt_at_host) {
+ // map with encryption bit. Content is decrypted with host key. If sev is
+ // disabled but sme is enabled this allows to read the plaintext.
+ (*mapping) = vmap(page, 1, 0, PAGE_KERNEL);
+ } else {
+ // map without encryption bit to read ciphertexts
+ (*mapping) = vmap(page, 1, 0, __pgprot(__PAGE_KERNEL));
+ }
- return len;
+ return ec;
}
-ssize_t
-cachepc_kvm_proc_write(struct file *file, const char *buf, size_t buflen, loff_t *off)
+void
+unmap_physical(void **mapping, struct page **page)
{
+ if (*mapping)
+ vunmap(*mapping);
+ if (*page)
+ put_page(*page);
+}
+
+int
+read_mapped(u64 gpa, void *buff, u64 size, void *mapping)
+{
+ uint64_t offset;
+ offset = (gpa & 0xFFF);
+
+ if ((offset + size - 1) > 0xFFF) {
+ pr_warn("read_mapped: trying to read "
+ "beyond page (offset+size=%016llx)\n",
+ offset + size);
+ return -EINVAL;
+ }
+ memcpy(buff, mapping + offset, size);
+
return 0;
}
-loff_t
-cachepc_kvm_proc_lseek(struct file *file, loff_t off, int mode)
+int
+write_mapped(u64 gpa, u64 size, const void *buf, void *mapping)
{
- switch (mode) {
- case SEEK_SET:
- file->f_pos = off;
- break;
- case SEEK_CUR:
- file->f_pos += off;
- break;
- case SEEK_END:
- file->f_pos = cachepc_msrmts_count * sizeof(uint16_t) + off;
- break;
- default:
+ uint64_t offset;
+
+ offset = (gpa & 0xFFF);
+
+ if ((offset + size - 1) > 0xFFF) {
+ printk("write_physical: trying to write beyond page(offset+size=%016llx)\n",
+ offset + size);
+ return -EINVAL;
+ }
+ memcpy(mapping + offset, buf, size);
+
+ return 0;
+}
+
+int
+write_physical(struct kvm *kvm, u64 gpa, u64 size,
+ const void *buf, bool write_plaintexts)
+{
+ int ec;
+ unsigned long hva;
+ struct page *page;
+ void *ptr_page;
+ uint64_t offset;
+
+ offset = (gpa & 0xFFF);
+
+ if ((offset + size - 1) > 0xFFF) {
+ pr_warn("write_physical: trying to write "
+ "beyond page(offset+size=%016llx)\n",
+ offset + size);
return -EINVAL;
}
- return file->f_pos;
+ ec = 0;
+ hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+
+ if (kvm_is_error_hva(hva))
+ return -KVM_EINVAL;
+
+ if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, FOLL_WRITE, &page) != 1)
+ return -KVM_EINVAL;
+
+ if (write_plaintexts) {
+ // map with encryption bit to apply host encryption. Useful if sev is
+ // disabled but sme is enabled and we want to write a certain value into a
+ // page
+ ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
+ } else {
+ // map without encryption bit to write ciphertexts
+ ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
+ }
+
+ memcpy(ptr_page + offset, buf, size);
+
+ vunmap(ptr_page);
+ put_page(page);
+ return ec;
}
+
void
cachepc_kvm_prime_probe_test(void *p)
{
@@ -152,6 +350,7 @@ cachepc_kvm_stream_hwpf_test(void *p)
max = cachepc_ctx->nr_of_cachelines;
+ count = 0;
cachepc_prime(cachepc_ds);
count -= cachepc_read_pmc(0);
@@ -275,9 +474,9 @@ cachepc_kvm_system_setup(void)
void
cachepc_kvm_init_pmc_ioctl(void *p)
{
- uint32_t event;
uint8_t index, event_no, event_mask;
uint8_t host_guest, kernel_user;
+ uint32_t event;
WARN_ON(p == NULL);
if (!p) return;
@@ -294,17 +493,321 @@ cachepc_kvm_init_pmc_ioctl(void *p)
host_guest, kernel_user);
}
+int
+cachepc_kvm_track_page_ioctl(void __user *arg_user)
+{
+ track_page_param_t param;
+ struct kvm_vcpu *vcpu;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(¶m, arg_user, sizeof(param))) {
+ pr_warn("KVM_TRACK_PAGE: error copying arguments, exiting\n");
+ return -EFAULT;
+ }
+
+ if (main_vm == NULL) {
+ pr_warn("KVM_TRACK_PAGE: main_vm is not initialized, aborting!\n");
+ return -EFAULT;
+ }
+
+ if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
+ pr_warn("KVM_TRACK_PAGE track_mode %d invalid, "
+ "must be in range [%d,%d]", param.track_mode,
+ 0, KVM_PAGE_TRACK_MAX);
+ return -EFAULT;
+ }
+
+ vcpu = xa_load(&main_vm->vcpu_array, 0);
+ if (!sevstep_track_single_page(vcpu,
+ param.gpa >> PAGE_SHIFT, param.track_mode)) {
+ printk("KVM_TRACK_PAGE: sevstep_track_single_page failed");
+ }
+
+ return 0;
+}
+
+int
+cachepc_kvm_batch_track_start_ioctl(void __user *arg_user)
+{
+ batch_track_config_t param;
+ int ret;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(¶m, arg_user, sizeof(param))) {
+ pr_warn("KVM_USPT_BATCH_TRACK_START: "
+ "error copying arguments, exiting\n");
+ return -EFAULT;
+ }
+
+ ret = sevstep_uspt_batch_tracking_start(param.tracking_type,
+ param.expected_events, param.perf_cpu, param.retrack);
+ if (ret != 0) {
+ pr_warn("KVM_USPT_BATCH_TRACK_START: failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+cachepc_kvm_batch_track_count_ioctl(void __user *arg_user)
+{
+ batch_track_event_count_t result;
+
+ if (!arg_user) return -EINVAL;
+
+ result.event_count = sevstep_uspt_batch_tracking_get_events_count();
+
+ if (copy_to_user(arg_user, &result, sizeof(result))) {
+ pr_warn("KVM_USPT_BATCH_TRACK_EVENT_COUNT: "
+ "error copying result to user, exiting\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int
+cachepc_kvm_batch_track_stop_ioctl(void __user *arg_user)
+{
+ batch_track_stop_and_get_t param;
+ page_fault_event_t* buf;
+ uint64_t buf_bytes;
+ void __user* inner_user_out_buf;
+ int ret;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(¶m, arg_user, sizeof(param))) {
+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ "error copying arguments, exiting\n");
+ return -EFAULT;
+ }
+ inner_user_out_buf = param.out_buf;
+
+ buf_bytes = sizeof(page_fault_event_t)*param.len;
+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ "allocating %llu bytes for tmp buf\n", buf_bytes);
+
+ buf = vmalloc(buf_bytes);
+ if (buf == NULL) {
+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ "failed to alloc tmp buf\n");
+ return -EFAULT;
+ }
+ param.out_buf = buf;
+
+ ret = sevstep_uspt_batch_tracking_stop(buf, param.len,
+ ¶m.error_during_batch);
+ if (ret != 0) {
+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: failed\n");
+ vfree(buf);
+ return -EFAULT;
+ }
+
+ if (copy_to_user(arg_user, ¶m, sizeof(param))) {
+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ "error copying result to user, exiting\n");
+ vfree(buf);
+ return -EFAULT;
+ }
+
+ if (copy_to_user(inner_user_out_buf, buf,buf_bytes)) {
+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ "error copying result to user, exiting\n");
+ vfree(buf);
+ return -EFAULT;
+ }
+
+ vfree(buf);
+
+ return 0;
+}
+
+int
+cachepc_kvm_track_all_ioctl(void __user *arg_user)
+{
+ track_all_pages_t param;
+ struct kvm_vcpu *vcpu;
+ long tracked_pages;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(¶m, arg_user, sizeof(param))) {
+ pr_warn("KVM_USPT_TRACK_ALL: error copying arguments, exiting\n");
+ return -EFAULT;
+ }
+
+ if (main_vm == NULL) {
+ pr_warn("KVM_USPT_TRACK_ALL: main_vm is not initialized, aborting!\n");
+ return -EFAULT;
+ }
+
+ if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
+ pr_warn("KVM_USPT_TRACK_ALL: "
+ "track_mode %d invalid, must be in range [%d,%d]\n",
+ param.track_mode, 0, KVM_PAGE_TRACK_MAX);
+ return -EFAULT;
+ }
+
+ vcpu = xa_load(&main_vm->vcpu_array, 0);
+ tracked_pages = sevstep_start_tracking(vcpu, param.track_mode);
+
+ return 0;
+}
+
+int
+cachepc_kvm_untrack_all_ioctl(void __user *arg_user)
+{
+ track_all_pages_t param;
+ struct kvm_vcpu *vcpu;
+ long untrack_count;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(¶m, arg_user, sizeof(param))) {
+ printk(KERN_CRIT "KVM_USPT_UNTRACK_ALL: error copying arguments, exiting\n");
+ return -EFAULT;
+ }
+
+ if (main_vm == NULL) {
+ printk("KVM_USPT_UNTRACK_ALL: main_vm is not initialized, aborting!\n");
+ return -EFAULT;
+ }
+
+ if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
+ printk("KVM_USPT_UNTRACK_ALL: track_mode %d invalid, "
+ "must be in range [%d,%d]", param.track_mode,
+ 0, KVM_PAGE_TRACK_MAX);
+ return -EFAULT;
+ }
+
+ vcpu = xa_load(&main_vm->vcpu_array, 0);
+ untrack_count = sevstep_stop_tracking(vcpu, param.track_mode);
+
+ return 0;
+}
+
+int
+cachepc_kvm_read_guest_memory_ioctl(void __user *arg_user)
+{
+ read_guest_memory_t param;
+ void * buf;
+ int res;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(¶m, arg_user, sizeof(read_guest_memory_t))) {
+ printk(KERN_CRIT
+ "KVM_READ_GUEST_MEMORY: error copying arguments, exiting\n");
+ return -EFAULT;
+ }
+
+ if (param.len > PAGE_SIZE) {
+ printk("KVM_READ_GUEST_MEMORY len may be at most page size");
+ }
+
+ buf = kmalloc(param.len, GFP_KERNEL);
+ if (buf == NULL) {
+ printk("KVM_READ_GUEST_MEMORY: failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ if (param.wbinvd_cpu >= 0) {
+ wbinvd_on_cpu(param.wbinvd_cpu);
+ }
+ wbinvd_on_all_cpus();
+
+ res = read_physical(main_vm, param.gpa, buf,
+ param.len, param.decrypt_with_host_key);
+ if (res) {
+ printk("KVM_READ_GUEST_MEMORY: read_physical failed with %d\n", res);
+ return -EINVAL;
+ }
+
+ if (copy_to_user(param.output_buffer, buf, param.len)) {
+ printk("KVM_READ_GUEST_MEMORY: failed to copy buf to userspace");
+ }
+
+ return 0;
+}
+
+int
+cachepc_kvm_uspt_reset(void __user *arg_user)
+{
+ struct kvm_vcpu *vcpu;
+
+ sevstep_uspt_clear();
+ vcpu = xa_load(&main_vm->vcpu_array, 0);
+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_EXEC);
+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);
+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_WRITE);
+
+ return 0;
+}
+
+int
+cachepc_kvm_register_pid(void __user *arg_user)
+{
+ userspace_ctx_t ctx;
+ struct kvm_vcpu *vcpu;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(&ctx, arg_user, sizeof(userspace_ctx_t))) {
+ printk("copy from user failed\n");
+ return -EACCES;
+ }
+
+ if (main_vm == NULL) {
+ printk("KVM_TRACK_PAGE: main_vm is not initialized, aborting!\n");
+ return -EFAULT;
+ }
+
+ sevstep_uspt_clear();
+ sevstep_uspt_initialize(ctx.pid, ctx.get_rip);
+
+ printk("Resetting page tracking\n");
+ vcpu = xa_load(&main_vm->vcpu_array, 0);
+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_EXEC);
+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);
+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_WRITE);
+
+ return 0;
+}
+
+int
+cachepc_kvm_uscpt_ack_event_ioctl(void __user *arg_user)
+{
+ ack_event_t ack_event;
+
+ if (!arg_user) return -EINVAL;
+
+ if (!sevstep_uspt_is_initialiized()) {
+ printk("userspace context not initilaized, call REGISTER_PID");
+ return -EINVAL;
+ }
+ if (copy_from_user(&ack_event, arg_user, sizeof(ack_event_t))) {
+ printk("ACK_EVENT failed to copy args");
+ return -EINVAL;
+ }
+
+ return sevstep_uspt_handle_ack_event_ioctl(ack_event);
+}
+
long
-cachepc_kvm_ioctl(struct file *file, unsigned int cmd, unsigned long argp)
+cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
void __user *arg_user;
uint32_t u32;
+ uint64_t u64;
int ret;
- arg_user = (void __user *)argp;
- switch (cmd) {
- case CACHEPC_IOCTL_TEST_ACCESS:
- printk(KERN_WARNING "CachePC: Called ioctl access test\n");
+ arg_user = (void __user *)arg;
+ switch (ioctl) {
+ case KVM_CPC_TEST_ACCESS:
if (!arg_user) return -EINVAL;
if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
return -EFAULT;
@@ -314,8 +817,7 @@ cachepc_kvm_ioctl(struct file *file, unsigned int cmd, unsigned long argp)
if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
return -EFAULT;
break;
- case CACHEPC_IOCTL_TEST_EVICTION:
- printk(KERN_WARNING "CachePC: Called ioctl eviction test\n");
+ case KVM_CPC_TEST_EVICTION:
if (!arg_user) return -EINVAL;
if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
return -EFAULT;
@@ -325,8 +827,7 @@ cachepc_kvm_ioctl(struct file *file, unsigned int cmd, unsigned long argp)
if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
return -EFAULT;
break;
- case CACHEPC_IOCTL_INIT_PMC:
- printk(KERN_WARNING "CachePC: Called ioctl init counter\n");
+ case KVM_CPC_INIT_PMC:
if (!arg_user) return -EINVAL;
if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
return -EFAULT;
@@ -334,8 +835,48 @@ cachepc_kvm_ioctl(struct file *file, unsigned int cmd, unsigned long argp)
cachepc_kvm_init_pmc_ioctl, &u32, true);
WARN_ON(ret != 0);
break;
+ case KVM_CPC_READ_PMC:
+ if (!arg_user) return -EINVAL;
+ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+ return -EFAULT;
+ u64 = cachepc_read_pmc(u32);
+ if (copy_to_user(arg_user, &u64, sizeof(uint64_t)))
+ return -EFAULT;
+ break;
+ case KVM_CPC_READ_COUNTS:
+ if (!arg_user) return -EINVAL;
+ if (copy_to_user(arg_user, cachepc_msrmts,
+ cachepc_msrmts_count * sizeof(uint16_t)))
+ return -EFAULT;
+ break;
+ case KVM_TRACK_PAGE:
+ return cachepc_kvm_track_page_ioctl(arg_user);
+ case KVM_USPT_BATCH_TRACK_START:
+ return cachepc_kvm_batch_track_start_ioctl(arg_user);
+ case KVM_USPT_BATCH_TRACK_EVENT_COUNT:
+ return cachepc_kvm_batch_track_count_ioctl(arg_user);
+ case KVM_USPT_BATCH_TRACK_STOP:
+ return cachepc_kvm_batch_track_stop_ioctl(arg_user);
+ case KVM_USPT_TRACK_ALL:
+ return cachepc_kvm_track_all_ioctl(arg_user);
+ case KVM_USPT_UNTRACK_ALL:
+ return cachepc_kvm_untrack_all_ioctl(arg_user);
+ case KVM_READ_GUEST_MEMORY:
+ return cachepc_kvm_read_guest_memory_ioctl(arg_user);
+ case KVM_USPT_RESET:
+ return cachepc_kvm_uspt_reset(arg_user);
+ case KVM_USPT_REGISTER_PID:
+ return cachepc_kvm_register_pid(arg_user);
+ case KVM_USPT_POLL_EVENT:
+ if (!sevstep_uspt_is_initialiized()) {
+ printk("userspace context not initilaized, call REGISTER_PID");
+ return -EINVAL;
+ }
+ return sevstep_uspt_handle_poll_event(arg_user);
+ case KVM_USPT_ACK_EVENT:
+ return cachepc_kvm_uscpt_ack_event_ioctl(arg_user);
default:
- return -EINVAL;
+ return kvm_arch_dev_ioctl(file, ioctl, arg);
}
return 0;
@@ -374,21 +915,11 @@ cachepc_kvm_init(void)
ret = smp_call_function_single(2, cachepc_kvm_setup_test, NULL, true);
WARN_ON(ret != 0);
-
- memset(&cachepc_proc_ops, 0, sizeof(cachepc_proc_ops));
- cachepc_proc_ops.proc_open = cachepc_kvm_proc_open;
- cachepc_proc_ops.proc_read = cachepc_kvm_proc_read;
- cachepc_proc_ops.proc_write = cachepc_kvm_proc_write;
- cachepc_proc_ops.proc_lseek = cachepc_kvm_proc_lseek;
- cachepc_proc_ops.proc_release = cachepc_kvm_proc_close;
- cachepc_proc_ops.proc_ioctl = cachepc_kvm_ioctl;
- proc_create("cachepc", 0644, NULL, &cachepc_proc_ops);
}
void
cachepc_kvm_exit(void)
{
- remove_proc_entry("cachepc", NULL);
kfree(cachepc_msrmts);
cachepc_release_ds(cachepc_ctx, cachepc_ds);
diff --git a/cachepc/kvm.h b/cachepc/kvm.h
@@ -2,5 +2,9 @@
#include "cachepc.h"
+#include <linux/fs.h>
+
+long cachepc_kvm_ioctl(struct file *file, unsigned int cmd, unsigned long argp);
+
void cachepc_kvm_init(void);
void cachepc_kvm_exit(void);
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -1,27 +1,29 @@
#pragma once
+#include <linux/kvm.h>
#include <linux/types.h>
#include <linux/ioctl.h>
-#define CACHEPC_IOCTL_MAGIC 0xBF
-#define CACHEPC_IOCTL_TEST_ACCESS _IOWR(CACHEPC_IOCTL_MAGIC, 0, __u32)
-#define CACHEPC_IOCTL_TEST_EVICTION _IOWR(CACHEPC_IOCTL_MAGIC, 1, __u32)
-#define CACHEPC_IOCTL_INIT_PMC _IOW(CACHEPC_IOCTL_MAGIC, 2, __u32)
-
-#define KVM_TRACK_PAGE _IOWR(KVMIO, 0x20, track_page_param_t)
-#define KVM_USPT_REGISTER_PID _IOWR(KVMIO, 0x21, userspace_ctx_t)
-#define KVM_USPT_WAIT_AND_SEND _IO(KVMIO, 0x22)
-#define KVM_USPT_POLL_EVENT _IOWR(KVMIO, 0x23, page_fault_event_t)
-#define KVM_USPT_ACK_EVENT _IOWR(KVMIO, 0x24, ack_event_t)
-#define KVM_READ_GUEST_MEMORY _IOWR(KVMIO, 0x25, read_guest_memory_t)
-#define KVM_USPT_RESET _IO(KVMIO, 0x26)
-#define KVM_USPT_TRACK_ALL _IOWR(KVMIO, 0x27, track_all_pages_t)
-#define KVM_USPT_UNTRACK_ALL _IOWR(KVMIO, 0x28, track_all_pages_t)
-#define KVM_USPT_SETUP_RETINSTR_PERF _IOWR(KVMIO, 0x30, retired_instr_perf_config_t)
-#define KVM_USPT_READ_RETINSTR_PERF _IOWR(KVMIO, 0x31, retired_instr_perf_t)
-#define KVM_USPT_BATCH_TRACK_START _IOWR(KVMIO, 0x32, batch_track_config_t)
-#define KVM_USPT_BATCH_TRACK_STOP _IOWR(KVMIO, 0x33, batch_track_stop_and_get_t)
-#define KVM_USPT_BATCH_TRACK_EVENT_COUNT _IOWR(KVMIO, 0x34, batch_track_event_count_t)
+#define KVM_CPC_TEST_ACCESS _IOWR(KVMIO, 0x20, __u32)
+#define KVM_CPC_TEST_EVICTION _IOWR(KVMIO, 0x21, __u32)
+#define KVM_CPC_INIT_PMC _IOW(KVMIO, 0x22, __u32)
+#define KVM_CPC_READ_PMC _IOWR(KVMIO, 0x23, __u32)
+#define KVM_CPC_READ_COUNTS _IOR(KVMIO, 0x24, __u64)
+
+#define KVM_TRACK_PAGE _IOWR(KVMIO, 0x30, track_page_param_t)
+#define KVM_USPT_REGISTER_PID _IOWR(KVMIO, 0x31, userspace_ctx_t)
+#define KVM_USPT_WAIT_AND_SEND _IO(KVMIO, 0x32)
+#define KVM_USPT_POLL_EVENT _IOWR(KVMIO, 0x33, page_fault_event_t)
+#define KVM_USPT_ACK_EVENT _IOWR(KVMIO, 0x34, ack_event_t)
+#define KVM_READ_GUEST_MEMORY _IOWR(KVMIO, 0x35, read_guest_memory_t)
+#define KVM_USPT_RESET _IO(KVMIO, 0x36)
+#define KVM_USPT_TRACK_ALL _IOWR(KVMIO, 0x37, track_all_pages_t)
+#define KVM_USPT_UNTRACK_ALL _IOWR(KVMIO, 0x38, track_all_pages_t)
+#define KVM_USPT_SETUP_RETINSTR_PERF _IOWR(KVMIO, 0x39, retired_instr_perf_config_t)
+#define KVM_USPT_READ_RETINSTR_PERF _IOWR(KVMIO, 0x3A, retired_instr_perf_t)
+#define KVM_USPT_BATCH_TRACK_START _IOWR(KVMIO, 0x3B, batch_track_config_t)
+#define KVM_USPT_BATCH_TRACK_STOP _IOWR(KVMIO, 0x3C, batch_track_stop_and_get_t)
+#define KVM_USPT_BATCH_TRACK_EVENT_COUNT _IOWR(KVMIO, 0x3D, batch_track_event_count_t)
#define KVM_USPT_POLL_EVENT_NO_EVENT 1000
#define KVM_USPT_POLL_EVENT_GOT_EVENT 0
diff --git a/patch.diff b/patch.diff
@@ -181,7 +181,7 @@ index 2e09d1b6249f..9b40e71564bf 100644
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index cf0bf456d520..4dbb8041541f 100644
+index cf0bf456d520..6687fad99e97 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2,6 +2,8 @@
@@ -193,7 +193,7 @@ index cf0bf456d520..4dbb8041541f 100644
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
-@@ -3788,14 +3790,28 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3788,14 +3790,33 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long vmcb_pa = svm->current_vmcb->pa;
@@ -202,27 +202,32 @@ index cf0bf456d520..4dbb8041541f 100644
guest_state_enter_irqoff();
if (sev_es_guest(vcpu->kvm)) {
-+ memset(cachepc_msrmts, 0, 64 * 2);
++ memset(cachepc_msrmts, 0,
++ cachepc_msrmts_count * sizeof(uint16_t));
++
+ cpu = get_cpu();
+ local_irq_disable();
+ WARN_ON(cpu != 2);
++
__svm_sev_es_vcpu_run(vmcb_pa);
++
+ cachepc_save_msrmts(cachepc_ds);
+ local_irq_enable();
+ put_cpu();
} else {
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
-+ memset(cachepc_msrmts, 0, 64 * 2);
++ memset(cachepc_msrmts, 0,
++ cachepc_msrmts_count * sizeof(uint16_t));
++
+ cpu = get_cpu();
+ local_irq_disable();
+ WARN_ON(cpu != 2);
-+ /* TODO: try closer to vcpu_run */
+
/*
* Use a single vmcb (vmcb01 because it's always valid) for
* context switching guest state via VMLOAD/VMSAVE, that way
-@@ -3807,6 +3823,10 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3807,6 +3828,10 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
vmsave(svm->vmcb01.pa);
vmload(__sme_page_pa(sd->save_area));
@@ -411,7 +416,7 @@ index e089fbf9017f..7899e1efe852
static int __sev_init_locked(int *error)
{
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index f2a63cb2658b..0d1c1d8c72ea 100644
+index f2a63cb2658b..4c55f85fc775 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -13,6 +13,7 @@
@@ -433,275 +438,7 @@ index f2a63cb2658b..0d1c1d8c72ea 100644
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-@@ -159,6 +164,267 @@ static unsigned long long kvm_active_vms;
-
- static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
-
-+static long
-+get_user_pages_remote_unlocked(struct mm_struct *mm,
-+ unsigned long start, unsigned long nr_pages,
-+ unsigned int gup_flags, struct page **pages)
-+{
-+ struct vm_area_struct **vmas = NULL;
-+ int locked = 1;
-+ long ret;
-+
-+ down_read(&mm->mmap_lock);
-+ ret = get_user_pages_remote( mm, start, nr_pages,
-+ gup_flags, pages, vmas, &locked);
-+ if (locked) up_read(&mm->mmap_lock);
-+
-+ return ret;
-+}
-+
-+// static int
-+// get_hpa_for_gpa(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
-+// {
-+// int ec;
-+// unsigned long hva;
-+// struct page *page = NULL;
-+//
-+// ec = 0;
-+//
-+// hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-+// if (kvm_is_error_hva(hva)) {
-+// pr_warn("in %s line %d get_hpa_for_gpa: translation to hva failed\n",
-+// __FILE__, __LINE__);
-+// ec = -100;
-+// goto out;
-+// }
-+// if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, &page) != 1) {
-+// pr_warn("in %s line %d get_hpa_for_gpa: failed to get page struct from mm",
-+// __FILE__, __LINE__);
-+// ec = -KVM_EINVAL;
-+// goto out;
-+// }
-+//
-+// (*hpa) = (page_to_pfn(page) << 12) + (gpa & 0xfff);
-+//
-+// out:
-+// put_page(page);
-+//
-+// return ec;
-+// }
-+
-+int
-+read_physical(struct kvm *kvm, u64 gpa, void *buff, u64 size,
-+ bool decrypt_at_host)
-+{
-+ unsigned long hva;
-+ struct page *page = NULL;
-+ void *ptr_page = NULL;
-+ uint64_t offset;
-+ int ec;
-+
-+ offset = (gpa & 0xFFF);
-+
-+ if ((offset + size - 1) > 0xFFF) {
-+ printk("read_phyiscal: trying to read "
-+ "beyond page (offset+size=%016llx)\n",
-+ offset + size);
-+ return -EINVAL;
-+ }
-+
-+ ec = 0;
-+
-+ hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-+
-+ // TODO: test change
-+ /*
-+ if (kvm_is_error_hva(hva)) {
-+ printk(KERN_CRIT "Luca: read_physical: translation to hva failed( gpa was "
-+ "%016llx hva is %016lx\n",
-+ gpa, hva);
-+ ec = -100;
-+ goto out;
-+ }
-+ */
-+
-+ if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, &page) != 1) {
-+ pr_warn("read_physical: failed to get page struct from mm\n");
-+ // ec = -KVM_EINVAL;
-+ ec = -100;
-+ goto out;
-+ }
-+
-+ if (decrypt_at_host) {
-+ // map with encryption bit. Content is decrypted with host key. If sev is
-+ // disabled but sme is enable this allows to read the plaintext.
-+ ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
-+ } else {
-+ // map without encryption bit to read ciphertexts
-+ ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
-+ }
-+
-+ /*printk("value of buff ptr = %p\t value of ptr_page=%p\n", buff,
-+ ptr_page + offset);*/
-+ memcpy(buff, ptr_page + offset, size);
-+
-+out:
-+ if (ptr_page)
-+ vunmap(ptr_page);
-+ if (page)
-+ put_page(page);
-+
-+ return ec;
-+}
-+
-+int
-+print_physical(struct kvm *kvm, u64 gpa, u64 size, bool decrypt_at_host)
-+{
-+ u8 *buffer;
-+ int i, err;
-+
-+ buffer = kmalloc(size, GFP_ATOMIC);
-+
-+ err = read_physical(kvm, gpa, buffer, size, decrypt_at_host);
-+ if (err != 0) {
-+ pr_warn("at %s line %d: read_physical "
-+ "failed with: %d\n", __FILE__, __LINE__, err);
-+ }
-+ for (i = 0; i < size; i++) {
-+ // print bytewise with line break every 16 bytes
-+ if (i % 16 == 0) {
-+ printk("%02x ", buffer[i]);
-+ } else {
-+ printk(KERN_CONT " %02x ", buffer[i]);
-+ }
-+ }
-+ printk("\n");
-+
-+ kfree(buffer);
-+
-+ return err;
-+}
-+
-+int
-+map_physical(struct kvm *kvm, u64 gpa, bool decrypt_at_host,
-+ void **mapping, struct page **page)
-+{
-+
-+ int ec;
-+ unsigned long hva;
-+ uint64_t offset;
-+
-+ offset = (gpa & 0xFFF);
-+
-+ ec = 0;
-+
-+ hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-+
-+ if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, page) != 1) {
-+ pr_warn("map_physical: failed to get page struct from mm");
-+ // ec = -KVM_EINVAL;
-+ ec = -100;
-+ return ec;
-+ }
-+
-+ if (decrypt_at_host) {
-+ // map with encryption bit. Content is decrypted with host key. If sev is
-+ // disabled but sme is enable this allows to read the plaintext.
-+ (*mapping) = vmap(page, 1, 0, PAGE_KERNEL);
-+ } else {
-+ // map without encryption bit to read ciphertexts
-+ (*mapping) = vmap(page, 1, 0, __pgprot(__PAGE_KERNEL));
-+ }
-+
-+ return ec;
-+}
-+
-+void
-+unmap_physical(void **mapping, struct page **page)
-+{
-+ if (*mapping)
-+ vunmap(*mapping);
-+ if (*page)
-+ put_page(*page);
-+}
-+
-+int
-+read_mapped(u64 gpa, void *buff, u64 size, void *mapping)
-+{
-+ uint64_t offset;
-+ offset = (gpa & 0xFFF);
-+
-+ if ((offset + size - 1) > 0xFFF) {
-+ pr_warn("read_mapped: trying to read "
-+ "beyond page (offset+size=%016llx)\n",
-+ offset + size);
-+ return -EINVAL;
-+ }
-+ memcpy(buff, mapping + offset, size);
-+
-+ return 0;
-+}
-+
-+int
-+write_mapped(u64 gpa, u64 size, const void *buf, void *mapping)
-+{
-+ uint64_t offset;
-+
-+ offset = (gpa & 0xFFF);
-+
-+ if ((offset + size - 1) > 0xFFF) {
-+ printk("write_physical: trying to write beyond page(offset+size=%016llx)\n",
-+ offset + size);
-+ return -EINVAL;
-+ }
-+ memcpy(mapping + offset, buf, size);
-+
-+ return 0;
-+}
-+
-+int
-+write_physical(struct kvm *kvm, u64 gpa, u64 size,
-+ const void *buf, bool write_plaintexts)
-+{
-+ int ec;
-+ unsigned long hva;
-+ struct page *page;
-+ void *ptr_page;
-+ uint64_t offset;
-+
-+ offset = (gpa & 0xFFF);
-+
-+ if ((offset + size - 1) > 0xFFF) {
-+ pr_warn("write_physical: trying to write "
-+ "beyond page(offset+size=%016llx)\n",
-+ offset + size);
-+ return -EINVAL;
-+ }
-+
-+ ec = 0;
-+ hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-+
-+ if (kvm_is_error_hva(hva))
-+ return -KVM_EINVAL;
-+
-+ if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, FOLL_WRITE, &page) != 1)
-+ return -KVM_EINVAL;
-+
-+ if (write_plaintexts) {
-+ // map with encrytpion bit to aplly host encryption. Usefull if sev is
-+ // disabled but sme is enabled and we want to write a certain value into a
-+ // page
-+ ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
-+ } else {
-+ // map without encryption bit to write ciphertexts
-+ ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
-+ }
-+
-+ memcpy(ptr_page + offset, buf, size);
-+
-+ vunmap(ptr_page);
-+ put_page(page);
-+ return ec;
-+}
-+
- __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
- {
-@@ -1261,6 +1527,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
+@@ -1261,6 +1266,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
hardware_disable_all();
mmdrop(mm);
module_put(kvm_chardev_ops.owner);
@@ -711,7 +448,7 @@ index f2a63cb2658b..0d1c1d8c72ea 100644
}
void kvm_get_kvm(struct kvm *kvm)
-@@ -1360,7 +1629,7 @@ static void kvm_insert_gfn_node(struct kvm_memslots *slots,
+@@ -1360,7 +1368,7 @@ static void kvm_insert_gfn_node(struct kvm_memslots *slots,
int idx = slots->node_idx;
parent = NULL;
@@ -720,7 +457,7 @@ index f2a63cb2658b..0d1c1d8c72ea 100644
struct kvm_memory_slot *tmp;
tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
-@@ -4823,6 +5092,9 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
+@@ -4823,6 +4831,9 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
fd_install(r, file);
@@ -730,334 +467,16 @@ index f2a63cb2658b..0d1c1d8c72ea 100644
return r;
put_kvm:
-@@ -4836,6 +5108,315 @@ static long kvm_dev_ioctl(struct file *filp,
- long r = -EINVAL;
-
- switch (ioctl) {
-+ case KVM_TRACK_PAGE: {
-+ track_page_param_t param;
-+ void __user* argp = (void __user *)arg;
-+ struct kvm_vcpu *vcpu;
-+
-+ if (copy_from_user(¶m, argp, sizeof(param))) {
-+ pr_warn("KVM_TRACK_PAGE: error copying arguments, exiting\n");
-+ return -EFAULT;
-+ }
-+
-+ if (main_vm == NULL) {
-+ pr_warn("KVM_TRACK_PAGE: main_vm is not initialized, aborting!\n");
-+ return -EFAULT;
-+ }
-+
-+ if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
-+ pr_warn("KVM_TRACK_PAGE track_mode %d invalid, "
-+ "must be in range [%d,%d]", param.track_mode,
-+ 0, KVM_PAGE_TRACK_MAX);
-+ return -EFAULT;
-+ }
-+
-+ vcpu = xa_load(&main_vm->vcpu_array, 0);
-+ if (!sevstep_track_single_page(vcpu,
-+ param.gpa >> PAGE_SHIFT, param.track_mode)) {
-+ printk("KVM_TRACK_PAGE: sevstep_track_single_page failed");
-+ }
-+ r = 0;
-+ }
-+ break;
-+ case KVM_USPT_BATCH_TRACK_START: {
-+ batch_track_config_t param;
-+ void __user* argp = (void __user *)arg;
-+
-+ if (copy_from_user(¶m, argp, sizeof(param))) {
-+ pr_warn("KVM_USPT_BATCH_TRACK_START: "
-+ "error copying arguments, exiting\n");
-+ return -EFAULT;
-+ }
-+
-+ r = sevstep_uspt_batch_tracking_start(param.tracking_type,
-+ param.expected_events, param.perf_cpu, param.retrack);
-+ if (r != 0) {
-+ pr_warn("KVM_USPT_BATCH_TRACK_START: failed\n");
-+ return r;
-+ }
-+ }
-+ break;
-+ case KVM_USPT_BATCH_TRACK_EVENT_COUNT: {
-+ batch_track_event_count_t result;
-+ void __user* argp = (void __user *)arg;
-+
-+ result.event_count = sevstep_uspt_batch_tracking_get_events_count();
-+
-+ if (copy_to_user(argp, &result, sizeof(result))) {
-+ pr_warn("KVM_USPT_BATCH_TRACK_EVENT_COUNT: "
-+ "error copying result to user, exiting\n");
-+ return -EFAULT;
-+ }
-+
-+ r = 0;
-+ }
-+ break;
-+ case KVM_USPT_BATCH_TRACK_STOP: {
-+ batch_track_stop_and_get_t param;
-+ page_fault_event_t* buf;
-+ uint64_t buf_bytes;
-+ void __user* argp = (void __user *)arg;
-+ void __user* inner_user_out_buf;
-+
-+ if (copy_from_user(¶m, argp, sizeof(param))) {
-+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
-+ "error copying arguments, exiting\n");
-+ return -EFAULT;
-+ }
-+ inner_user_out_buf = param.out_buf;
-+
-+ buf_bytes = sizeof(page_fault_event_t)*param.len;
-+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
-+ "allocating %llu bytes for tmp buf\n", buf_bytes);
-+
-+ buf = vmalloc(buf_bytes);
-+ if (buf == NULL) {
-+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
-+ "failed to alloc tmp buf\n");
-+ return -EFAULT;
-+ }
-+ param.out_buf = buf;
-+
-+ r = sevstep_uspt_batch_tracking_stop(buf, param.len,
-+ ¶m.error_during_batch);
-+ if (r != 0) {
-+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: failed\n");
-+ vfree(buf);
-+ return -EFAULT;
-+ }
-+
-+ if (copy_to_user(argp, ¶m, sizeof(param))) {
-+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
-+ "error copying result to user, exiting\n");
-+ vfree(buf);
-+ return -EFAULT;
-+ }
-+
-+ if (copy_to_user(inner_user_out_buf, buf,buf_bytes)) {
-+ pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
-+ "error copying result to user, exiting\n");
-+ vfree(buf);
-+ return -EFAULT;
-+ }
-+
-+ vfree(buf);
-+ }
-+ break;
-+ case KVM_USPT_TRACK_ALL: {
-+ track_all_pages_t param;
-+ void __user* argp = (void __user *)arg;
-+ struct kvm_vcpu *vcpu;
-+ long tracked_pages;
-+
-+ if (copy_from_user(¶m, argp, sizeof(param))) {
-+ pr_warn("KVM_USPT_TRACK_ALL: error copying arguments, exiting\n");
-+ return -EFAULT;
-+ }
-+
-+ if (main_vm == NULL) {
-+ pr_warn("KVM_USPT_TRACK_ALL: main_vm is not initialized, aborting!\n");
-+ return -EFAULT;
-+ }
-+
-+ if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
-+ pr_warn("KVM_USPT_TRACK_ALL: "
-+ "track_mode %d invalid, must be in range [%d,%d]\n",
-+ param.track_mode, 0, KVM_PAGE_TRACK_MAX);
-+ return -EFAULT;
-+ }
-+
-+ vcpu = xa_load(&main_vm->vcpu_array, 0);
-+ tracked_pages = sevstep_start_tracking(vcpu, param.track_mode);
-+ r = 0;
-+ }
-+ break;
-+ case KVM_USPT_UNTRACK_ALL: {
-+ track_all_pages_t param;
-+ void __user* argp = (void __user *)arg;
-+ struct kvm_vcpu *vcpu;
-+ long untrack_count;
-+
-+ if (copy_from_user(¶m, argp, sizeof(param))) {
-+ printk(KERN_CRIT
-+ "KVM_USPT_UNTRACK_ALL: error copying arguments, exiting\n");
-+ return -EFAULT;
-+ }
-+
-+ if (main_vm == NULL) {
-+ printk("KVM_USPT_UNTRACK_ALL: main_vm is not initialized, aborting!\n");
-+ return -EFAULT;
-+ }
-+
-+ if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
-+ printk("KVM_USPT_UNTRACK_ALL: track_mode %d invalid, must be in range [%d,%d]",param.track_mode,0,KVM_PAGE_TRACK_MAX);
-+ return -EFAULT;
-+ }
-+
-+ //printk("KVM_USPT_UNTRACK_ALL: with mode %d\n",param.track_mode);
-+ vcpu = xa_load(&main_vm->vcpu_array, 0);
-+ untrack_count = sevstep_stop_tracking(vcpu, param.track_mode);
-+ //printk("KVM_USPT_UNTRACK_ALL: untracked %ld pages\n",untrack_count);
-+ r = 0;
-+ }
-+ break;
-+ case KVM_USPT_SETUP_RETINSTR_PERF: {
-+ retired_instr_perf_config_t config;
-+ void __user* argp = (void __user *)arg;
-+
-+ printk("Received KVM_USPT_SETUP_RETINSTR_PERF ioctl!\n");
-+ if (copy_from_user(&config, argp, sizeof(config))) {
-+ printk("copy from user failed\n");
-+ return -EACCES;
-+ }
-+
-+ cachepc_init_pmc(0, 0xc0, 0x00, PMC_GUEST, PMC_KERNEL | PMC_USER);
-+
-+ r = 0;
-+ }
-+ break;
-+ case KVM_USPT_READ_RETINSTR_PERF: {
-+ retired_instr_perf_t request;
-+ void __user* argp = (void __user *)arg;
-+
-+ if (copy_from_user(&request, argp, sizeof(request))) {
-+ printk("KVM_USPT_READ_RETINSTR_PERF: copy from user failed\n");
-+ return -EACCES;
-+ }
-+
-+ request.retired_instruction_count = cachepc_read_pmc(0);
-+ if (copy_to_user(argp, &request, sizeof(request))) {
-+ printk("KVM_USPT_READ_RETINSTR_PERF : copy to user failed\n");
-+ }
-+ r = 0;
-+ }
-+ break;
-+ case KVM_READ_GUEST_MEMORY: {
-+ read_guest_memory_t param;
-+ int res;
-+ void * buf;
-+ void __user* argp = (void __user *)arg;
-+
-+ if (copy_from_user(¶m, argp, sizeof(read_guest_memory_t))) {
-+ printk(KERN_CRIT
-+ "KVM_READ_GUEST_MEMORY: error copying arguments, exiting\n");
-+ return -EFAULT;
-+ }
-+
-+ if (param.len > PAGE_SIZE) {
-+ printk("KVM_READ_GUEST_MEMORY len may be at most page size");
-+ }
-+
-+ buf = kmalloc(param.len, GFP_KERNEL);
-+ if (buf == NULL) {
-+ printk("KVM_READ_GUEST_MEMORY: failed to alloc memory");
-+ return -ENOMEM;
-+ }
-+
-+ if (param.wbinvd_cpu >= 0) {
-+ wbinvd_on_cpu(param.wbinvd_cpu);
-+ }
-+ wbinvd_on_all_cpus();
-+
-+ res = read_physical(main_vm, param.gpa, buf,
-+ param.len, param.decrypt_with_host_key);
-+ if (res) {
-+ printk("KVM_READ_GUEST_MEMORY: read_physical failed with %d\n", res);
-+ return -EINVAL;
-+ }
-+
-+ if (copy_to_user(param.output_buffer, buf, param.len)) {
-+ printk("KVM_READ_GUEST_MEMORY: failed to copy buf to userspace");
-+ }
-+
-+ return 0;
-+ }
-+ break;
-+ case KVM_USPT_RESET: {
-+ struct kvm_vcpu *vcpu;
-+
-+ printk("Received KVM_USPT_RESET ioctl!\n");
-+
-+ sevstep_uspt_clear();
-+ vcpu = xa_load(&main_vm->vcpu_array, 0);
-+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_EXEC);
-+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);
-+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_WRITE);
-+ r = 0;
-+ }
-+ break;
-+ case KVM_USPT_REGISTER_PID: {
-+ userspace_ctx_t ctx;
-+ void __user* argp = (void __user *)arg;
-+ struct kvm_vcpu *vcpu;
-+
-+ printk("Received REGISTER_PID ioctl!\n");
-+ if (copy_from_user(&ctx, argp, sizeof(userspace_ctx_t))) {
-+ printk("copy from user failed\n");
-+ return -EACCES;
-+ }
-+
-+ if (main_vm == NULL) {
-+ printk("KVM_TRACK_PAGE: main_vm is not initialized, aborting!\n");
-+ return -EFAULT;
-+ }
-+
-+ sevstep_uspt_clear();
-+ sevstep_uspt_initialize(ctx.pid, ctx.get_rip);
-+
-+ printk("Resetting page tracking\n");
-+ vcpu = xa_load(&main_vm->vcpu_array, 0);
-+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_EXEC);
-+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);
-+ sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_WRITE);
-+
-+ return 0;
-+ }
-+ break;
-+ case KVM_USPT_POLL_EVENT: {
-+ void __user* argp = (void __user *)arg;
-+ if (!sevstep_uspt_is_initialiized()) {
-+ printk("userspace context not initilaized, call REGISTER_PID");
-+ return -EINVAL;
-+ }
-+ return sevstep_uspt_handle_poll_event(argp);
-+ }
-+ break;
-+ case KVM_USPT_ACK_EVENT: {
-+ ack_event_t ack_event;
-+ void __user* argp = (void __user *)arg;
-+
-+ if (!sevstep_uspt_is_initialiized()) {
-+ printk("userspace context not initilaized, call REGISTER_PID");
-+ return -EINVAL;
-+ }
-+ if (copy_from_user(&ack_event, argp, sizeof(ack_event_t))) {
-+ printk("ACK_EVENT failed to copy args");
-+ return -EINVAL;
-+ }
-+
-+ return sevstep_uspt_handle_ack_event_ioctl(ack_event);
-+ }
-+ break;
- case KVM_GET_API_VERSION:
- if (arg)
- goto out;
-@@ -4864,7 +5445,9 @@ static long kvm_dev_ioctl(struct file *filp,
+@@ -4864,7 +4875,7 @@ static long kvm_dev_ioctl(struct file *filp,
r = -EOPNOTSUPP;
break;
default:
- return kvm_arch_dev_ioctl(filp, ioctl, arg);
-+ //r = cachepc_kvm_ioctl(filp, ioctl, arg);
-+ //if (r == -EINVAL)
-+ return kvm_arch_dev_ioctl(filp, ioctl, arg);
++ return cachepc_kvm_ioctl(filp, ioctl, arg);
}
out:
return r;
-@@ -5792,6 +6375,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -5792,6 +5803,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
r = kvm_vfio_ops_init();
WARN_ON(r);
@@ -1066,7 +485,7 @@ index f2a63cb2658b..0d1c1d8c72ea 100644
return 0;
out_unreg:
-@@ -5821,6 +6406,8 @@ void kvm_exit(void)
+@@ -5821,6 +5834,8 @@ void kvm_exit(void)
{
int cpu;
diff --git a/test/.gitignore b/test/.gitignore
@@ -4,3 +4,4 @@ kvm
sev
sev-es
sev-snp
+sevstep
diff --git a/test/access.c b/test/access.c
@@ -16,15 +16,15 @@ main(int argc, const char **argv)
int fd, ret;
size_t i;
- fd = open("/proc/cachepc", O_RDONLY);
+ fd = open("/dev/kvm", O_RDONLY);
if (fd < 0) err(1, "open");
- for (i = 0; i < 50; i++) {
+ for (i = 0; i < 100; i++) {
arg = 48; /* target set */
- ret = ioctl(fd, CACHEPC_IOCTL_TEST_ACCESS, &arg);
- if (ret == -1) err(1, "ioctl fail");
- printf("%i\n", arg);
+ ret = ioctl(fd, KVM_CPC_TEST_ACCESS, &arg);
+ if (ret == -1) err(1, "ioctl TEST_ACCESS");
+ if (arg != 1) errx(1, "access result (%i) != 1", arg);
}
-
+
close(fd);
}
diff --git a/test/eviction.c b/test/eviction.c
@@ -14,28 +14,26 @@ main(int argc, const char **argv)
{
uint16_t counts[64];
uint32_t arg;
- size_t i, len;
- int fd, ret;
+ int i, fd, ret;
- fd = open("/proc/cachepc", O_RDONLY);
+ fd = open("/dev/kvm", O_RDONLY);
if (fd < 0) err(1, "open");
arg = 48;
if (argc == 2) arg = atoi(argv[1]);
- ret = ioctl(fd, CACHEPC_IOCTL_TEST_EVICTION, &arg);
- if (ret == -1) err(1, "ioctl");
+ ret = ioctl(fd, KVM_CPC_TEST_EVICTION, &arg);
+ if (ret == -1) err(1, "ioctl TEST_EVICTION");
- len = read(fd, counts, sizeof(counts));
- if (len != sizeof(counts))
- errx(1, "invalid count read");
+ ret = ioctl(fd, KVM_CPC_READ_COUNTS, counts);
+ if (ret == -1) err(1, "ioctl READ_COUNTS");
for (i = 0; i < 64; i++) {
if (i % 16 == 0 && i)
printf("\n");
if (counts[i] > 0)
printf("\x1b[91m");
- printf("%2lu ", i);
+ printf("%2i ", i);
if (counts[i] > 0)
printf("\x1b[0m");
}
diff --git a/test/kvm.c b/test/kvm.c
@@ -49,7 +49,7 @@ extern uint8_t __stop_guest_without[];
static struct kvm kvm;
static struct kvm_run *kvm_run;
-static int cachepc_fd;
+static int kvm_fd;
#define TARGET_CACHE_LINESIZE 64
#define TARGET_SET 15
@@ -189,14 +189,15 @@ kvm_init(size_t ramsize, void *code_start, void *code_stop)
}
uint16_t *
-read_counts()
+read_counts()
{
- uint16_t *counts = (uint16_t *)malloc(64*sizeof(uint16_t));
- size_t len;
+ uint16_t *counts;
+ int ret;
- lseek(cachepc_fd, 0, SEEK_SET);
- len = read(cachepc_fd, counts, 64 * sizeof(uint16_t));
- assert(len == 64 * sizeof(uint16_t));
+ counts = malloc(64 * sizeof(uint16_t));
+ if (!counts) err(1, "malloc");
+ ret = ioctl(kvm_fd, KVM_CPC_READ_COUNTS, counts);
+ if (ret == -1) err(1, "ioctl READ_COUNTS");
return counts;
}
@@ -272,16 +273,16 @@ main(int argc, const char **argv)
pin_process(0, TARGET_CORE, true);
- cachepc_fd = open("/proc/cachepc", O_RDONLY);
- if (cachepc_fd < 0) err(1, "open");
+ kvm_fd = open("/dev/kvm", O_RDONLY);
+ if (kvm_fd < 0) err(1, "open");
- /* init L1 miss counter */
+ /* init L1 miss counter for host kernel */
arg = 0x002264D8;
- ret = ioctl(cachepc_fd, CACHEPC_IOCTL_INIT_PMC, &arg);
- if (ret == -1) err(1, "ioctl fail");
+ ret = ioctl(kvm_fd, KVM_CPC_INIT_PMC, &arg);
+ if (ret == -1) err(1, "ioctl INIT_PMC");
baseline = calloc(sizeof(uint16_t), 64);
- if (!baseline) err(1, "counts");
+ if (!baseline) err(1, "calloc");
for (k = 0; k < 64; k++)
baseline[k] = UINT16_MAX;
@@ -319,6 +320,6 @@ main(int argc, const char **argv)
}
free(baseline);
- close(cachepc_fd);
+ close(kvm_fd);
}
diff --git a/test/sev-es.c b/test/sev-es.c
@@ -58,7 +58,7 @@ ssize_t sysret;
pid_t victim_pid;
/* ioctl dev fds */
-int kvm_dev, sev_dev, cachepc_dev;
+int kvm_dev, sev_dev, kvm_dev;
enum {
GSTATE_UNINIT,
@@ -400,15 +400,15 @@ sev_kvm_deinit(struct kvm *kvm)
}
uint16_t *
-read_counts()
+read_counts()
{
uint16_t *counts;
- size_t len;
+ int ret;
counts = malloc(64 * sizeof(uint16_t));
- lseek(cachepc_dev, 0, SEEK_SET);
- len = read(cachepc_dev, counts, 64 * sizeof(uint16_t));
- assert(len == 64 * sizeof(uint16_t));
+ if (!counts) err(1, "malloc");
+ ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
+ if (ret == -1) err(1, "ioctl READ_COUNTS");
return counts;
}
@@ -470,9 +470,6 @@ main(int argc, const char **argv)
pin_process(0, TARGET_CORE, true);
- cachepc_dev = open("/proc/cachepc", O_RDONLY);
- if (cachepc_dev < 0) err(1, "open /proc/cachepc");
-
sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC);
if (sev_dev < 0) err(1, "open /dev/sev");
@@ -484,13 +481,13 @@ main(int argc, const char **argv)
if (ret < 0) err(1, "KVM_GET_API_VERSION");
if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
- // Init L1 miss counter
+ /* init L1 miss counter for host kernel */
arg = 0x002264D8;
- ret = ioctl(cachepc_dev, CACHEPC_IOCTL_INIT_PMC, &arg);
- if (ret < 0) err(1, "ioctl fail");
+ ret = ioctl(kvm_dev, KVM_CPC_INIT_PMC, &arg);
+ if (ret < 0) err(1, "ioctl INIT_PMC");
baseline = calloc(sizeof(uint16_t), 64);
- if (!baseline) err(1, "counts");
+ if (!baseline) err(1, "calloc");
for (k = 0; k < 64; k++)
baseline[k] = UINT16_MAX;
@@ -541,7 +538,6 @@ main(int argc, const char **argv)
free(baseline);
- close(cachepc_dev);
close(kvm_dev);
close(sev_dev);
}
diff --git a/test/sev.c b/test/sev.c
@@ -58,7 +58,7 @@ ssize_t sysret;
pid_t victim_pid;
/* ioctl dev fds */
-int kvm_dev, sev_dev, cachepc_dev;
+int kvm_dev, sev_dev, kvm_dev;
enum {
GSTATE_UNINIT,
@@ -399,15 +399,15 @@ sev_kvm_deinit(struct kvm *kvm)
}
uint16_t *
-read_counts()
+read_counts()
{
uint16_t *counts;
- size_t len;
+ int ret;
counts = malloc(64 * sizeof(uint16_t));
- lseek(cachepc_dev, 0, SEEK_SET);
- len = read(cachepc_dev, counts, 64 * sizeof(uint16_t));
- assert(len == 64 * sizeof(uint16_t));
+ if (!counts) err(1, "malloc");
+ ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
+ if (ret == -1) err(1, "ioctl READ_COUNTS");
return counts;
}
@@ -478,9 +478,6 @@ main(int argc, const char **argv)
pin_process(0, TARGET_CORE, true);
- cachepc_dev = open("/proc/cachepc", O_RDONLY);
- if (cachepc_dev < 0) err(1, "open /proc/cachepc");
-
sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC);
if (sev_dev < 0) err(1, "open /dev/sev");
@@ -492,13 +489,13 @@ main(int argc, const char **argv)
if (ret < 0) err(1, "KVM_GET_API_VERSION");
if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
- // Init L1 miss counter
+ /* init L1 miss counter for host kernel */
arg = 0x002264D8;
- ret = ioctl(cachepc_dev, CACHEPC_IOCTL_INIT_PMC, &arg);
- if (ret < 0) err(1, "ioctl fail");
+ ret = ioctl(kvm_dev, KVM_CPC_INIT_PMC, &arg);
+ if (ret < 0) err(1, "ioctl INIT_PMC");
baseline = calloc(sizeof(uint16_t), 64);
- if (!baseline) err(1, "counts");
+ if (!baseline) err(1, "calloc");
for (k = 0; k < 64; k++)
baseline[k] = UINT16_MAX;
@@ -537,7 +534,6 @@ main(int argc, const char **argv)
free(baseline);
- close(cachepc_dev);
close(kvm_dev);
close(sev_dev);
}
diff --git a/test/sevstep b/test/sevstep
Binary files differ.
diff --git a/test/sevstep.c b/test/sevstep.c
@@ -15,7 +15,7 @@ main(int argc, const char **argv)
track_all_pages_t tracking;
int ret, fd;
- fd = open("/proc/cachepc", O_RDONLY);
+ fd = open("/dev/kvm", O_RDONLY);
if (!fd) err(1, "open");
tracking.track_mode = KVM_PAGE_TRACK_ACCESS;