commit 0def0e2d6eeca32304a31072469b94f40c2ce2b9
parent 572985d705e4575d53ec5d312a484a96a01bce9f
Author: Louis Burda <quent.burda@gmail.com>
Date: Thu, 6 Oct 2022 14:07:45 +0200
Renaming, refactoring and pruning
Diffstat:
5 files changed, 905 insertions(+), 368 deletions(-)
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -25,7 +25,49 @@ uint64_t cachepc_regs_vm[16];
EXPORT_SYMBOL(cachepc_regs_tmp);
EXPORT_SYMBOL(cachepc_regs_vm);
-static long
+static long get_user_pages_remote_unlocked(struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages);
+
+static int read_physical(struct kvm *kvm, u64 gpa,
+ void *buf, u64 size, bool decrypt_at_host);
+// static int write_physical(struct kvm *kvm, u64 gpa, u64 size,
+// const void *buf, bool write_plaintexts);
+// static int print_physical(struct kvm *kvm, u64 gpa,
+// u64 size, bool decrypt_at_host);
+// static int map_physical(struct kvm *kvm, u64 gpa,
+// bool decrypt_at_host, void **mapping, struct page **page);
+// static void unmap_physical(void **mapping, struct page **page);
+
+// static int read_mapped(u64 gpa, void *buff, u64 size, void *mapping);
+// static int write_mapped(u64 gpa, u64 size, const void *buf, void *mapping);
+
+static void cachepc_kvm_prime_probe_test(void *p);
+static void cachepc_kvm_stream_hwpf_test(void *p);
+static void cachepc_kvm_single_access_test(void *p);
+static void cachepc_kvm_single_eviction_test(void *p);
+
+static void cachepc_kvm_system_setup(void);
+
+static int cachepc_kvm_test_access_ioctl(void __user *arg_user);
+static int cachepc_kvm_test_eviction_ioctl(void __user *arg_user);
+static int cachepc_kvm_init_pmc_ioctl(void __user *arg_user);
+static int cachepc_kvm_read_pmc_ioctl(void __user *arg_user);
+static int cachepc_kvm_read_counts_ioctl(void __user *arg_user);
+static int cachepc_kvm_setup_pmc_ioctl(void __user *arg_user);
+static int cachepc_kvm_read_guest_memory_ioctl(void __user *arg_user);
+
+static int cachepc_kvm_track_page_ioctl(void __user *arg_user);
+static int cachepc_kvm_batch_track_start_ioctl(void __user *arg_user);
+static int cachepc_kvm_batch_track_count_ioctl(void __user *arg_user);
+static int cachepc_kvm_batch_track_stop_ioctl(void __user *arg_user);
+static int cachepc_kvm_track_all_ioctl(void __user *arg_user);
+static int cachepc_kvm_untrack_all_ioctl(void __user *arg_user);
+static int cachepc_kvm_uspt_reset_ioctl(void __user *arg_user);
+static int cachepc_kvm_poll_event_ioctl(void __user *arg_user);
+static int cachepc_kvm_uscpt_ack_event_ioctl(void __user *arg_user);
+
+long
get_user_pages_remote_unlocked(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages)
@@ -74,12 +116,12 @@ get_user_pages_remote_unlocked(struct mm_struct *mm,
// }
int
-read_physical(struct kvm *kvm, u64 gpa, void *buff, u64 size,
+read_physical(struct kvm *kvm, u64 gpa, void *buf, u64 size,
bool decrypt_at_host)
{
- unsigned long hva;
struct page *page = NULL;
void *ptr_page = NULL;
+ unsigned long hva;
uint64_t offset;
int ec;
@@ -96,20 +138,15 @@ read_physical(struct kvm *kvm, u64 gpa, void *buff, u64 size,
hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
- // TODO: test change
- /*
if (kvm_is_error_hva(hva)) {
- printk(KERN_CRIT "Luca: read_physical: translation to hva failed( gpa was "
- "%016llx hva is %016lx\n",
- gpa, hva);
+ pr_warn("read_physical: translation to hva failed( gpa was "
+ "%016llx hva is %016lx\n", gpa, hva);
ec = -100;
goto out;
}
- */
if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, &page) != 1) {
pr_warn("read_physical: failed to get page struct from mm\n");
- // ec = -KVM_EINVAL;
ec = -100;
goto out;
}
@@ -123,9 +160,9 @@ read_physical(struct kvm *kvm, u64 gpa, void *buff, u64 size,
ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
}
- /*printk("value of buff ptr = %p\t value of ptr_page=%p\n", buff,
+ /*printk("value of buf ptr = %p\t value of ptr_page=%p\n", buf,
ptr_page + offset);*/
- memcpy(buff, ptr_page + offset, size);
+ memcpy(buf, ptr_page + offset, size);
out:
if (ptr_page)
@@ -136,156 +173,154 @@ out:
return ec;
}
-int
-print_physical(struct kvm *kvm, u64 gpa, u64 size, bool decrypt_at_host)
-{
- u8 *buffer;
- int i, err;
-
- buffer = kmalloc(size, GFP_ATOMIC);
-
- err = read_physical(kvm, gpa, buffer, size, decrypt_at_host);
- if (err != 0) {
- pr_warn("at %s line %d: read_physical "
- "failed with: %d\n", __FILE__, __LINE__, err);
- }
- for (i = 0; i < size; i++) {
- // print bytewise with line break every 16 bytes
- if (i % 16 == 0) {
- printk("%02x ", buffer[i]);
- } else {
- printk(KERN_CONT " %02x ", buffer[i]);
- }
- }
- printk("\n");
-
- kfree(buffer);
-
- return err;
-}
-
-int
-map_physical(struct kvm *kvm, u64 gpa, bool decrypt_at_host,
- void **mapping, struct page **page)
-{
-
- int ec;
- unsigned long hva;
- uint64_t offset;
-
- offset = (gpa & 0xFFF);
-
- ec = 0;
-
- hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-
- if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, page) != 1) {
- pr_warn("map_physical: failed to get page struct from mm");
- // ec = -KVM_EINVAL;
- ec = -100;
- return ec;
- }
-
- if (decrypt_at_host) {
- // map with encryption bit. Content is decrypted with host key. If SEV is
- // disabled but SME is enabled this allows reading the plaintext.
- (*mapping) = vmap(page, 1, 0, PAGE_KERNEL);
- } else {
- // map without encryption bit to read ciphertexts
- (*mapping) = vmap(page, 1, 0, __pgprot(__PAGE_KERNEL));
- }
-
- return ec;
-}
-
-void
-unmap_physical(void **mapping, struct page **page)
-{
- if (*mapping)
- vunmap(*mapping);
- if (*page)
- put_page(*page);
-}
-
-int
-read_mapped(u64 gpa, void *buff, u64 size, void *mapping)
-{
- uint64_t offset;
- offset = (gpa & 0xFFF);
-
- if ((offset + size - 1) > 0xFFF) {
- pr_warn("read_mapped: trying to read "
- "beyond page (offset+size=%016llx)\n",
- offset + size);
- return -EINVAL;
- }
- memcpy(buff, mapping + offset, size);
-
- return 0;
-}
-
-int
-write_mapped(u64 gpa, u64 size, const void *buf, void *mapping)
-{
- uint64_t offset;
-
- offset = (gpa & 0xFFF);
-
- if ((offset + size - 1) > 0xFFF) {
- printk("write_physical: trying to write beyond page(offset+size=%016llx)\n",
- offset + size);
- return -EINVAL;
- }
- memcpy(mapping + offset, buf, size);
-
- return 0;
-}
-
-int
-write_physical(struct kvm *kvm, u64 gpa, u64 size,
- const void *buf, bool write_plaintexts)
-{
- int ec;
- unsigned long hva;
- struct page *page;
- void *ptr_page;
- uint64_t offset;
-
- offset = (gpa & 0xFFF);
-
- if ((offset + size - 1) > 0xFFF) {
- pr_warn("write_physical: trying to write "
- "beyond page(offset+size=%016llx)\n",
- offset + size);
- return -EINVAL;
- }
-
- ec = 0;
- hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-
- if (kvm_is_error_hva(hva))
- return -KVM_EINVAL;
+// int
+// write_physical(struct kvm *kvm, u64 gpa, u64 size,
+// const void *buf, bool write_plaintexts)
+// {
+// int ec;
+// unsigned long hva;
+// struct page *page;
+// void *ptr_page;
+// uint64_t offset;
+//
+// offset = (gpa & 0xFFF);
+//
+// if ((offset + size - 1) > 0xFFF) {
+// pr_warn("write_physical: trying to write "
+// "beyond page(offset+size=%016llx)\n",
+// offset + size);
+// return -EINVAL;
+// }
+//
+// ec = 0;
+// hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+//
+// if (kvm_is_error_hva(hva))
+// return -KVM_EINVAL;
+//
+// if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, FOLL_WRITE, &page) != 1)
+// return -KVM_EINVAL;
+//
+// if (write_plaintexts) {
+// // map with encryption bit to apply host encryption. Useful if SEV is
+// // disabled but SME is enabled and we want to write a certain value into a
+// // page
+// ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
+// } else {
+// // map without encryption bit to write ciphertexts
+// ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
+// }
+//
+// memcpy(ptr_page + offset, buf, size);
+//
+// vunmap(ptr_page);
+// put_page(page);
+// return ec;
+// }
- if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, FOLL_WRITE, &page) != 1)
- return -KVM_EINVAL;
+// int
+// print_physical(struct kvm *kvm, u64 gpa, u64 size, bool decrypt_at_host)
+// {
+// u8 *buf;
+// int i, err;
+//
+// buf = kmalloc(size, GFP_ATOMIC);
+//
+// err = read_physical(kvm, gpa, buf, size, decrypt_at_host);
+// if (err != 0) {
+// pr_warn("at %s line %d: read_physical "
+// "failed with: %d\n", __FILE__, __LINE__, err);
+// }
+// for (i = 0; i < size; i++) {
+// // print bytewise with line break every 16 bytes
+// if (i % 16 == 0) {
+// printk("%02x ", buf[i]);
+// } else {
+// printk(KERN_CONT " %02x ", buf[i]);
+// }
+// }
+// printk("\n");
+//
+// kfree(buf);
+//
+// return err;
+// }
- if (write_plaintexts) {
- // map with encryption bit to apply host encryption. Useful if SEV is
- // disabled but SME is enabled and we want to write a certain value into a
- // page
- ptr_page = vmap(&page, 1, 0, PAGE_KERNEL_NOCACHE);
- } else {
- // map without encryption bit to write ciphertexts
- ptr_page = vmap(&page, 1, 0, __pgprot(__PAGE_KERNEL_NOCACHE));
- }
+// int
+// map_physical(struct kvm *kvm, u64 gpa, bool decrypt_at_host,
+// void **mapping, struct page **page)
+// {
+//
+// unsigned long hva;
+// uint64_t offset;
+// int err;
+//
+// offset = (gpa & 0xFFF);
+//
+// err = 0;
+//
+// hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+//
+// if (get_user_pages_remote_unlocked(kvm->mm, hva, 1, 0, page) != 1) {
+// pr_warn("map_physical: failed to get page struct from mm");
+// err = -100;
+// return err;
+// }
+//
+// if (decrypt_at_host) {
+// // map with encryption bit. Content is decrypted with host key. If SEV is
+// // disabled but SME is enabled this allows reading the plaintext.
+// (*mapping) = vmap(page, 1, 0, PAGE_KERNEL);
+// } else {
+// // map without encryption bit to read ciphertexts
+// (*mapping) = vmap(page, 1, 0, __pgprot(__PAGE_KERNEL));
+// }
+//
+// return err;
+// }
- memcpy(ptr_page + offset, buf, size);
+// void
+// unmap_physical(void **mapping, struct page **page)
+// {
+// if (*mapping)
+// vunmap(*mapping);
+// if (*page)
+// put_page(*page);
+// }
- vunmap(ptr_page);
- put_page(page);
- return ec;
-}
+// int
+// read_mapped(u64 gpa, void *buff, u64 size, void *mapping)
+// {
+// uint64_t offset;
+// offset = (gpa & 0xFFF);
+//
+// if ((offset + size - 1) > 0xFFF) {
+// pr_warn("read_mapped: trying to read "
+// "beyond page (offset+size=%016llx)\n",
+// offset + size);
+// return -EINVAL;
+// }
+// memcpy(buff, mapping + offset, size);
+//
+// return 0;
+// }
+// int
+// write_mapped(u64 gpa, u64 size, const void *buf, void *mapping)
+// {
+// uint64_t offset;
+//
+// offset = (gpa & 0xFFF);
+//
+// if ((offset + size - 1) > 0xFFF) {
+// printk("write_physical: trying to write beyond page(offset+size=%016llx)\n",
+// offset + size);
+// return -EINVAL;
+// }
+// memcpy(mapping + offset, buf, size);
+//
+// return 0;
+// }
void
cachepc_kvm_prime_probe_test(void *p)
@@ -353,10 +388,10 @@ cachepc_kvm_stream_hwpf_test(void *p)
count = 0;
cachepc_prime(cachepc_ds);
- count -= cachepc_read_pmc(0);
+ count -= cachepc_read_pmc(CPC_L1MISS_PMC);
for (i = 0; i < max; i++)
asm volatile ("mov (%0), %%rbx" : : "r"(lines + i) : "rbx");
- count += cachepc_read_pmc(0);
+ count += cachepc_read_pmc(CPC_L1MISS_PMC);
printk(KERN_WARNING "CachePC: HWPF test done (%u vs. %u => %s)\n",
count, max, (count == max) ? "passed" : "failed");
@@ -384,9 +419,9 @@ cachepc_kvm_single_access_test(void *p)
cachepc_prime(cachepc_ds);
- pre = cachepc_read_pmc(0);
+ pre = cachepc_read_pmc(CPC_L1MISS_PMC);
cachepc_victim(ptr);
- post = cachepc_read_pmc(0);
+ post = cachepc_read_pmc(CPC_L1MISS_PMC);
printk(KERN_WARNING "CachePC: Single access test done (%llu vs %u => %s)",
post - pre, 1, (post - pre == 1) ? "passed" : "failed");
@@ -471,17 +506,62 @@ cachepc_kvm_system_setup(void)
printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
}
-void
-cachepc_kvm_init_pmc_ioctl(void *p)
+int
+cachepc_kvm_test_access_ioctl(void __user *arg_user)
+{
+ uint32_t u32;
+ int ret;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+ return -EFAULT;
+
+ ret = smp_call_function_single(CPC_ISOLCPU,
+ cachepc_kvm_single_access_test, &u32, true);
+ WARN_ON(ret != 0);
+
+ if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int
+cachepc_kvm_test_eviction_ioctl(void __user *arg_user)
+{
+ uint32_t u32;
+ int ret;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+ return -EFAULT;
+
+ ret = smp_call_function_single(CPC_ISOLCPU,
+ cachepc_kvm_single_eviction_test, &u32, true);
+ WARN_ON(ret != 0);
+
+ if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int
+cachepc_kvm_init_pmc_ioctl(void __user *arg_user)
{
uint8_t index, event_no, event_mask;
uint8_t host_guest, kernel_user;
uint32_t event;
- WARN_ON(p == NULL);
- if (!p) return;
+ if (!arg_user) return -EINVAL;
+
+ if (smp_processor_id() != CPC_ISOLCPU)
+ return -EFAULT;
- event = *(uint32_t *)p;
+ if (copy_from_user(&event, arg_user, sizeof(uint32_t)))
+ return -EFAULT;
index = (event & 0xFF000000) >> 24;
host_guest = (event & 0x00300000) >> 20;
@@ -491,6 +571,99 @@ cachepc_kvm_init_pmc_ioctl(void *p)
cachepc_init_pmc(index, event_no, event_mask,
host_guest, kernel_user);
+
+ return 0;
+}
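
A userspace sketch of how the event word consumed above might be packed. The
index and host/guest positions follow the unpacking shown in this hunk;
placing kernel_user, event_no and event_mask in the lower bits is an
assumption, since that part of the handler falls outside the diff context:

	uint32_t event;

	event = ((uint32_t)index << 24)        /* PMC index, bits 31-24 */
	      | ((uint32_t)host_guest << 20)   /* host/guest filter, bits 21-20 */
	      | ((uint32_t)kernel_user << 16)  /* assumed position */
	      | ((uint32_t)event_mask << 8)    /* assumed position */
	      | (uint32_t)event_no;            /* assumed position */
	if (ioctl(kvm_dev, KVM_CPC_INIT_PMC, &event))
		err(1, "ioctl INIT_PMC");
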
+
+int
+cachepc_kvm_read_pmc_ioctl(void __user *arg_user)
+{
+ uint64_t count;
+ uint32_t event;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(&event, arg_user, sizeof(uint32_t)))
+ return -EFAULT;
+
+ count = cachepc_read_pmc(event);
+ if (copy_to_user(arg_user, &count, sizeof(uint64_t)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int
+cachepc_kvm_read_counts_ioctl(void __user *arg_user)
+{
+ if (!arg_user) return -EINVAL;
+
+ if (copy_to_user(arg_user, cachepc_msrmts,
+ cachepc_msrmts_count * sizeof(uint16_t)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int
+cachepc_kvm_setup_pmc_ioctl(void __user *arg_user)
+{
+ if (smp_processor_id() != CPC_ISOLCPU)
+ return -EFAULT;
+
+ /* L1 Misses in Host Kernel */
+ cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8,
+ PMC_HOST, PMC_KERNEL);
+
+ /* Retired Instructions in Guest */
+ cachepc_init_pmc(CPC_RETINST_PMC, 0xC0, 0x00,
+ PMC_GUEST, PMC_KERNEL | PMC_USER);
+
+ return 0;
+}
+
+int
+cachepc_kvm_read_guest_memory_ioctl(void __user *arg_user)
+{
+ read_guest_memory_t param;
+ void * buf;
+ int res;
+
+ if (!arg_user) return -EINVAL;
+
+ if (copy_from_user(&param, arg_user, sizeof(read_guest_memory_t))) {
+ printk(KERN_CRIT
+ "KVM_READ_GUEST_MEMORY: error copying arguments, exiting\n");
+ return -EFAULT;
+ }
+
+ if (param.len > PAGE_SIZE) {
+ printk("KVM_READ_GUEST_MEMORY: len may be at most page size\n");
+ return -EINVAL;
+ }
+
+ buf = kmalloc(param.len, GFP_KERNEL);
+ if (buf == NULL) {
+ printk("KVM_READ_GUEST_MEMORY: failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ if (param.wbinvd_cpu >= 0) {
+ wbinvd_on_cpu(param.wbinvd_cpu);
+ }
+ wbinvd_on_all_cpus();
+
+ res = read_physical(main_vm, param.gpa, buf,
+ param.len, param.decrypt_with_host_key);
+ if (res) {
+ printk("KVM_READ_GUEST_MEMORY: read_physical failed with %d\n", res);
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ if (copy_to_user(param.output_buffer, buf, param.len)) {
+ printk("KVM_READ_GUEST_MEMORY: failed to copy buf to userspace\n");
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ kfree(buf);
+ return 0;
}
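
A minimal userspace sketch of driving this handler; the field names are taken
from their uses above, while the struct layout itself lives in cachepc/uapi.h,
and the address used here is purely hypothetical:

	read_guest_memory_t param;
	uint8_t buf[64];

	memset(&param, 0, sizeof(param));
	param.gpa = 0x1000;                 /* hypothetical guest address */
	param.len = sizeof(buf);            /* at most PAGE_SIZE */
	param.wbinvd_cpu = -1;              /* negative: skip per-cpu wbinvd */
	param.decrypt_with_host_key = true;
	param.output_buffer = buf;
	if (ioctl(kvm_dev, KVM_CPC_READ_GUEST_MEMORY, &param))
		err(1, "ioctl READ_GUEST_MEMORY");
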
int
@@ -536,7 +709,7 @@ cachepc_kvm_batch_track_start_ioctl(void __user *arg_user)
if (!arg_user) return -EINVAL;
if (copy_from_user(&param, arg_user, sizeof(param))) {
- pr_warn("KVM_USPT_BATCH_TRACK_START: "
+ pr_warn("KVM_CPC_BATCH_TRACK_START: "
"error copying arguments, exiting\n");
return -EFAULT;
}
@@ -544,7 +717,7 @@ cachepc_kvm_batch_track_start_ioctl(void __user *arg_user)
ret = sevstep_uspt_batch_tracking_start(param.tracking_type,
param.expected_events, param.perf_cpu, param.retrack);
if (ret != 0) {
- pr_warn("KVM_USPT_BATCH_TRACK_START: failed\n");
+ pr_warn("KVM_CPC_BATCH_TRACK_START: failed\n");
return ret;
}
@@ -561,7 +734,7 @@ cachepc_kvm_batch_track_count_ioctl(void __user *arg_user)
result.event_count = sevstep_uspt_batch_tracking_get_events_count();
if (copy_to_user(arg_user, &result, sizeof(result))) {
- pr_warn("KVM_USPT_BATCH_TRACK_EVENT_COUNT: "
+ pr_warn("KVM_CPC_BATCH_TRACK_EVENT_COUNT: "
"error copying result to user, exiting\n");
return -EFAULT;
}
@@ -581,19 +754,19 @@ cachepc_kvm_batch_track_stop_ioctl(void __user *arg_user)
if (!arg_user) return -EINVAL;
if (copy_from_user(&param, arg_user, sizeof(param))) {
- pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
"error copying arguments, exiting\n");
return -EFAULT;
}
inner_user_out_buf = param.out_buf;
buf_bytes = sizeof(page_fault_event_t) * param.len;
- pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
"allocating %llu bytes for tmp buf\n", buf_bytes);
buf = vmalloc(buf_bytes);
if (buf == NULL) {
- pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
"failed to alloc tmp buf\n");
return -EFAULT;
}
@@ -602,20 +775,20 @@ cachepc_kvm_batch_track_stop_ioctl(void __user *arg_user)
ret = sevstep_uspt_batch_tracking_stop(buf, param.len,
&param.error_during_batch);
if (ret != 0) {
- pr_warn("KVM_USPT_BATCH_TRACK_STOP: failed\n");
+ pr_warn("KVM_CPC_BATCH_TRACK_STOP: failed\n");
vfree(buf);
return -EFAULT;
}
if (copy_to_user(arg_user, &param, sizeof(param))) {
- pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
"error copying result to user, exiting\n");
vfree(buf);
return -EFAULT;
}
if (copy_to_user(inner_user_out_buf, buf, buf_bytes)) {
- pr_warn("KVM_USPT_BATCH_TRACK_STOP: "
+ pr_warn("KVM_CPC_BATCH_TRACK_STOP: "
"error copying result to user, exiting\n");
vfree(buf);
return -EFAULT;
@@ -636,17 +809,17 @@ cachepc_kvm_track_all_ioctl(void __user *arg_user)
if (!arg_user) return -EINVAL;
if (copy_from_user(&param, arg_user, sizeof(param))) {
- pr_warn("KVM_USPT_TRACK_ALL: error copying arguments, exiting\n");
+ pr_warn("KVM_CPC_TRACK_ALL: error copying arguments, exiting\n");
return -EFAULT;
}
if (main_vm == NULL) {
- pr_warn("KVM_USPT_TRACK_ALL: main_vm is not initialized, aborting!\n");
+ pr_warn("KVM_CPC_TRACK_ALL: main_vm is not initialized, aborting!\n");
return -EFAULT;
}
if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
- pr_warn("KVM_USPT_TRACK_ALL: "
+ pr_warn("KVM_CPC_TRACK_ALL: "
"track_mode %d invalid, must be in range [%d,%d]\n",
param.track_mode, 0, KVM_PAGE_TRACK_MAX);
return -EFAULT;
@@ -668,17 +841,17 @@ cachepc_kvm_untrack_all_ioctl(void __user *arg_user)
if (!arg_user) return -EINVAL;
if (copy_from_user(&param, arg_user, sizeof(param))) {
- printk(KERN_CRIT "KVM_USPT_UNTRACK_ALL: error copying arguments, exiting\n");
+ printk(KERN_CRIT "KVM_CPC_UNTRACK_ALL: error copying arguments, exiting\n");
return -EFAULT;
}
if (main_vm == NULL) {
- printk("KVM_USPT_UNTRACK_ALL: main_vm is not initialized, aborting!\n");
+ printk("KVM_CPC_UNTRACK_ALL: main_vm is not initialized, aborting!\n");
return -EFAULT;
}
if (param.track_mode < 0 || param.track_mode >= KVM_PAGE_TRACK_MAX) {
- printk("KVM_USPT_UNTRACK_ALL: track_mode %d invalid, "
+ printk("KVM_CPC_UNTRACK_ALL: track_mode %d invalid, "
"must be in range [%d,%d]", param.track_mode,
0, KVM_PAGE_TRACK_MAX);
return -EFAULT;
@@ -691,51 +864,7 @@ cachepc_kvm_untrack_all_ioctl(void __user *arg_user)
}
int
-cachepc_kvm_read_guest_memory_ioctl(void __user *arg_user)
-{
- read_guest_memory_t param;
- void * buf;
- int res;
-
- if (!arg_user) return -EINVAL;
-
- if (copy_from_user(&param, arg_user, sizeof(read_guest_memory_t))) {
- printk(KERN_CRIT
- "KVM_READ_GUEST_MEMORY: error copying arguments, exiting\n");
- return -EFAULT;
- }
-
- if (param.len > PAGE_SIZE) {
- printk("KVM_READ_GUEST_MEMORY len may be at most page size");
- }
-
- buf = kmalloc(param.len, GFP_KERNEL);
- if (buf == NULL) {
- printk("KVM_READ_GUEST_MEMORY: failed to alloc memory");
- return -ENOMEM;
- }
-
- if (param.wbinvd_cpu >= 0) {
- wbinvd_on_cpu(param.wbinvd_cpu);
- }
- wbinvd_on_all_cpus();
-
- res = read_physical(main_vm, param.gpa, buf,
- param.len, param.decrypt_with_host_key);
- if (res) {
- printk("KVM_READ_GUEST_MEMORY: read_physical failed with %d\n", res);
- return -EINVAL;
- }
-
- if (copy_to_user(param.output_buffer, buf, param.len)) {
- printk("KVM_READ_GUEST_MEMORY: failed to copy buf to userspace");
- }
-
- return 0;
-}
-
-int
-cachepc_kvm_uspt_reset(void __user *arg_user)
+cachepc_kvm_uspt_reset_ioctl(void __user *arg_user)
{
struct kvm_vcpu *vcpu;
@@ -749,33 +878,14 @@ cachepc_kvm_uspt_reset(void __user *arg_user)
}
int
-cachepc_kvm_register_pid(void __user *arg_user)
+cachepc_kvm_poll_event_ioctl(void __user *arg_user)
{
- userspace_ctx_t ctx;
- struct kvm_vcpu *vcpu;
-
- if (!arg_user) return -EINVAL;
-
- if (copy_from_user(&ctx, arg_user, sizeof(userspace_ctx_t))) {
- printk("copy from user failed\n");
- return -EACCES;
- }
-
- if (main_vm == NULL) {
- printk("KVM_TRACK_PAGE: main_vm is not initialized, aborting!\n");
- return -EFAULT;
+ if (!sevstep_uspt_is_initialiized()) {
+ pr_warn("CachePC: userspace context uninitialized\n");
+ return -EINVAL;
}
- sevstep_uspt_clear();
- sevstep_uspt_initialize(ctx.pid, ctx.get_rip);
-
- printk("Resetting page tracking\n");
- vcpu = xa_load(&main_vm->vcpu_array, 0);
- sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_EXEC);
- sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_ACCESS);
- sevstep_stop_tracking(vcpu, KVM_PAGE_TRACK_WRITE);
-
- return 0;
+ return sevstep_uspt_handle_poll_event(arg_user);
}
int
@@ -785,10 +895,9 @@ cachepc_kvm_uscpt_ack_event_ioctl(void __user *arg_user)
if (!arg_user) return -EINVAL;
- if (!sevstep_uspt_is_initialiized()) {
- printk("userspace context not initilaized, call REGISTER_PID");
+ if (!sevstep_uspt_is_initialiized())
return -EINVAL;
- }
+
if (copy_from_user(&ack_event, arg_user, sizeof(ack_event_t))) {
printk("ACK_EVENT failed to copy args");
return -EINVAL;
@@ -801,85 +910,44 @@ long
cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
void __user *arg_user;
- uint32_t u32;
- uint64_t u64;
- int ret;
arg_user = (void __user *)arg;
switch (ioctl) {
case KVM_CPC_TEST_ACCESS:
- if (!arg_user) return -EINVAL;
- if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
- return -EFAULT;
- ret = smp_call_function_single(2,
- cachepc_kvm_single_access_test, &u32, true);
- WARN_ON(ret != 0);
- if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
- return -EFAULT;
- break;
+ return cachepc_kvm_test_access_ioctl(arg_user);
case KVM_CPC_TEST_EVICTION:
- if (!arg_user) return -EINVAL;
- if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
- return -EFAULT;
- ret = smp_call_function_single(2,
- cachepc_kvm_single_eviction_test, &u32, true);
- WARN_ON(ret != 0);
- if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
- return -EFAULT;
- break;
+ return cachepc_kvm_test_eviction_ioctl(arg_user);
case KVM_CPC_INIT_PMC:
- if (!arg_user) return -EINVAL;
- if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
- return -EFAULT;
- ret = smp_call_function_single(2,
- cachepc_kvm_init_pmc_ioctl, &u32, true);
- WARN_ON(ret != 0);
- break;
+ return cachepc_kvm_init_pmc_ioctl(arg_user);
case KVM_CPC_READ_PMC:
- if (!arg_user) return -EINVAL;
- if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
- return -EFAULT;
- u64 = cachepc_read_pmc(u32);
- if (copy_to_user(arg_user, &u64, sizeof(uint64_t)))
- return -EFAULT;
- break;
+ return cachepc_kvm_read_pmc_ioctl(arg_user);
case KVM_CPC_READ_COUNTS:
- if (!arg_user) return -EINVAL;
- if (copy_to_user(arg_user, cachepc_msrmts,
- cachepc_msrmts_count * sizeof(uint16_t)))
- return -EFAULT;
- break;
- case KVM_TRACK_PAGE:
+ return cachepc_kvm_read_counts_ioctl(arg_user);
+ case KVM_CPC_SETUP_PMC:
+ return cachepc_kvm_setup_pmc_ioctl(arg_user);
+ case KVM_CPC_TRACK_PAGE:
return cachepc_kvm_track_page_ioctl(arg_user);
- case KVM_USPT_BATCH_TRACK_START:
+ case KVM_CPC_BATCH_TRACK_START:
return cachepc_kvm_batch_track_start_ioctl(arg_user);
- case KVM_USPT_BATCH_TRACK_EVENT_COUNT:
+ case KVM_CPC_BATCH_TRACK_EVENT_COUNT:
return cachepc_kvm_batch_track_count_ioctl(arg_user);
- case KVM_USPT_BATCH_TRACK_STOP:
+ case KVM_CPC_BATCH_TRACK_STOP:
return cachepc_kvm_batch_track_stop_ioctl(arg_user);
- case KVM_USPT_TRACK_ALL:
+ case KVM_CPC_TRACK_ALL:
return cachepc_kvm_track_all_ioctl(arg_user);
- case KVM_USPT_UNTRACK_ALL:
+ case KVM_CPC_UNTRACK_ALL:
return cachepc_kvm_untrack_all_ioctl(arg_user);
- case KVM_READ_GUEST_MEMORY:
+ case KVM_CPC_READ_GUEST_MEMORY:
return cachepc_kvm_read_guest_memory_ioctl(arg_user);
- case KVM_USPT_RESET:
- return cachepc_kvm_uspt_reset(arg_user);
- case KVM_USPT_REGISTER_PID:
- return cachepc_kvm_register_pid(arg_user);
- case KVM_USPT_POLL_EVENT:
- if (!sevstep_uspt_is_initialiized()) {
- printk("userspace context not initilaized, call REGISTER_PID");
- return -EINVAL;
- }
- return sevstep_uspt_handle_poll_event(arg_user);
- case KVM_USPT_ACK_EVENT:
+ case KVM_CPC_RESET_TRACKING:
+ return cachepc_kvm_uspt_reset_ioctl(arg_user);
+ case KVM_CPC_POLL_EVENT:
+ return cachepc_kvm_poll_event_ioctl(arg_user);
+ case KVM_CPC_ACK_EVENT:
return cachepc_kvm_uscpt_ack_event_ioctl(arg_user);
default:
return kvm_arch_dev_ioctl(file, ioctl, arg);
}
-
- return 0;
}
void
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -4,29 +4,31 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#define CPC_ISOLCPU 2
+
+#define CPC_L1MISS_PMC 0
+#define CPC_RETINST_PMC 1
+
#define KVM_CPC_TEST_ACCESS _IOWR(KVMIO, 0x20, __u32)
#define KVM_CPC_TEST_EVICTION _IOWR(KVMIO, 0x21, __u32)
#define KVM_CPC_INIT_PMC _IOW(KVMIO, 0x22, __u32)
#define KVM_CPC_READ_PMC _IOWR(KVMIO, 0x23, __u32)
#define KVM_CPC_READ_COUNTS _IOR(KVMIO, 0x24, __u64)
-
-#define KVM_TRACK_PAGE _IOWR(KVMIO, 0x30, track_page_param_t)
-#define KVM_USPT_REGISTER_PID _IOWR(KVMIO, 0x31, userspace_ctx_t)
-#define KVM_USPT_WAIT_AND_SEND _IO(KVMIO, 0x32)
-#define KVM_USPT_POLL_EVENT _IOWR(KVMIO, 0x33, page_fault_event_t)
-#define KVM_USPT_ACK_EVENT _IOWR(KVMIO, 0x34, ack_event_t)
-#define KVM_READ_GUEST_MEMORY _IOWR(KVMIO, 0x35, read_guest_memory_t)
-#define KVM_USPT_RESET _IO(KVMIO, 0x36)
-#define KVM_USPT_TRACK_ALL _IOWR(KVMIO, 0x37, track_all_pages_t)
-#define KVM_USPT_UNTRACK_ALL _IOWR(KVMIO, 0x38, track_all_pages_t)
-#define KVM_USPT_SETUP_RETINSTR_PERF _IOWR(KVMIO, 0x39, retired_instr_perf_config_t)
-#define KVM_USPT_READ_RETINSTR_PERF _IOWR(KVMIO, 0x3A, retired_instr_perf_t)
-#define KVM_USPT_BATCH_TRACK_START _IOWR(KVMIO, 0x3B, batch_track_config_t)
-#define KVM_USPT_BATCH_TRACK_STOP _IOWR(KVMIO, 0x3C, batch_track_stop_and_get_t)
-#define KVM_USPT_BATCH_TRACK_EVENT_COUNT _IOWR(KVMIO, 0x3D, batch_track_event_count_t)
-
-#define KVM_USPT_POLL_EVENT_NO_EVENT 1000
-#define KVM_USPT_POLL_EVENT_GOT_EVENT 0
+#define KVM_CPC_SETUP_PMC _IO(KVMIO, 0x25)
+#define KVM_CPC_READ_GUEST_MEMORY _IOWR(KVMIO, 0x26, read_guest_memory_t)
+
+#define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x30, track_page_param_t)
+#define KVM_CPC_POLL_EVENT _IOWR(KVMIO, 0x31, page_fault_event_t)
+#define KVM_CPC_ACK_EVENT _IOWR(KVMIO, 0x32, ack_event_t)
+#define KVM_CPC_RESET_TRACKING _IO(KVMIO, 0x33)
+#define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x34, track_all_pages_t)
+#define KVM_CPC_UNTRACK_ALL _IOWR(KVMIO, 0x35, track_all_pages_t)
+#define KVM_CPC_BATCH_TRACK_START _IOWR(KVMIO, 0x36, batch_track_config_t)
+#define KVM_CPC_BATCH_TRACK_STOP _IOWR(KVMIO, 0x37, batch_track_stop_and_get_t)
+#define KVM_CPC_BATCH_TRACK_EVENT_COUNT _IOWR(KVMIO, 0x38, batch_track_event_count_t)
+
+#define CPC_USPT_POLL_EVENT_NO_EVENT 1000
+#define CPC_USPT_POLL_EVENT_GOT_EVENT 0
enum kvm_page_track_mode {
KVM_PAGE_TRACK_WRITE,
diff --git a/cachepc/uspt.c b/cachepc/uspt.c
@@ -46,7 +46,7 @@ static int have_event = 0;
static bool get_rip = true;
-static int inited = 0;
+static int was_init = 0;
DEFINE_SPINLOCK(batch_track_state_lock);
static batch_track_state_t batch_track_state;
@@ -59,7 +59,7 @@ void
sevstep_uspt_clear(void)
{
write_lock(&event_lock);
- inited = 0;
+ was_init = 1;
last_sent_event_id = 1;
last_acked_event_id = 1;
have_event = 0;
@@ -68,23 +68,9 @@ sevstep_uspt_clear(void)
}
int
-sevstep_uspt_initialize(int pid, bool should_get_rip)
-{
- write_lock(&event_lock);
- inited = 1;
- last_sent_event_id = 1;
- last_acked_event_id = 1;
- have_event = 0;
- get_rip = should_get_rip;
- write_unlock(&event_lock);
-
- return 0;
-}
-
-int
sevstep_uspt_is_initialiized()
{
- return inited;
+ return was_init;
}
bool
@@ -175,7 +161,7 @@ sevstep_uspt_handle_poll_event(page_fault_event_t* userpace_mem)
read_lock(&event_lock);
if (!have_event) {
read_unlock(&event_lock);
- return KVM_USPT_POLL_EVENT_NO_EVENT;
+ return CPC_USPT_POLL_EVENT_NO_EVENT;
}
read_unlock(&event_lock);
@@ -185,7 +171,7 @@ sevstep_uspt_handle_poll_event(page_fault_event_t* userpace_mem)
&sent_event, sizeof(page_fault_event_t));
have_event = 0;
} else {
- err = KVM_USPT_POLL_EVENT_NO_EVENT;
+ err = CPC_USPT_POLL_EVENT_NO_EVENT;
}
write_unlock(&event_lock);
@@ -222,7 +208,7 @@ perf_state_update_and_get_delta(uint64_t current_event_idx)
if (perf_state.delta_valid_idx == current_event_idx) {
if (current_event_idx == 0) {
perf_state.idx_for_last_perf_reading = current_event_idx;
- perf_state.last_perf_reading = cachepc_read_pmc(0);
+ perf_state.last_perf_reading = cachepc_read_pmc(CPC_RETINST_PMC);
}
return perf_state.delta;
}
@@ -234,7 +220,7 @@ perf_state_update_and_get_delta(uint64_t current_event_idx)
perf_state.idx_for_last_perf_reading, current_event_idx);
}
- current_value = cachepc_read_pmc(0);
+ current_value = cachepc_read_pmc(CPC_RETINST_PMC);
perf_state.delta = (current_value - perf_state.last_perf_reading);
perf_state.delta_valid_idx = current_event_idx;
diff --git a/cachepc/uspt.h b/cachepc/uspt.h
@@ -7,7 +7,6 @@
#include <linux/types.h>
-int sevstep_uspt_initialize(int pid, bool should_get_rip);
int sevstep_uspt_is_initialiized(void);
void sevstep_uspt_clear(void);
diff --git a/test/sevstep.c b/test/sevstep.c
@@ -1,34 +1,516 @@
+#define _GNU_SOURCE
+
#include "cachepc/uapi.h"
+#include <linux/psp-sev.h>
#include <linux/kvm.h>
+#include <sys/syscall.h>
#include <sys/ioctl.h>
-
-#include <err.h>
+#include <sys/user.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#include <unistd.h>
+#include <signal.h>
+#include <dirent.h>
+#include <assert.h>
+#include <errno.h>
+#include <err.h>
#include <fcntl.h>
+#include <sched.h>
+#include <string.h>
+#include <stdbool.h>
#include <stdlib.h>
+#include <stdint.h>
#include <stdio.h>
+#include <stdarg.h>
+
+#define ARRLEN(x) (sizeof(x) / sizeof((x)[0]))
+#define MIN(a,b) ((a) > (b) ? (b) : (a))
+
+#define SAMPLE_COUNT 100
+
+#define TARGET_CORE 2
+#define SECONDARY_CORE 3
+
+#define TARGET_CACHE_LINESIZE 64
+#define TARGET_SET 15
+
+struct kvm {
+ int vmfd, vcpufd;
+ void *mem;
+ size_t memsize;
+ struct kvm_run *run;
+};
+
+/* start and end for guest assembly */
+extern uint8_t __start_guest_with[];
+extern uint8_t __stop_guest_with[];
+
+/* ioctl dev fds */
+int kvm_dev, sev_dev;
+
+enum {
+ GSTATE_UNINIT,
+ GSTATE_LUPDATE,
+ GSTATE_LSECRET,
+ GSTATE_RUNNING,
+ GSTATE_SUPDATE,
+ GSTATE_RUPDATE,
+ GSTATE_SENT
+};
+
+const char *sev_fwerr_strs[] = {
+ "Success",
+ "Platform state is invalid",
+ "Guest state is invalid",
+ "Platform configuration is invalid",
+ "Buffer too small",
+ "Platform is already owned",
+ "Certificate is invalid",
+ "Policy is not allowed",
+ "Guest is not active",
+ "Invalid address",
+ "Bad signature",
+ "Bad measurement",
+ "Asid is already owned",
+ "Invalid ASID",
+ "WBINVD is required",
+ "DF_FLUSH is required",
+ "Guest handle is invalid",
+ "Invalid command",
+ "Guest is active",
+ "Hardware error",
+ "Hardware unsafe",
+ "Feature not supported",
+ "Invalid parameter",
+ "Out of resources",
+ "Integrity checks failed"
+};
+
+const char *sev_gstate_strs[] = {
+ "UNINIT",
+ "LUPDATE",
+ "LSECRET",
+ "RUNNING",
+ "SUPDATE",
+ "RUPDATE",
+ "SEND"
+};
+
+void
+hexdump(void *data, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (i % 16 == 0 && i)
+ printf("\n");
+ printf("%02X ", *(uint8_t *)(data + i));
+ }
+ printf("\n");
+}
+
+// REF: https://events19.linuxfoundation.org/wp-content/uploads/2017/12/Extending-Secure-Encrypted-Virtualization-with-SEV-ES-Thomas-Lendacky-AMD.pdf
+// REF: https://www.spinics.net/lists/linux-kselftest/msg27206.html
+__attribute__((section("guest_with"))) void
+vm_guest_with(void)
+{
+ while (1) {
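+ /* touch one line in the target cache set, then hlt to exit to the host */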
+ asm volatile("mov (%[v]), %%bl"
+ : : [v] "r" (TARGET_CACHE_LINESIZE * TARGET_SET));
+ //asm volatile("out %%al, (%%dx)" : : );
+ asm volatile("hlt");
+ //asm volatile("rep; vmmcall\n\r");
+ }
+}
+
+bool
+pin_process(pid_t pid, int cpu, bool assert)
+{
+ cpu_set_t cpuset;
+ int ret;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ ret = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset);
+ if (ret < 0) {
+ if (assert) err(1, "sched_setaffinity");
+ return false;
+ }
+
+ return true;
+}
+
+int
+read_stat_core(pid_t pid)
+{
+ char path[256];
+ char line[2048];
+ FILE *file;
+ char *p;
+ int i, cpu;
+
+ snprintf(path, sizeof(path), "/proc/%u/stat", pid);
+ file = fopen(path, "r");
+ if (!file) return -1;
+
+ if (!fgets(line, sizeof(line), file))
+ err(1, "read stat");
+
+ p = line;
+ for (i = 0; i < 38 && (p = strchr(p, ' ')); i++)
+ p += 1;
+
+ if (!p) errx(1, "stat format");
+ cpu = atoi(p);
+
+ fclose(file);
+
+ return cpu;
+}
+
+const char *
+sev_fwerr_str(int code)
+{
+ if (code < 0 || code >= ARRLEN(sev_fwerr_strs))
+ return "Unknown error";
+
+ return sev_fwerr_strs[code];
+}
+
+const char *
+sev_gstate_str(int code)
+{
+ if (code < 0 || code >= ARRLEN(sev_gstate_strs))
+ return "Unknown gstate";
+
+ return sev_gstate_strs[code];
+}
+
+int
+sev_ioctl(int vmfd, int cmd, void *data, int *error)
+{
+ struct kvm_sev_cmd input;
+ int ret;
+
+ memset(&input, 0, sizeof(input));
+ input.id = cmd;
+ input.sev_fd = sev_dev;
+ input.data = (uintptr_t) data;
+
+ ret = ioctl(vmfd, KVM_MEMORY_ENCRYPT_OP, &input);
+ if (error) *error = input.error;
+
+ return ret;
+}
+
+uint8_t *
+sev_get_measure(int vmfd)
+{
+ struct kvm_sev_launch_measure msrmt;
+ int ret, fwerr;
+ uint8_t *data;
+
+ memset(&msrmt, 0, sizeof(msrmt));
+ ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
+ if (ret < 0 && fwerr != SEV_RET_INVALID_LEN)
+ errx(1, "LAUNCH_MEASURE: (%s) %s", strerror(errno), sev_fwerr_str(fwerr));
+
+ data = malloc(msrmt.len);
+ if (!data) err(1, "malloc");
+ msrmt.uaddr = (uintptr_t) data;
+
+ ret = sev_ioctl(vmfd, KVM_SEV_LAUNCH_MEASURE, &msrmt, &fwerr);
+ if (ret < 0)
+ errx(1, "LAUNCH_MEASURE: (%s) %s", strerror(errno), sev_fwerr_str(fwerr));
+
+ return data;
+}
+
+uint8_t
+sev_guest_state(int vmfd, uint32_t handle)
+{
+ struct kvm_sev_guest_status status;
+ int ret, fwerr;
+
+ status.handle = handle;
+ ret = sev_ioctl(vmfd, KVM_SEV_GUEST_STATUS, &status, &fwerr);
+ if (ret < 0) {
+ errx(1, "KVM_SEV_GUEST_STATUS: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+ }
+
+ return status.state;
+}
+
+void
+sev_dbg_encrypt(int vmfd, void *dst, void *src, size_t size)
+{
+ struct kvm_sev_dbg enc;
+ int ret, fwerr;
+
+ enc.src_uaddr = (uintptr_t) src;
+ enc.dst_uaddr = (uintptr_t) dst;
+ enc.len = size;
+ ret = sev_ioctl(vmfd, KVM_SEV_DBG_ENCRYPT, &enc, &fwerr);
+ if (ret < 0) errx(1, "KVM_SEV_DBG_ENCRYPT: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+}
+
+void
+sev_dbg_decrypt(int vmfd, void *dst, void *src, size_t size)
+{
+ struct kvm_sev_dbg enc;
+ int ret, fwerr;
+
+ enc.src_uaddr = (uintptr_t) src;
+ enc.dst_uaddr = (uintptr_t) dst;
+ enc.len = size;
+ ret = sev_ioctl(vmfd, KVM_SEV_DBG_DECRYPT, &enc, &fwerr);
+ if (ret < 0) errx(1, "KVM_SEV_DBG_DECRYPT: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+}
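
Neither debug helper is exercised in main below. As an illustrative sketch
(assuming a guest policy that leaves debugging enabled, i.e. the NODBG policy
bit clear, and a launched struct kvm *kvm as used elsewhere in this file), the
guest's first page could be read back with:

	uint8_t plain[4096];

	sev_dbg_decrypt(kvm->vmfd, plain, kvm->mem, sizeof(plain));
	hexdump(plain, 64);
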
+
+void
+sev_kvm_init(struct kvm *kvm, size_t ramsize, void *code_start, void *code_stop)
+{
+ // REF: https://www.amd.com/system/files/TechDocs/55766_SEV-KM_API_Specification.pdf
+ struct kvm_sev_launch_update_data update;
+ struct kvm_sev_launch_start start;
+ struct kvm_userspace_memory_region region;
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ uint8_t *msrmt;
+ int ret, fwerr;
+
+ /* Create a kvm instance */
+ kvm->vmfd = ioctl(kvm_dev, KVM_CREATE_VM, 0);
+ if (kvm->vmfd < 0) err(1, "KVM_CREATE_VM");
+
+ /* Allocate guest memory */
+ kvm->memsize = ramsize;
+ kvm->mem = mmap(NULL, kvm->memsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (kvm->mem == MAP_FAILED) err(1, "Allocating guest memory");
+ assert(code_stop - code_start <= kvm->memsize);
+ memcpy(kvm->mem, code_start, code_stop - code_start);
+
+ /* Map it into the vm */
+ memset(&region, 0, sizeof(region));
+ region.slot = 0;
+ region.memory_size = kvm->memsize;
+ region.guest_phys_addr = 0;
+ region.userspace_addr = (uintptr_t) kvm->mem;
+ ret = ioctl(kvm->vmfd, KVM_SET_USER_MEMORY_REGION, &region);
+ if (ret < 0) err(1, "KVM_SET_USER_MEMORY_REGION");
+
+ /* Enable SEV for vm */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_ES_INIT, NULL, &fwerr);
+ if (ret < 0) errx(1, "KVM_SEV_ES_INIT: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Create virtual cpu */
+ kvm->vcpufd = ioctl(kvm->vmfd, KVM_CREATE_VCPU, 0);
+ if (kvm->vcpufd < 0) err(1, "KVM_CREATE_VCPU");
+
+ /* Map the shared kvm_run structure and following data */
+ ret = ioctl(kvm_dev, KVM_GET_VCPU_MMAP_SIZE, NULL);
+ if (ret < 0) err(1, "KVM_GET_VCPU_MMAP_SIZE");
+ if (ret < sizeof(struct kvm_run))
+ errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
+ kvm->run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
+ MAP_SHARED, kvm->vcpufd, 0);
+ if (kvm->run == MAP_FAILED) err(1, "mmap vcpu");
+
+ /* Initialize segment regs */
+ memset(&sregs, 0, sizeof(sregs));
+ ret = ioctl(kvm->vcpufd, KVM_GET_SREGS, &sregs);
+ if (ret < 0) err(1, "KVM_GET_SREGS");
+ sregs.cs.base = 0;
+ sregs.cs.selector = 0;
+ ret = ioctl(kvm->vcpufd, KVM_SET_SREGS, &sregs);
+ if (ret < 0) err(1, "KVM_SET_SREGS");
+
+ /* Initialize rest of registers */
+ memset(&regs, 0, sizeof(regs));
+ regs.rip = 0;
+ regs.rsp = kvm->memsize - 8;
+ regs.rbp = kvm->memsize - 8;
+ ret = ioctl(kvm->vcpufd, KVM_SET_REGS, &regs);
+ if (ret < 0) err(1, "KVM_SET_REGS");
+
+ /* Generate encryption keys and set policy */
+ memset(&start, 0, sizeof(start));
+ start.handle = 0;
+ start.policy = 1 << 2; /* require ES */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_START, &start, &fwerr);
+ if (ret < 0) errx(1, "KVM_SEV_LAUNCH_START: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Prepare the vm memory (by encrypting it) */
+ memset(&update, 0, sizeof(update));
+ update.uaddr = (uintptr_t) kvm->mem;
+ update.len = ramsize;
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fwerr);
+ if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_DATA: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Prepare the vm save area */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fwerr);
+ if (ret < 0) errx(1, "KVM_SEV_LAUNCH_UPDATE_VMSA: (%s) %s",strerror(errno), sev_fwerr_str(fwerr));
+
+ /* Collect a measurement (necessary) */
+ msrmt = sev_get_measure(kvm->vmfd);
+ free(msrmt);
+
+ /* Finalize launch process */
+ ret = sev_ioctl(kvm->vmfd, KVM_SEV_LAUNCH_FINISH, 0, &fwerr);
+ if (ret < 0) errx(1, "KVM_SEV_LAUNCH_FINISH: (%s) %s",
+ strerror(errno), sev_fwerr_str(fwerr));
+ ret = sev_guest_state(kvm->vmfd, start.handle);
+ if (ret != GSTATE_RUNNING)
+ errx(1, "Bad guest state: %s", sev_gstate_str(fwerr));
+}
+
+void
+sev_kvm_deinit(struct kvm *kvm)
+{
+ close(kvm->vmfd);
+ close(kvm->vcpufd);
+ munmap(kvm->mem, kvm->memsize);
+}
+
+uint16_t *
+read_counts()
+{
+ uint16_t *counts;
+ int ret;
+
+ counts = malloc(64 * sizeof(uint16_t));
+ if (!counts) err(1, "malloc");
+ ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
+ if (ret == -1) err(1, "ioctl READ_COUNTS");
+
+ return counts;
+}
+
+void
+print_counts(uint16_t *counts)
+{
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (i % 16 == 0 && i)
+ printf("\n");
+ if (counts[i] == 1)
+ printf("\x1b[38;5;88m");
+ else if (counts[i] > 1)
+ printf("\x1b[38;5;196m");
+ printf("%2i ", i);
+ if (counts[i] > 0)
+ printf("\x1b[0m");
+ }
+ printf("\n Target Set %i Count: %hu\n", TARGET_SET, counts[TARGET_SET]);
+ printf("\n");
+}
+
+uint16_t *
+collect(struct kvm *kvm)
+{
+ struct kvm_regs regs;
+ page_fault_event_t event;
+ ack_event_t ack;
+ int ret;
+
+ ret = ioctl(kvm->vcpufd, KVM_RUN, NULL);
+ if (ret < 0) err(1, "KVM_RUN");
+
+ if (kvm->run->exit_reason == KVM_EXIT_MMIO) {
+ memset(&regs, 0, sizeof(regs));
+ ret = ioctl(kvm->vcpufd, KVM_GET_REGS, &regs);
+ if (ret < 0) err(1, "KVM_GET_REGS");
+ errx(1, "KVM_EXTI_MMIO: Victim %s at 0x%08llx: rip=0x%08llx\n",
+ kvm->run->mmio.is_write ? "write" : "read",
+ kvm->run->mmio.phys_addr, regs.rip);
+ } else if (kvm->run->exit_reason != KVM_EXIT_HLT) {
+ errx(1, "KVM died: %i\n", kvm->run->exit_reason);
+ }
+
+ /* Get page fault info */
+ ret = ioctl(kvm_dev, KVM_CPC_POLL_EVENT, &event);
+ if (!ret) {
+ printf("Got page fault: %llu insts\n",
+ event.retired_instructions);
+
+ ack.id = event.id;
+ ret = ioctl(kvm_dev, KVM_CPC_ACK_EVENT, &ack);
+ if (ret == -1) err(1, "ioctl ACK_EVENT");
+ } else if (ret != CPC_USPT_POLL_EVENT_NO_EVENT) {
+ err(1, "ioctl POLL_EVENT");
+ }
+
+ return read_counts();
+}
int
main(int argc, const char **argv)
{
- track_all_pages_t tracking;
- int ret, fd;
+ uint16_t with_access[SAMPLE_COUNT][64];
+ struct kvm kvm_with_access;
+ track_all_pages_t track_all;
+ uint16_t *counts;
+ int i, ret;
+
+ setvbuf(stdout, NULL, _IONBF, 0);
+
+ pin_process(0, TARGET_CORE, true);
- fd = open("/dev/kvm", O_RDONLY);
- if (!fd) err(1, "open");
+ sev_dev = open("/dev/sev", O_RDWR | O_CLOEXEC);
+ if (sev_dev < 0) err(1, "open /dev/sev");
- tracking.track_mode = KVM_PAGE_TRACK_ACCESS;
- ret = ioctl(fd, KVM_USPT_TRACK_ALL, &tracking);
- if (ret == -1) err(1, "ioctl TRACK_ALL ACCESS");
+ kvm_dev = open("/dev/kvm", O_RDWR | O_CLOEXEC);
+ if (kvm_dev < 0) err(1, "open /dev/kvm");
+ /* Make sure we have the stable version of the API */
+ ret = ioctl(kvm_dev, KVM_GET_API_VERSION, NULL);
+ if (ret < 0) err(1, "KVM_GET_API_VERSION");
+ if (ret != 12) errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
- tracking.track_mode = KVM_PAGE_TRACK_RESET_ACCESSED;
- ret = ioctl(fd, KVM_USPT_TRACK_ALL, &tracking);
- if (ret == -1) err(1, "ioctl TRACK_ALL RESET_ACCESSED");
+ /* Setup needed performance counters */
+ ret = ioctl(kvm_dev, KVM_CPC_SETUP_PMC, NULL);
+ if (ret < 0) err(1, "ioctl SETUP_PMC");
- ret = ioctl(fd, KVM_USPT_UNTRACK_ALL, &tracking);
- if (ret == -1) err(1, "ioctl UNTRACK_ALL");
+ sev_kvm_init(&kvm_with_access, 64 * 64 * 8 * 2,
+ __start_guest_with, __stop_guest_with);
- close(fd);
+ /* One run to get into while loop (after stack setup) */
+ ioctl(kvm_with_access.vcpufd, KVM_RUN, NULL);
+
+ /* Reset previous tracking */
+ ret = ioctl(kvm_dev, KVM_CPC_RESET_TRACKING, NULL);
+ if (ret == -1) err(1, "ioctl RESET_TRACKING");
+
+ /* Init page tracking */
+ track_all.track_mode = KVM_PAGE_TRACK_ACCESS;
+ ret = ioctl(kvm_dev, KVM_CPC_TRACK_ALL, &track_all);
+ if (ret == -1) err(1, "ioctl TRACK_ALL");
+
+ for (i = 0; i < SAMPLE_COUNT; i++) {
+ counts = collect(&kvm_with_access);
+ memcpy(with_access[i], counts, 64 * sizeof(uint16_t));
+ free(counts);
+ }
+
+ for (i = 0; i < SAMPLE_COUNT; i++) {
+ printf("Evictions with access:\n");
+ print_counts(with_access[i]);
+ }
+
+ sev_kvm_deinit(&kvm_with_access);
+
+ close(kvm_dev);
+ close(sev_dev);
}
+