commit 647ced3c3389a01515aea1e391830fd8e7134931
parent 0a23cb0a512c3984aa0d68f4ec4a5c0c0d55a430
Author: Louis Burda <quent.burda@gmail.com>
Date: Tue, 6 Sep 2022 12:19:15 +0200
Refactor code out of kernel tree kvm.c into kmod dir to reduce patch size
Diffstat:
A  kmod/kvm.c | 368 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A  kmod/kvm.h |   6 ++++++
M  patch.diff | 448 +++----------------------------------------------------------------------------
3 files changed, 391 insertions(+), 431 deletions(-)
diff --git a/kmod/kvm.c b/kmod/kvm.c
@@ -0,0 +1,368 @@
+#include "kvm.h"
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+
+struct proc_ops cachepc_proc_ops;
+
+uint16_t *cachepc_msrmts;
+size_t cachepc_msrmts_count;
+EXPORT_SYMBOL(cachepc_msrmts);
+EXPORT_SYMBOL(cachepc_msrmts_count);
+
+cache_ctx *cachepc_ctx;
+cacheline *cachepc_ds;
+EXPORT_SYMBOL(cachepc_ctx);
+EXPORT_SYMBOL(cachepc_ds);
+
+int
+cachepc_kvm_proc_open(struct inode *inode, struct file *file)
+{
+ try_module_get(THIS_MODULE);
+
+ return 0;
+}
+
+int
+cachepc_kvm_proc_close(struct inode *inode, struct file *file)
+{
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
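+/* read handler for /proc/cachepc: copy the per-set measurement buffer
+ * (cachepc_msrmts) out to userspace, honoring the file offset */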
+ssize_t
+cachepc_kvm_proc_read(struct file *file, char *buf, size_t buflen, loff_t *off)
+{
+ size_t len, left;
+ size_t size;
+
+ printk(KERN_WARNING "CachePC: Reading entries (%lu:%lli)\n",
+ buflen, off ? *off : 0);
+
+ size = cachepc_msrmts_count * sizeof(uint16_t);
+ if (!off || *off >= size || *off < 0)
+ return 0;
+
+ len = size - *off;
+ if (len > buflen) len = buflen;
+
+ left = copy_to_user(buf, (uint8_t *) cachepc_msrmts + *off, len);
+
+ len -= left;
+ *off += len;
+
+ return len;
+}
+
+ssize_t
+cachepc_kvm_proc_write(struct file *file, const char *buf, size_t buflen, loff_t *off)
+{
+ return 0;
+}
+
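+/* self-test: prime and then probe the eviction set with no victim access
+ * in between; passes if no cacheline reports an eviction */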
+void
+cachepc_kvm_prime_probe_test(void *p)
+{
+ cacheline *lines;
+ cacheline *cl, *head;
+ uint32_t count;
+ uint32_t *arg;
+ int i, max;
+
+ arg = p;
+
+ /* l2 data cache, hit or miss */
+ cachepc_init_pmc(0, 0x64, 0xD8);
+
+ lines = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size);
+ BUG_ON(lines == NULL);
+
+ max = cachepc_ctx->nr_of_cachelines;
+
+ cachepc_cpuid();
+ cachepc_mfence();
+
+ for (i = 0; i < max; i++)
+ asm volatile ("mov (%0), %%rbx" : : "r"(lines + i) : "rbx");
+
+ head = cachepc_prime(cachepc_ds);
+ cachepc_probe(head);
+
+ count = 0;
+ cl = head = cachepc_ds;
+ do {
+ count += cl->count;
+ cl = cl->next;
+ } while (cl != head);
+
+ printk(KERN_WARNING "CachePC: Prime-probe test done (%u vs. %u => %s)\n",
+ count, 0, (count == 0) ? "passed" : "failed");
+
+ if (arg) *arg = (count == 0);
+
+ kfree(lines);
+}
+
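+/* self-test for hardware-prefetcher interference: touch every cacheline of
+ * a cache-sized buffer and compare the PMC delta to the expected count */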
+void
+cachepc_kvm_stream_hwpf_test(void *p)
+{
+ cacheline *lines;
+ uint32_t count;
+ uint32_t *arg;
+ uint32_t i, max;
+
+ arg = p;
+
+ /* TODO: accurately detect hwpf */
+
+ /* l2 data cache, hit or miss */
+ cachepc_init_pmc(0, 0x64, 0xD8);
+
+ lines = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size);
+ BUG_ON(lines == NULL);
+
+ max = cachepc_ctx->nr_of_cachelines;
+
+ cachepc_prime(cachepc_ds);
+
+	count = 0;
+	count -= cachepc_read_pmc(0);
+ for (i = 0; i < max; i++)
+ asm volatile ("mov (%0), %%rbx" : : "r"(lines + i) : "rbx");
+ count += cachepc_read_pmc(0);
+
+ printk(KERN_WARNING "CachePC: HWPF test done (%u vs. %u => %s)\n",
+ count, max, (count == max) ? "passed" : "failed");
+
+ if (arg) *arg = (count == max);
+
+ kfree(lines);
+}
+
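+/* self-test: a single victim access after priming should show up as
+ * exactly one PMC event */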
+void
+cachepc_kvm_single_access_test(void *p)
+{
+ cacheline *ptr;
+ uint64_t pre, post;
+ uint32_t *arg;
+
+ /* l2 data cache, hit or miss */
+ cachepc_init_pmc(0, 0x64, 0xD8);
+
+ arg = p;
+
+ WARN_ON(arg && *arg >= L1_SETS);
+ if (arg && *arg >= L1_SETS) return;
+ ptr = cachepc_prepare_victim(cachepc_ctx, arg ? *arg : 48);
+
+ cachepc_prime(cachepc_ds);
+
+ pre = cachepc_read_pmc(0);
+ cachepc_victim(ptr);
+ post = cachepc_read_pmc(0);
+
+	printk(KERN_WARNING "CachePC: Single access test done (%llu vs %u => %s)\n",
+ post - pre, 1, (post - pre == 1) ? "passed" : "failed");
+
+ if (arg) *arg = post - pre;
+
+ cachepc_release_victim(cachepc_ctx, ptr);
+}
+
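+/* self-test: a single victim access should evict exactly one cacheline,
+ * and that cacheline should belong to the targeted cache set */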
+void
+cachepc_kvm_single_eviction_test(void *p)
+{
+ cacheline *head, *cl, *evicted;
+ cacheline *ptr;
+ uint32_t target;
+ uint32_t *arg;
+ int count;
+
+ arg = p;
+
+ /* l2 data cache, hit or miss */
+ cachepc_init_pmc(0, 0x64, 0xD8);
+
+ WARN_ON(arg && *arg >= L1_SETS);
+ if (arg && *arg >= L1_SETS) return;
+ target = arg ? *arg : 48;
+
+ ptr = cachepc_prepare_victim(cachepc_ctx, target);
+
+ head = cachepc_prime(cachepc_ds);
+ cachepc_victim(ptr);
+ cachepc_probe(head);
+
+ count = 0;
+ evicted = NULL;
+ cl = head = cachepc_ds;
+ do {
+ if (IS_FIRST(cl->flags) && cl->count > 0) {
+ evicted = cl;
+ count += cl->count;
+ }
+ cl = cl->next;
+ } while (cl != head);
+
+ printk(KERN_WARNING "CachePC: Single eviction test done (%u vs %u => %s)\n",
+ count, 1, (count == 1 && evicted->cache_set == target) ? "passed" : "failed");
+ cachepc_save_msrmts(head);
+
+ if (arg) *arg = count;
+
+ cachepc_release_victim(cachepc_ctx, ptr);
+}
+
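+/* per-core setup of AMD-specific config MSRs (0xc0011020, 0xc0011022) to
+ * reduce noise from streaming stores, speculative dc-tlb reloads and the
+ * data cache hardware prefetcher */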
+void
+cachepc_kvm_system_setup(void)
+{
+ uint64_t reg_addr, val;
+ uint32_t lo, hi;
+
+ /* disable streaming store */
+ reg_addr = 0xc0011020;
+ asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr));
+ val = (uint64_t) lo | ((uint64_t) hi << 32);
+ val |= 1 << 13;
+ asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
+ printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
+
+ /* disable speculative data cache tlb reloads */
+ reg_addr = 0xc0011022;
+ asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr));
+ val = (uint64_t) lo | ((uint64_t) hi << 32);
+ val |= 1 << 4;
+ asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
+ printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
+
+ /* disable data cache hardware prefetcher */
+ reg_addr = 0xc0011022;
+ asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr));
+ val = (uint64_t) lo | ((uint64_t) hi << 32);
+ val |= 1 << 13;
+ asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
+ printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
+}
+
+void
+cachepc_kvm_init_pmc_ioctl(void *p)
+{
+ uint32_t event;
+ uint8_t index, event_no, event_mask;
+
+ WARN_ON(p == NULL);
+ if (!p) return;
+
+ event = *(uint32_t *)p;
+
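+	/* argument layout: bits 31-24 counter index, bits 15-8 event number,
+	 * bits 7-0 unit mask */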
+ index = (event & 0xFF000000) >> 24;
+ event_no = (event & 0x0000FF00) >> 8;
+ event_mask = (event & 0x000000FF) >> 0;
+
+ cachepc_init_pmc(index, event_no, event_mask);
+}
+
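+/* test ioctls are dispatched to core 2 via smp_call_function_single, the
+ * same core that cachepc_kvm_init() runs the setup on */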
+long
+cachepc_kvm_ioctl(struct file *file, unsigned int cmd, unsigned long argp)
+{
+ void __user *arg_user;
+ uint32_t u32;
+ int r;
+
+ arg_user = (void __user *)argp;
+ switch (cmd) {
+ case CACHEPC_IOCTL_TEST_ACCESS:
+ printk(KERN_WARNING "CachePC: Called ioctl access test\n");
+ if (!arg_user) return -EINVAL;
+ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+ return -EFAULT;
+ r = smp_call_function_single(2,
+ cachepc_kvm_single_access_test, &u32, true);
+ WARN_ON(r != 0);
+ if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
+ return -EFAULT;
+ break;
+ case CACHEPC_IOCTL_TEST_EVICTION:
+ printk(KERN_WARNING "CachePC: Called ioctl eviction test\n");
+ if (!arg_user) return -EINVAL;
+ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+ return -EFAULT;
+ r = smp_call_function_single(2,
+ cachepc_kvm_single_eviction_test, &u32, true);
+ WARN_ON(r != 0);
+ if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
+ return -EFAULT;
+ break;
+ case CACHEPC_IOCTL_INIT_PMC:
+ printk(KERN_WARNING "CachePC: Called ioctl init counter\n");
+ if (!arg_user) return -EINVAL;
+ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+ return -EFAULT;
+ r = smp_call_function_single(2,
+ cachepc_kvm_init_pmc_ioctl, &u32, true);
+ WARN_ON(r != 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
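+/* one-time setup on the measurement core: allocate the cache context and
+ * eviction data structure, configure MSRs and run the self-tests */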
+void
+cachepc_kvm_setup_test(void *p)
+{
+ int cpu;
+
+ cpu = get_cpu();
+
+ printk(KERN_WARNING "CachePC: Running on core %i\n", cpu);
+
+ cachepc_ctx = cachepc_get_ctx(L1);
+ cachepc_ds = cachepc_prepare_ds(cachepc_ctx);
+
+ cachepc_kvm_system_setup();
+
+ cachepc_kvm_prime_probe_test(NULL);
+ cachepc_kvm_single_access_test(NULL);
+ cachepc_kvm_single_eviction_test(NULL);
+ cachepc_kvm_stream_hwpf_test(NULL);
+
+ put_cpu();
+}
+
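+/* allocate the per-set measurement array, run setup on core 2 and register
+ * the /proc/cachepc interface */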
+void
+cachepc_kvm_init(void)
+{
+ int ret;
+
+ cachepc_msrmts_count = L1_SETS;
+ cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(uint16_t), GFP_KERNEL);
+ BUG_ON(cachepc_msrmts == NULL);
+
+ ret = smp_call_function_single(2, cachepc_kvm_setup_test, NULL, true);
+ WARN_ON(ret != 0);
+
+ memset(&cachepc_proc_ops, 0, sizeof(cachepc_proc_ops));
+ cachepc_proc_ops.proc_open = cachepc_kvm_proc_open;
+ cachepc_proc_ops.proc_read = cachepc_kvm_proc_read;
+ cachepc_proc_ops.proc_write = cachepc_kvm_proc_write;
+ cachepc_proc_ops.proc_release = cachepc_kvm_proc_close;
+ cachepc_proc_ops.proc_ioctl = cachepc_kvm_ioctl;
+ proc_create("cachepc", 0644, NULL, &cachepc_proc_ops);
+}
+
+void
+cachepc_kvm_exit(void)
+{
+ remove_proc_entry("cachepc", NULL);
+ kfree(cachepc_msrmts);
+
+ cachepc_release_ds(cachepc_ctx, cachepc_ds);
+ cachepc_release_ctx(cachepc_ctx);
+}
+
+
diff --git a/kmod/kvm.h b/kmod/kvm.h
@@ -0,0 +1,6 @@
+#pragma once
+
+#include "cachepc.h"
+
+void cachepc_kvm_init(void);
+void cachepc_kvm_exit(void);
diff --git a/patch.diff b/patch.diff
@@ -1,5 +1,5 @@
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
-index b804444e16d4..17167ccfca22 100644
+index b804444e16d4..66a4d56e331a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,6 @@
@@ -16,7 +16,7 @@ index b804444e16d4..17167ccfca22 100644
kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
- $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
+ $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
-+ svm/cachepc/cachepc.o svm/cachepc/util.o
++ svm/cachepc/cachepc.o svm/cachepc/util.o svm/cachepc/kvm.o
+
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
@@ -27,27 +27,10 @@ index b804444e16d4..17167ccfca22 100644
vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
-kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o \
-+ svm/cachepc/cachepc.o svm/cachepc/util.o
++ svm/cachepc/cachepc.o svm/cachepc/util.o svm/cachepc/kvm.o
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
-diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
-index 87e1c66228f7..d7da36e12da6 100644
---- a/arch/x86/kvm/svm/sev.c
-+++ b/arch/x86/kvm/svm/sev.c
-@@ -572,10 +572,12 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
- struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
-
- /* Perform some pre-encryption checks against the VMSA */
-+ printk(KERN_WARNING "Vincent: Pre sev_es_sync_vmsa\n");
- ret = sev_es_sync_vmsa(svm);
- if (ret)
- goto e_free;
-
-+ printk(KERN_WARNING "Vincent: Post sev_es_sync_vmsa\n");
- /*
- * The LAUNCH_UPDATE_VMSA command will perform in-place
- * encryption of the VMSA memory content (i.e it will write
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7b3cfbe8f7e3..c7952eab7c6d 100644
--- a/arch/x86/kvm/svm/svm.c
@@ -61,15 +44,6 @@ index 7b3cfbe8f7e3..c7952eab7c6d 100644
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
-@@ -3131,7 +3133,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
-
- if (!dump_invalid_vmcb) {
- pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
-- return;
-+ // return;
- }
-
- pr_err("VMCB Control Area:\n");
@@ -3749,9 +3751,26 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
lockdep_hardirqs_on(CALLER_ADDR0);
@@ -211,8 +185,8 @@ index 6feb8c08f45a..eb0ea02ef187 100644
jne 3f
ud2
_ASM_EXTABLE(1b, 2b)
-+
-+3:
++
++3:
+ swap_all
+ mov cachepc_ds, %rsi
+ mov 0x8(%rsi), %r15
@@ -220,7 +194,7 @@ index 6feb8c08f45a..eb0ea02ef187 100644
+ jmp cachepc_prime_vcall+1 // skip stack pushes
+sev_prime_ret:
+ swap_all
-+
++
+ vmrun %_ASM_AX
+
+ swap_all
@@ -239,7 +213,7 @@ index 6feb8c08f45a..eb0ea02ef187 100644
_ASM_EXTABLE(5b, 6b)
7:
- cli
-+ cli
++ cli
#ifdef CONFIG_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
@@ -259,7 +233,7 @@ index 6feb8c08f45a..eb0ea02ef187 100644
* @vmcb_pa: unsigned long
*/
SYM_FUNC_START(__svm_sev_es_vcpu_run)
-+ push_all
++ push_all
+
push %_ASM_BP
#ifdef CONFIG_X86_64
@@ -269,7 +243,7 @@ index 6feb8c08f45a..eb0ea02ef187 100644
sti
-1: vmrun %_ASM_AX
-+1:
++1:
+
+// swap_all
+// mov cachepc_ds, %rsi
@@ -281,7 +255,7 @@ index 6feb8c08f45a..eb0ea02ef187 100644
+
+// // TEST r15 dependance
+// movq $0x41414141, %r15
-+
++
+ vmrun %_ASM_AX
+
+// swap_all
@@ -294,421 +268,33 @@ index 6feb8c08f45a..eb0ea02ef187 100644
jmp 3f
2: cmpb $0, kvm_rebooting
jne 3f
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 4097d028c3ab..81685bd567a2 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -5414,6 +5414,7 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
- long kvm_arch_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
- {
-+ printk(KERN_WARNING "Vincent kvm arch ioctl \n");
- struct kvm *kvm = filp->private_data;
- void __user *argp = (void __user *)arg;
- int r = -ENOTTY;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 2541a17ff1c4..7efbdfd0e3e2 100644
+index 2541a17ff1c4..8796ad5e9b73 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
-@@ -51,6 +51,9 @@
- #include <linux/io.h>
- #include <linux/lockdep.h>
- #include <linux/kthread.h>
-+#include <linux/proc_fs.h>
-+#include <linux/init.h>
-+#include <asm/uaccess.h>
-
- #include <asm/processor.h>
- #include <asm/ioctl.h>
-@@ -66,6 +69,8 @@
+@@ -66,6 +66,8 @@
/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12
-+#include "../../arch/x86/kvm/svm/cachepc/cachepc.h"
++#include "../../arch/x86/kvm/svm/cachepc/kvm.h"
+
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-@@ -143,6 +148,18 @@ static void hardware_disable_all(void);
-
- static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-
-+struct proc_ops cachepc_proc_ops;
-+
-+uint16_t *cachepc_msrmts;
-+size_t cachepc_msrmts_count;
-+EXPORT_SYMBOL(cachepc_msrmts);
-+EXPORT_SYMBOL(cachepc_msrmts_count);
-+
-+cache_ctx *cachepc_ctx;
-+cacheline *cachepc_ds;
-+EXPORT_SYMBOL(cachepc_ctx);
-+EXPORT_SYMBOL(cachepc_ds);
-+
- __visible bool kvm_rebooting;
- EXPORT_SYMBOL_GPL(kvm_rebooting);
-
-@@ -4765,12 +4782,327 @@ static void check_processor_compat(void *data)
- *c->ret = kvm_arch_check_processor_compat(c->opaque);
- }
-
-+int
-+kvm_cachepc_open(struct inode *inode, struct file *file)
-+{
-+ try_module_get(THIS_MODULE);
-+
-+ return 0;
-+}
-+
-+int
-+kvm_cachepc_close(struct inode *inode, struct file *file)
-+{
-+ module_put(THIS_MODULE);
-+
-+ return 0;
-+}
-+
-+ssize_t
-+kvm_cachepc_read(struct file *file, char *buf, size_t buflen, loff_t *off)
-+{
-+ size_t len, left;
-+ size_t size;
-+
-+ printk(KERN_WARNING "CachePC: Reading entries (%lu:%lli)\n",
-+ buflen, off ? *off : 0);
-+
-+ size = cachepc_msrmts_count * sizeof(uint16_t);
-+ if (!off || *off >= size || *off < 0)
-+ return 0;
-+
-+ len = size - *off;
-+ if (len > buflen) len = buflen;
-+
-+ left = copy_to_user(buf, (uint8_t *) cachepc_msrmts + *off, len);
-+
-+ len -= left;
-+ *off += len;
-+
-+ return len;
-+}
-+
-+ssize_t
-+kvm_cachepc_write(struct file *file, const char *buf, size_t buflen, loff_t *off)
-+{
-+ return 0;
-+}
-+
-+void
-+kvm_cachepc_prime_probe_test(void *p)
-+{
-+ cacheline *lines;
-+ cacheline *cl, *head;
-+ uint32_t count;
-+ uint32_t *arg;
-+ int i, max;
-+
-+ arg = p;
-+
-+ /* l2 data cache, hit or miss */
-+ cachepc_init_pmc(0, 0x64, 0xD8);
-+
-+ lines = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size);
-+ BUG_ON(lines == NULL);
-+
-+ max = cachepc_ctx->nr_of_cachelines;
-+
-+ cachepc_cpuid();
-+ cachepc_mfence();
-+
-+ for (i = 0; i < max; i++)
-+ asm volatile ("mov (%0), %%rbx" : : "r"(lines + i) : "rbx");
-+
-+ head = cachepc_prime(cachepc_ds);
-+ cachepc_probe(head);
-+
-+ count = 0;
-+ cl = head = cachepc_ds;
-+ do {
-+ count += cl->count;
-+ cl = cl->next;
-+ } while (cl != head);
-+
-+ printk(KERN_WARNING "CachePC: Prime-probe test done (%u vs. %u => %s)\n",
-+ count, 0, (count == 0) ? "passed" : "failed");
-+
-+ if (arg) *arg = (count == 0);
-+
-+ kfree(lines);
-+}
-+
-+void
-+kvm_cachepc_stream_hwpf_test(void *p)
-+{
-+ cacheline *lines;
-+ uint32_t count;
-+ uint32_t *arg;
-+ uint32_t i, max;
-+
-+ arg = p;
-+
-+ /* TODO: accurately detect hwpf */
-+
-+ /* l2 data cache, hit or miss */
-+ cachepc_init_pmc(0, 0x64, 0xD8);
-+
-+ lines = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size);
-+ BUG_ON(lines == NULL);
-+
-+ max = cachepc_ctx->nr_of_cachelines;
-+
-+ cachepc_prime(cachepc_ds);
-+
-+ count -= cachepc_read_pmc(0);
-+ for (i = 0; i < max; i++)
-+ asm volatile ("mov (%0), %%rbx" : : "r"(lines + i) : "rbx");
-+ count += cachepc_read_pmc(0);
-+
-+ printk(KERN_WARNING "CachePC: HWPF test done (%u vs. %u => %s)\n",
-+ count, max, (count == max) ? "passed" : "failed");
-+
-+ if (arg) *arg = (count == max);
-+
-+ kfree(lines);
-+}
-+
-+void
-+kvm_cachepc_single_access_test(void *p)
-+{
-+ cacheline *ptr;
-+ uint64_t pre, post;
-+ uint32_t *arg;
-+
-+ /* l2 data cache, hit or miss */
-+ cachepc_init_pmc(0, 0x64, 0xD8);
-+
-+ arg = p;
-+
-+ WARN_ON(arg && *arg >= L1_SETS);
-+ if (arg && *arg >= L1_SETS) return;
-+ ptr = cachepc_prepare_victim(cachepc_ctx, arg ? *arg : 48);
-+
-+ cachepc_prime(cachepc_ds);
-+
-+ pre = cachepc_read_pmc(0);
-+ cachepc_victim(ptr);
-+ post = cachepc_read_pmc(0);
-+
-+ printk(KERN_WARNING "CachePC: Single access test done (%llu vs %u => %s)",
-+ post - pre, 1, (post - pre == 1) ? "passed" : "failed");
-+
-+ if (arg) *arg = post - pre;
-+
-+ cachepc_release_victim(cachepc_ctx, ptr);
-+}
-+
-+void
-+kvm_cachepc_single_eviction_test(void *p)
-+{
-+ cacheline *head, *cl, *evicted;
-+ cacheline *ptr;
-+ uint32_t target;
-+ uint32_t *arg;
-+ int count;
-+
-+ arg = p;
-+
-+ /* l2 data cache, hit or miss */
-+ cachepc_init_pmc(0, 0x64, 0xD8);
-+
-+ WARN_ON(arg && *arg >= L1_SETS);
-+ if (arg && *arg >= L1_SETS) return;
-+ target = arg ? *arg : 48;
-+
-+ ptr = cachepc_prepare_victim(cachepc_ctx, target);
-+
-+ head = cachepc_prime(cachepc_ds);
-+ cachepc_victim(ptr);
-+ cachepc_probe(head);
-+
-+ count = 0;
-+ evicted = NULL;
-+ cl = head = cachepc_ds;
-+ do {
-+ if (IS_FIRST(cl->flags) && cl->count > 0) {
-+ evicted = cl;
-+ count += cl->count;
-+ }
-+ cl = cl->next;
-+ } while (cl != head);
-+
-+ printk(KERN_WARNING "CachePC: Single eviction test done (%u vs %u => %s)\n",
-+ count, 1, (count == 1 && evicted->cache_set == target) ? "passed" : "failed");
-+ cachepc_save_msrmts(head);
-+
-+ if (arg) *arg = count;
-+
-+ cachepc_release_victim(cachepc_ctx, ptr);
-+}
-+
-+void
-+kwm_cachepc_system_setup(void)
-+{
-+ uint64_t reg_addr, val;
-+ uint32_t lo, hi;
-+
-+ /* disable streaming store */
-+ reg_addr = 0xc0011020;
-+ asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr));
-+ val = (uint64_t) lo | ((uint64_t) hi << 32);
-+ val |= 1 << 13;
-+ asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
-+ printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
-+
-+ /* disable speculative data cache tlb reloads */
-+ reg_addr = 0xc0011022;
-+ asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr));
-+ val = (uint64_t) lo | ((uint64_t) hi << 32);
-+ val |= 1 << 4;
-+ asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
-+ printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
-+
-+ /* disable data cache hardware prefetcher */
-+ reg_addr = 0xc0011022;
-+ asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr));
-+ val = (uint64_t) lo | ((uint64_t) hi << 32);
-+ val |= 1 << 13;
-+ asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
-+ printk("CachePC: Writing MSR %08llX: %016llX\n", reg_addr, val);
-+}
-+
-+void
-+kvm_cachepc_init(void *p)
-+{
-+ int cpu;
-+
-+ cpu = get_cpu();
-+
-+ printk(KERN_WARNING "CachePC: Running on core %i\n", cpu);
-+
-+ cachepc_ctx = cachepc_get_ctx(L1);
-+ cachepc_ds = cachepc_prepare_ds(cachepc_ctx);
-+
-+ kwm_cachepc_system_setup();
-+
-+ kvm_cachepc_prime_probe_test(NULL);
-+ kvm_cachepc_single_access_test(NULL);
-+ kvm_cachepc_single_eviction_test(NULL);
-+ kvm_cachepc_stream_hwpf_test(NULL);
-+
-+ put_cpu();
-+}
-+
-+void
-+kvm_cachepc_init_pmc_ioctl(void *p)
-+{
-+ uint32_t event;
-+ uint8_t index, event_no, event_mask;
-+
-+ WARN_ON(p == NULL);
-+ if (!p) return;
-+
-+ event = *(uint32_t *)p;
-+
-+ index = (event & 0xFF000000) >> 24;
-+ event_no = (event & 0x0000FF00) >> 8;
-+ event_mask = (event & 0x000000FF) >> 0;
-+
-+ cachepc_init_pmc(index, event_no, event_mask);
-+}
-+
-+long
-+kvm_cachepc_ioctl(struct file *file, unsigned int cmd, unsigned long argp)
-+{
-+ void __user *arg_user;
-+ uint32_t u32;
-+ int r;
-+
-+ arg_user = (void __user *)argp;
-+ switch (cmd) {
-+ case CACHEPC_IOCTL_TEST_ACCESS:
-+ printk(KERN_WARNING "CachePC: Called ioctl access test\n");
-+ if (!arg_user) return -EINVAL;
-+ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
-+ return -EFAULT;
-+ r = smp_call_function_single(2,
-+ kvm_cachepc_single_access_test, &u32, true);
-+ WARN_ON(r != 0);
-+ if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
-+ return -EFAULT;
-+ break;
-+ case CACHEPC_IOCTL_TEST_EVICTION:
-+ printk(KERN_WARNING "CachePC: Called ioctl eviction test\n");
-+ if (!arg_user) return -EINVAL;
-+ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
-+ return -EFAULT;
-+ r = smp_call_function_single(2,
-+ kvm_cachepc_single_eviction_test, &u32, true);
-+ WARN_ON(r != 0);
-+ if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
-+ return -EFAULT;
-+ break;
-+ case CACHEPC_IOCTL_INIT_PMC:
-+ printk(KERN_WARNING "CachePC: Called ioctl init counter\n");
-+ if (!arg_user) return -EINVAL;
-+ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
-+ return -EFAULT;
-+ r = smp_call_function_single(2,
-+ kvm_cachepc_init_pmc_ioctl, &u32, true);
-+ WARN_ON(r != 0);
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
- int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- struct module *module)
- {
- struct kvm_cpu_compat_check c;
-- int r;
-- int cpu;
-+ int r, cpu;
-
- r = kvm_arch_init(opaque);
- if (r)
-@@ -4848,6 +5180,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -4848,6 +4849,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
r = kvm_vfio_ops_init();
WARN_ON(r);
-+ cachepc_msrmts_count = L1_SETS;
-+ cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(uint16_t), GFP_KERNEL);
-+ BUG_ON(cachepc_msrmts == NULL);
++ cachepc_kvm_init();
+
-+ r = smp_call_function_single(2, kvm_cachepc_init, NULL, true);
-+ WARN_ON(r != 0);
-+
-+ memset(&cachepc_proc_ops, 0, sizeof(cachepc_proc_ops));
-+ cachepc_proc_ops.proc_open = kvm_cachepc_open;
-+ cachepc_proc_ops.proc_read = kvm_cachepc_read;
-+ cachepc_proc_ops.proc_write = kvm_cachepc_write;
-+ cachepc_proc_ops.proc_release = kvm_cachepc_close;
-+ cachepc_proc_ops.proc_ioctl = kvm_cachepc_ioctl;
-+ proc_create("cachepc", 0644, NULL, &cachepc_proc_ops);
-+
return 0;
out_unreg:
-@@ -4872,6 +5219,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
+@@ -4872,6 +4875,8 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
-+ remove_proc_entry("cachepc", NULL);
-+ kfree(cachepc_msrmts);
-+
-+ cachepc_release_ds(cachepc_ctx, cachepc_ds);
-+ cachepc_release_ctx(cachepc_ctx);
++ cachepc_kvm_exit();
+
debugfs_remove_recursive(kvm_debugfs_dir);
misc_deregister(&kvm_dev);