diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index b804444e16d4..17167ccfca22 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -Iarch/x86/kvm -O2
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
@@ -10,7 +10,9 @@ endif
 KVM := ../../../virt/kvm
 
 kvm-y			+= $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
-				$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
+				$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
+				svm/cachepc/cachepc.o svm/cachepc/util.o
+
 kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(KVM)/async_pf.o
 
 kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
@@ -20,7 +22,8 @@ kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
 kvm-intel-y		+= vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
 			   vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
 
-kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o \
+			   svm/cachepc/cachepc.o svm/cachepc/util.o
 
 obj-$(CONFIG_KVM)	+= kvm.o
 obj-$(CONFIG_KVM_INTEL)	+= kvm-intel.o
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7b3cfbe8f7e3..16dfd9b2938e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2,6 +2,8 @@
 
 #include <linux/kvm_host.h>
 
+#include "cachepc/cachepc.h"
+
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
@@ -3785,8 +3787,13 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
+	struct cacheline *head;
+	struct vcpu_svm *svm;
+	int cpu;
+
+	printk(KERN_WARNING "CachePC: svm_vcpu_enter_exit()\n");
 
+	svm = to_svm(vcpu);
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
@@ -3835,8 +3842,19 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
+	cpu = get_cpu();
+	WARN_ON(cpu != 2);
+
+	head = cachepc_prime(cachepc_ds);
+
 	svm_vcpu_enter_exit(vcpu, svm);
 
+	cachepc_probe(head);
+	//cachepc_print_msrmts(head);
+	cachepc_save_msrmts(head);
+
+	put_cpu();
+
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the
 	 * SPEC_CTRL MSR it may have left it on; save the value and
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2541a17ff1c4..8c46d509bd13 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,6 +51,9 @@
 #include <linux/io.h>
 #include <linux/lockdep.h>
 #include <linux/kthread.h>
+#include <linux/proc_fs.h>
+#include <linux/smp.h>
+#include <linux/uaccess.h>
 #include <asm/processor.h>
 #include <asm/ioctl.h>
 
@@ -66,6 +69,8 @@
 /* Worst case buffer size needed for holding an integer. */
 #define ITOA_MAX_LEN 12
 
+#include "../../arch/x86/kvm/svm/cachepc/cachepc.h"
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -143,6 +148,18 @@ static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 
+struct proc_ops cachepc_proc_ops;
+
+uint16_t *cachepc_msrmts;
+size_t cachepc_msrmts_count;
+EXPORT_SYMBOL(cachepc_msrmts);
+EXPORT_SYMBOL(cachepc_msrmts_count);
+
+cache_ctx *cachepc_ctx;
+cacheline *cachepc_ds;
+EXPORT_SYMBOL(cachepc_ctx);
+EXPORT_SYMBOL(cachepc_ds);
+
 __visible bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
 
@@ -4765,12 +4782,240 @@ static void check_processor_compat(void *data)
 	*c->ret = kvm_arch_check_processor_compat(c->opaque);
 }
 
+int
+kvm_cachepc_open(struct inode *inode, struct file *file)
+{
+	try_module_get(THIS_MODULE);
+
+	return 0;
+}
+
+int
+kvm_cachepc_close(struct inode *inode, struct file *file)
+{
+	module_put(THIS_MODULE);
+
+	return 0;
+}
+
+ssize_t
+kvm_cachepc_read(struct file *file, char *buf, size_t buflen, loff_t *off)
+{
+	size_t len, left;
+	size_t size;
+
+	printk(KERN_WARNING "CachePC: Reading entries (%lu:%lli)\n",
+		buflen, off ? *off : 0);
+
+	/* *off is a byte offset into the measurement buffer */
+	size = cachepc_msrmts_count * sizeof(uint16_t);
+	if (!off || *off < 0 || *off >= size)
+		return 0;
+
+	len = size - *off;
+	if (len > buflen) len = buflen;
+
+	left = copy_to_user(buf, (uint8_t *)cachepc_msrmts + *off, len);
+
+	len -= left;
+	*off += len;
+
+	return len;
+}
+
+ssize_t
+kvm_cachepc_write(struct file *file, const char *buf, size_t buflen, loff_t *off)
+{
+	return 0;
+}
+
+void
+kvm_cachepc_single_access_test(void *p)
+{
+	cacheline *ptr;
+	uint64_t pre, post;
+	volatile register uint64_t i asm("r11");
+	uint32_t *user;
+
+	/* l2 prefetches, hit or miss */
+	// cachepc_init_pmc(0, 0x60, 0x01);
+
+	/* l2 data cache, hit or miss */
+	cachepc_init_pmc(0, 0x64, 0xD8);
+
+	user = p;
+
+	WARN_ON(user && *user >= L1_SETS);
+	if (user && *user >= L1_SETS) return;
+	ptr = cachepc_prepare_victim(cachepc_ctx, user ? *user : 48);
+
+	cachepc_mfence();
+	cachepc_cpuid();
+
+	cachepc_prime(cachepc_ds);
+
+	cachepc_mfence();
+	cachepc_cpuid();
+
+	for (i = 0; i < 100000000LLU; i++);
+
+	cachepc_mfence();
+	cachepc_cpuid();
+
+	pre = cachepc_read_pmc(0);
+
+	cachepc_mfence();
+	cachepc_cpuid();
+
+	cachepc_victim(ptr);
+
+	cachepc_mfence();
+	cachepc_cpuid();
+
+	for (i = 0; i < 100000000LLU; i++);
+
+	cachepc_mfence();
+	cachepc_cpuid();
+
+	post = cachepc_read_pmc(0);
+
+	cachepc_mfence();
+	cachepc_cpuid();
+
+	printk(KERN_WARNING "CachePC: Single access test done, result: %llu\n", post - pre);
+
+	if (user) *user = post - pre;
+
+	cachepc_release_victim(cachepc_ctx, ptr);
+}
+
+void
+kvm_cachepc_single_eviction_test(void *p)
+{
+	cacheline *head;
+	cacheline *ptr;
+	uint32_t *user;
+
+	user = p;
+
+	/* l2 data cache, hit or miss */
+	cachepc_init_pmc(0, 0x64, 0xD8);
+
+	WARN_ON(user && *user >= L1_SETS);
+	if (user && *user >= L1_SETS) return;
+	ptr = cachepc_prepare_victim(cachepc_ctx, user ? *user : 48);
+
+	head = cachepc_prime(cachepc_ds);
+	cachepc_victim(ptr);
+	cachepc_probe(head);
+
+	printk(KERN_WARNING "CachePC: Single eviction test done\n");
+	cachepc_save_msrmts(head);
+
+	cachepc_release_victim(cachepc_ctx, ptr);
+}
+
+void
+kvm_cachepc_init(void *p)
+{
+	cacheline *cl, *head;
+	int cpu;
+
+	local_irq_disable();
+
+	cpu = get_cpu();
+
+	printk(KERN_WARNING "CachePC: Running on core %i\n", cpu);
+
+	cachepc_ctx = cachepc_get_ctx(L1);
+	cachepc_ds = cachepc_prepare_ds(cachepc_ctx);
+
+	head = cl = cachepc_ds;
+	do {
+		cl = cl->next;
+		printk(KERN_WARNING "%i:%i\n", cl->cache_set, cl->cache_line);
+	} while (cl != head);
+
+	kvm_cachepc_single_access_test(p);
+	kvm_cachepc_single_eviction_test(p);
+
+	put_cpu();
+
+	local_irq_enable();
+}
+
+void
+kvm_cachepc_init_pmc_ioctl(void *p)
+{
+	uint32_t event;
+	uint8_t index, event_no, event_mask;
+
+	WARN_ON(p == NULL);
+	if (!p) return;
+
+	event = *(uint32_t *)p;
+
+	index = (event & 0xFF000000) >> 24;
+	event_no = (event & 0x0000FF00) >> 8;
+	event_mask = (event & 0x000000FF) >> 0;
+
+	cachepc_init_pmc(index, event_no, event_mask);
+}
+
+long
+kvm_cachepc_ioctl(struct file *file, unsigned int cmd, unsigned long argp)
+{
+	void __user *arg_user;
+	uint32_t u32;
+	int r;
+
+	arg_user = (void __user *)argp;
+	switch (cmd) {
+	case CACHEPC_IOCTL_TEST_ACCESS:
+		printk(KERN_WARNING "CachePC: ioctl access test\n");
+		if (arg_user) {
+			if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+				return -EFAULT;
+		}
+		r = smp_call_function_single(2,
+			kvm_cachepc_single_access_test, arg_user ? &u32 : NULL, true);
+		WARN_ON(r != 0);
+		if (arg_user) {
+			if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
+				return -EFAULT;
+		}
+		break;
+	case CACHEPC_IOCTL_TEST_EVICTION:
+		printk(KERN_WARNING "CachePC: ioctl eviction test\n");
+		if (arg_user) {
+			if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+				return -EFAULT;
+		}
+		r = smp_call_function_single(2,
+			kvm_cachepc_single_eviction_test, arg_user ? &u32 : NULL, true);
+		WARN_ON(r != 0);
+		break;
+	case CACHEPC_IOCTL_INIT_PMC:
+		printk(KERN_WARNING "CachePC: ioctl init counter\n");
+		if (arg_user) {
+			if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+				return -EFAULT;
+		}
+		r = smp_call_function_single(2,
+			kvm_cachepc_init_pmc_ioctl, arg_user ? &u32 : NULL, true);
+		WARN_ON(r != 0);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 		  struct module *module)
 {
 	struct kvm_cpu_compat_check c;
-	int r;
-	int cpu;
+	int r, cpu;
 
 	r = kvm_arch_init(opaque);
 	if (r)
@@ -4848,6 +5093,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	r = kvm_vfio_ops_init();
 	WARN_ON(r);
 
+	cachepc_msrmts_count = L1_SETS;
+	cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(uint16_t), GFP_KERNEL);
+	BUG_ON(cachepc_msrmts == NULL);
+
+	r = smp_call_function_single(2, kvm_cachepc_init, NULL, true);
+	WARN_ON(r != 0);
+
+	memset(&cachepc_proc_ops, 0, sizeof(cachepc_proc_ops));
+	cachepc_proc_ops.proc_open = kvm_cachepc_open;
+	cachepc_proc_ops.proc_read = kvm_cachepc_read;
+	cachepc_proc_ops.proc_write = kvm_cachepc_write;
+	cachepc_proc_ops.proc_release = kvm_cachepc_close;
+	cachepc_proc_ops.proc_ioctl = kvm_cachepc_ioctl;
+	proc_create("cachepc", 0644, NULL, &cachepc_proc_ops);
+
 	return 0;
 
 out_unreg:
@@ -4872,6 +5132,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
+	remove_proc_entry("cachepc", NULL);
+	kfree(cachepc_msrmts);
+
+	cachepc_release_ds(cachepc_ctx, cachepc_ds);
+	cachepc_release_ctx(cachepc_ctx);
+
 	debugfs_remove_recursive(kvm_debugfs_dir);
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);