diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index b804444e16d4..66a4d56e331a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -Iarch/x86/kvm -O2
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
@@ -10,7 +10,9 @@ endif
 KVM := ../../../virt/kvm
 
 kvm-y			+= $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
-				$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
+				$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
+				svm/cachepc/cachepc.o svm/cachepc/util.o svm/cachepc/kvm.o
+
 kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(KVM)/async_pf.o
 
 kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
@@ -20,7 +22,8 @@ kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
 kvm-intel-y		+= vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
 			   vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
 
-kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o \
+			   svm/cachepc/cachepc.o svm/cachepc/util.o svm/cachepc/kvm.o
 
 obj-$(CONFIG_KVM)	+= kvm.o
 obj-$(CONFIG_KVM_INTEL)	+= kvm-intel.o
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7b3cfbe8f7e3..c7952eab7c6d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2,6 +2,8 @@
 
 #include <linux/kvm_host.h>
 
+#include "cachepc/cachepc.h"
+
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
@@ -3749,9 +3751,26 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 		lockdep_hardirqs_on(CALLER_ADDR0);
 
 	if (sev_es_guest(svm->vcpu.kvm)) {
+		int cpu = get_cpu();
+		cacheline *next;
+		memset(cachepc_msrmts, 0, 64 * 2);
+		local_irq_disable();
+		WARN_ON(cpu != 2);	/* measurements assume the vCPU is pinned to core 2 */
+		next = cachepc_prime(cachepc_ds);
 		__svm_sev_es_vcpu_run(svm->vmcb_pa);
+		cachepc_probe(next);
+		cachepc_save_msrmts(cachepc_ds);
+		local_irq_enable();
+		put_cpu();
 	} else {
+		int cpu = get_cpu();
+		memset(cachepc_msrmts, 0, 64 * 2);
+		local_irq_disable();
+		WARN_ON(cpu != 2);
 		__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+		cachepc_save_msrmts(cachepc_ds);
+		local_irq_enable();
+		put_cpu();
 
 #ifdef CONFIG_X86_64
 		native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
@@ -3785,8 +3804,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vcpu_svm *svm;
 
+	printk(KERN_WARNING "CachePC: svm_vcpu_run()\n");
+	WARN_ON(smp_processor_id() != 2);
+
+	svm = to_svm(vcpu);
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
@@ -3888,7 +3911,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 
 	vmcb_mark_all_clean(svm->vmcb);
-
+	printk(KERN_WARNING "CachePC: svm->vmcb exit code %d\n", svm->vmcb->control.exit_code);
 	/* if exit due to PF check for async PF */
 	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
 		svm->vcpu.arch.apf.host_apf_flags =
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 6feb8c08f45a..eb0ea02ef187 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -27,14 +27,74 @@
 #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
 #endif
 
+.extern cachepc_msrmts
+
 .section .noinstr.text, "ax"
 
+.macro push_xmm gpr xmm		// stash \gpr in \xmm
+	vmovq \gpr, \xmm
+.endm
+
+.macro pop_xmm gpr xmm		// restore \gpr from \xmm
+	vmovq \xmm, \gpr
+.endm
+
+.macro swap_xmm gpr xmm		// exchange \gpr with \xmm, clobbers %xmm15
+	vmovq \gpr, %xmm15
+	vmovq \xmm, \gpr
+	vmovq %xmm15, \xmm
+.endm
+
+.macro push_all			// stash %rax..%r14 in %xmm0..%xmm14 (%xmm15 stays scratch)
+	push_xmm %rax, %xmm0
+	push_xmm %rbx, %xmm1
+	push_xmm %rcx, %xmm2
+	push_xmm %rdx, %xmm3
+	push_xmm %rbp, %xmm4
+	push_xmm %rsp, %xmm5
+	push_xmm %rdi, %xmm6
+	push_xmm %rsi, %xmm7
+	push_xmm %r8, %xmm8
+	push_xmm %r9, %xmm9
+	push_xmm %r10, %xmm10
+	push_xmm %r11, %xmm11
+	push_xmm %r12, %xmm12
+	push_xmm %r13, %xmm13
+	push_xmm %r14, %xmm14
+.endm
+
+.macro swap_all			// exchange %rax..%r14 with %xmm0..%xmm14
+	swap_xmm %rax, %xmm0
+	swap_xmm %rbx, %xmm1
+	swap_xmm %rcx, %xmm2
+	swap_xmm %rdx, %xmm3
+	swap_xmm %rbp, %xmm4
+	swap_xmm %rsp, %xmm5
+	swap_xmm %rdi, %xmm6
+	swap_xmm %rsi, %xmm7
+	swap_xmm %r8, %xmm8
+	swap_xmm %r9, %xmm9
+	swap_xmm %r10, %xmm10
+	swap_xmm %r11, %xmm11
+	swap_xmm %r12, %xmm12
+	swap_xmm %r13, %xmm13
+	swap_xmm %r14, %xmm14
+.endm
+
+.macro barrier			// fence, then serialize via CPUID (leaf 0x80000005)
+	mfence
+	mov $0x80000005, %eax
+	cpuid
+.endm
+
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
  * @vmcb_pa:	unsigned long
  * @regs:	unsigned long * (to guest registers)
  */
 SYM_FUNC_START(__svm_vcpu_run)
+	push_all
+
 	push %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
@@ -45,6 +105,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	push %edi
 	push %esi
 #endif
+
 	push %_ASM_BX
 
 	/* Save @regs. */
@@ -85,8 +146,25 @@ SYM_FUNC_START(__svm_vcpu_run)
 	jne 3f
 	ud2
 	_ASM_EXTABLE(1b, 2b)
+
+3:
+	swap_all
+	mov cachepc_ds, %rsi
+	mov 0x8(%rsi), %r15
+	lea sev_prime_ret(%rip), %rdi
+	jmp cachepc_prime_vcall+1	// skip stack pushes
+sev_prime_ret:
+	swap_all
+
+	vmrun %_ASM_AX
+
+	swap_all
+	mov %r15, %rsi
+	lea sev_probe_ret(%rip), %rdi
+	jmp cachepc_probe_vcall+6	// skip stack pushes
+sev_probe_ret:
+	swap_all
 
-3:	vmrun %_ASM_AX
 	jmp 5f
 4:	cmpb $0, kvm_rebooting
 	jne 5f
@@ -100,7 +178,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	ud2
 	_ASM_EXTABLE(5b, 6b)
 7:
-	cli
+	cli
 
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
@@ -166,6 +244,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 	pop %edi
 #endif
 	pop %_ASM_BP
+
+	# mov cachepc_msrmts(%rip), %rax
+	# mov $0x1, %edx
+	# mov %dx, (%rax)
+
 	ret
 SYM_FUNC_END(__svm_vcpu_run)
 
@@ -174,6 +257,8 @@ SYM_FUNC_END(__svm_vcpu_run)
  * @vmcb_pa:	unsigned long
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
+	push_all
+
 	push %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
@@ -190,7 +275,28 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	mov %_ASM_ARG1, %_ASM_AX
 	sti
 
-1:	vmrun %_ASM_AX
+1:
+
+//	swap_all
+//	mov cachepc_ds, %rsi
+//	mov 0x8(%rsi), %r15
+//	lea sev_es_prime_ret(%rip), %rdi
+//	jmp cachepc_prime_vcall+1	// skip stack pushes
+//sev_es_prime_ret:
+//	swap_all
+
+//	// TEST r15 dependence
+//	movq $0x41414141, %r15
+
+	vmrun %_ASM_AX
+
+//	swap_all
+//	mov %r15, %rsi
+//	lea sev_es_probe_ret(%rip), %rdi
+//	jmp cachepc_probe_vcall+6	// skip stack pushes
+//sev_es_probe_ret:
+//	swap_all
+
 	jmp 3f
 2:	cmpb $0, kvm_rebooting
 	jne 3f
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2541a17ff1c4..8796ad5e9b73 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -66,6 +66,8 @@
 /* Worst case buffer size needed for holding an integer. */
 #define ITOA_MAX_LEN 12
 
+#include "../../arch/x86/kvm/svm/cachepc/kvm.h"
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -4848,6 +4849,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	r = kvm_vfio_ops_init();
 	WARN_ON(r);
 
+	cachepc_kvm_init();
+
 	return 0;
 
 out_unreg:
@@ -4872,6 +4875,8 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
+	cachepc_kvm_exit();
+
 	debugfs_remove_recursive(kvm_debugfs_dir);
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
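
Note on the cachepc_* helpers called above: only their call sites appear in this patch. As a rough sketch of the implied data flow, assume (hypothetically) that cachepc_ds heads a circular list of cacheline descriptors, one per L1 cache set, whose count fields cachepc_probe() fills with per-set access counts. cachepc_save_msrmts() would then fold those counts into cachepc_msrmts, the 64-entry 16-bit buffer that svm_vcpu_enter_exit() zeroes (memset of 64 * 2 bytes) before each VM entry:

	/*
	 * Hypothetical sketch, not the actual implementation: the field
	 * names (cache_set, count, next) and the circular-list layout are
	 * assumptions for illustration. cacheline and cachepc_msrmts are
	 * taken to be declared in svm/cachepc/cachepc.h as in the patch.
	 */
	void cachepc_save_msrmts(cacheline *head)
	{
		cacheline *cl;

		cl = head;
		do {
			/* one u16 measurement slot per L1 cache set */
			cachepc_msrmts[cl->cache_set] = cl->count;
			cl = cl->next;
		} while (cl != head);
	}

Under these assumptions, userspace would read one count per cache set out of cachepc_msrmts after each vmrun; sets the guest touched between prime and probe stand out by their eviction counts.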