diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index b804444e16d4..e94fa8c02a1d 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -Iarch/x86/kvm -O2
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
@@ -10,7 +10,9 @@ endif
 KVM := ../../../virt/kvm
 
 kvm-y			+= $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
-				$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
+				$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
+				svm/cachepc/cachepc.o svm/cachepc/util.o svm/cachepc/kvm.o
+
 kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(KVM)/async_pf.o
 
 kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
@@ -20,7 +22,8 @@ kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
 kvm-intel-y		+= vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
 			   vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
 
-kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o \
+			   svm/cachepc/cachepc.o svm/cachepc/util.o
 
 obj-$(CONFIG_KVM)	+= kvm.o
 obj-$(CONFIG_KVM_INTEL)	+= kvm-intel.o
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7b3cfbe8f7e3..241ce70885dc 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2,6 +2,8 @@
 
 #include <linux/kvm_host.h>
 
+#include "cachepc/cachepc.h"
+
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
@@ -3728,6 +3730,8 @@ void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 					struct vcpu_svm *svm)
 {
+	int cpu;
+
 	/*
	 * VMENTER enables interrupts (host state), but the kernel state is
	 * interrupts disabled when this is invoked. Also tell RCU about
@@ -3749,9 +3753,23 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 		lockdep_hardirqs_on(CALLER_ADDR0);
 
 	if (sev_es_guest(svm->vcpu.kvm)) {
+		memset(cachepc_msrmts, 0, 64 * 2);
+		cpu = get_cpu();
+		local_irq_disable();
+		WARN_ON(cpu != 2);
 		__svm_sev_es_vcpu_run(svm->vmcb_pa);
+		cachepc_save_msrmts(cachepc_ds);
+		local_irq_enable();
+		put_cpu();
 	} else {
+		memset(cachepc_msrmts, 0, 64 * 2);
+		cpu = get_cpu();
+		local_irq_disable();
+		WARN_ON(cpu != 2);
 		__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+		cachepc_save_msrmts(cachepc_ds);
+		local_irq_enable();
+		put_cpu();
 
 #ifdef CONFIG_X86_64
 		native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
@@ -3785,8 +3803,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vcpu_svm *svm;
+
+	printk(KERN_WARNING "CachePC: svm_vcpu_enter_exit()\n");
+	WARN_ON(smp_processor_id() != 2);
 
+	svm = to_svm(vcpu);
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
@@ -3888,7 +3910,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 
 	vmcb_mark_all_clean(svm->vmcb);
-
+	printk(KERN_WARNING "Vincent: svm->vmcb exit code %d\n", svm->vmcb->control.exit_code);
 	/* if exit due to PF check for async PF */
 	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
 		svm->vcpu.arch.apf.host_apf_flags =
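Note: the svm.c hook above zeroes a 64 x 2-byte measurement buffer, pins execution to CPU 2 with interrupts off, and calls cachepc_save_msrmts() right after VMRUN returns to copy the per-cache-set access counts out of the prime+probe data structure (cachepc_ds). The sketch below illustrates one plausible shape for that save step; the struct layout, the set count, and the function body are assumptions for illustration, not code from svm/cachepc/cachepc.c.

	/*
	 * Hypothetical sketch of the measurement save step. Assumes
	 * cachepc_msrmts is an array of 64 16-bit counters (matching
	 * memset(cachepc_msrmts, 0, 64 * 2) above) and that the eviction
	 * set is kept as a circular list of cache-line descriptors.
	 */
	#include <linux/types.h>

	#define CPC_L1_SETS 64			/* assumed: one counter per L1 set */

	struct cpc_cacheline {			/* assumed layout */
		struct cpc_cacheline *next;	/* next line in the eviction set */
		u64 count;			/* accesses seen during probe */
		u32 set;			/* L1 set this line maps to */
	};

	extern u16 *cachepc_msrmts;

	static void cachepc_save_msrmts_sketch(struct cpc_cacheline *head)
	{
		struct cpc_cacheline *cl = head;

		do {
			cachepc_msrmts[cl->set] = (u16)cl->count;
			cl = cl->next;
		} while (cl && cl != head);
	}
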
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 6feb8c08f45a..60da3cff6c49 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -27,14 +27,59 @@
 #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
 #endif
 
+.extern cachepc_msrmts
+
 .section .noinstr.text, "ax"
 
+.macro load_tmp off reg
+	mov cachepc_regs_tmp+\off, \reg
+.endm
+
+.macro save_tmp off reg
+	mov \reg, cachepc_regs_tmp+\off
+.endm
+
+.macro load_vm off reg
+	mov cachepc_regs_vm+\off, \reg
+.endm
+
+.macro save_vm off reg
+	mov \reg, cachepc_regs_vm+\off
+.endm
+
+.macro apply_regs func
+	\func 0x00, %rax
+	\func 0x08, %rbx
+	\func 0x10, %rcx
+	\func 0x18, %rdx
+	\func 0x20, %rbp
+	\func 0x28, %rsp
+	\func 0x30, %rdi
+	\func 0x38, %rsi
+	\func 0x40, %r8
+	\func 0x48, %r9
+	\func 0x50, %r10
+	\func 0x58, %r11
+	\func 0x60, %r12
+	\func 0x68, %r13
+	\func 0x70, %r14
+	\func 0x78, %r15
+.endm
+
+.macro barrier
+	mfence
+	mov $0x80000005, %eax
+	cpuid
+.endm
+
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
  * @vmcb_pa:	unsigned long
  * @regs:	unsigned long * (to guest registers)
  */
 SYM_FUNC_START(__svm_vcpu_run)
+	apply_regs save_tmp
+
 	push %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
@@ -45,6 +90,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	push %edi
 	push %esi
 #endif
+
 	push %_ASM_BX
 
 	/* Save @regs. */
@@ -86,7 +132,28 @@ SYM_FUNC_START(__svm_vcpu_run)
 	ud2
 	_ASM_EXTABLE(1b, 2b)
 
-3:	vmrun %_ASM_AX
+3:
+	apply_regs save_vm
+	apply_regs load_tmp
+	mov cachepc_ds, %rsi
+	mov 0x8(%rsi), %r15
+	lea sev_prime_ret(%rip), %rdi
+	jmp cachepc_prime_vcall+1	// skip stack pushes
+sev_prime_ret:
+	apply_regs save_tmp
+	apply_regs load_vm
+
+	vmrun %_ASM_AX
+
+	apply_regs save_vm
+	apply_regs load_tmp
+	mov %r15, %rsi
+	lea sev_probe_ret(%rip), %rdi
+	jmp cachepc_probe_vcall+6	// skip stack pushes
+sev_probe_ret:
+	apply_regs save_tmp
+	apply_regs load_vm
+
 	jmp 5f
 4:	cmpb $0, kvm_rebooting
 	jne 5f
@@ -166,6 +233,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 	pop %edi
 #endif
 	pop %_ASM_BP
+
+	# mov cachepc_msrmts(%rip), %rax
+	# mov $0x1, %edx
+	# mov %dx, (%rax)
+
 	ret
 SYM_FUNC_END(__svm_vcpu_run)
 
@@ -174,6 +246,8 @@ SYM_FUNC_END(__svm_vcpu_run)
  * @vmcb_pa:	unsigned long
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
+	apply_regs save_tmp
+
 	push %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
@@ -190,7 +264,29 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	mov %_ASM_ARG1, %_ASM_AX
 	sti
 
-1:	vmrun %_ASM_AX
+1:
+
+	apply_regs save_vm
+	apply_regs load_tmp
+	mov cachepc_ds, %rsi
+	mov 0x8(%rsi), %r15
+	lea sev_es_prime_ret(%rip), %rdi
+	jmp cachepc_prime_vcall+1	// skip stack pushes
+sev_es_prime_ret:
+	apply_regs save_tmp
+	apply_regs load_vm
+
+	vmrun %_ASM_AX
+
+	apply_regs save_vm
+	apply_regs load_tmp
+	mov %r15, %rsi
+	lea sev_es_probe_ret(%rip), %rdi
+	jmp cachepc_probe_vcall+6	// skip stack pushes
+sev_es_probe_ret:
+	apply_regs save_tmp
+	apply_regs load_vm
+
 	jmp 3f
 2:	cmpb $0, kvm_rebooting
 	jne 3f
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2541a17ff1c4..1345938d1d2b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -66,6 +66,8 @@
 /* Worst case buffer size needed for holding an integer. */
 #define ITOA_MAX_LEN 12
 
+#include "../../arch/x86/kvm/svm/cachepc/kvm.h"
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -4848,6 +4850,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	r = kvm_vfio_ops_init();
 	WARN_ON(r);
 
+	cachepc_kvm_init();
+
 	return 0;
 
 out_unreg:
@@ -4872,6 +4876,8 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
+	cachepc_kvm_exit();
+
 	debugfs_remove_recursive(kvm_debugfs_dir);
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
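Note: cachepc_prime_vcall and cachepc_probe_vcall are entered with a plain jmp rather than call: the return address is passed in %rdi via lea, and the entry offsets (+1 and +6) skip the functions' own stack pushes, as the inline comments say, presumably so that neither a call-pushed return address nor callee saves disturb the cache state being measured. Guest and host register files are instead swapped through the cachepc_regs_vm and cachepc_regs_tmp arrays with apply_regs. After the exit, cachepc_msrmts holds one count per cache set. The sketch below shows one way such a buffer could be inspected; the 64-set layout is inferred from memset(cachepc_msrmts, 0, 64 * 2) in svm.c, and the function itself is illustrative, not part of the patch.

	/*
	 * Illustrative consumer: log which cache sets saw activity during
	 * the measured VMRUN, given a snapshot of the measurement buffer.
	 */
	#include <linux/types.h>
	#include <linux/printk.h>

	#define CPC_L1_SETS 64	/* assumed set count, per the memset above */

	static void cachepc_report_sketch(const u16 msrmts[CPC_L1_SETS])
	{
		int set;

		for (set = 0; set < CPC_L1_SETS; set++) {
			if (msrmts[set])
				pr_info("CachePC: set %d count %u\n",
					set, (unsigned int)msrmts[set]);
		}
	}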