Diffstat (limited to 'patch.diff')
-rwxr-xr-x	patch.diff	241
1 file changed, 143 insertions, 98 deletions
@@ -31,8 +31,25 @@ index b804444e16d4..17167ccfca22 100644
  obj-$(CONFIG_KVM) += kvm.o
  obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
 +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
 +index 87e1c66228f7..d7da36e12da6 100644
 +--- a/arch/x86/kvm/svm/sev.c
 ++++ b/arch/x86/kvm/svm/sev.c
 +@@ -572,10 +572,12 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
 + 		struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
 + 
 + 		/* Perform some pre-encryption checks against the VMSA */
 ++		printk(KERN_WARNING "Vincent: Pre sev_es_sync_vmsa\n");
 + 		ret = sev_es_sync_vmsa(svm);
 + 		if (ret)
 + 			goto e_free;
 + 
 ++		printk(KERN_WARNING "Vincent: Post sev_es_sync_vmsa\n");
 + 		/*
 + 		 * The LAUNCH_UPDATE_VMSA command will perform in-place
 + 		 * encryption of the VMSA memory content (i.e it will write
  diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
 -index 7b3cfbe8f7e3..cb60859f7d17 100644
 +index 7b3cfbe8f7e3..c7952eab7c6d 100644
  --- a/arch/x86/kvm/svm/svm.c
  +++ b/arch/x86/kvm/svm/svm.c
  @@ -2,6 +2,8 @@
@@ -44,9 +61,30 @@ index 7b3cfbe8f7e3..cb60859f7d17 100644
  #include "irq.h"
  #include "mmu.h"
  #include "kvm_cache_regs.h"
-@@ -3751,7 +3753,14 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+@@ -3131,7 +3133,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
+ 
+ 	if (!dump_invalid_vmcb) {
+ 		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
+-		return;
++		// return;
+ 	}
+ 
+ 	pr_err("VMCB Control Area:\n");
+@@ -3749,9 +3751,26 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ 		lockdep_hardirqs_on(CALLER_ADDR0);
+ 
  	if (sev_es_guest(svm->vcpu.kvm)) {
++		memset(cachepc_msrmts, 0, 64 * 2);
++		int cpu = get_cpu();
++		local_irq_disable();
++		WARN_ON(cpu != 2);
++		cacheline *next;
++		next = cachepc_prime(cachepc_ds);
+ 		__svm_sev_es_vcpu_run(svm->vmcb_pa);
++		cachepc_probe(next);
++		cachepc_save_msrmts(cachepc_ds);
++		local_irq_enable();
++		put_cpu();
  	} else {
 +		memset(cachepc_msrmts, 0, 64 * 2);
 +		int cpu = get_cpu();
@@ -59,7 +97,7 @@ index 7b3cfbe8f7e3..cb60859f7d17 100644
  #ifdef CONFIG_X86_64
  	native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-@@ -3785,8 +3794,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+@@ -3785,8 +3804,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
  
  static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
  {
@@ -73,11 +111,20 @@ index 7b3cfbe8f7e3..cb60859f7d17 100644
  	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
  	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
  	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+@@ -3888,7 +3911,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+ 
+ 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+ 	vmcb_mark_all_clean(svm->vmcb);
+-
++	printk(KERN_WARNING "Vincent: svm->vmcb exit code %d\n", svm->vmcb->control.exit_code);
+ 	/* if exit due to PF check for async PF */
+ 	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
+ 		svm->vcpu.arch.apf.host_apf_flags =
  diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
 -index 6feb8c08f45a..2f259db92037 100644
 +index 6feb8c08f45a..eb0ea02ef187 100644
  --- a/arch/x86/kvm/svm/vmenter.S
  +++ b/arch/x86/kvm/svm/vmenter.S
 -@@ -27,14 +27,53 @@
 +@@ -27,14 +27,74 @@
  #define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
  #endif
  
 +.macro push_xmm grp xmm
 +	vmovq \grp, \xmm
 +.endm
 +
 +.macro pop_xmm grp xmm
 +	vmovq \xmm, \grp
 +.endm
 +
 +.macro swap_xmm grp xmm
-+	vmovq \grp, %xmm1
++	vmovq \grp, %xmm15
 +	vmovq \xmm, \grp
-+	vmovq %xmm1, \xmm
++	vmovq %xmm15, \xmm
 +.endm
 +
 +.macro barrier
 +	mfence
 +	mov $0x80000005,%eax
 +	cpuid
 +.endm
 +
++.macro push_all
++	push_xmm %rax, %xmm0
++	push_xmm %rbx, %xmm1
++	push_xmm %rcx, %xmm2
++	push_xmm %rdx, %xmm3
++	push_xmm %rbp, %xmm4
++	push_xmm %rsp, %xmm5
++	push_xmm %rdi, %xmm6
++	push_xmm %rsi, %xmm7
++	push_xmm %r8, %xmm8
++	push_xmm %r9, %xmm9
++	push_xmm %r10, %xmm10
++	push_xmm %r11, %xmm11
++	push_xmm %r12, %xmm12
++	push_xmm %r13, %xmm13
++	push_xmm %r14, %xmm14
++.endm
++
++.macro swap_all
++	swap_xmm %rax, %xmm0
++	swap_xmm %rbx, %xmm1
++	swap_xmm %rcx, %xmm2
++	swap_xmm %rdx, %xmm3
++	swap_xmm %rbp, %xmm4
++	swap_xmm %rsp, %xmm5
++	swap_xmm %rdi, %xmm6
++	swap_xmm %rsi, %xmm7
++	swap_xmm %r8, %xmm8
++	swap_xmm %r9, %xmm9
++	swap_xmm %r10, %xmm10
++	swap_xmm %r11, %xmm11
++	swap_xmm %r12, %xmm12
++	swap_xmm %r13, %xmm13
++	swap_xmm %r14, %xmm14
++.endm
++
  /**
   * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
   * @vmcb_pa: unsigned long
   * @regs: unsigned long * (to guest registers)
   */
  SYM_FUNC_START(__svm_vcpu_run)
-+	/* store regs in zmm */
-+	push_xmm %rax, %xmm0
-+	push_xmm %rcx, %xmm2
-+	push_xmm %rdx, %xmm3
-+	push_xmm %rbp, %xmm4
-+	push_xmm %rsp, %xmm5
-+	push_xmm %rdi, %xmm6
-+	push_xmm %rsi, %xmm7
-+	push_xmm %r8, %xmm8
-+	push_xmm %r9, %xmm9
-+	push_xmm %r10, %xmm10
-+	push_xmm %r11, %xmm11
-+	push_xmm %r12, %xmm12
-+	push_xmm %r13, %xmm13
-+	push_xmm %r14, %xmm14
-+	push_xmm %r15, %xmm15
++	push_all
++
  	push %_ASM_BP
 #ifdef CONFIG_X86_64
  	push %r15
-@@ -45,6 +84,7 @@ SYM_FUNC_START(__svm_vcpu_run)
+@@ -45,6 +105,7 @@ SYM_FUNC_START(__svm_vcpu_run)
  	push %edi
  	push %esi
 #endif
 +	barrier
  	push %_ASM_BX
  
  	/* Save @regs. */
-@@ -85,8 +125,87 @@ SYM_FUNC_START(__svm_vcpu_run)
+@@ -85,8 +146,25 @@ SYM_FUNC_START(__svm_vcpu_run)
  	jne 3f
  	ud2
  	_ASM_EXTABLE(1b, 2b)
 +
-+3:
-+	swap_xmm %rax, %xmm0
-+	swap_xmm %rcx, %xmm2
-+	swap_xmm %rdx, %xmm3
-+	swap_xmm %rbp, %xmm4
-+	swap_xmm %rsp, %xmm5
-+	swap_xmm %rdi, %xmm6
-+	swap_xmm %rsi, %xmm7
-+	swap_xmm %r8, %xmm8
-+	swap_xmm %r9, %xmm9
-+	swap_xmm %r10, %xmm10
-+	swap_xmm %r11, %xmm11
-+	swap_xmm %r12, %xmm12
-+	swap_xmm %r13, %xmm13
-+	swap_xmm %r14, %xmm14
-+	swap_xmm %r15, %xmm15
-+
-+	mov cachepc_ds, %rsi
-+	mov 0x8(%rsi), %r15
-+
-+	lea prime_ret(%rip), %rdi
-+	jmp cachepc_prime_vcall+1 // skip stack pushes
-+prime_ret:
-+
-+	swap_xmm %rax, %xmm0
-+	swap_xmm %rcx, %xmm2
-+	swap_xmm %rdx, %xmm3
-+	swap_xmm %rbp, %xmm4
-+	swap_xmm %rsp, %xmm5
-+	swap_xmm %rdi, %xmm6
-+	swap_xmm %rsi, %xmm7
-+	swap_xmm %r8, %xmm8
-+	swap_xmm %r9, %xmm9
-+	swap_xmm %r10, %xmm10
-+	swap_xmm %r11, %xmm11
-+	swap_xmm %r12, %xmm12
-+	swap_xmm %r13, %xmm13
-+	swap_xmm %r14, %xmm14
-+	swap_xmm %r15, %xmm15
-+
-+	vmrun %_ASM_AX
-+
-+	swap_xmm %rax, %xmm0
-+	swap_xmm %rcx, %xmm2
-+	swap_xmm %rdx, %xmm3
-+	swap_xmm %rbp, %xmm4
-+	swap_xmm %rsp, %xmm5
-+	swap_xmm %rdi, %xmm6
-+	swap_xmm %rsi, %xmm7
-+	swap_xmm %r8, %xmm8
-+	swap_xmm %r9, %xmm9
-+	swap_xmm %r10, %xmm10
-+	swap_xmm %r11, %xmm11
-+	swap_xmm %r12, %xmm12
-+	swap_xmm %r13, %xmm13
-+	swap_xmm %r14, %xmm14
-+	swap_xmm %r15, %xmm15
-+
-+	mov %r15, %rsi
-+	lea probe_ret(%rip), %rdi
-+	jmp cachepc_probe_vcall+6 // skip stack pushs
-+probe_ret:
-+
-+	swap_xmm %rax, %xmm0
-+	swap_xmm %rcx, %xmm2
-+	swap_xmm %rdx, %xmm3
-+	swap_xmm %rbp, %xmm4
-+	swap_xmm %rsp, %xmm5
-+	swap_xmm %rdi, %xmm6
-+	swap_xmm %rsi, %xmm7
-+	swap_xmm %r8, %xmm8
-+	swap_xmm %r9, %xmm9
-+	swap_xmm %r10, %xmm10
-+	swap_xmm %r11, %xmm11
-+	swap_xmm %r12, %xmm12
-+	swap_xmm %r13, %xmm13
-+	swap_xmm %r14, %xmm14
-+	swap_xmm %r15, %xmm15
-+
++3:
++	swap_all
++	mov cachepc_ds, %rsi
++	mov 0x8(%rsi), %r15
++	lea sev_prime_ret(%rip), %rdi
++	jmp cachepc_prime_vcall+1 // skip stack pushes
++sev_prime_ret:
++	swap_all
++
++	vmrun %_ASM_AX
++
++	swap_all
++	mov %r15, %rsi
++	lea sev_probe_ret(%rip), %rdi
++	jmp cachepc_probe_vcall+6 // skip stack pushs
++sev_probe_ret:
++	swap_all
  -3:	vmrun %_ASM_AX
  	jmp 5f
  4:	cmpb $0, kvm_rebooting
  	jne 5f
-@@ -100,7 +219,7 @@ SYM_FUNC_START(__svm_vcpu_run)
+@@ -100,7 +178,7 @@ SYM_FUNC_START(__svm_vcpu_run)
  	ud2
  	_ASM_EXTABLE(5b, 6b)
  7:
 +	barrier
  
 #ifdef CONFIG_RETPOLINE
  	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-@@ -166,6 +285,11 @@ SYM_FUNC_START(__svm_vcpu_run)
+@@ -166,6 +244,11 @@ SYM_FUNC_START(__svm_vcpu_run)
  	pop %edi
 #endif
  	pop %_ASM_BP
 +
 +	mfence
 +	mov $0x80000005,%eax
 +	cpuid
 +
  	ret
 SYM_FUNC_END(__svm_vcpu_run)
+@@ -174,6 +257,8 @@ SYM_FUNC_END(__svm_vcpu_run)
+  * @vmcb_pa: unsigned long
+  */
+ SYM_FUNC_START(__svm_sev_es_vcpu_run)
++	push_all
++
+ 	push %_ASM_BP
+ #ifdef CONFIG_X86_64
+ 	push %r15
+@@ -190,7 +275,28 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
+ 	mov %_ASM_ARG1, %_ASM_AX
+ 	sti
+ 
+-1:	vmrun %_ASM_AX
++1:
++
++//	swap_all
++//	mov cachepc_ds, %rsi
++//	mov 0x8(%rsi), %r15
++//	lea sev_es_prime_ret(%rip), %rdi
++//	jmp cachepc_prime_vcall+1 // skip stack pushes
++//sev_es_prime_ret:
++//	swap_all
++
++//	// TEST r15 dependance
++//	movq $0x41414141, %r15
++
++	vmrun %_ASM_AX
++
++//	swap_all
++//	mov %r15, %rsi
++//	lea sev_es_probe_ret(%rip), %rdi
++//	jmp cachepc_probe_vcall+6 // skip stack pushs
++//sev_es_probe_ret:
++//	swap_all
++
+ 	jmp 3f
+ 2:	cmpb $0, kvm_rebooting
+ 	jne 3f
 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
 index 4097d028c3ab..81685bd567a2 100644
 --- a/arch/x86/kvm/x86.c
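The C-side hunks above all follow one pattern: clear the per-set measurement buffer, prime the L1 eviction set, enter the guest, then probe and save the timings. The sketch below restates that pattern in one place; the cachepc_* prototypes and the enter_guest() stand-in are assumptions inferred from the call sites in the diff, not the repository's declared API.

#include <stdint.h>
#include <string.h>

#define L1_SETS 64	/* one 16-bit slot per L1 set; matches memset(..., 64 * 2) above */

struct cacheline;	/* opaque eviction-set element (layout assumed) */

/* Prototypes inferred from the call sites in the diff (assumptions): */
extern struct cacheline *cachepc_prime(struct cacheline *ds);
extern void cachepc_probe(struct cacheline *cl);
extern void cachepc_save_msrmts(struct cacheline *ds);

extern struct cacheline *cachepc_ds;
extern uint16_t cachepc_msrmts[L1_SETS];

/* Hypothetical stand-in for __svm_sev_es_vcpu_run()/vmrun. */
extern void enter_guest(void);

static void cachepc_measure_guest_run(void)
{
	struct cacheline *next;

	/* Clear the per-set measurement buffer. */
	memset(cachepc_msrmts, 0, L1_SETS * sizeof(cachepc_msrmts[0]));

	/* Prime: fill every monitored L1 set with attacker-owned lines. */
	next = cachepc_prime(cachepc_ds);

	/* Any guest memory access now evicts one of the primed lines. */
	enter_guest();

	/* Probe: time re-accesses; a slow set was touched by the guest. */
	cachepc_probe(next);
	cachepc_save_msrmts(cachepc_ds);
}

In the diff this sequence runs with interrupts off and pinned to a fixed core (get_cpu() plus WARN_ON(cpu != 2)), so nothing else perturbs the cache between prime and probe. The vmenter.S changes serve the same goal at the assembly level: general-purpose registers are parked in XMM registers with vmovq (push_all/swap_all) rather than on the stack, so saving state around the prime and probe calls does not itself evict the primed lines.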
