/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.section .noinstr.text, "ax"

#include "../cachepc/macro.S"

# apply \func to each GPR and the xmm register it is stashed in
# (r15 is handled separately as the scratch register)
.macro apply_regs func
	\func %rax %xmm0
	\func %rbx %xmm1
	\func %rcx %xmm2
	\func %rdx %xmm3
	\func %rbp %xmm4
	\func %rsp %xmm5
	\func %rdi %xmm6
	\func %rsi %xmm7
	\func %r8 %xmm8
	\func %r9 %xmm9
	\func %r10 %xmm10
	\func %r11 %xmm11
	\func %r12 %xmm12
	\func %r13 %xmm13
	\func %r14 %xmm14
.endm

.macro save_reg src dst
	movq \src, \dst
.endm

.macro swap_reg src dst
	movq \src, %r15
	movq \dst, \src
	movq %r15, \dst
.endm

.macro save_vm
	apply_regs save_reg
.endm

.macro exit_vm
	apply_regs swap_reg
.endm

.macro enter_vm
	apply_regs swap_reg
	movq %xmm15, %r15 # fixup tmp reg
.endm

.macro wrap_prime name
	exit_vm

	# read vars before prime
	xor %rax, %rax
	movb cpc_apic_oneshot, %al
	mov %rax, %r11
	mov cpc_apic_timer, %eax
	mov %rax, %r12
	mov cpc_apic_timer_softdiv, %eax
	mov %rax, %r13
	movb cpc_prime_probe, %al
	mov %rax, %r14

	# do prime
	mov %r14, %rax
	cmp $0, %al
	je skip_prime_\name
	wbinvd
	mov cpc_ds, %r9
	prime \name %r9 %r10 %r8
skip_prime_\name:

	# do oneshot
	mov %r11, %rax
	cmp $0, %al
	je skip_apic_\name

	# asm from cpc_apic_oneshot_run
	mov $0xec, %edx
	mov %edx, 0xffffffffff5fd320
	mov $0x0b, %edx
	mov %edx, 0xffffffffff5fd3e0
	xor %edx, %edx
	mov %r12, %rax
	div %r13
	mov %eax, 0xffffffffff5fd380
skip_apic_\name:

	enter_vm
.endm

.macro wrap_probe name
	exit_vm

	# do probe
	mov %r14, %rax
	cmp $0, %al
	je skip_probe_\name
	probe \name $CPC_L1MISS_PMC %r8 %r9 %r10 %r11 %r12
skip_probe_\name:

	enter_vm
.endm

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long
 * @regs:	unsigned long * (to guest registers)
 */
SYM_FUNC_START(__svm_vcpu_run)
	save_vm

	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX

	wrap_prime sev_vcpu_run

	sti

1:	vmrun %_ASM_AX

2:	cli

	wrap_probe sev_vcpu_run

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	save_vm

	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Move @vmcb to RAX. */
	mov %_ASM_ARG1, %_ASM_AX

	wrap_prime sev_es_vcpu_run

	sti

1:	vmrun %_ASM_AX

2:	cli

	# in sev-es the cpu register state is not restored after vmrun;
	# the xmm regs have been cleared, so we need extra accesses to
	# reload the stashed values before probing
	movq cpc_ds_probe, %xmm8
	movq cpc_prime_probe, %xmm14

	wrap_probe sev_es_vcpu_run

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
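
/*
 * For reference, a minimal C-side sketch of how these entry points are
 * typically declared and invoked, inferred from the argument comments
 * above; the surrounding caller, variable names, and sev_es_guest()
 * check are assumptions about the C code in svm.c, not part of this file:
 *
 *	void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 *	void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
 *
 *	if (sev_es_guest(vcpu->kvm))
 *		__svm_sev_es_vcpu_run(vmcb_pa);
 *	else
 *		__svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
 */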