cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmx_ops.h (9840B)
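Low-level VMCS accessors and VMX instruction wrappers (VMREAD/VMWRITE, VMCLEAR/VMPTRLD, INVVPID/INVEPT) used by KVM's Intel VMX code, with build-time checking of VMCS field widths and out-of-line error reporting.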


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"
#include "x86.h"

asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
							 bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

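/*
 * VMCS field encodings (Intel SDM): bits 14:13 give the field width
 * (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bit 0 is
 * the access type (1 = high half of a 64-bit field).  The helpers below
 * mask those bits so that a constant field of the wrong width for the
 * accessor fails the build.
 */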
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

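/*
 * __vmcs_readl() has two implementations: with asm goto output support
 * the VM-fail and fault paths of VMREAD are plain C labels; without it,
 * failure bounces through vmread_error_trampoline, which preserves
 * volatile registers around the call into the error handler.
 */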
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

	asm_volatile_goto("1: vmread %[field], %[output]\n\t"
			  "jna %l[do_fail]\n\t"

			  _ASM_EXTABLE(1b, %l[do_exception])

			  : [output] "=r" (value)
			  : [field] "r" (field)
			  : "cc"
			  : do_fail, do_exception);

	return value;

do_fail:
	WARN_ONCE(1, "kvm: vmread failed: field=%lx\n", field);
	pr_warn_ratelimited("kvm: vmread failed: field=%lx\n", field);
	return 0;

do_exception:
	kvm_spurious_fault();
	return 0;

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "xorl %k1, %k1\n\t"
		     "2:\n\t"
		     "push %1\n\t"
		     "push %2\n\t"
		     "call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)

		     : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
	return value;

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}

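/*
 * Sized read accessors: each one validates the field width at build
 * time, reads the enlightened VMCS when eVMCS is enabled (Hyper-V),
 * and otherwise issues a raw VMREAD.
 */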
static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

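/*
 * On 32-bit kernels a 64-bit field takes two VMREADs: 'field' yields
 * the low 32 bits and 'field + 1' (the same encoding with the
 * access-type bit set) yields the high 32 bits.  vmcs_write64()
 * mirrors this for writes.
 */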
static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

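/*
 * vmx_asm[12]() run a VMX instruction and cover both failure modes:
 * 'jna' branches to the error label when the instruction sets CF
 * (VMfailInvalid) or ZF (VMfailValid), and the exception table entry
 * routes a fault (e.g. #UD with VMX disabled) to kvm_spurious_fault().
 */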
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"	\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

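/*
 * Read-modify-write helpers.  The mask is only a u32, so 64-bit fields
 * are rejected at build time.
 */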
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

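/*
 * VMCLEAR and VMPTRLD take the physical address of the VMCS region as a
 * 64-bit memory operand, hence the "m"(phys_addr) constraints below.
 */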
static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

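/*
 * INVVPID and INVEPT take the extent type in a register and a 128-bit
 * descriptor in memory; the on-stack structs below build that descriptor.
 */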
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

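/*
 * vpid == 0 means VPID is not in use and the guest shares the host's
 * tag; its translations are already flushed by VMX transitions, and
 * INVVPID single-context with VPID 0 would fail anyway.
 */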
static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

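/*
 * Prefer flushing just this VPID; fall back to a global flush when the
 * CPU does not implement the single-context extent.
 */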
static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

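/*
 * EPT flushes mirror the VPID logic: invalidate a single EPTP context
 * when supported, otherwise flush all EPT contexts.
 */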
static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */