cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

kvm_asm.h (10066B)


      1/* SPDX-License-Identifier: GPL-2.0-only */
      2/*
      3 * Copyright (C) 2012,2013 - ARM Ltd
      4 * Author: Marc Zyngier <marc.zyngier@arm.com>
      5 */
      6
      7#ifndef __ARM_KVM_ASM_H__
      8#define __ARM_KVM_ASM_H__
      9
     10#include <asm/hyp_image.h>
     11#include <asm/insn.h>
     12#include <asm/virt.h>
     13
     14#define ARM_EXIT_WITH_SERROR_BIT  31
     15#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
     16#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
     17#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
     18
     19#define ARM_EXCEPTION_IRQ	  0
     20#define ARM_EXCEPTION_EL1_SERROR  1
     21#define ARM_EXCEPTION_TRAP	  2
     22#define ARM_EXCEPTION_IL	  3
     23/* The hyp-stub will return this for any kvm_call_hyp() call */
     24#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
     25
     26#define kvm_arm_exception_type					\
     27	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
     28	{ARM_EXCEPTION_EL1_SERROR, 	"SERROR"	},	\
     29	{ARM_EXCEPTION_TRAP, 		"TRAP"		},	\
     30	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
     31
     32/*
     33 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
     34 * that jumps over this.
     35 */
     36#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)
     37
     38#define KVM_HOST_SMCCC_ID(id)						\
     39	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
     40			   ARM_SMCCC_SMC_64,				\
     41			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
     42			   (id))
     43
     44#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
     45
     46#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
     47
     48#ifndef __ASSEMBLY__
     49
     50#include <linux/mm.h>
     51
     52enum __kvm_host_smccc_func {
     53	/* Hypercalls available only prior to pKVM finalisation */
     54	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
     55	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
     56	__KVM_HOST_SMCCC_FUNC___pkvm_init,
     57	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
     58	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
     59	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
     60	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
     61	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
     62	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
     63
     64	/* Hypercalls available after pKVM finalisation */
     65	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
     66	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
     67	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
     68	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
     69	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
     70	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
     71	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
     72	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
     73	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
     74	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
     75	__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
     76	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
     77	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
     78	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
     79};
     80
     81#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
     82#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
     83
     84/*
     85 * Define a pair of symbols sharing the same name but one defined in
     86 * VHE and the other in nVHE hyp implementations.
     87 */
     88#define DECLARE_KVM_HYP_SYM(sym)		\
     89	DECLARE_KVM_VHE_SYM(sym);		\
     90	DECLARE_KVM_NVHE_SYM(sym)
     91
     92#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
     93	DECLARE_PER_CPU(type, sym)
     94#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
     95	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
     96
     97#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
     98	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
     99	DECLARE_KVM_NVHE_PER_CPU(type, sym)
    100
    101/*
    102 * Compute pointer to a symbol defined in nVHE percpu region.
    103 * Returns NULL if percpu memory has not been allocated yet.
    104 */
    105#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
    106#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
    107	({									\
    108		unsigned long base, off;					\
    109		base = kvm_arm_hyp_percpu_base[cpu];				\
    110		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
    111		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
    112		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
    113	})
    114
    115#if defined(__KVM_NVHE_HYPERVISOR__)
    116
    117#define CHOOSE_NVHE_SYM(sym)	sym
    118#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)
    119
    120/* The nVHE hypervisor shouldn't even try to access VHE symbols */
    121extern void *__nvhe_undefined_symbol;
    122#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
    123#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
    124#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)
    125
    126#elif defined(__KVM_VHE_HYPERVISOR__)
    127
    128#define CHOOSE_VHE_SYM(sym)	sym
    129#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)
    130
    131/* The VHE hypervisor shouldn't even try to access nVHE symbols */
    132extern void *__vhe_undefined_symbol;
    133#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
    134#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
    135#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)
    136
    137#else
    138
    139/*
    140 * BIG FAT WARNINGS:
    141 *
    142 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
    143 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
    144 *   while this is used early at boot time, when the capabilities are
    145 *   not final yet....
    146 *
    147 * - Don't let the nVHE hypervisor have access to this, as it will
    148 *   pick the *wrong* symbol (yes, it runs at EL2...).
    149 */
    150#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
    151					   ? CHOOSE_VHE_SYM(sym)	\
    152					   : CHOOSE_NVHE_SYM(sym))
    153
    154#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
    155					   ? this_cpu_ptr(&sym)		\
    156					   : this_cpu_ptr_nvhe_sym(sym))
    157
    158#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
    159					   ? per_cpu_ptr(&sym, cpu)	\
    160					   : per_cpu_ptr_nvhe_sym(sym, cpu))
    161
    162#define CHOOSE_VHE_SYM(sym)	sym
    163#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
    164
    165#endif
    166
    167struct kvm_nvhe_init_params {
    168	unsigned long mair_el2;
    169	unsigned long tcr_el2;
    170	unsigned long tpidr_el2;
    171	unsigned long stack_hyp_va;
    172	unsigned long stack_pa;
    173	phys_addr_t pgd_pa;
    174	unsigned long hcr_el2;
    175	unsigned long vttbr;
    176	unsigned long vtcr;
    177};
    178
    179/* Translate a kernel address @ptr into its equivalent linear mapping */
    180#define kvm_ksym_ref(ptr)						\
    181	({								\
    182		void *val = (ptr);					\
    183		if (!is_kernel_in_hyp_mode())				\
    184			val = lm_alias((ptr));				\
    185		val;							\
    186	 })
    187#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
    188
    189struct kvm;
    190struct kvm_vcpu;
    191struct kvm_s2_mmu;
    192
    193DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
    194DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
    195#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
    196#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
    197
    198extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
    199DECLARE_KVM_NVHE_SYM(__per_cpu_start);
    200DECLARE_KVM_NVHE_SYM(__per_cpu_end);
    201
    202DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
    203#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
    204
    205extern void __kvm_flush_vm_context(void);
    206extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
    207extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
    208				     int level);
    209extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
    210
    211extern void __kvm_timer_set_cntvoff(u64 cntvoff);
    212
    213extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
    214
    215extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
    216
    217extern u64 __vgic_v3_get_gic_config(void);
    218extern u64 __vgic_v3_read_vmcr(void);
    219extern void __vgic_v3_write_vmcr(u32 vmcr);
    220extern void __vgic_v3_init_lrs(void);
    221
    222extern u64 __kvm_get_mdcr_el2(void);
    223
    224#define __KVM_EXTABLE(from, to)						\
    225	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
    226	"	.align		3\n"					\
    227	"	.long		(" #from " - .), (" #to " - .)\n"	\
    228	"	.popsection\n"
    229
    230
    231#define __kvm_at(at_op, addr)						\
    232( { 									\
    233	int __kvm_at_err = 0;						\
    234	u64 spsr, elr;							\
    235	asm volatile(							\
    236	"	mrs	%1, spsr_el2\n"					\
    237	"	mrs	%2, elr_el2\n"					\
    238	"1:	at	"at_op", %3\n"					\
    239	"	isb\n"							\
    240	"	b	9f\n"						\
    241	"2:	msr	spsr_el2, %1\n"					\
    242	"	msr	elr_el2, %2\n"					\
    243	"	mov	%w0, %4\n"					\
    244	"9:\n"								\
    245	__KVM_EXTABLE(1b, 2b)						\
    246	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
    247	: "r" (addr), "i" (-EFAULT));					\
    248	__kvm_at_err;							\
    249} )
    250
    251
    252#else /* __ASSEMBLY__ */
    253
    254.macro get_host_ctxt reg, tmp
    255	adr_this_cpu \reg, kvm_host_data, \tmp
    256	add	\reg, \reg, #HOST_DATA_CONTEXT
    257.endm
    258
    259.macro get_vcpu_ptr vcpu, ctxt
    260	get_host_ctxt \ctxt, \vcpu
    261	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
    262.endm
    263
    264.macro get_loaded_vcpu vcpu, ctxt
    265	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
    266	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
    267.endm
    268
    269.macro set_loaded_vcpu vcpu, ctxt, tmp
    270	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
    271	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
    272.endm
    273
    274/*
    275 * KVM extable for unexpected exceptions.
    276 * Create a struct kvm_exception_table_entry output to a section that can be
    277 * mapped by EL2. The table is not sorted.
    278 *
    279 * The caller must ensure:
    280 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
    281 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
    282 */
    283.macro	_kvm_extable, from, to
    284	.pushsection	__kvm_ex_table, "a"
    285	.align		3
    286	.long		(\from - .), (\to - .)
    287	.popsection
    288.endm
    289
    290#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
    291#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
    292#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)
    293
    294/*
    295 * We treat x18 as callee-saved as the host may use it as a platform
    296 * register (e.g. for shadow call stack).
    297 */
    298.macro save_callee_saved_regs ctxt
    299	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
    300	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
    301	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
    302	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
    303	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
    304	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
    305	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
    306.endm
    307
    308.macro restore_callee_saved_regs ctxt
    309	// We require \ctxt is not x18-x28
    310	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
    311	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
    312	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
    313	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
    314	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
    315	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
    316	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
    317.endm
    318
    319.macro save_sp_el0 ctxt, tmp
    320	mrs	\tmp,	sp_el0
    321	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
    322.endm
    323
    324.macro restore_sp_el0 ctxt, tmp
    325	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
    326	msr	sp_el0, \tmp
    327.endm
    328
    329#endif
    330
    331#endif /* __ARM_KVM_ASM_H__ */