cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hyp-stub.S (5628B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hypervisor stub
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author:	Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

	.text
	.pushsection	.hyp.text, "ax"

	.align 11
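	// 2^11 = 2KB: the alignment the architecture requires of a vector
	// table installed in VBAR_EL2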

SYM_CODE_START(__hyp_stub_vectors)
	ventry	el2_sync_invalid		// Synchronous EL2t
	ventry	el2_irq_invalid			// IRQ EL2t
	ventry	el2_fiq_invalid			// FIQ EL2t
	ventry	el2_error_invalid		// Error EL2t

	ventry	elx_sync			// Synchronous EL2h
	ventry	el2_irq_invalid			// IRQ EL2h
	ventry	el2_fiq_invalid			// FIQ EL2h
	ventry	el2_error_invalid		// Error EL2h

	ventry	elx_sync			// Synchronous 64-bit EL1
	ventry	el1_irq_invalid			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
	ventry	el1_irq_invalid			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
SYM_CODE_END(__hyp_stub_vectors)

	.align 11

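/*
 * EL2 synchronous exception handler, i.e. the hyp-stub command interface:
 * the caller passes a command (HVC_SET_VECTORS, HVC_VHE_RESTART,
 * HVC_SOFT_RESTART or HVC_RESET_VECTORS) in x0 and its arguments in the
 * registers that follow. Except for HVC_SOFT_RESTART, which branches away
 * and never returns, we return to the caller with x0 set to zero on
 * success or to HVC_STUB_ERR if the request was not recognised or failed.
 */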
SYM_CODE_START_LOCAL(elx_sync)
	cmp	x0, #HVC_SET_VECTORS
	b.ne	1f
	msr	vbar_el2, x1
	b	9f

1:	cmp	x0, #HVC_VHE_RESTART
	b.eq	mutate_to_vhe

2:	cmp	x0, #HVC_SOFT_RESTART
	b.ne	3f
	mov	x0, x2
	mov	x2, x4
	mov	x4, x1
	mov	x1, x3
	br	x4				// no return

3:	cmp	x0, #HVC_RESET_VECTORS
	beq	9f				// Nothing to reset!

	/* Someone called kvm_call_hyp() against the hyp-stub... */
	mov_q	x0, HVC_STUB_ERR
	eret

9:	mov	x0, xzr
	eret
SYM_CODE_END(elx_sync)

// nVHE? No way! Give me the real thing!
SYM_CODE_START_LOCAL(mutate_to_vhe)
	// Sanity check: MMU *must* be off
	mrs	x1, sctlr_el2
	tbnz	x1, #0, 1f

	// Needs to be VHE capable, obviously
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
	cbz	x1, 1f

	// Check whether VHE is disabled from the command line
	adr_l	x1, id_aa64mmfr1_override
	ldr	x2, [x1, FTR_OVR_VAL_OFFSET]
	ldr	x1, [x1, FTR_OVR_MASK_OFFSET]
	ubfx	x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
	ubfx	x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
	cmp	x1, xzr
	and	x2, x2, x1
	csinv	x2, x2, xzr, ne
	cbnz	x2, 2f
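	// The three instructions above compute, in C terms,
	//   x2 = mask ? (val & mask) : ~0UL
	// i.e. proceed to the switch when no override is registered for the
	// VHE field, or when an override is present but leaves it non-zero.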

1:	mov_q	x0, HVC_STUB_ERR
	eret
2:
	// Engage the VHE magic!
	mov_q	x0, HCR_HOST_VHE_FLAGS
	msr	hcr_el2, x0
	isb
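	// From this point on E2H is set: the *_EL1 system register names
	// used below are redirected to their EL2 counterparts, while the
	// *_EL12 encodings reach the real EL1 registers.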

	// Use the EL1 allocated stack, per-cpu offset
	mrs	x0, sp_el1
	mov	sp, x0
	mrs	x0, tpidr_el1
	msr	tpidr_el2, x0

	// FP configuration, vectors
	mrs_s	x0, SYS_CPACR_EL12
	msr	cpacr_el1, x0
	mrs_s	x0, SYS_VBAR_EL12
	msr	vbar_el1, x0

	// Use EL2 translations for SPE & TRBE and disable access from EL1
	mrs	x0, mdcr_el2
	bic	x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	bic	x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	msr	mdcr_el2, x0

	// Transfer the MM state from EL1 to EL2
	mrs_s	x0, SYS_TCR_EL12
	msr	tcr_el1, x0
	mrs_s	x0, SYS_TTBR0_EL12
	msr	ttbr0_el1, x0
	mrs_s	x0, SYS_TTBR1_EL12
	msr	ttbr1_el1, x0
	mrs_s	x0, SYS_MAIR_EL12
	msr	mair_el1, x0
	isb

	// Hack the exception return to stay at EL2
	mrs	x0, spsr_el1
	and	x0, x0, #~PSR_MODE_MASK
	mov	x1, #PSR_MODE_EL2h
	orr	x0, x0, x1
	msr	spsr_el1, x0

	b	enter_vhe
SYM_CODE_END(mutate_to_vhe)

	// At the point where we reach enter_vhe(), we run with
	// the MMU off (which is enforced by mutate_to_vhe()).
	// We thus need to be in the idmap, or everything will
	// explode when enabling the MMU.

	.pushsection	.idmap.text, "ax"

SYM_CODE_START_LOCAL(enter_vhe)
	// Invalidate TLBs before enabling the MMU
	tlbi	vmalle1
	dsb	nsh
	isb

	// Enable the EL2 S1 MMU, as set up from EL1
	mrs_s	x0, SYS_SCTLR_EL12
	set_sctlr_el1	x0

	// Disable the EL1 S1 MMU for a good measure
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	msr_s	SYS_SCTLR_EL12, x0

	mov	x0, xzr

	eret
SYM_CODE_END(enter_vhe)

	.popsection

.macro invalid_vector	label
SYM_CODE_START_LOCAL(\label)
	b \label
SYM_CODE_END(\label)
.endm

	invalid_vector	el2_sync_invalid
	invalid_vector	el2_irq_invalid
	invalid_vector	el2_fiq_invalid
	invalid_vector	el2_error_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid
	invalid_vector	el1_error_invalid

	.popsection

/*
 * __hyp_set_vectors: Call this after boot to set the initial hypervisor
 * vectors as part of hypervisor installation.  On an SMP system, this should
 * be called on each CPU.
 *
 * x0 must be the physical address of the new vector table, and must be
 * 2KB aligned.
 *
 * Before calling this, you must check that the stub hypervisor is installed
 * everywhere, by waiting for any secondary CPUs to be brought up and then
 * checking that is_hyp_mode_available() is true.
 *
 * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
 * something else went wrong... in such cases, trying to install a new
 * hypervisor is unlikely to work as desired.
 *
 * When you call into your shiny new hypervisor, sp_el2 will contain junk,
 * so you will need to set that to something sensible at the new hypervisor's
 * initialisation entry point.
 */

SYM_FUNC_START(__hyp_set_vectors)
	mov	x1, x0
	mov	x0, #HVC_SET_VECTORS
	hvc	#0
	ret
SYM_FUNC_END(__hyp_set_vectors)

SYM_FUNC_START(__hyp_reset_vectors)
	mov	x0, #HVC_RESET_VECTORS
	hvc	#0
	ret
SYM_FUNC_END(__hyp_reset_vectors)

/*
 * Entry point to switch to VHE if deemed capable
 */
SYM_FUNC_START(switch_to_vhe)
	// Need to have booted at EL2
	adr_l	x1, __boot_cpu_mode
	ldr	w0, [x1]
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f

	// and still be at EL1
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL1
	b.ne	1f

	// Turn the world upside down
	mov	x0, #HVC_VHE_RESTART
	hvc	#0
1:
	ret
SYM_FUNC_END(switch_to_vhe)
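
For context, here is a minimal sketch of how a C-side caller might honour the constraints documented in the __hyp_set_vectors comment block above. It assumes the prototypes exported by arch/arm64/include/asm/virt.h (__hyp_set_vectors(), is_hyp_mode_available()); the wrapper name install_hyp_vectors and its error handling are illustrative only and are not part of this repository.

/* Illustrative sketch only - not code from this file or repository. */
#include <linux/align.h>
#include <linux/errno.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <asm/virt.h>

static int install_hyp_vectors(phys_addr_t vectors_pa)
{
	/* The hyp stub must still be installed on every CPU. */
	if (!is_hyp_mode_available())
		return -ENODEV;

	/* The new table is passed as a physical address, 2KB aligned. */
	if (!IS_ALIGNED(vectors_pa, SZ_2K))
		return -EINVAL;

	__hyp_set_vectors(vectors_pa);	/* issues HVC_SET_VECTORS */
	return 0;
}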