cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hyp-entry.S (5773B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu.h>
#include <asm/spectre.h>

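/*
 * Save/restore helpers for the caller-saved GPRs (x2-x17) used by the
 * unexpected-exception paths below. x0 and x1 are not saved here because
 * the vector preamble has already pushed them; the restore helper pops
 * all of x0-x17 in one go.
 */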
.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
	ldp	x0, x1,   [sp], #16
.endm

	.text

el1_sync:				// Guest trapped into EL2

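	/*
	 * Decode the exception class from ESR_EL2. Only an HVC issued by
	 * an AArch64 or AArch32 guest is handled inline (for the SMCCC
	 * Spectre workarounds below); anything else is a regular trap.
	 */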
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbz	w1, wa_epilogue

	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_3)
	cbnz	w1, el1_trap

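/*
 * Common return path for the SMCCC workaround calls: report
 * SMCCC_RET_SUCCESS (0) in x0, drop the x0/x1 slot pushed by the vector
 * preamble and go straight back to the guest. The sb keeps the CPU from
 * speculating past the eret.
 */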
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
el1_fiq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

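/*
 * Exceptions taken from EL2 itself. Bit 20 of SPSR_EL2 is the IL flag:
 * if it is set, the previous exception return was illegal, so give up on
 * the guest via __guest_exit with ARM_EXCEPTION_IL. Otherwise the
 * exception is unexpected and kvm_unexpected_el2_exception() either
 * applies a fixup or panics.
 */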
el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
	tbnz	x0, #20, 1f

	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

.macro invalid_vector	label, target = __guest_exit_panic
	.align	2
SYM_CODE_START_LOCAL(\label)
	b \target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid

	.ltorg

	.align 11

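/*
 * Each 128-byte vector slot begins with a fixed-size preamble: an ESB
 * (valid_vect) or NOP (invalid_vect) followed by the push of x0/x1.
 * kvm_patch_vector_branch() and the hyp_ventry stubs further down branch
 * past that preamble, so its length is asserted against
 * KVM_VECTOR_PREAMBLE at build time.
 */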
.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	nop
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	valid_vect	el1_fiq			// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	valid_vect	el1_fiq			// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

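/*
 * Issue the SMCCC ARCH_WORKAROUND_1 (or, when patched by
 * spectre_bhb_patch_wa3, ARCH_WORKAROUND_3) call to firmware on guest
 * exit. Note the asymmetric stack handling: 32 bytes are reserved and
 * x2/x3 plus x0/x1 are saved, but only 16 bytes are popped afterwards,
 * deliberately leaving x0/x1 on the stack exactly as the normal vector
 * preamble would have.
 */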
.macro spectrev2_smccc_wa1_smc
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	alternative_cb spectre_bhb_patch_wa3
	/* Patched to mov WA3 when supported */
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	alternative_cb_end
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
.endm

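/*
 * Generate one 128-byte slot of a hardened vector table: apply the
 * configured Spectre-v2/BHB mitigation first, then branch to the
 * matching slot of __kvm_hyp_vector, skipping its preamble (x0/x1 have
 * already been pushed here).
 */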
.macro hyp_ventry	indirect, spectrev2
	.align	7
1:	esb
	.if \spectrev2 != 0
	spectrev2_smccc_wa1_smc
	.else
	stp	x0, x1, [sp, #-16]!
	mitigate_spectre_bhb_loop	x0
	mitigate_spectre_bhb_clear_insn
	.endif
	.if \indirect != 0
	alternative_cb  kvm_patch_vector_branch
	/*
	 * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
	 *
	 * movz	x0, #(addr & 0xffff)
	 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
	 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
	 * br	x0
	 *
	 * Where:
	 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
	 * See kvm_patch_vector_branch for details.
	 */
	nop
	nop
	nop
	nop
	alternative_cb_end
	.endif
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
.endm

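/*
 * Emit a complete 16-entry (2KB) vector table built from hyp_ventry
 * slots; the .org directive makes the build fail if the table ever
 * grows past SZ_2K.
 */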
.macro generate_vectors	indirect, spectrev2
0:
	.rept 16
	hyp_ventry	\indirect, \spectrev2
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
	generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
	generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)