cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

el2_setup.h (6759B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__

#ifndef __ASSEMBLY__
#error Assembly-only header
#endif

#include <asm/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>

.macro __init_el2_sctlr
	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	msr	sctlr_el2, x0
	isb
.endm

/*
 * Allow Non-secure EL1 and EL0 to access the physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured at a later stage of the boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and instructions accessing CNTKCTL_EL1 are redefined
 * to access CNTHCTL_EL2. This allows a kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 */
.macro __init_el2_timers
	mov	x0, #3				// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset
.endm
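
/*
 * The literal #3 above corresponds to EL1PCEN | EL1PCTEN in CNTHCTL_EL2
 * (E2H == 0 layout): EL1 and EL0 may access the physical counter and the
 * physical timer without trapping to EL2. A sketch of the same write using
 * the named bits (assuming the usual definitions from asm/kvm_arm.h, which
 * is already included above):
 *
 *	mov	x0, #(CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)
 *	msr	cnthctl_el2, x0
 */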

.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	x0, #1
	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
.Lskip_pmu_\@:
	csel	x2, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present

	mrs_s	x0, SYS_PMBIDR_EL1              // If SPE available at EL2,
	and	x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
.Lskip_spe_el2_\@:
	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.

.Lskip_spe_\@:
	/* Trace buffer */
	ubfx	x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

	mrs_s	x0, SYS_TRBIDR_EL1
	and	x0, x0, TRBIDR_PROG
	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2

	mov	x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	orr	x2, x2, x0			// allow the EL1&0 translation
						// to own it.

.Lskip_trace_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm
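
/*
 * In the macro above, the MDCR_EL2 value is accumulated in x2 before the
 * final msr: the PMU's HPMN field read from PMCR_EL0 (or zero when no PMU
 * is present), plus the E2PB/E2TB bits that hand the SPE and trace buffers
 * over to the EL1&0 translation regime when EL2 does not own them.
 */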

/* LORegions */
.macro __init_el2_lor
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
.macro __init_el2_stage2
	msr	vttbr_el2, xzr
.endm

/* GICv3 system register access */
.macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
	cbz	x0, .Lskip_gicv3_\@

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, .Lskip_gicv3_\@		// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm

.macro __init_el2_hstr
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
.endm

/* Virtual CPU ID registers */
.macro __init_el2_nvhe_idregs
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1
.endm

/* Coprocessor traps */
.macro __init_el2_nvhe_cptr
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
.endm
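
/*
 * Register-usage note: __init_el2_nvhe_cptr leaves the CPTR_EL2 value in x0,
 * and the SVE/SME macros below rely on that: they only clear additional trap
 * bits with bic and rewrite cptr_el2, without re-reading the register. Keep
 * the init_el2_state ordering if these macros are reused elsewhere.
 */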

/* SVE register access */
.macro __init_el2_nvhe_sve
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	cbz	x1, .Lskip_sve_\@

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
.Lskip_sve_\@:
.endm

/* SME register access and priority mapping */
.macro __init_el2_nvhe_sme
	mrs	x1, id_aa64pfr1_el1
	ubfx	x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
	cbz	x1, .Lskip_sme_\@

	bic	x0, x0, #CPTR_EL2_TSM		// Also disable SME traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb

	mrs	x1, sctlr_el2
	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
	msr	sctlr_el2, x1
	isb

	mov	x1, #0				// SMCR controls

	mrs_s	x2, SYS_ID_AA64SMFR0_EL1
	ubfx	x2, x2, #ID_AA64SMFR0_FA64_SHIFT, #1 // Full FP in SM?
	cbz	x2, .Lskip_sme_fa64_\@

	orr	x1, x1, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64_\@:

	orr	x1, x1, #SMCR_ELx_LEN_MASK	// Enable full SME vector
	msr_s	SYS_SMCR_EL2, x1		// length for EL1.

	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
	cbz	x1, .Lskip_sme_\@

	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal

	mrs	x1, id_aa64mmfr1_el1		// HCRX_EL2 present?
	ubfx	x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
	cbz	x1, .Lskip_sme_\@

	mrs_s	x1, SYS_HCRX_EL2
	orr	x1, x1, #HCRX_EL2_SMPME_MASK	// Enable priority mapping
	msr_s	SYS_HCRX_EL2, x1

.Lskip_sme_\@:
.endm

/* Disable any fine grained traps */
.macro __init_el2_fgt
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	mov	x0, xzr
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
	cmp	x1, #3
	b.lt	.Lset_debug_fgt_\@
	/* Disable PMSNEVFR_EL1 read and write traps */
	orr	x0, x0, #(1 << 62)

.Lset_debug_fgt_\@:
	msr_s	SYS_HDFGRTR_EL2, x0
	msr_s	SYS_HDFGWTR_EL2, x0

	mov	x0, xzr
	mrs	x1, id_aa64pfr1_el1
	ubfx	x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
	cbz	x1, .Lset_fgt_\@

	/* Disable nVHE traps of TPIDR2 and SMPRI */
	orr	x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
	orr	x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK

.Lset_fgt_\@:
	msr_s	SYS_HFGRTR_EL2, x0
	msr_s	SYS_HFGWTR_EL2, x0
	msr_s	SYS_HFGITR_EL2, xzr

	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
	ubfx	x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	msr_s	SYS_HAFGRTR_EL2, xzr
.Lskip_fgt_\@:
.endm

.macro __init_el2_nvhe_prepare_eret
	mov	x0, #INIT_PSTATE_EL1
	msr	spsr_el2, x0
.endm

/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process.
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
	__init_el2_sctlr
	__init_el2_timers
	__init_el2_debug
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_hstr
	__init_el2_nvhe_idregs
	__init_el2_nvhe_cptr
	__init_el2_nvhe_sve
	__init_el2_nvhe_sme
	__init_el2_fgt
	__init_el2_nvhe_prepare_eret
.endm
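
/*
 * Illustrative caller (not part of this header): the early boot path in
 * arch/arm64/kernel/head.S invokes init_el2_state on CPUs that entered the
 * kernel at EL2, roughly along the lines below. This is a simplified sketch
 * of that sequence, not the exact head.S code:
 *
 *	mov_q	x0, HCR_HOST_NVHE_FLAGS		// non-VHE host trap config
 *	msr	hcr_el2, x0
 *	isb
 *
 *	init_el2_state				// clobbers x0, x1, x2
 *
 *	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF	// sane EL1 state to return to
 *	msr	sctlr_el1, x0
 *	msr	elr_el2, lr			// return to the caller at EL1
 *	mov	w0, #BOOT_CPU_MODE_EL2		// report the mode we booted in
 *	eret					// SPSR_EL2 was set by
 *						// __init_el2_nvhe_prepare_eret
 */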

#endif /* __ARM_KVM_INIT_H__ */