cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

head-nommu.S (14239B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head-nommu.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (C) 2003-2006 Hyok S. Choi
 *
 *  Common kernel startup code (non-paged MM)
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/errno.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/cp15.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>
#include <asm/mpu.h>
#include <asm/page.h>

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr.
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 */

	__HEAD

#ifdef CONFIG_CPU_THUMBONLY
	.thumb
ENTRY(stext)
#else
	.arm
ENTRY(stext)

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
#endif

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode, with all interrupts (irqs and fiqs) masked
	safe_svcmode_maskall r9
#if defined(CONFIG_CPU_CP15)
	mrc	p15, 0, r9, c0, c0		@ get processor id
#elif defined(CONFIG_CPU_V7M)
	ldr	r9, =BASEADDR_V7M_SCB
	ldr	r9, [r9, V7M_SCB_CPUID]
#else
	ldr	r9, =CONFIG_PROCESSOR_ID
#endif
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_MPU
	bl	__setup_mpu
#endif

	badr	lr, 1f				@ return (PIC) address
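	/*
	 * The initfunc value is stored relative to the procinfo entry
	 * (the proc_info tables store it via the initfn macro), so the
	 * procinfo base in r10 is added back in before the indirect
	 * call; this keeps the call position-independent this early.
	 */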
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	ldr	lr, =__mmap_switched
	b	__after_proc_init
ENDPROC(stext)

#ifdef CONFIG_SMP
	.text
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

#ifndef CONFIG_CPU_CP15
	ldr	r9, =CONFIG_PROCESSOR_ID
#else
	mrc	p15, 0, r9, c0, c0		@ get processor id
#endif
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor?
	beq	__error_p			@ yes, error 'p'

	ldr	r7, __secondary_data

#ifdef CONFIG_ARM_MPU
	bl	__secondary_setup_mpu		@ Initialize the MPU
#endif

	badr	lr, 1f				@ return (PIC) address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	bl	__after_proc_init
	ldr	r7, __secondary_data		@ reload r7
	ldr	sp, [r7, #12]			@ set up the stack pointer
	ldr	r0, [r7, #16]			@ set up task pointer
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(secondary_startup)

	.type	__secondary_data, %object
__secondary_data:
	.long	secondary_data
#endif /* CONFIG_SMP */

/*
 * Set the Control Register and Read the processor ID.
 */
	.text
__after_proc_init:
M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
#ifdef CONFIG_ARM_MPU
M_CLASS(ldr	r3, [r12, 0x50])		@ ID_MMFR0 at SCB offset 0x50
AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)          @ Read ID_MMFR0
	and	r3, r3, #(MMFR0_PMSA)           @ PMSA field
	teq	r3, #(MMFR0_PMSAv7)             @ PMSA v7
	beq	1f
	teq	r3, #(MMFR0_PMSAv8)		@ PMSA v8
	/*
	 * Memory region attributes for PMSAv8:
	 *
	 *   n = AttrIndx[2:0]
	 *                      n       MAIR
	 *   DEVICE_nGnRnE      000     00000000
	 *   NORMAL             001     11111111
	 */
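	/*
	 * Illustrative value, assuming PMSAv8_MAIR(attr, n) expands to
	 * attr << (n * 8): with DEVICE_nGnRnE at index 0 and NORMAL at
	 * index 1, the ldreq below loads MAIR0 = 0x0000ff00.
	 */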
	ldreq	r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
		     PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
AR_CLASS(mcreq	p15, 0, r3, c10, c2, 0)		@ MAIR 0
M_CLASS(streq	r3, [r12, #PMSAv8_MAIR0])
	moveq	r3, #0
AR_CLASS(mcreq	p15, 0, r3, c10, c2, 1)		@ MAIR 1
M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])

1:
#endif
#ifdef CONFIG_CPU_CP15
	/*
	 * CP15 system control register value returned in r0 from
	 * the CPU init function.
	 */

#ifdef CONFIG_ARM_MPU
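	/*
	 * Z is still set from one of the teq instructions above: the
	 * PMSAv7 case branched to 1: with Z set, and the PMSAv8 case
	 * fell through with Z set. If neither matched, the conditional
	 * instructions below are skipped and the MPU stays disabled.
	 */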
	biceq	r0, r0, #CR_BR			@ Disable the 'default mem-map'
	orreq	r0, r0, #CR_M			@ Set SCTLR.M (MPU on)
#endif
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	instr_sync
#elif defined (CONFIG_CPU_V7M)
#ifdef CONFIG_ARM_MPU
	ldreq	r3, [r12, MPU_CTRL]
	biceq	r3, #MPU_CTRL_PRIVDEFENA
	orreq	r3, #MPU_CTRL_ENABLE
	streq	r3, [r12, MPU_CTRL]
	isb
#endif
	/* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_DC
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_BP
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_IC
#endif
	str	r0, [r12, V7M_SCB_CCR]
	/* Pass exc_ret to __mmap_switched */
	mov	r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
	ret	lr
ENDPROC(__after_proc_init)
	.ltorg

#ifdef CONFIG_ARM_MPU

#ifndef CONFIG_CPU_V7M
/* Set which MPU region should be programmed */
.macro set_region_nr tmp, rgnr, unused
	mov	\tmp, \rgnr			@ Use static region numbers
	mcr	p15, 0, \tmp, c6, c2, 0		@ Write RGNR
.endm

/* Setup a single MPU region, either D or I side (D-side for unified) */
.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
	mcr	p15, 0, \bar, c6, c1, (0 + \side)	@ I/DRBAR
	mcr	p15, 0, \acr, c6, c1, (4 + \side)	@ I/DRACR
	mcr	p15, 0, \sr, c6, c1, (2 + \side)	@ I/DRSR
.endm
#else
.macro set_region_nr tmp, rgnr, base
	mov	\tmp, \rgnr
	str	\tmp, [\base, #PMSAv7_RNR]
.endm

.macro setup_region bar, acr, sr, unused, base
	lsl	\acr, \acr, #16
	orr	\acr, \acr, \sr
	str	\bar, [\base, #PMSAv7_RBAR]
	str	\acr, [\base, #PMSAv7_RASR]
.endm

#endif
/*
 * Setup the MPU and initial MPU Regions. We create the following regions:
 * Region 0: Use this for probing the MPU details, so leave disabled.
 * Region 1: Background region - covers the whole of RAM as strongly ordered
 * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
 * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
 *
 * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
 */
	__HEAD

ENTRY(__setup_mpu)

	/* Probe for v7 PMSA compliance */
M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)

AR_CLASS(mrc	p15, 0, r0, c0, c1, 4)		@ Read ID_MMFR0
M_CLASS(ldr	r0, [r12, 0x50])		@ ID_MMFR0 at SCB offset 0x50
	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
	beq	__setup_pmsa_v7
	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
	beq	__setup_pmsa_v8

	ret	lr
ENDPROC(__setup_mpu)

ENTRY(__setup_pmsa_v7)
	/* Calculate the size of a region covering just the kernel */
	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
	ldr	r6, =(_end)			@ Cover whole kernel
	sub	r6, r6, r5			@ Minimum size of region to map
	clz	r6, r6				@ Region size must be 2^N...
	rsb	r6, r6, #31			@ ...so round up region size
	lsl	r6, r6, #PMSAv7_RSR_SZ		@ Put size in right field
	orr	r6, r6, #(1 << PMSAv7_RSR_EN)	@ Set region enabled bit
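	/*
	 * Worked example: for a kernel spanning 0x2cc000 bytes, clz
	 * yields 10 and 31 - 10 = 21; assuming DRSR.SZ encodes a region
	 * of 2^(SZ+1) bytes, SZ = 21 selects 4MiB, the smallest power
	 * of two covering the kernel.
	 */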

	/* Determine whether the D/I-side memory map is unified. We set the
	 * flags here and continue to use them for the rest of this function */
AR_CLASS(mrc	p15, 0, r0, c0, c0, 4)		@ MPUIR
M_CLASS(ldr	r0, [r12, #MPU_TYPE])
	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
	bxeq	lr
	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified

	/* Setup second region first to free up r6 */
	set_region_nr r0, #PMSAv7_RAM_REGION, r12
	isb
	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
	ldr	r5, =(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)

	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ PHYS_OFFSET, shared, enabled
	beq	1f					@ Memory-map not unified
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ PHYS_OFFSET, shared, enabled
1:	isb

	/* First/background region */
	set_region_nr r0, #PMSAv7_BG_REGION, r12
	isb
	/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
	mov	r0, #0				@ BG region starts at 0x0
	ldr	r5, =(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
	mov	r6, #PMSAv7_RSR_ALL_MEM		@ 4GB region, enabled

	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ 0x0, BG region, enabled
	beq	2f					@ Memory-map not unified
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ 0x0, BG region, enabled
2:	isb

#ifdef CONFIG_XIP_KERNEL
	set_region_nr r0, #PMSAv7_ROM_REGION, r12
	isb

	ldr	r5, =(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)

	ldr	r0, =CONFIG_XIP_PHYS_ADDR		@ ROM start
	ldr	r6, =(_exiprom)				@ ROM end
	sub	r6, r6, r0				@ Minimum size of region to map
	clz	r6, r6					@ Region size must be 2^N...
	rsb	r6, r6, #31				@ ...so round up region size
	lsl	r6, r6, #PMSAv7_RSR_SZ			@ Put size in right field
	orr	r6, r6, #(1 << PMSAv7_RSR_EN)		@ Set region enabled bit

	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
	beq	3f					@ Memory-map not unified
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
3:	isb
#endif
	ret	lr
ENDPROC(__setup_pmsa_v7)

ENTRY(__setup_pmsa_v8)
	mov	r0, #0
AR_CLASS(mcr	p15, 0, r0, c6, c2, 1)		@ PRSEL
M_CLASS(str	r0, [r12, #PMSAv8_RNR])
	isb

#ifdef CONFIG_XIP_KERNEL
	ldr	r5, =CONFIG_XIP_PHYS_ADDR		@ ROM start
	ldr	r6, =(_exiprom)				@ ROM end
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
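	/*
	 * PMSAv8 regions are programmed as a pair: PRBAR takes the base
	 * address ORed with the access/shareability attributes, PRLAR
	 * the aligned inclusive limit ORed with the MAIR attribute
	 * index and the enable bit.
	 */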

AR_CLASS(mcr	p15, 0, r5, c6, c8, 0)			@ PRBAR0
AR_CLASS(mcr	p15, 0, r6, c6, c8, 1)			@ PRLAR0
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(0)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(0)])
#endif

	ldr	r5, =KERNEL_START
	ldr	r6, =KERNEL_END
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)

AR_CLASS(mcr	p15, 0, r5, c6, c8, 4)			@ PRBAR1
AR_CLASS(mcr	p15, 0, r6, c6, c8, 5)			@ PRLAR1
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(1)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(1)])

	/* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
#ifdef CONFIG_XIP_KERNEL
	ldr	r6, =KERNEL_START
	ldr	r5, =CONFIG_XIP_PHYS_ADDR
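	/* cmp/movcs computes the unsigned minimum of the two into r6 */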
	cmp	r6, r5
	movcs	r6, r5
#else
	ldr	r6, =KERNEL_START
#endif
	cmp	r6, #0
	beq	1f

	mov	r5, #0
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

AR_CLASS(mcr	p15, 0, r5, c6, c9, 0)			@ PRBAR2
AR_CLASS(mcr	p15, 0, r6, c6, c9, 1)			@ PRLAR2
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(2)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(2)])

1:
	/* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
#ifdef CONFIG_XIP_KERNEL
	ldr	r5, =KERNEL_END
	ldr	r6, =(_exiprom)
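	/* cmp/movcc computes the unsigned maximum of the two into r5 */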
	cmp	r5, r6
	movcc	r5, r6
#else
	ldr	r5, =KERNEL_END
#endif
	mov	r6, #0xffffffff
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

AR_CLASS(mcr	p15, 0, r5, c6, c9, 4)			@ PRBAR3
AR_CLASS(mcr	p15, 0, r6, c6, c9, 5)			@ PRLAR3
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(3)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(3)])

#ifdef CONFIG_XIP_KERNEL
	/* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
	ldr	r5, =(_exiprom)
	ldr	r6, =KERNEL_END
	cmp	r5, r6
	movcs	r5, r6

	ldr	r6, =KERNEL_START
	ldr	r0, =CONFIG_XIP_PHYS_ADDR
	cmp	r6, r0
	movcc	r6, r0

	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

#ifdef CONFIG_CPU_V7M
	/* There is no alias for n == 4 */
	mov	r0, #4
	str	r0, [r12, #PMSAv8_RNR]			@ PRSEL
	isb

	str	r5, [r12, #PMSAv8_RBAR_A(0)]
	str	r6, [r12, #PMSAv8_RLAR_A(0)]
#else
	mcr	p15, 0, r5, c6, c10, 0			@ PRBAR4
	mcr	p15, 0, r6, c6, c10, 1			@ PRLAR4
#endif
#endif
	ret	lr
ENDPROC(__setup_pmsa_v8)

#ifdef CONFIG_SMP
/*
 * r6: pointer to mpu_rgn_info
 */

	.text
ENTRY(__secondary_setup_mpu)
	/* Use MPU region info supplied by __cpu_up */
	ldr	r6, [r7]			@ get secondary_data.mpu_rgn_info

	/* Probe for v7 PMSA compliance */
	mrc	p15, 0, r0, c0, c1, 4		@ Read ID_MMFR0
	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
	beq	__secondary_setup_pmsa_v7
	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
	beq	__secondary_setup_pmsa_v8
	b	__error_p
ENDPROC(__secondary_setup_mpu)

/*
 * r6: pointer to mpu_rgn_info
 */
ENTRY(__secondary_setup_pmsa_v7)
	/* Determine whether the D/I-side memory map is unified. We set the
	 * flags here and continue to use them for the rest of this function */
	mrc	p15, 0, r0, c0, c0, 4		@ MPUIR
	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
	beq	__error_p

	ldr	r4, [r6, #MPU_RNG_INFO_USED]
	mov	r5, #MPU_RNG_SIZE
	add	r3, r6, #MPU_RNG_INFO_RNGS
	mla	r3, r4, r5, r3
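	/*
	 * r3 now points one past the last used region entry; the loop
	 * below walks the table backwards, programming one region per
	 * iteration until r4 reaches zero.
	 */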

1:
	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified
	sub	r3, r3, #MPU_RNG_SIZE
	sub	r4, r4, #1

	set_region_nr r0, r4
	isb

	ldr	r0, [r3, #MPU_RGN_DRBAR]
	ldr	r6, [r3, #MPU_RGN_DRSR]
	ldr	r5, [r3, #MPU_RGN_DRACR]

	setup_region r0, r5, r6, PMSAv7_DATA_SIDE
	beq	2f
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
2:	isb

	mrc	p15, 0, r0, c0, c0, 4		@ Reevaluate the MPUIR
	cmp	r4, #0
	bgt	1b

	ret	lr
ENDPROC(__secondary_setup_pmsa_v7)

ENTRY(__secondary_setup_pmsa_v8)
	ldr	r4, [r6, #MPU_RNG_INFO_USED]
#ifndef CONFIG_XIP_KERNEL
	add	r4, r4, #1
#endif
	mov	r5, #MPU_RNG_SIZE
	add	r3, r6, #MPU_RNG_INFO_RNGS
	mla	r3, r4, r5, r3

1:
	sub	r3, r3, #MPU_RNG_SIZE
	sub	r4, r4, #1

	mcr	p15, 0, r4, c6, c2, 1		@ PRSEL
	isb

	ldr	r5, [r3, #MPU_RGN_PRBAR]
	ldr	r6, [r3, #MPU_RGN_PRLAR]

	mcr	p15, 0, r5, c6, c3, 0		@ PRBAR
	mcr	p15, 0, r6, c6, c3, 1		@ PRLAR

	cmp	r4, #0
	bgt	1b

	ret	lr
ENDPROC(__secondary_setup_pmsa_v8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_ARM_MPU */
#include "head-common.S"