cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sleep44xx.S (10202B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"

#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

	.arch armv7-a

#if defined(CONFIG_SMP) && defined(CONFIG_PM)

	.arch_extension sec
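
/*
 * DO_SMC: issue a Secure Monitor Call. The DSB before the SMC ensures
 * all outstanding memory accesses complete before entering the secure
 * world; the DSB after it does the same on the return path.
 */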
.macro	DO_SMC
	dsb
	smc	#0
	dsb
.endm

#ifdef CONFIG_ARCH_OMAP4

/*
 * =============================
 * == CPU suspend finisher ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function saves the CPU context and performs the CPU
 * power down sequence. Calling WFI effectively changes the CPU
 * power domain state to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for CPU OFF and DORMANT power states.
 * Post WFI, the CPU transitions to the DORMANT or OFF power state and on
 * wake-up follows a full CPU reset path via ROM code to the CPU restore code.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 * It returns to the caller for CPU INACTIVE and ON power states, or in case
 * the CPU failed to transition to the targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
 * the stack frame and expects the caller to take care of it. Hence the
 * entire stack frame is saved to avoid possible stack corruption.
 */
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0
	beq	do_WFI				@ No lowpower state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	skip_secure_l1_clean
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF			@ Clean secure L1
	stmfd   r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd   r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)		@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	bl	v7_invalidate_l1
	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_set
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r0, [r8, #SCU_OFFSET0]
	ldrne	r0, [r8, #SCU_OFFSET1]
	mov	r1, #0x00
	stmfd   r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd   r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base
	bl	scu_power_mode
skip_scu_gp_set:
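	/*
	 * Take this CPU out of SMP coherency. NSACR bit 18 indicates
	 * whether the non-secure world may write the ACTLR SMP bit;
	 * if so, clear it directly below.
	 */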
	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1
	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * The common cache-l2x0.c functions can't be used here since they
	 * take spinlocks. We are out of coherency here with the data cache
	 * disabled. The spinlock implementation uses exclusive load/store
	 * instructions, which can fail without the data cache enabled.
	 * OMAP4 hardware doesn't support an exclusive monitor which could
	 * overcome the exclusive-access issue, so taking the spinlock
	 * could deadlock the CPU.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
	ands	r5, r5, #0x0f
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
	cmp	r0, #3
	bne	do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0
	ldr	r0, =0xffff
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
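	/*
	 * The write above kicks off a background clean & invalidate of
	 * all 16 ways; the controller clears each way bit as it finishes.
	 */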
wait:
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1
	bne	wait
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
l2x_sync:
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
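	/* Cache sync drains the L2 buffers; bit 0 reads 1 while busy. */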
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync
#endif

do_WFI:
	bl	omap_do_wfi

	/*
	 * The CPU reaches here when it failed to enter OFF/DORMANT or
	 * when no low-power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)			@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Ensure the CPU power state is set to NORMAL in the
	 * SCU power state so that the CPU is back in coherency.
	 * In non-coherent mode the CPU can lock up and lead to
	 * system deadlock.
	 */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_clear
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd   r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd   r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)

/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. The physical address of the function is
 * stored in the SAR RAM while entering OFF or DORMANT mode.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTLR and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init; for CPU1, a secure PPA API is provided. CPU0 must be ON
	 * while executing the NS_SMP API on CPU1, and the PPA version must
	 * be 1.4.0+.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc     p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands    r0, r0, #0x0f
	beq	skip_ns_smp_enable
ppa_actrl_retry:
	mov     r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	adr	r1, ppa_zero_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_zero_params
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	cmp	r0, #0x0			@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL.
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using the L2X0 CTRL
	 * register. r0 contains the value to be programmed.
	 * The L2 cache is already invalidated by ROM code as part
	 * of the MPUSS OFF wakeup path.
	 */
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en			@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1			@ Check for HS device
	bne     set_gp_por
	ldr     r0, =OMAP4_PPA_L2_POR_INDEX
	ldr     r1, =OMAP44XX_SAR_RAM_BASE
	ldr     r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr     r1, ppa_por_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_por_params
	str     r4, [r3, #0x04]
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	b	set_aux_ctrl
set_gp_por:
	ldr     r1, =OMAP44XX_SAR_RAM_BASE
	ldr     r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	ldr     r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif

	b	cpu_resume			@ Jump to generic resume
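	/*
	 * PC-relative offset to ppa_por_params in .data; the resume code
	 * above loads this word and adds the word's own address to form
	 * an absolute pointer, keeping the sequence position-independent.
	 */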
ppa_por_params_offset:
	.long	ppa_por_params - .
ENDPROC(omap4_cpu_resume)
#endif	/* CONFIG_ARCH_OMAP4 */

#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */

ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
	/* Drain interconnect write buffers. */
	bl	omap_interconnect_sync
#endif

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in an idle and low power state. The CPU can speculatively
	 * prefetch instructions, so NOPs are added after WFI. Sixteen
	 * NOPs as per the Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
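	/*
	 * PC-relative offset to ppa_zero_params in .data; as with
	 * ppa_por_params_offset above, the user loads this word and adds
	 * the word's address to form an absolute pointer at runtime.
	 */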
ppa_zero_params_offset:
	.long	ppa_zero_params - .
ENDPROC(omap_do_wfi)

	.data
	.align	2
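/*
 * Parameter blocks handed to the secure-world PPA services:
 * ppa_zero_params is a dummy all-zero argument block, and the second
 * word of ppa_por_params is filled in at runtime with the saved L2X0
 * prefetch control value before the L2 POR call.
 */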
ppa_zero_params:
	.word		0

ppa_por_params:
	.word		1, 0