cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cpu_setup_fsl_booke.S (6966B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low level CPU setup functions.
 * Kumar Gala <galak@kernel.crashing.org>
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * Based on cpu_setup_6xx code by
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 */

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/nohash/mmu-book3e.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>

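/*
 * Enable the L1 instruction cache: return if it is already on; otherwise
 * flash-invalidate it, clear its lock bits, enable cache parity and turn
 * the cache on.
 */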
_GLOBAL(__e500_icache_setup)
	mfspr	r0, SPRN_L1CSR1
	andi.	r3, r0, L1CSR1_ICE
	bnelr				/* Already enabled */
	oris	r0, r0, L1CSR1_CPE@h
	ori	r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
	mtspr	SPRN_L1CSR1, r0		/* Enable I-Cache */
	isync
	blr

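/*
 * Enable the L1 data cache: return if it is already on; otherwise disable
 * it, flash-invalidate it and clear its lock bits, wait for the lock-bit
 * clear to complete, then enable the cache with parity.
 */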
_GLOBAL(__e500_dcache_setup)
	mfspr	r0, SPRN_L1CSR0
	andi.	r3, r0, L1CSR0_DCE
	bnelr				/* Already enabled */
	msync
	isync
	li	r0, 0
	mtspr	SPRN_L1CSR0, r0		/* Disable */
	msync
	isync
	li	r0, (L1CSR0_DCFI | L1CSR0_CLFC)
	mtspr	SPRN_L1CSR0, r0		/* Invalidate */
	isync
1:	mfspr	r0, SPRN_L1CSR0
	andi.	r3, r0, L1CSR0_CLFC
	bne+	1b			/* Wait for lock bits reset */
	oris	r0, r0, L1CSR0_CPE@h
	ori	r0, r0, L1CSR0_DCE
	msync
	isync
	mtspr	SPRN_L1CSR0, r0		/* Enable */
	isync
	blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for PW20_WAIT_IDLE_BIT.
 */
#define PW20_WAIT_IDLE_BIT		50 /* 1ms, TB frequency is 41.66MHZ */
_GLOBAL(setup_pw20_idle)
	mfspr	r3, SPRN_PWRMGTCR0

	/* Set PW20_WAIT bit, enable pw20 state */
	ori	r3, r3, PWRMGTCR0_PW20_WAIT
	li	r11, PW20_WAIT_IDLE_BIT

	/* Set Automatic PW20 Core Idle Count */
	rlwimi	r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT

	mtspr	SPRN_PWRMGTCR0, r3

	blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for AV_WAIT_IDLE_BIT.
 */
#define AV_WAIT_IDLE_BIT		50 /* 1ms, TB frequency is 41.66MHZ */
_GLOBAL(setup_altivec_idle)
	mfspr	r3, SPRN_PWRMGTCR0

	/* Enable Altivec Idle */
	oris	r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
	li	r11, AV_WAIT_IDLE_BIT

	/* Set Automatic AltiVec Idle Count */
	rlwimi	r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT

	mtspr	SPRN_PWRMGTCR0, r3

	blr

#ifdef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e6500)
	mflr	r6
#ifdef CONFIG_PPC64
	bl	setup_altivec_ivors
	/* Touch IVOR42 only if the CPU supports E.HV category */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_lrat_ivor
1:
#endif
	bl	setup_pw20_idle
	bl	setup_altivec_idle
	bl	__setup_cpu_e5500
	mtlr	r6
	blr
#endif /* CONFIG_PPC_E500MC */

#ifdef CONFIG_PPC32
#ifdef CONFIG_E500
#ifndef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
	mflr	r4
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_e500_ivors
#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
	/* Ensure that RFXE is set */
	mfspr	r3,SPRN_HID1
	oris	r3,r3,HID1_RFXE@h
	mtspr	SPRN_HID1,r3
#endif
	mtlr	r4
	blr
#else /* CONFIG_PPC_E500MC */
_GLOBAL(__setup_cpu_e500mc)
_GLOBAL(__setup_cpu_e5500)
	mflr	r5
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_e500mc_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r3, SPRN_MMUCFG
	rlwinm.	r3, r3, 0, MMUCFG_LPIDSIZE
	beq	1f
	bl	__setup_ehv_ivors
	b	2f
1:
	lwz	r3, CPU_SPEC_FEATURES(r4)
	/*
	 * We need this check as cpu_setup is also called for
	 * the secondary cores. So, if we have already cleared
	 * the feature on the primary core, avoid doing it on the
	 * secondary core.
	 */
	andi.	r6, r3, CPU_FTR_EMB_HV
	beq	2f
	rlwinm	r3, r3, 0, ~CPU_FTR_EMB_HV
	stw	r3, CPU_SPEC_FEATURES(r4)
2:
	mtlr	r5
	blr
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_E500 */
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC_BOOK3E_64
_GLOBAL(__restore_cpu_e6500)
	mflr	r5
	bl	setup_altivec_ivors
	/* Touch IVOR42 only if the CPU supports E.HV category */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_lrat_ivor
1:
	bl	setup_pw20_idle
	bl	setup_altivec_idle
	bl	__restore_cpu_e5500
	mtlr	r5
	blr

_GLOBAL(__restore_cpu_e5500)
	mflr	r4
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_base_ivors
	bl	setup_perfmon_ivor
	bl	setup_doorbell_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_ehv_ivors
1:
	mtlr	r4
	blr

_GLOBAL(__setup_cpu_e5500)
	mflr	r5
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_base_ivors
	bl	setup_perfmon_ivor
	bl	setup_doorbell_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_ehv_ivors
	b	2f
1:
	ld	r10,CPU_SPEC_FEATURES(r4)
	LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
	andc	r10,r10,r9
	std	r10,CPU_SPEC_FEATURES(r4)
2:
	mtlr	r5
	blr
#endif

/* flush the L1 data cache; applies to e500v2, e500mc and e5500 */
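/*
 * The flush works by loading enough cache-block-strided addresses, starting
 * at KERNELBASE, to displace every line in the cache (writing back any dirty
 * data), then running dcbf over the same range so the cache is left with no
 * modified lines.
 */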
_GLOBAL(flush_dcache_L1)
	mfmsr	r10
	wrteei	0

	mfspr	r3,SPRN_L1CFG0
	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 *      log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6

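	/*
	 * r7 now holds the total number of loads. For example (hypothetical
	 * geometry), a 32 KiB, 8-way cache with 64-byte blocks has 64 sets,
	 * so r7 = (32 * 13) << (2 - 1) = 832, i.e. 13 loads for each of the
	 * 64 sets.
	 */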
	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
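	/*
	 * DCFA (data cache flush assist) forces a predictable miss-replacement
	 * sequence while the flush loop runs, so a bounded number of loads per
	 * set is enough to displace every line.
	 */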
	isync

	LOAD_REG_IMMEDIATE(r6, KERNELBASE)
	mr	r4, r6
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	mr	r4, r6
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	wrtee	r10

	blr

has_L2_cache:
	/* skip L2 cache on P2040/P2040E as they have no L2 cache */
	mfspr	r3, SPRN_SVR
	/* shift right by 8 bits and clear E bit of SVR */
	rlwinm	r4, r3, 24, ~0x800

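	/*
	 * With the E bit cleared, P2040 and P2040E yield the same value, so a
	 * single compare against SVR_P2040 covers both parts.
	 */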
	lis	r3, SVR_P2040@h
	ori	r3, r3, SVR_P2040@l
	cmpw	r4, r3
	beq	1f

	li	r3, 1
	blr
1:
	li	r3, 0
	blr

/* flush backside L2 cache */
flush_backside_L2_cache:
	mflr	r10
	bl	has_L2_cache
	mtlr	r10
	cmpwi	r3, 0
	beq	2f

	/* Flush the L2 cache */
	mfspr	r3, SPRN_L2CSR0
	ori	r3, r3, L2CSR0_L2FL@l
	msync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync

	/* check if it is complete */
1:	mfspr	r3,SPRN_L2CSR0
	andi.	r3, r3, L2CSR0_L2FL@l
	bne	1b
2:
	blr

_GLOBAL(cpu_down_flush_e500v2)
	mflr r0
	bl	flush_dcache_L1
	mtlr r0
	blr

_GLOBAL(cpu_down_flush_e500mc)
_GLOBAL(cpu_down_flush_e5500)
	mflr r0
	bl	flush_dcache_L1
	bl	flush_backside_L2_cache
	mtlr r0
	blr

/* L1 Data Cache of e6500 contains no modified data, no flush is required */
_GLOBAL(cpu_down_flush_e6500)
	blr