cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

l2cr_6xx.S (10506B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
	L2CR functions
	Copyright © 1997-1998 by PowerLogix R & D, Inc.

*/
/*
	Thu, Dec. 12, 1998.
	- First public release, contributed by PowerLogix.
	***********
	Sat, Aug. 7, 1999.
	- Terry: Made sure code disabled interrupts before running. (Previously
			it was assumed interrupts were already disabled).
	- Terry: Updated for tentative G4 support.  4MB of memory is now flushed
			instead of 2MB.  (Probably only 3MB is necessary).
	- Terry: Updated for workaround to HID0[DPM] processor bug
			during global invalidates.
	***********
	Thu, July 13, 2000.
	- Terry: Added isync to correct for an errata.

	22 August 2001.
	- DanM: Finally added the 7450 patch I've had for the past
		several months.  The L2CR is similar, but I'm going
		to assume the user of these functions knows what they
		are doing.

	Author:	Terry Greeniaus (tgree@phys.ualberta.ca)
	Please e-mail updates to this file to me, thanks!
*/
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/feature-fixups.h>

/* Usage:

	When setting the L2CR register, you must do a few special
	things.  If you are enabling the cache, you must perform a
	global invalidate.  If you are disabling the cache, you must
	flush the cache contents first.  This routine takes care of
	doing these things.  When first enabling the cache, make sure
	you pass in the L2CR value you want, with the global
	invalidate bit set.  A global invalidate will only be
	performed if the L2I bit is set in applyThis.  When enabling
	the cache, you should also set the L2E bit in applyThis.  If
	you want to modify the L2CR contents after the cache has been
	enabled, the recommended procedure is to first call
	_set_L2CR(0) to disable the cache and then call it again with
	the new values for L2CR.  Examples:

	_set_L2CR(0)		- disables the cache
	_set_L2CR(0xB3A04000)	- enables my G3 upgrade card:
				- L2E set to turn on the cache
				- L2SIZ set to 1MB
				- L2CLK set to 1:1
				- L2RAM set to pipelined synchronous late-write
				- L2I set to perform a global invalidation
				- L2OH set to 0.5 ns
				- L2DF set because this upgrade card
				  requires it

	A similar call should work for your card.  You need to know
	the correct settings for your card and then place them in the
	fields I have outlined above.  Other fields support optional
	features, such as L2DO, which caches only data, or L2TS, which
	causes cache pushes from the L1 cache to go to the L2 cache
	instead of to main memory.

IMPORTANT:
	Starting with the 7450, the bits in this register have moved
	or behave differently.  The Enable, Parity Enable, Size,
	and L2 Invalidate are the only bits that have not moved.
	The size is read-only for these processors with internal L2
	cache, and the invalidate is a control as well as status.
		-- Dan

*/
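/* For illustration only (not part of the original file): a minimal,
 * hypothetical caller, assuming the usual 32-bit PowerPC convention that
 * r3 carries the single argument.  The symbol name example_enable_l2 is
 * ours, the value is the G3 upgrade-card example from the comment above,
 * and the #if 0 guard keeps this sketch from ever being assembled.
 */
#if 0	/* illustrative sketch */
_GLOBAL(example_enable_l2)
	mflr	r10			/* _set_L2CR clobbers the link register */
	lis	r3,0xB3A0		/* upper half of 0xB3A04000 */
	ori	r3,r3,0x4000		/* r3 = 0xB3A04000, the example above */
	bl	_set_L2CR		/* flush/invalidate, then enable */
	mtlr	r10
	blr
#endif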
/*
 * Summary: this procedure ignores the L2I bit in the value passed in,
 * flushes the cache if it was already enabled, always invalidates the
 * cache, then enables the cache if the L2E bit is set in the value
 * passed in.
 *   -- paulus.
 */
_GLOBAL(_set_L2CR)
	/* Make sure this is a 750 or 7400 chip */
BEGIN_FTR_SECTION
	li	r3,-1
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)

	mflr	r9

	/* Stop DST streams */
BEGIN_FTR_SECTION
	PPC_DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15	/* Turn off EE bit */
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Before we perform the global invalidation, we must disable dynamic
	 * power management via HID0[DPM] to work around a processor bug where
	 * DPM can possibly interfere with the state machine in the processor
	 * that invalidates the L2 cache tags.
	 */
	mfspr	r8,SPRN_HID0		/* Save HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync
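/* Illustrative note (not in the original): rlwinm rD,rS,0,MB,ME with a
 * wrap-around mask (MB > ME) is the classic PowerPC bit-clear idiom.
 * Here 0,12,10 keeps bits 12-31 and 0-10, clearing bit 11, which is
 * HID0[DPM] (mask 0x00100000).  The equivalent using a scratch register:
 */
#if 0	/* illustrative sketch */
	lis	r5,HID0_DPM@h		/* r5 = 0x00100000 */
	andc	r4,r8,r5		/* r4 = r8 & ~HID0_DPM */
#endif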

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r4,SPRN_L2CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,11,9		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,1,31		/* Turn off the enable bit */

	/* Check to see if we need to flush */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache. First, read the first 4MB of memory (physical) to
	 * put new data in the cache.  (Actually we only need
	 * the size of the L2 cache plus the size of the L1 cache, but 4MB will
	 * cover everything just to be safe).
	 */

	 /**** It might be a good idea to set L2DO here - to prevent
	       instructions from getting into the cache.  But since we
	       invalidate the next time we enable the cache it doesn't
	       really matter.  Don't do this unless you accommodate all
	       processor variations.  The bit moved on the 7450.
	  ****/

BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure
	 * L2 prefetch engines are idle. As the errata text explains,
	 * we can't be sure they are; we just hope very hard that this
	 * will be enough. At least I noticed Apple doesn't even bother
	 * doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
	rlwinm	r4,r4,0,0,29		/* Clear the L2 prefetch enable bits */
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* TODO: use HW flush assist when available */

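	/* Loop-count check (comment added for clarity, not in the original):
	 * CTR is loaded with 0x0002_0000 iterations, and each iteration
	 * advances by one 32-byte cache line, so each of the two loops
	 * below touches 0x20000 * 32 = 0x0040_0000 bytes, i.e. the 4MB
	 * described above.
	 */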
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L2CR configuration bits (and switch L2 off) */
	/* CPU errata: Make sure the mtspr below is already in the
	 * L1 icache
	 */
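	/* How the branch sequence below achieves this (comment added for
	 * clarity, not in the original): we branch 20f -> 21f, execute
	 * sync/isync to drain the machine, then branch back to the
	 * cache-line-aligned label 22.  By the time 22 is reached, the
	 * mtspr/sync pair has already been fetched into the L1 icache,
	 * so no instruction fetch needs the L2 while it is being
	 * reconfigured.
	 */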
	b	20f
	.balign	L1_CACHE_BYTES
22:
	sync
	mtspr	SPRN_L2CR,r3
	sync
	b	23f
20:
	b	21f
21:	sync
	isync
	b	22b

23:
	/* Perform a global invalidation */
	oris	r3,r3,0x0020		/* Set the L2I bit */
	sync
	mtspr	SPRN_L2CR,r3
	sync
	isync				/* For errata */

BEGIN_FTR_SECTION
	/* On the 7450, we wait for the L2I bit to clear...
	*/
10:	mfspr	r3,SPRN_L2CR
	andis.	r4,r3,0x0020		/* L2I still set? */
	bne	10b
	b	11f
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* Wait for the invalidation to complete */
3:	mfspr	r3,SPRN_L2CR
	rlwinm.	r4,r3,0,31,31		/* Test L2IP (invalidate in progress) */
	bne	3b

11:	rlwinm	r3,r3,0,11,9		/* Turn off the L2I bit */
	sync
	mtspr	SPRN_L2CR,r3
	sync

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,0x8000		/* Set the L2E bit */
	mtspr	SPRN_L2CR,r3
	sync

	/* Enable L2 HW prefetch on 744x/745x */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
4:

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mtspr	SPRN_HID0,r8
	sync

	/* Restore MSR (restores EE and DR bits to original state) */
	mtmsr	r7
	isync

	mtlr	r9
	blr

_GLOBAL(_get_L2CR)
	/* Return the L2CR contents */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L2CR
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	blr


/*
 * Here is a similar routine for dealing with the L3 cache
 * on the 745x family of chips
 */

_GLOBAL(_set_L3CR)
	/* Make sure this is a 745x chip */
BEGIN_FTR_SECTION
	li	r3,-1
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15	/* Turn off EE bit */
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Stop DST streams */
	PPC_DSSALL
	sync

	/* Get the current enable bit of the L3CR into r4 */
	mfspr	r4,SPRN_L3CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,22,20		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,2,31		/* Turn off the enable & PE bits */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	/* Check to see if we need to flush */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache. */

	/* TODO: use HW flush assist */

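	/* Loop-count check (comment added for clarity, not in the original):
	 * CTR = 0x0008_0000 iterations of one 32-byte line each, so the
	 * loop below reads and flushes 0x80000 * 32 bytes = 16MB.
	 */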
	lis	r4,0x0008
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,0,r4
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L3CR configuration bits (and switch L3 off) */
	sync
	mtspr	SPRN_L3CR,r3
	sync

	oris	r3,r3,L3CR_L3RES@h		/* Set reserved bit 5 */
	mtspr	SPRN_L3CR,r3
	sync
	oris	r3,r3,L3CR_L3CLKEN@h		/* Set clken */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Perform a global invalidation */
	ori	r3,r3,0x0400		/* Set the L3I bit */
	sync
	mtspr	SPRN_L3CR,r3
	sync
	isync

	/* Wait for the L3I bit to clear... */
10:	mfspr	r3,SPRN_L3CR
	andi.	r4,r3,0x0400		/* L3I still set? */
	bne	10b

	/* Clear CLKEN */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Restore MSR (restores EE and DR bits to original state) */
4:
	mtmsr	r7
	isync
	blr

_GLOBAL(_get_L3CR)
	/* Return the L3CR contents */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L3CR
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	blr

/* --- End of PowerLogix code --- */


/* flush_disable_L1()	- Flush and disable L1 cache
 *
 * clobbers r0, r3, ctr, cr0
 * Must be called with interrupts disabled and MMU enabled.
 */
_GLOBAL(__flush_disable_L1)
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Load the counter with 0x4000 cache lines (512kB) and
	 * fill the cache with data
	 */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	dcbf	0,r3
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	sync

	/* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,18,15	/* Clear HID0[DCE] and HID0[ICE] */
	mtspr	SPRN_HID0,r3
	sync
	isync
	blr

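/* Illustrative pairing (not part of the original file): a low-power
 * entry/exit path might bracket its sleep sequence with these two
 * helpers.  A hypothetical sketch, guarded so it is never assembled:
 */
#if 0	/* illustrative sketch */
	bl	__flush_disable_L1	/* push dirty L1 data out, turn L1 off */
	/* ... enter and leave the low-power state ... */
	bl	__inval_enable_L1	/* flash-invalidate and re-enable L1 */
#endif
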
/* inval_enable_L1	- Invalidate and enable L1 cache
 *
 * Assumes L1 is already disabled and MSR:EE is off
 *
 * clobbers r3
 */
_GLOBAL(__inval_enable_L1)
	/* Enable and then flash-invalidate the instruction & data caches */
	mfspr	r3,SPRN_HID0
	ori	r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3, HID0_ICFI|HID0_DCI	/* Clear the flash-invalidate bits */
	mtspr	SPRN_HID0,r3
	sync

	blr
_ASM_NOKPROBE_SYMBOL(__inval_enable_L1)
