cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

head_44x.S (32819B)


      1/* SPDX-License-Identifier: GPL-2.0-or-later */
      2/*
      3 * Kernel execution entry point code.
      4 *
      5 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
      6 *      Initial PowerPC version.
      7 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
      8 *      Rewritten for PReP
      9 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
     10 *      Low-level exception handlers, MMU support, and rewrite.
     11 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
     12 *      PowerPC 8xx modifications.
     13 *    Copyright (c) 1998-1999 TiVo, Inc.
     14 *      PowerPC 403GCX modifications.
     15 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
     16 *      PowerPC 403GCX/405GP modifications.
     17 *    Copyright 2000 MontaVista Software Inc.
     18 *	PPC405 modifications
     19 *      PowerPC 403GCX/405GP modifications.
     20 * 	Author: MontaVista Software, Inc.
     21 *         	frank_rowand@mvista.com or source@mvista.com
     22 * 	   	debbie_chu@mvista.com
     23 *    Copyright 2002-2005 MontaVista Software, Inc.
     24 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
     25 */
     26
     27#include <linux/init.h>
     28#include <linux/pgtable.h>
     29#include <asm/processor.h>
     30#include <asm/page.h>
     31#include <asm/mmu.h>
     32#include <asm/cputable.h>
     33#include <asm/thread_info.h>
     34#include <asm/ppc_asm.h>
     35#include <asm/asm-offsets.h>
     36#include <asm/ptrace.h>
     37#include <asm/synch.h>
     38#include <asm/export.h>
     39#include <asm/code-patching-asm.h>
     40#include "head_booke.h"
     41
     42
     43/* As with the other PowerPC ports, it is expected that when code
     44 * execution begins here, the following registers contain valid, yet
     45 * optional, information:
     46 *
     47 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
     48 *   r4 - Starting address of the init RAM disk
     49 *   r5 - Ending address of the init RAM disk
     50 *   r6 - Start of kernel command line string (e.g. "mem=128")
     51 *   r7 - End of kernel command line string
     52 *
     53 */
     54	__HEAD
     55_GLOBAL(_stext);
     56_GLOBAL(_start);
     57	/*
     58	 * Reserve a word at a fixed location to store the address
     59	 * of abatron_pteptrs
     60	 */
     61	nop
     62	mr	r31,r3		/* save device tree ptr */
     63	li	r24,0		/* CPU number */
     64
     65#ifdef CONFIG_RELOCATABLE
     66/*
     67 * Relocate ourselves to the current runtime address.
     68 * This is called only by the Boot CPU.
     69 * "relocate" is called with our current runtime virtual
     70 * address.
     71 * r21 will be loaded with the physical runtime address of _stext
     72 */
     73	bcl	20,31,$+4			/* Get our runtime address */
     740:	mflr	r21				/* Make it accessible */
     75	addis	r21,r21,(_stext - 0b)@ha
     76	addi	r21,r21,(_stext - 0b)@l 	/* Get our current runtime base */
     77
     78	/*
     79	 * We have the runtime (virtual) address of our base.
     80	 * We calculate our offset from the start of the 256M page we are in.
     81	 * We could map the 256M page we belong to at PAGE_OFFSET and
     82	 * get going from there.
     83	 */
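       	/*
       	 * e.g., assuming KERNELBASE = 0xc0000000 and a runtime base of
       	 * 0x00400000 in r21: r6 = 0x00400000, r5 = 0x00000000, so
       	 * r3 = 0xc0000000 + 0x00400000 = 0xc0400000, the virtual
       	 * address we ask relocate() to move us to.
       	 */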
     84	lis	r4,KERNELBASE@h
     85	ori	r4,r4,KERNELBASE@l
     86	rlwinm	r6,r21,0,4,31			/* r6 = PHYS_START % 256M */
     87	rlwinm	r5,r4,0,4,31			/* r5 = KERNELBASE % 256M */
     88	subf	r3,r5,r6			/* r3 = r6 - r5 */
     89	add	r3,r4,r3			/* Required Virtual Address */
     90
     91	bl	relocate
     92#endif
     93
     94	bl	init_cpu_state
     95
     96	/*
     97	 * This is where the main kernel code starts.
     98	 */
     99
    100	/* ptr to current */
    101	lis	r2,init_task@h
    102	ori	r2,r2,init_task@l
    103
    104	/* ptr to current thread */
    105	addi	r4,r2,THREAD	/* init task's THREAD */
    106	mtspr	SPRN_SPRG_THREAD,r4
    107
    108	/* stack */
    109	lis	r1,init_thread_union@h
    110	ori	r1,r1,init_thread_union@l
    111	li	r0,0
    112	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
    113
    114	bl	early_init
    115
    116#ifdef CONFIG_RELOCATABLE
    117	/*
    118	 * Relocatable kernel support based on processing of dynamic
    119	 * relocation entries.
    120	 *
    121	 * r25 will contain RPN/ERPN for the start address of memory
    122	 * r21 will contain the current offset of _stext
    123	 */
    124	lis	r3,kernstart_addr@ha
    125	la	r3,kernstart_addr@l(r3)
    126
    127	/*
    128	 * Compute the kernstart_addr.
    129	 * kernstart_addr => (r6,r8)
    130	 * kernstart_addr & ~0xfffffff => (r6,r7)
    131	 */
    132	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
    133	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
    134	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
    135	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */
    136
    137	/* Store kernstart_addr */
    138	stw	r6,0(r3)	/* higher 32bit */
    139	stw	r8,4(r3)	/* lower 32bit  */
    140
    141	/*
    142	 * Compute the virt_phys_offset :
    143	 * virt_phys_offset = stext.run - kernstart_addr
    144	 *
    145	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
    146	 * When we relocate, we have :
    147	 *
    148	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
    149	 *
    150	 * hence:
    151	 *  virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
    152	 *
    153	 */
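       	/*
       	 * Worked example with assumed values: KERNELBASE = 0xc0000000 and
       	 * a kernel loaded at physical 0x10000000 (ERPN = 0) give
       	 * virt_phys_offset = 0xc0000000 - 0x10000000 = 0xb0000000,
       	 * i.e. (r4,r5) = (0x0, 0xb0000000) after the 64-bit subtraction
       	 * below.
       	 */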
    154
    155	/* KERNELBASE&~0xfffffff => (r4,r5) */
    156	li	r4, 0		/* higher 32bit */
    157	lis	r5,KERNELBASE@h
    158	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32bit */
    159
    160	/*
    161	 * 64bit subtraction.
    162	 */
    163	subfc	r5,r7,r5
    164	subfe	r4,r6,r4
    165
    166	/* Store virt_phys_offset */
    167	lis	r3,virt_phys_offset@ha
    168	la	r3,virt_phys_offset@l(r3)
    169
    170	stw	r4,0(r3)
    171	stw	r5,4(r3)
    172
    173#elif defined(CONFIG_DYNAMIC_MEMSTART)
    174	/*
    175	 * Mapping based, page aligned dynamic kernel loading.
    176	 *
    177	 * r25 will contain RPN/ERPN for the start address of memory
    178	 *
    179	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
    180	 * start of physical memory to get kernstart_addr.
    181	 */
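       	/*
       	 * For instance, if KERNELBASE were 0xc0400000 with PAGE_OFFSET at
       	 * 0xc0000000, the image sits 4 MB into the memory block and
       	 * kernstart_addr = start of physical memory + 0x00400000.
       	 */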
    182	lis	r3,kernstart_addr@ha
    183	la	r3,kernstart_addr@l(r3)
    184
    185	lis	r4,KERNELBASE@h
    186	ori	r4,r4,KERNELBASE@l
    187	lis	r5,PAGE_OFFSET@h
    188	ori	r5,r5,PAGE_OFFSET@l
    189	subf	r4,r5,r4
    190
    191	rlwinm	r6,r25,0,28,31	/* ERPN */
    192	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
    193	add	r7,r7,r4
    194
    195	stw	r6,0(r3)
    196	stw	r7,4(r3)
    197#endif
    198
    199/*
    200 * Decide what sort of machine this is and initialize the MMU.
    201 */
    202#ifdef CONFIG_KASAN
    203	bl	kasan_early_init
    204#endif
    205	li	r3,0
    206	mr	r4,r31
    207	bl	machine_init
    208	bl	MMU_init
    209
    210	/* Setup PTE pointers for the Abatron bdiGDB */
    211	lis	r6, swapper_pg_dir@h
    212	ori	r6, r6, swapper_pg_dir@l
    213	lis	r5, abatron_pteptrs@h
    214	ori	r5, r5, abatron_pteptrs@l
    215	lis	r4, KERNELBASE@h
    216	ori	r4, r4, KERNELBASE@l
    217	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
    218	stw	r6, 0(r5)
    219
    220	/* Clear the Machine Check Syndrome Register */
    221	li	r0,0
    222	mtspr	SPRN_MCSR,r0
    223
    224	/* Let's move on */
    225	lis	r4,start_kernel@h
    226	ori	r4,r4,start_kernel@l
    227	lis	r3,MSR_KERNEL@h
    228	ori	r3,r3,MSR_KERNEL@l
    229	mtspr	SPRN_SRR0,r4
    230	mtspr	SPRN_SRR1,r3
    231	rfi			/* change context and jump to start_kernel */
    232
    233/*
    234 * Interrupt vector entry code
    235 *
    236 * The Book E MMUs are always on so we don't need to handle
    237 * interrupts in real mode as with previous PPC processors. In
    238 * this case we handle interrupts in the kernel virtual address
    239 * space.
    240 *
    241 * Interrupt vectors are dynamically placed relative to the
    242 * interrupt prefix as determined by the address of interrupt_base.
    243 * The interrupt vectors offsets are programmed using the labels
    244 * for each interrupt vector entry.
    245 *
    246 * Interrupt vectors must be aligned on a 16 byte boundary.
    247 * We align on a 32 byte cache line boundary for good measure.
    248 */
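       /*
        * Concretely, init_cpu_state below loads each IVORn with the low
        * 16 bits of its handler label (SET_IVOR) and head_start_common
        * points IVPR at interrupt_base, so a vector resolves to
        * (IVPR & 0xffff0000) | (IVORn & 0xfff0).
        */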
    249
    250interrupt_base:
    251	/* Critical Input Interrupt */
    252	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
    253
    254	/* Machine Check Interrupt */
    255	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
    256			   machine_check_exception)
    257	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
    258
    259	/* Data Storage Interrupt */
    260	DATA_STORAGE_EXCEPTION
    261
    262	/* Instruction Storage Interrupt */
    263	INSTRUCTION_STORAGE_EXCEPTION
    264
    265	/* External Input Interrupt */
    266	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ)
    267
    268	/* Alignment Interrupt */
    269	ALIGNMENT_EXCEPTION
    270
    271	/* Program Interrupt */
    272	PROGRAM_EXCEPTION
    273
    274	/* Floating Point Unavailable Interrupt */
    275#ifdef CONFIG_PPC_FPU
    276	FP_UNAVAILABLE_EXCEPTION
    277#else
    278	EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
    279		  FloatingPointUnavailable, unknown_exception)
    280#endif
    281	/* System Call Interrupt */
    282	START_EXCEPTION(SystemCall)
    283	SYSCALL_ENTRY   0xc00 BOOKE_INTERRUPT_SYSCALL
    284
    285	/* Auxiliary Processor Unavailable Interrupt */
    286	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
    287		  AuxillaryProcessorUnavailable, unknown_exception)
    288
    289	/* Decrementer Interrupt */
    290	DECREMENTER_EXCEPTION
    291
    292	/* Fixed Interval Timer Interrupt */
    293	/* TODO: Add FIT support */
    294	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception)
    295
    296	/* Watchdog Timer Interrupt */
    297	/* TODO: Add watchdog support */
    298#ifdef CONFIG_BOOKE_WDT
    299	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
    300#else
    301	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
    302#endif
    303
    304	/* Data TLB Error Interrupt */
    305	START_EXCEPTION(DataTLBError44x)
    306	mtspr	SPRN_SPRG_WSCRATCH0, r10		/* Save some working registers */
    307	mtspr	SPRN_SPRG_WSCRATCH1, r11
    308	mtspr	SPRN_SPRG_WSCRATCH2, r12
    309	mtspr	SPRN_SPRG_WSCRATCH3, r13
    310	mfcr	r11
    311	mtspr	SPRN_SPRG_WSCRATCH4, r11
    312	mfspr	r10, SPRN_DEAR		/* Get faulting address */
    313
    314	/* If we are faulting a kernel address, we have to use the
    315	 * kernel page tables.
    316	 */
    317	lis	r11, PAGE_OFFSET@h
    318	cmplw	r10, r11
    319	blt+	3f
    320	lis	r11, swapper_pg_dir@h
    321	ori	r11, r11, swapper_pg_dir@l
    322
    323	mfspr	r12,SPRN_MMUCR
    324	rlwinm	r12,r12,0,0,23		/* Clear TID */
    325
    326	b	4f
    327
    328	/* Get the PGD for the current thread */
    3293:
    330	mfspr	r11,SPRN_SPRG_THREAD
    331	lwz	r11,PGDIR(r11)
    332
    333	/* Load PID into MMUCR TID */
    334	mfspr	r12,SPRN_MMUCR
    335	mfspr   r13,SPRN_PID		/* Get PID */
    336	rlwimi	r12,r13,0,24,31		/* Set TID */
    337#ifdef CONFIG_PPC_KUAP
    338	cmpwi	r13,0
    339	beq	2f			/* KUAP Fault */
    340#endif
    341
    3424:
    343	mtspr	SPRN_MMUCR,r12
    344
    345	/* Mask of required permission bits. Note that while we
    346	 * do copy ESR:ST to _PAGE_RW position as trying to write
    347	 * to an RO page is pretty common, we don't do it with
    348	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
    349	 * event so I'd rather take the overhead when it happens
    350	 * rather than adding an instruction here. We should measure
    351	 * whether the whole thing is worth it in the first place
    352	 * as we could avoid loading SPRN_ESR completely in the first
    353	 * place...
    354	 *
    355	 * TODO: Is it worth doing that mfspr & rlwimi in the first
    356	 *       place or can we save a couple of instructions here ?
    357	 */
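       	/*
       	 * The rlwimi below does that copy: ESR[ST] (0x00800000) rotated
       	 * left by 10 lands in the _PAGE_RW (0x00000002) position, so a
       	 * store fault additionally requires write permission in the PTE.
       	 */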
    358	mfspr	r12,SPRN_ESR
    359	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
    360	rlwimi	r13,r12,10,30,30
    361
    362	/* Load the PTE */
    363	/* Compute pgdir/pmd offset */
    364	rlwinm  r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
    365	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
    366	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
    367	beq	2f			/* Bail if no table */
    368
    369	/* Compute pte address */
    370	rlwimi  r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
    371	lwz	r11, 0(r12)		/* Get high word of pte entry */
    372	lwz	r12, 4(r12)		/* Get low word of pte entry */
    373
    374	lis	r10,tlb_44x_index@ha
    375
    376	andc.	r13,r13,r12		/* Check permission */
    377
    378	/* Load the next available TLB index */
    379	lwz	r13,tlb_44x_index@l(r10)
    380
    381	bne	2f			/* Bail if permission mismatch */
    382
    383	/* Increment, rollover, and store TLB index */
    384	addi	r13,r13,1
    385
    386	patch_site 0f, patch__tlb_44x_hwater_D
    387	/* Compare with watermark (instruction gets patched) */
    3880:	cmpwi	0,r13,1			/* reserve entries */
    389	ble	5f
    390	li	r13,0
    3915:
    392	/* Store the next available TLB index */
    393	stw	r13,tlb_44x_index@l(r10)
    394
    395	/* Re-load the faulting address */
    396	mfspr	r10,SPRN_DEAR
    397
    398	 /* Jump to common tlb load */
    399	b	finish_tlb_load_44x
    400
    4012:
    402	/* The bailout.  Restore registers to pre-exception conditions
    403	 * and call the heavyweights to help us out.
    404	 */
    405	mfspr	r11, SPRN_SPRG_RSCRATCH4
    406	mtcr	r11
    407	mfspr	r13, SPRN_SPRG_RSCRATCH3
    408	mfspr	r12, SPRN_SPRG_RSCRATCH2
    409	mfspr	r11, SPRN_SPRG_RSCRATCH1
    410	mfspr	r10, SPRN_SPRG_RSCRATCH0
    411	b	DataStorage
    412
    413	/* Instruction TLB Error Interrupt */
    414	/*
    415	 * Nearly the same as above, except we get our
    416	 * information from different registers and bailout
    417	 * to a different point.
    418	 */
    419	START_EXCEPTION(InstructionTLBError44x)
    420	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
    421	mtspr	SPRN_SPRG_WSCRATCH1, r11
    422	mtspr	SPRN_SPRG_WSCRATCH2, r12
    423	mtspr	SPRN_SPRG_WSCRATCH3, r13
    424	mfcr	r11
    425	mtspr	SPRN_SPRG_WSCRATCH4, r11
    426	mfspr	r10, SPRN_SRR0		/* Get faulting address */
    427
    428	/* If we are faulting a kernel address, we have to use the
    429	 * kernel page tables.
    430	 */
    431	lis	r11, PAGE_OFFSET@h
    432	cmplw	r10, r11
    433	blt+	3f
    434	lis	r11, swapper_pg_dir@h
    435	ori	r11, r11, swapper_pg_dir@l
    436
    437	mfspr	r12,SPRN_MMUCR
    438	rlwinm	r12,r12,0,0,23		/* Clear TID */
    439
    440	b	4f
    441
    442	/* Get the PGD for the current thread */
    4433:
    444	mfspr	r11,SPRN_SPRG_THREAD
    445	lwz	r11,PGDIR(r11)
    446
    447	/* Load PID into MMUCR TID */
    448	mfspr	r12,SPRN_MMUCR
    449	mfspr   r13,SPRN_PID		/* Get PID */
    450	rlwimi	r12,r13,0,24,31		/* Set TID */
    451#ifdef CONFIG_PPC_KUAP
    452	cmpwi	r13,0
    453	beq	2f			/* KUAP Fault */
    454#endif
    455
    4564:
    457	mtspr	SPRN_MMUCR,r12
    458
    459	/* Make up the required permissions */
    460	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
    461
    462	/* Compute pgdir/pmd offset */
    463	rlwinm 	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
    464	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
    465	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
    466	beq	2f			/* Bail if no table */
    467
    468	/* Compute pte address */
    469	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
    470	lwz	r11, 0(r12)		/* Get high word of pte entry */
    471	lwz	r12, 4(r12)		/* Get low word of pte entry */
    472
    473	lis	r10,tlb_44x_index@ha
    474
    475	andc.	r13,r13,r12		/* Check permission */
    476
    477	/* Load the next available TLB index */
    478	lwz	r13,tlb_44x_index@l(r10)
    479
    480	bne	2f			/* Bail if permission mismatch */
    481
    482	/* Increment, rollover, and store TLB index */
    483	addi	r13,r13,1
    484
    485	patch_site 0f, patch__tlb_44x_hwater_I
    486	/* Compare with watermark (instruction gets patched) */
    4870:	cmpwi	0,r13,1			/* reserve entries */
    488	ble	5f
    489	li	r13,0
    4905:
    491	/* Store the next available TLB index */
    492	stw	r13,tlb_44x_index@l(r10)
    493
    494	/* Re-load the faulting address */
    495	mfspr	r10,SPRN_SRR0
    496
    497	/* Jump to common TLB load point */
    498	b	finish_tlb_load_44x
    499
    5002:
    501	/* The bailout.  Restore registers to pre-exception conditions
    502	 * and call the heavyweights to help us out.
    503	 */
    504	mfspr	r11, SPRN_SPRG_RSCRATCH4
    505	mtcr	r11
    506	mfspr	r13, SPRN_SPRG_RSCRATCH3
    507	mfspr	r12, SPRN_SPRG_RSCRATCH2
    508	mfspr	r11, SPRN_SPRG_RSCRATCH1
    509	mfspr	r10, SPRN_SPRG_RSCRATCH0
    510	b	InstructionStorage
    511
    512/*
    513 * Both the instruction and data TLB miss get to this
    514 * point to load the TLB.
    515 * 	r10 - EA of fault
    516 * 	r11 - PTE high word value
    517 *	r12 - PTE low word value
    518 *	r13 - TLB index
    519 *	MMUCR - loaded with proper value when we get here
    520 *	Upon exit, we reload everything and RFI.
    521 */
    522finish_tlb_load_44x:
    523	/* Combine RPN & ERPN and write WS 0 */
    524	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
    525	tlbwe	r11,r13,PPC44x_TLB_XLAT
    526
    527	/*
    528	 * Create WS1. This is the faulting address (EPN),
    529	 * page size, and valid flag.
    530	 */
    531	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
    532	/* Insert valid and page size */
    533	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
    534	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */
    535
    536	/* And WS 2 */
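       	/*
       	 * Sketch of the mask: 0xf85 keeps the PTE bits that line up with
       	 * TLB word 2, i.e. the W, I, M, G and E storage attributes (0xf80)
       	 * plus SX and SR (0x005).  DIRTY is then rotated into the SW mask
       	 * position, so SW is only granted for pages that are both writable
       	 * and dirty.
       	 */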
    537	li	r10,0xf85			/* Mask to apply from PTE */
    538	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
    539	and	r11,r12,r10			/* Mask PTE bits to keep */
    540	andi.	r10,r12,_PAGE_USER		/* User page ? */
    541	beq	1f				/* nope, leave U bits empty */
    542	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
    543	rlwinm	r11,r11,0,~PPC44x_TLB_SX	/* Clear SX if User page */
    5441:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */
    545
    546	/* Done...restore registers and get out of here.
    547	*/
    548	mfspr	r11, SPRN_SPRG_RSCRATCH4
    549	mtcr	r11
    550	mfspr	r13, SPRN_SPRG_RSCRATCH3
    551	mfspr	r12, SPRN_SPRG_RSCRATCH2
    552	mfspr	r11, SPRN_SPRG_RSCRATCH1
    553	mfspr	r10, SPRN_SPRG_RSCRATCH0
    554	rfi					/* Force context change */
    555
    556/* TLB error interrupts for 476
    557 */
    558#ifdef CONFIG_PPC_47x
    559	START_EXCEPTION(DataTLBError47x)
    560	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
    561	mtspr	SPRN_SPRG_WSCRATCH1,r11
    562	mtspr	SPRN_SPRG_WSCRATCH2,r12
    563	mtspr	SPRN_SPRG_WSCRATCH3,r13
    564	mfcr	r11
    565	mtspr	SPRN_SPRG_WSCRATCH4,r11
    566	mfspr	r10,SPRN_DEAR		/* Get faulting address */
    567
    568	/* If we are faulting a kernel address, we have to use the
    569	 * kernel page tables.
    570	 */
    571	lis	r11,PAGE_OFFSET@h
    572	cmplw	cr0,r10,r11
    573	blt+	3f
    574	lis	r11,swapper_pg_dir@h
    575	ori	r11,r11, swapper_pg_dir@l
    576	li	r12,0			/* MMUCR = 0 */
    577	b	4f
    578
    579	/* Get the PGD for the current thread and setup MMUCR */
    5803:	mfspr	r11,SPRN_SPRG3
    581	lwz	r11,PGDIR(r11)
    582	mfspr   r12,SPRN_PID		/* Get PID */
    583#ifdef CONFIG_PPC_KUAP
    584	cmpwi	r12,0
    585	beq	2f			/* KUAP Fault */
    586#endif
    5874:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
    588
    589	/* Mask of required permission bits. Note that while we
    590	 * do copy ESR:ST to _PAGE_RW position as trying to write
    591	 * to an RO page is pretty common, we don't do it with
    592	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
    593	 * event so I'd rather take the overhead when it happens
    594	 * rather than adding an instruction here. We should measure
    595	 * whether the whole thing is worth it in the first place
    596	 * as we could avoid loading SPRN_ESR completely in the first
    597	 * place...
    598	 *
    599	 * TODO: Is it worth doing that mfspr & rlwimi in the first
    600	 *       place or can we save a couple of instructions here ?
    601	 */
    602	mfspr	r12,SPRN_ESR
    603	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
    604	rlwimi	r13,r12,10,30,30
    605
    606	/* Load the PTE */
    607	/* Compute pgdir/pmd offset */
    608	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
    609	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
    610
    611	/* Word 0 is EPN,V,TS,DSIZ */
    612	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
    613	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
    614	li	r12,0
    615	tlbwe	r10,r12,0
    616
    617	/* XXX can we do better ? Need to make sure the tlbwe above has
    618	 * latched the V bit in MMUCR0 before the PTE is loaded further down */
    619#ifdef CONFIG_SMP
    620	isync
    621#endif
    622
    623	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
    624	/* Compute pte address */
    625	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
    626	beq	2f			/* Bail if no table */
    627	lwz	r11,0(r12)		/* Get high word of pte entry */
    628
    629	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
    630	 * bottom of r12 to create a data dependency... We can also use r10
    631	 * as destination nowadays
    632	 */
    633#ifdef CONFIG_SMP
    634	lwsync
    635#endif
    636	lwz	r12,4(r12)		/* Get low word of pte entry */
    637
    638	andc.	r13,r13,r12		/* Check permission */
    639
    640	 /* Jump to common tlb load */
    641	beq	finish_tlb_load_47x
    642
    6432:	/* The bailout.  Restore registers to pre-exception conditions
    644	 * and call the heavyweights to help us out.
    645	 */
    646	mfspr	r11,SPRN_SPRG_RSCRATCH4
    647	mtcr	r11
    648	mfspr	r13,SPRN_SPRG_RSCRATCH3
    649	mfspr	r12,SPRN_SPRG_RSCRATCH2
    650	mfspr	r11,SPRN_SPRG_RSCRATCH1
    651	mfspr	r10,SPRN_SPRG_RSCRATCH0
    652	b	DataStorage
    653
    654	/* Instruction TLB Error Interrupt */
    655	/*
    656	 * Nearly the same as above, except we get our
    657	 * information from different registers and bailout
    658	 * to a different point.
    659	 */
    660	START_EXCEPTION(InstructionTLBError47x)
    661	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
    662	mtspr	SPRN_SPRG_WSCRATCH1,r11
    663	mtspr	SPRN_SPRG_WSCRATCH2,r12
    664	mtspr	SPRN_SPRG_WSCRATCH3,r13
    665	mfcr	r11
    666	mtspr	SPRN_SPRG_WSCRATCH4,r11
    667	mfspr	r10,SPRN_SRR0		/* Get faulting address */
    668
    669	/* If we are faulting a kernel address, we have to use the
    670	 * kernel page tables.
    671	 */
    672	lis	r11,PAGE_OFFSET@h
    673	cmplw	cr0,r10,r11
    674	blt+	3f
    675	lis	r11,swapper_pg_dir@h
    676	ori	r11,r11, swapper_pg_dir@l
    677	li	r12,0			/* MMUCR = 0 */
    678	b	4f
    679
    680	/* Get the PGD for the current thread and setup MMUCR */
    6813:	mfspr	r11,SPRN_SPRG_THREAD
    682	lwz	r11,PGDIR(r11)
    683	mfspr   r12,SPRN_PID		/* Get PID */
    684#ifdef CONFIG_PPC_KUAP
    685	cmpwi	r12,0
    686	beq	2f			/* KUAP Fault */
    687#endif
    6884:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
    689
    690	/* Make up the required permissions */
    691	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
    692
    693	/* Load PTE */
    694	/* Compute pgdir/pmd offset */
    695	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
    696	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
    697
    698	/* Word 0 is EPN,V,TS,DSIZ */
    699	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
    700	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
    701	li	r12,0
    702	tlbwe	r10,r12,0
    703
    704	/* XXX can we do better ? Need to make sure the tlbwe above has
    705	 * latched the V bit in MMUCR0 before the PTE is loaded further down */
    706#ifdef CONFIG_SMP
    707	isync
    708#endif
    709
    710	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
    711	/* Compute pte address */
    712	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
    713	beq	2f			/* Bail if no table */
    714
    715	lwz	r11,0(r12)		/* Get high word of pte entry */
    716	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
    717	 * bottom of r12 to create a data dependency... We can also use r10
    718	 * as destination nowadays
    719	 */
    720#ifdef CONFIG_SMP
    721	lwsync
    722#endif
    723	lwz	r12,4(r12)		/* Get low word of pte entry */
    724
    725	andc.	r13,r13,r12		/* Check permission */
    726
    727	/* Jump to common TLB load point */
    728	beq	finish_tlb_load_47x
    729
    7302:	/* The bailout.  Restore registers to pre-exception conditions
    731	 * and call the heavyweights to help us out.
    732	 */
    733	mfspr	r11, SPRN_SPRG_RSCRATCH4
    734	mtcr	r11
    735	mfspr	r13, SPRN_SPRG_RSCRATCH3
    736	mfspr	r12, SPRN_SPRG_RSCRATCH2
    737	mfspr	r11, SPRN_SPRG_RSCRATCH1
    738	mfspr	r10, SPRN_SPRG_RSCRATCH0
    739	b	InstructionStorage
    740
    741/*
    742 * Both the instruction and data TLB miss get to this
    743 * point to load the TLB.
    744 * 	r10 - free to use
    745 * 	r11 - PTE high word value
    746 *	r12 - PTE low word value
    747 *      r13 - free to use
    748 *	MMUCR - loaded with proper value when we get here
    749 *	Upon exit, we reload everything and RFI.
    750 */
    751finish_tlb_load_47x:
    752	/* Combine RPN & ERPN an write WS 1 */
    753	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
    754	tlbwe	r11,r13,1
    755
    756	/* And make up word 2 */
    757	li	r10,0xf85			/* Mask to apply from PTE */
    758	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
    759	and	r11,r12,r10			/* Mask PTE bits to keep */
    760	andi.	r10,r12,_PAGE_USER		/* User page ? */
    761	beq	1f				/* nope, leave U bits empty */
    762	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
    763	rlwinm	r11,r11,0,~PPC47x_TLB2_SX	/* Clear SX if User page */
    7641:	tlbwe	r11,r13,2
    765
    766	/* Done...restore registers and get out of here.
    767	*/
    768	mfspr	r11, SPRN_SPRG_RSCRATCH4
    769	mtcr	r11
    770	mfspr	r13, SPRN_SPRG_RSCRATCH3
    771	mfspr	r12, SPRN_SPRG_RSCRATCH2
    772	mfspr	r11, SPRN_SPRG_RSCRATCH1
    773	mfspr	r10, SPRN_SPRG_RSCRATCH0
    774	rfi
    775
    776#endif /* CONFIG_PPC_47x */
    777
    778	/* Debug Interrupt */
    779	/*
    780	 * This statement needs to exist at the end of the IVPR
    781	 * definition just in case you end up taking a debug
    782	 * exception within another exception.
    783	 */
    784	DEBUG_CRIT_EXCEPTION
    785
    786interrupt_end:
    787
    788/*
    789 * Global functions
    790 */
    791
    792/*
    793 * Adjust the machine check IVOR on 440A cores
    794 */
    795_GLOBAL(__fixup_440A_mcheck)
    796	li	r3,MachineCheckA@l
    797	mtspr	SPRN_IVOR1,r3
    798	sync
    799	blr
    800
    801/*
    802 * Init CPU state. This is called at boot time or for secondary CPUs
    803 * to setup initial TLB entries, setup IVORs, etc...
    804 *
    805 */
    806_GLOBAL(init_cpu_state)
    807	mflr	r22
    808#ifdef CONFIG_PPC_47x
    809	/* We use the PVR to differentiate 44x cores from 476 */
    810	mfspr	r3,SPRN_PVR
    811	srwi	r3,r3,16
    812	cmplwi	cr0,r3,PVR_476FPE@h
    813	beq	head_start_47x
    814	cmplwi	cr0,r3,PVR_476@h
    815	beq	head_start_47x
    816	cmplwi	cr0,r3,PVR_476_ISS@h
    817	beq	head_start_47x
    818#endif /* CONFIG_PPC_47x */
    819
    820/*
    821 * In case the firmware didn't do it, we apply some workarounds
    822 * that are good for all 440 core variants here
    823 */
    824	mfspr	r3,SPRN_CCR0
    825	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
    826	isync
    827	mtspr	SPRN_CCR0,r3
    828	isync
    829	sync
    830
    831/*
    832 * Set up the initial MMU state for 44x
    833 *
    834 * We are still executing code at the virtual address
    835 * mappings set by the firmware for the base of RAM.
    836 *
    837 * We first invalidate all TLB entries but the one
    838 * we are running from.  We then load the KERNELBASE
    839 * mappings so we can begin to use kernel addresses
    840 * natively and so the interrupt vector locations are
    841 * permanently pinned (necessary since Book E
    842 * implementations always have translation enabled).
    843 *
    844 * TODO: Use the known TLB entry we are running from to
    845 *	 determine which physical region we are located
    846 *	 in.  This can be used to determine where in RAM
    847 *	 (on a shared CPU system) or PCI memory space
    848 *	 (on a DRAMless system) we are located.
    849 *       For now, we assume a perfect world which means
    850 *	 we are located at the base of DRAM (physical 0).
    851 */
    852
    853/*
    854 * Search TLB for entry that we are currently using.
    855 * Invalidate all entries but the one we are using.
    856 */
    857	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
    858	mfspr	r3,SPRN_PID			/* Get PID */
    859	mfmsr	r4				/* Get MSR */
    860	andi.	r4,r4,MSR_IS@l			/* TS=1? */
    861	beq	wmmucr				/* If not, leave STS=0 */
    862	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
    863wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
    864	sync
    865
    866	bcl	20,31,$+4			/* Find our address */
    867invstr:	mflr	r5				/* Make it accessible */
    868	tlbsx	r23,0,r5			/* Find entry we are in */
    869	li	r4,0				/* Start at TLB entry 0 */
    870	li	r3,0				/* Set PAGEID inval value */
    8711:	cmpw	r23,r4				/* Is this our entry? */
    872	beq	skpinv				/* If so, skip the inval */
    873	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
    874skpinv:	addi	r4,r4,1				/* Increment */
    875	cmpwi	r4,64				/* Are we done? */
    876	bne	1b				/* If not, repeat */
    877	isync					/* If so, context change */
    878
    879/*
    880 * Configure and load pinned entry into TLB slot 63.
    881 */
    882#ifdef CONFIG_NONSTATIC_KERNEL
    883	/*
    884	 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
    885	 * entries of the initial mapping set by the boot loader.
    886	 * The XLAT entry is stored in r25
    887	 */
    888
    889	/* Read the XLAT entry for our current mapping */
    890	tlbre	r25,r23,PPC44x_TLB_XLAT
    891
    892	lis	r3,KERNELBASE@h
    893	ori	r3,r3,KERNELBASE@l
    894
    895	/* Use our current RPN entry */
    896	mr	r4,r25
    897#else
    898
    899	lis	r3,PAGE_OFFSET@h
    900	ori	r3,r3,PAGE_OFFSET@l
    901
    902	/* Kernel is at the base of RAM */
    903	li r4, 0			/* Load the kernel physical address */
    904#endif
    905
    906	/* Load the kernel PID = 0 */
    907	li	r0,0
    908	mtspr	SPRN_PID,r0
    909	sync
    910
    911	/* Initialize MMUCR */
    912	li	r5,0
    913	mtspr	SPRN_MMUCR,r5
    914	sync
    915
    916	/* pageid fields */
    917	clrrwi	r3,r3,10		/* Mask off the effective page number */
    918	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
    919
    920	/* xlat fields */
    921	clrrwi	r4,r4,10		/* Mask off the real page number */
    922					/* ERPN is 0 for first 4GB page */
    923
    924	/* attrib fields */
    925	/* Added guarded bit to protect against speculative loads/stores */
    926	li	r5,0
    927	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
    928
    929        li      r0,63                    /* TLB slot 63 */
    930
    931	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
    932	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
    933	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
    934
    935	/* Force context change */
    936	mfmsr	r0
    937	mtspr	SPRN_SRR1, r0
    938	lis	r0,3f@h
    939	ori	r0,r0,3f@l
    940	mtspr	SPRN_SRR0,r0
    941	sync
    942	rfi
    943
    944	/* If necessary, invalidate original entry we used */
    9453:	cmpwi	r23,63
    946	beq	4f
    947	li	r6,0
    948	tlbwe   r6,r23,PPC44x_TLB_PAGEID
    949	isync
    950
    9514:
    952#ifdef CONFIG_PPC_EARLY_DEBUG_44x
    953	/* Add UART mapping for early debug. */
    954
    955	/* pageid fields */
    956	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
    957	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
    958
    959	/* xlat fields */
    960	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
    961	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
    962
    963	/* attrib fields */
    964	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
    965        li      r0,62                    /* TLB slot 62 */
    966
    967	tlbwe	r3,r0,PPC44x_TLB_PAGEID
    968	tlbwe	r4,r0,PPC44x_TLB_XLAT
    969	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
    970
    971	/* Force context change */
    972	isync
    973#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
    974
    975	/* Establish the interrupt vector offsets */
    976	SET_IVOR(0,  CriticalInput);
    977	SET_IVOR(1,  MachineCheck);
    978	SET_IVOR(2,  DataStorage);
    979	SET_IVOR(3,  InstructionStorage);
    980	SET_IVOR(4,  ExternalInput);
    981	SET_IVOR(5,  Alignment);
    982	SET_IVOR(6,  Program);
    983	SET_IVOR(7,  FloatingPointUnavailable);
    984	SET_IVOR(8,  SystemCall);
    985	SET_IVOR(9,  AuxillaryProcessorUnavailable);
    986	SET_IVOR(10, Decrementer);
    987	SET_IVOR(11, FixedIntervalTimer);
    988	SET_IVOR(12, WatchdogTimer);
    989	SET_IVOR(13, DataTLBError44x);
    990	SET_IVOR(14, InstructionTLBError44x);
    991	SET_IVOR(15, DebugCrit);
    992
    993	b	head_start_common
    994
    995
    996#ifdef CONFIG_PPC_47x
    997
    998#ifdef CONFIG_SMP
    999
   1000/* Entry point for secondary 47x processors */
   1001_GLOBAL(start_secondary_47x)
   1002        mr      r24,r3          /* CPU number */
   1003
   1004	bl	init_cpu_state
   1005
   1006	/* Now we need to bolt the rest of kernel memory which
   1007	 * is done in C code. We must be careful because our task
   1008	 * struct or our stack can (and will probably) be out
   1009	 * of reach of the initial 256M TLB entry, so we use a
   1010	 * small temporary stack in .bss for that. This works
   1011	 * because only one CPU at a time can be in this code
   1012	 */
   1013	lis	r1,temp_boot_stack@h
   1014	ori	r1,r1,temp_boot_stack@l
   1015	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
   1016	li	r0,0
   1017	stw	r0,0(r1)
   1018	bl	mmu_init_secondary
   1019
   1020	/* Now we can get our task struct and real stack pointer */
   1021
   1022	/* Get current's stack and current */
   1023	lis	r2,secondary_current@ha
   1024	lwz	r2,secondary_current@l(r2)
   1025	lwz	r1,TASK_STACK(r2)
   1026
   1027	/* Current stack pointer */
   1028	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
   1029	li	r0,0
   1030	stw	r0,0(r1)
   1031
   1032	/* Kernel stack for exception entry in SPRG3 */
   1033	addi	r4,r2,THREAD	/* init task's THREAD */
   1034	mtspr	SPRN_SPRG3,r4
   1035
   1036	b	start_secondary
   1037
   1038#endif /* CONFIG_SMP */
   1039
   1040/*
   1041 * Set up the initial MMU state for 47x
   1042 *
   1043 * We are still executing code at the virtual address
   1044 * mappings set by the firmware for the base of RAM.
   1045 */
   1046
   1047head_start_47x:
   1048	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
   1049	mfspr	r3,SPRN_PID			/* Get PID */
   1050	mfmsr	r4				/* Get MSR */
   1051	andi.	r4,r4,MSR_IS@l			/* TS=1? */
   1052	beq	1f				/* If not, leave STS=0 */
   1053	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
   10541:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
   1055	sync
   1056
   1057	/* Find the entry we are running from */
   1058	bcl	20,31,$+4
   10591:	mflr	r23
   1060	tlbsx	r23,0,r23
   1061	tlbre	r24,r23,0
   1062	tlbre	r25,r23,1
   1063	tlbre	r26,r23,2
   1064
   1065/*
   1066 * Cleanup time
   1067 */
   1068
   1069	/* Initialize MMUCR */
   1070	li	r5,0
   1071	mtspr	SPRN_MMUCR,r5
   1072	sync
   1073
   1074clear_all_utlb_entries:
   1075
   1076	#; Set initial values.
   1077
   1078	addis		r3,0,0x8000
   1079	addi		r4,0,0
   1080	addi		r5,0,0
   1081	b		clear_utlb_entry
   1082
   1083	#; Align the loop to speed things up.
   1084
   1085	.align		6
   1086
   1087clear_utlb_entry:
   1088
   1089	tlbwe		r4,r3,0
   1090	tlbwe		r5,r3,1
   1091	tlbwe		r5,r3,2
   1092	addis		r3,r3,0x2000
   1093	cmpwi		r3,0
   1094	bne		clear_utlb_entry
   1095	addis		r3,0,0x8000
   1096	addis		r4,r4,0x100
   1097	cmpwi		r4,0
   1098	bne		clear_utlb_entry
   1099
   1100	#; Restore original entry.
   1101
   1102	oris	r23,r23,0x8000  /* specify the way */
   1103	tlbwe		r24,r23,0
   1104	tlbwe		r25,r23,1
   1105	tlbwe		r26,r23,2
   1106
   1107/*
   1108 * Configure and load pinned entry into TLB for the kernel core
   1109 */
   1110
   1111	lis	r3,PAGE_OFFSET@h
   1112	ori	r3,r3,PAGE_OFFSET@l
   1113
   1114	/* Load the kernel PID = 0 */
   1115	li	r0,0
   1116	mtspr	SPRN_PID,r0
   1117	sync
   1118
   1119	/* Word 0 */
   1120	clrrwi	r3,r3,12		/* Mask off the effective page number */
   1121	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
   1122
   1123	/* Word 1 - use r25.  RPN is the same as the original entry */
   1124
   1125	/* Word 2 */
   1126	li	r5,0
   1127	ori	r5,r5,PPC47x_TLB2_S_RWX
   1128#ifdef CONFIG_SMP
   1129	ori	r5,r5,PPC47x_TLB2_M
   1130#endif
   1131
   1132	/* We write to way 0 and bolted 0 */
   1133	lis	r0,0x8800
   1134	tlbwe	r3,r0,0
   1135	tlbwe	r25,r0,1
   1136	tlbwe	r5,r0,2
   1137
   1138/*
   1139 * Configure SSPCR, ISPCR and USPCR for now to search everything; we can fix
   1140 * them up later
   1141 */
   1142	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
   1143	mtspr	SPRN_SSPCR,r3
   1144	mtspr	SPRN_USPCR,r3
   1145	LOAD_REG_IMMEDIATE(r3, 0x12345670)
   1146	mtspr	SPRN_ISPCR,r3
   1147
   1148	/* Force context change */
   1149	mfmsr	r0
   1150	mtspr	SPRN_SRR1, r0
   1151	lis	r0,3f@h
   1152	ori	r0,r0,3f@l
   1153	mtspr	SPRN_SRR0,r0
   1154	sync
   1155	rfi
   1156
   1157	/* Invalidate original entry we used */
   11583:
   1159	rlwinm	r24,r24,0,21,19 /* clear the "valid" bit */
   1160	tlbwe	r24,r23,0
   1161	addi	r24,0,0
   1162	tlbwe	r24,r23,1
   1163	tlbwe	r24,r23,2
   1164	isync                   /* Clear out the shadow TLB entries */
   1165
   1166#ifdef CONFIG_PPC_EARLY_DEBUG_44x
   1167	/* Add UART mapping for early debug. */
   1168
   1169	/* Word 0 */
   1170	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
   1171	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
   1172
   1173	/* Word 1 */
   1174	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
   1175	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
   1176
   1177	/* Word 2 */
   1178	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
   1179
   1180	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
   1181	 * congruence class as the kernel, we need to make sure of it at
   1182	 * some point
   1183	 */
   1184        lis	r0,0x8d00
   1185	tlbwe	r3,r0,0
   1186	tlbwe	r4,r0,1
   1187	tlbwe	r5,r0,2
   1188
   1189	/* Force context change */
   1190	isync
   1191#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
   1192
   1193	/* Establish the interrupt vector offsets */
   1194	SET_IVOR(0,  CriticalInput);
   1195	SET_IVOR(1,  MachineCheckA);
   1196	SET_IVOR(2,  DataStorage);
   1197	SET_IVOR(3,  InstructionStorage);
   1198	SET_IVOR(4,  ExternalInput);
   1199	SET_IVOR(5,  Alignment);
   1200	SET_IVOR(6,  Program);
   1201	SET_IVOR(7,  FloatingPointUnavailable);
   1202	SET_IVOR(8,  SystemCall);
   1203	SET_IVOR(9,  AuxillaryProcessorUnavailable);
   1204	SET_IVOR(10, Decrementer);
   1205	SET_IVOR(11, FixedIntervalTimer);
   1206	SET_IVOR(12, WatchdogTimer);
   1207	SET_IVOR(13, DataTLBError47x);
   1208	SET_IVOR(14, InstructionTLBError47x);
   1209	SET_IVOR(15, DebugCrit);
   1210
   1211	/* We configure icbi to invalidate 128 bytes at a time since the
   1212	 * current 32-bit kernel code isn't too happy with icache != dcache
   1213	 * block size. We also disable the BTAC as this can cause errors
   1214	 * in some circumstances (see IBM Erratum 47).
   1215	 */
   1216	mfspr	r3,SPRN_CCR0
   1217	oris	r3,r3,0x0020
   1218	ori	r3,r3,0x0040
   1219	mtspr	SPRN_CCR0,r3
   1220	isync
   1221
   1222#endif /* CONFIG_PPC_47x */
   1223
   1224/*
   1225 * Here we are back to code that is common between 44x and 47x
   1226 *
   1227 * We proceed to further kernel initialization and return to the
   1228 * main kernel entry
   1229 */
   1230head_start_common:
   1231	/* Establish the interrupt vector base */
   1232	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
   1233	mtspr	SPRN_IVPR,r4
   1234
   1235	/*
   1236	 * If the kernel was loaded at a non-zero 256 MB page, we need to
   1237	 * mask off the most significant 4 bits to get the relative address
   1238	 * from the start of physical memory
   1239	 */
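       	/*
       	 * e.g., assuming PAGE_OFFSET = 0xc0000000: a return address of
       	 * 0x100003c4 in a kernel running from the second 256 MB page
       	 * masks down to 0x000003c4 and becomes 0xc00003c4.
       	 */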
   1240	rlwinm	r22,r22,0,4,31
   1241	addis	r22,r22,PAGE_OFFSET@h
   1242	mtlr	r22
   1243	isync
   1244	blr
   1245
   1246#ifdef CONFIG_SMP
   1247	.data
   1248	.align	12
   1249temp_boot_stack:
   1250	.space	1024
   1251#endif /* CONFIG_SMP */