cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

relocate_32.S (11415B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains kexec low-level functions.
 *
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011,  IBM Corporation
 * 		Author: Suzuki Poulose <suzuki@in.ibm.com>
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/kexec.h>

	.text

	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list   */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address      */

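	/*
	 * Rough C-side view of this entry point (a sketch for reference,
	 * not the kernel's actual declaration):
	 *
	 *	void relocate_new_kernel(unsigned long page_list,
	 *				 unsigned long reboot_code_buffer,
	 *				 unsigned long start_address);
	 *
	 * matching the r3/r4/r5 arguments listed above.
	 */
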
#ifdef CONFIG_FSL_BOOKE

	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#define ENTRY_MAPPING_KEXEC_SETUP
#include <kernel/fsl_booke_entry_mapping.S>
#undef ENTRY_MAPPING_KEXEC_SETUP

	mr      r3, r29
	mr      r4, r30
	mr      r5, r31

	li	r0, 0
#elif defined(CONFIG_44x)

	/* Save our parameters */
	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#ifdef CONFIG_PPC_47x
	/* Check for 47x cores */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476FPE@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	setup_map_47x
#endif /* CONFIG_PPC_47x */

/*
 * Code for setting up a 1:1 mapping on PPC440x for KEXEC
 *
 * We cannot switch off the MMU on PPC44x.
 * So we:
 * 1) Invalidate all the mappings except the one we are running from.
 * 2) Create a tmp mapping for our code in the other address space (TS) and
 *    jump to it. Invalidate the entry we started in.
 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in the original TS.
 * 4) Jump to the 1:1 mapping in the original TS.
 * 5) Invalidate the tmp mapping.
 *
 * - Based on the kexec support code for FSL BookE
 *
 */
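
	/*
	 * Rough layout of the 44x TLB words as used below (the 440 core
	 * manual is authoritative): word 0 (PAGEID) carries the EPN, the
	 * valid bit, TS (bit 23) and SIZE (bits 24-27); word 1 (XLAT)
	 * carries the RPN; word 2 (ATTRIB) carries permissions and
	 * storage attributes.
	 */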

	/*
	 * Load the PID with kernel PID (0).
	 * Also load our MSR_IS and TID to MMUCR for TLB search.
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3
	mfmsr	r4
	andi.	r4,r4,MSR_IS@l
	beq	wmmucr
	oris	r3,r3,PPC44x_MMUCR_STS@h
wmmucr:
	mtspr	SPRN_MMUCR,r3
	sync

	/*
	 * Invalidate all the TLB entries except the current entry
	 * where we are running from
	 */
	bcl	20,31,$+4			/* Find our address */
0:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skip				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skip:
	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done?	*/
	bne	1b				/* If not, repeat */
	isync

	/* Create a temp mapping and jump to it */
	andi.	r6, r23, 1		/* Find the index to use */
	addi	r24, r6, 1		/* r24 will contain 1 or 2 */

	mfmsr	r9			/* get the MSR */
	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
	xori	r7, r5, 1		/* Use the other address space */

	/* Read the current mapping entries */
	tlbre	r3, r23, PPC44x_TLB_PAGEID
	tlbre	r4, r23, PPC44x_TLB_XLAT
	tlbre	r5, r23, PPC44x_TLB_ATTRIB

	/* Save our current XLAT entry */
	mr	r25, r4

	/* Extract the TLB PageSize */
	li	r10, 1 			/* r10 will hold PageSize */
	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */

	/* XXX: As of now we use 256M, 4K pages */
	cmpwi	r11, PPC44x_TLB_256M
	bne	tlb_4k
	rotlwi	r10, r10, 28		/* r10 = 256M */
	b	write_out
tlb_4k:
	cmpwi	r11, PPC44x_TLB_4K
	bne	default
	rotlwi	r10, r10, 12		/* r10 = 4K */
	b	write_out
default:
	rotlwi	r10, r10, 10		/* r10 = 1K */
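
	/*
	 * Worked values for the decode above: r10 starts at 1, so
	 * rotlwi by 28 gives 0x10000000 (256M), by 12 gives 0x1000 (4K)
	 * and by 10 gives 0x400 (1K).
	 */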

write_out:
	/*
	 * Write out the tmp 1:1 mapping for this code in the other
	 * address space. Fixup: EPN = RPN, TS = other address space.
	 */
	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */

	/* Write out the tmp mapping entries */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	tlbwe	r4, r24, PPC44x_TLB_XLAT
	tlbwe	r5, r24, PPC44x_TLB_ATTRIB

	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
	not	r10, r11		/* Mask for PageNum */

	/* Switch to other address space in MSR */
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bcl	20,31,$+4
1:	mflr	r8
	addi	r8, r8, (2f-1b)		/* Find the target offset */

	/* Jump to the tmp mapping */
	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi

2:
	/* Invalidate the entry we were executing from */
	li	r3, 0
	tlbwe	r3, r23, PPC44x_TLB_PAGEID

	/* attribute fields. rwx for SUPERVISOR mode */
	li	r5, 0
	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	/* Create 1:1 mapping in 256M pages */
	xori	r7, r7, 1			/* Revert back to Original TS */

	li	r8, 0				/* PageNumber */
	li	r6, 3				/* TLB Index, start at 3  */

next_tlb:
	rotlwi	r3, r8, 28			/* Create EPN (bits 0-3) */
	mr	r4, r3				/* RPN = EPN  */
	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
	insrwi	r3, r7, 1, 23			/* Set TS from r7 */

	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN   */
	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */

	addi	r8, r8, 1			/* Increment PN */
	addi	r6, r6, 1			/* Increment TLB Index */
	cmpwi	r8, 8				/* Are we done ? */
	bne	next_tlb
	isync
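
	/*
	 * Eight 256M entries were written above (TLB indices 3-10),
	 * i.e. 8 * 256M = 2GiB of 1:1 mapping starting at physical 0.
	 */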

	/* Jump to the new mapping 1:1 */
	li	r9,0
	insrwi	r9, r7, 1, 26			/* Set MSR[IS] = r7 */

	bcl	20,31,$+4
1:	mflr	r8
	and	r8, r8, r11			/* Get our offset within page */
	addi	r8, r8, (2f-1b)

	and	r5, r25, r10			/* Get our target PageNum */
	or	r8, r8, r5			/* Target jump address */

	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi
2:
	/* Invalidate the tmp entry we used */
	li	r3, 0
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	sync
	b	ppc44x_map_done

#ifdef CONFIG_PPC_47x

	/* 1:1 mapping for 47x */

setup_map_47x:

	/*
	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
	 * Also set the MSR IS->MMUCR STS
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3			/* Set PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4, r4, MSR_IS@l		/* TS=1? */
	beq	1f				/* If not, leave STS=0 */
	oris	r3, r3, PPC47x_MMUCR_STS@h	/* Set STS=1 */
1:	mtspr	SPRN_MMUCR, r3			/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bcl	20,31,$+4
2:	mflr	r23
	tlbsx	r23, 0, r23
	tlbre	r24, r23, 0			/* TLB Word 0 */
	tlbre	r25, r23, 1			/* TLB Word 1 */
	tlbre	r26, r23, 2			/* TLB Word 2 */


	/*
	 * Invalidate all the TLB entries by writing to 256 RPNs (r4)
	 * of 4K page size in all 4 ways (0-3 in r3).
	 * This invalidates the entire UTLB, including the entry we are
	 * running from. However, the shadow TLB entries keep us
	 * executing until we flush them (rfi/isync).
	 */
	addis	r3, 0, 0x8000			/* specify the way */
	addi	r4, 0, 0			/* TLB Word0 = (EPN=0, VALID = 0) */
	addi	r5, 0, 0
	b	clear_utlb_entry

	/* Align the loop to speed things up. from head_44x.S */
	.align	6

clear_utlb_entry:

	tlbwe	r4, r3, 0
	tlbwe	r5, r3, 1
	tlbwe	r5, r3, 2
	addis	r3, r3, 0x2000			/* Increment the way */
	cmpwi	r3, 0
	bne	clear_utlb_entry
	addis	r3, 0, 0x8000
	addis	r4, r4, 0x100			/* Increment the EPN */
	cmpwi	r4, 0
	bne	clear_utlb_entry
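
	/*
	 * The loop above steps the way in the top bits of r3 (0x80000000
	 * plus 0x20000000 per pass wraps to 0 after the 4 ways) and the
	 * EPN in r4 (0x01000000 per pass wraps to 0 after 256 values),
	 * i.e. 4 * 256 = 1024 invalidating writes in total.
	 */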

	/* Create the entries in the other address space */
	mfmsr	r5
	rlwinm	r7, r5, 27, 31, 31		/* Get the TS (Bit 26) from MSR */
	xori	r7, r7, 1			/* r7 = !TS */

	insrwi	r24, r7, 1, 21			/* Change the TS in the saved TLB word 0 */

	/*
	 * write out the TLB entries for the tmp mapping
	 * Use way '0' so that we could easily invalidate it later.
	 */
	lis	r3, 0x8000			/* Way '0' */

	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Update the msr to the new TS */
	insrwi	r5, r7, 1, 26

	bcl	20,31,$+4
1:	mflr	r6
	addi	r6, r6, (2f-1b)

	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r5
	rfi

	/*
	 * Now we are in the tmp address space.
	 * Create a 1:1 mapping for 0-2GiB in the original TS.
	 */
2:
	li	r3, 0
	li	r4, 0				/* TLB Word 0 */
	li	r5, 0				/* TLB Word 1 */
	li	r6, 0
	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB word 2 */

	li	r8, 0				/* PageIndex */

	xori	r7, r7, 1			/* revert back to original TS */

write_utlb:
	rotlwi	r5, r8, 28			/* RPN = PageIndex * 256M */
						/* ERPN = 0 as we don't use memory above 2G */

	mr	r4, r5				/* EPN = RPN */
	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
	insrwi	r4, r7, 1, 21			/* Insert the TS to Word 0 */

	tlbwe	r4, r3, 0			/* Write out the entries */
	tlbwe	r5, r3, 1
	tlbwe	r6, r3, 2
	addi	r8, r8, 1
	cmpwi	r8, 8				/* Have we completed ? */
	bne	write_utlb

	/* make sure we complete the TLB write up */
	isync

	/*
	 * Prepare to jump to the 1:1 mapping.
	 * 1) Extract page size of the tmp mapping
	 *    DSIZ = TLB_Word0[22:27]
	 * 2) Calculate the physical address of the address
	 *    to jump to.
	 */
	rlwinm	r10, r24, 0, 22, 27

	cmpwi	r10, PPC47x_TLB0_4K
	bne	0f
	li	r10, 0x1000			/* r10 = 4k */
	bl	1f

0:
	/* Defaults to 256M */
	lis	r10, 0x1000
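	/* Note: li above loads 0x1000 (4K); lis here loads 0x1000 << 16 = 0x10000000 (256M) */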

	bcl	20,31,$+4
1:	mflr	r4
	addi	r4, r4, (2f-1b)			/* virtual address  of 2f */

	subi	r11, r10, 1			/* offsetmask = Pagesize - 1 */
	not	r10, r11			/* Pagemask = ~(offsetmask) */

	and	r5, r25, r10			/* Physical page */
	and	r6, r4, r11			/* offset within the current page */

	or	r5, r5, r6			/* Physical address for 2f */

	/* Switch the TS in MSR to the original one */
	mfmsr	r8
	insrwi	r8, r7, 1, 26

	mtspr	SPRN_SRR1, r8
	mtspr	SPRN_SRR0, r5
	rfi

2:
	/* Invalidate the tmp mapping */
	lis	r3, 0x8000			/* Way '0' */

	clrrwi	r24, r24, 12			/* Clear the valid bit */
	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Make sure we complete the TLB write and flush the shadow TLB */
	isync

#endif

ppc44x_map_done:


	/* Restore the parameters */
	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#else
	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */
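	/*
	 * rfi loads MSR from SRR1 and resumes at SRR0, so with
	 * SRR1 = MSR_RI|MSR_ME (IR/DR clear) and SRR0 = the copy of 1:
	 * inside the reboot code buffer, translation is switched off and
	 * control is transferred in one instruction.
	 */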

	mr	r8, r0
	ori     r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
#endif
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
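	/*
	 * The list walked below (the kexec indirection pages) is a
	 * sequence of physical addresses with flag bits in the low bits:
	 * IND_DESTINATION selects the next destination page,
	 * IND_INDIRECTION switches to a new list page, IND_SOURCE copies
	 * one page to the current destination and IND_DONE terminates
	 * the list.
	 */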
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19 /* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19 /* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19 /* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr   r7
	subi    r9, r9, 4
	subi    r8, r8, 4
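	/*
	 * Per word copied, the loop below also does dcbst/sync/icbi on
	 * the destination so the copied image is pushed out of the data
	 * cache and invalidated in the instruction cache before it is
	 * executed.
	 */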
9:
	lwzu    r0, 4(r9)  /* do the copy */
	xor	r6, r6, r0
	stwu    r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz    9b

	addi    r9, r9, 4
	addi    r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	mfspr	r3, SPRN_PIR /* current core we are running on */
	mr	r4, r5 /* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel