cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

entry.S (10711B)


/* SPDX-License-Identifier: GPL-2.0
 *
 * arch/sh/kernel/cpu/sh3/entry.S
 *
 *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>

! NOTE:
! GNU as (as of 2.9.1) rewrites bf/s into bt/s plus bra when the branch
! target is too far away, but this causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *    jmp	@k0	    ! control-transfer instruction
 *     ldc	k1, ssr     ! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 * 	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it must also be
 *	updated in ptrace.c and ptrace.h.
 *
 *	r0
 *      ...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
/* Offsets to the stack */
OFF_R0  =  0		/* Return value. New ABI also arg4 */
OFF_R1  =  4     	/* New ABI: arg5 */
OFF_R2  =  8     	/* New ABI: arg6 */
OFF_R3  =  12     	/* New ABI: syscall_nr */
OFF_R4  =  16     	/* New ABI: arg0 */
OFF_R5  =  20     	/* New ABI: arg1 */
OFF_R6  =  24     	/* New ABI: arg2 */
OFF_R7  =  28     	/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC  =  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)
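! The byte offsets above follow from the stack layout sketched earlier:
! r0..r15 occupy the first sixteen words (offsets 0..60), so spc sits at
! 16*4 = 64 (OFF_PC), pr at 68, ssr at 72 (OFF_SR = 16*4+8), and the saved
! syscall number at 16*4 + 6*4 = 88 (OFF_TRA), i.e. past spc, pr, ssr,
! gbr, mach and macl.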

#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */
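!
! On exception entry the CPU sets MD, RB and BL in SR, so while RB=1 the
! k0..k4 aliases above name the bank-1 copies of r0..r4 and leave the
! interrupted context's bank-0 r0..r7 untouched; save_low_regs later
! clears RB (mask 1f: RB=0, BL=0) before the low registers are saved.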

!
! TLB Miss / Initial Page write exception handling
!			_and_
! TLB hits, but the access violates the protection.
! It can be a valid access, such as stack growth and/or C-O-W.
!
!
! Find the pmd/pte entry and load the TLB.
! If it's not found, cause an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be faster),
! this first version relies heavily on the C implementation.
!

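! Per the SH C calling convention (r4..r7 carry the first arguments, as
! the OFF_R4..OFF_R7 comments above note), the stubs below hand the C
! fault handlers the saved register frame in r4, a fault code in r5 and
! the faulting address (read from MMU_TEA) in r6.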
#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_handle_tlbmiss
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_WRITE, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_INITIAL, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_do_page_fault
	 mov	#FAULT_CODE_PROT, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_do_page_fault
	 mov	#(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5

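! A zero return from handle_tlbmiss() is treated as "miss handled" and we
! return straight to the faulting context; a non-zero return falls through
! to the full do_page_fault() path below, with the original fault code
! restored from r8 into r5 first.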
call_handle_tlbmiss:
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:
	mov	r8, r5
call_do_page_fault:
	mov.l	1f, r0
	mov.l	@r0, r6

	mov.l	3f, r0
	mov.l	4f, r1
	mov	r15, r4
	jmp	@r0
	 lds	r1, pr

	.align 2
1:	.long	MMU_TEA
2:	.long	handle_tlbmiss
3:	.long	do_page_fault
4:	.long	ret_from_exception

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align 2
1:	.long	MMU_TEA
2:	.long   do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
	mov.l	1f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr			! restore pr
	mov	k4, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k3, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

! restore_regs()
! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
! - switch bank
! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
! k2 returns original pr
! k3 returns original sr
! k4 returns original stack pointer
! r8 passes SR bitmask, overwritten with restored data on return
! r9 trashed
! BL=0 on entry, on exit BL=1 (depending on r8).
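! With the usual mask of 0x30000000 (RB | BL) in r8, the ldc inside the
! routine leaves exceptions blocked and selects register bank 1, so staging
! spc/pr/ssr and the original sp in k2..k4 cannot disturb the bank-0
! r0..r7 that were popped just beforehand.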

ENTRY(restore_regs)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r9
	or	r8, r9
	ldc	r9, sr
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4		! original stack pointer
	ldc.l	@r15+, spc
	mov.l	@r15+, k2		! original PR
	mov.l	@r15+, k3		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	rts
	 add	#4, r15			! Skip syscall number

restore_all:
	mov.l	7f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr			! restore pr
	!
	! Calculate new SR value
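	! The IMASK field (bits 7..4) is stripped from the saved SR and, unless
	! it was already 15 (fully masked), replaced with the global interrupt
	! mask held in g_imask (r6_bank1) before being loaded into SSR for rte.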
	mov	k3, k2			! original SR value
	mov	#0xfffffff0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2			! Mask original SR value
	!
	mov	k3, k0			! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2			! Set the IMASK-bits
	ldc	k2, ssr
	!
	mov	k4, r15
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
7:	.long	0x30000000

! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned to a page boundary.
!
	.balign 	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! 0x100: General exception vector
!
	.balign 	256,0,256
general_exception:
	bra	handle_exception
	 sts	pr, k3		! save original pr value in k3

! prepare_stack()
! - roll back gRB
! - switch to kernel stack
! k0 returns original sp (after roll back)
! k1 trashed
! k2 trashed

prepare_stack:
#ifdef CONFIG_GUSA
	! Check for roll back gRB (User and Kernel)
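	! A gUSA atomic sequence marks itself by keeping a small negative value
	! in r15, so both of its top bits are set; the two shll/bf pairs below
	! skip the rollback whenever that is not the case.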
	mov	r15, k0
	shll	k0
	bf/s	1f
	 shll	k0
	bf/s	1f
	 stc	spc, k1
	stc	r0_bank, k0
	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
	bt/s	2f
	 stc	r1_bank, k1

	add	#-2, k0
	add	r15, k0
	ldc	k0, spc		! PC = saved r0 + r15 - 2
2:	mov	k1, r15		! SP = r1
1:
#endif
	! Switch to kernel stack if needed
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
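	! mov #imm only takes an 8-bit signed immediate, so THREAD_SIZE is
	! rebuilt as (THREAD_SIZE >> 10) << 10 via shll8 + shll2; adding it to
	! current (the thread_info pointer in r7_bank1) gives the top of the
	! kernel stack.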
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:
	rts
	 nop

!
! 0x400: Instruction and Data TLB miss exception vector
!
	.balign 	1024,0,1024
tlb_miss:
	sts	pr, k3		! save original pr value in k3

handle_exception:
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	5f, k2		! vector register address
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov.l	@k2, k2		! read out vector and keep in k2

handle_exception_special:
	setup_frame_reg

	! Setup return address and jump to exception handler
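	! EXPEVT codes are multiples of 0x20, so shifting the saved vector right
	! by three turns it into a byte offset into exception_handling_table,
	! which holds one 32-bit handler pointer per exception.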
	mov.l	7f, r9		! fetch return address
	stc	r2_bank, r0	! k2 (vector)
	mov.l	6f, r10
	shlr2	r0
	shlr	r0
	mov.l	@(r0, r10), r10
	jmp	@r10
	 lds	r9, pr		! put return address in pr

	.align	L1_CACHE_SHIFT

! save_regs()
! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
! - switch bank
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k0 contains original stack pointer*
! k1 trashed
! k3 passes original pr*
! k4 passes SR bitmask
! BL=1 on entry, on exit BL=0.

ENTRY(save_regs)
	mov	#-1, r1
	mov.l	k1, @-r15	! set TRA (default: -1)
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	mov.l	k3, @-r15	! original pr in k3
	stc.l	spc, @-r15

	mov.l	k0, @-r15	! original stack pointer in k0
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15

	mov.l	0f, k3		! SR bits to set in k3

	! fall-through

! save_low_regs()
! - modify SR for bank switch
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k3 passes bits to set in SR
! k4 passes bits to clear in SR

ENTRY(save_low_regs)
	stc	sr, r8
	or	k3, r8
	and	k4, r8
	ldc	r8, sr

	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	rts
	 mov.l	r0, @-r15

!
! 0x600: Interrupt / NMI vector
!
	.balign 	512,0,512
ENTRY(handle_interrupt)
	sts	pr, k3		! save original pr value in k3
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov	#-1, k2		! default vector kept in k2

	setup_frame_reg

	stc	sr, r0	! get status register
	shlr2	r0
	and	#0x3c, r0
	cmp/eq	#0x3c, r0
	bf	9f
	TRACE_IRQS_OFF
9:

	! Setup return address and jump to do_IRQ
	mov.l	4f, r9		! fetch return address
	lds	r9, pr		! put return address in pr
	mov.l	2f, r4
	mov.l	3f, r9
	mov.l	@r4, r4		! pass INTEVT vector as arg0

	shlr2	r4
	shlr	r4
	mov	r4, r0		! save vector->jmp table offset for later

	shlr2	r4		! vector to IRQ# conversion
	add	#-0x10, r4
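	! INTEVT codes are likewise multiples of 0x20: r0 keeps vector >> 3 as
	! a byte offset into the handler table, while r4 becomes
	! (vector >> 5) - 16, which the do_IRQ path treats as the IRQ number
	! (INTEVT 0x200 gives 0).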

	cmp/pz	r4		! is it a valid IRQ?
	bt	10f

	/*
	 * We got here as a result of taking the INTEVT path for something
	 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
	 * path and special case the event dispatch instead.  This is the
	 * expected path for the NMI (and any other brilliantly implemented
	 * exception), which effectively wants regular exception dispatch
	 * but is unfortunately reported through INTEVT rather than
	 * EXPEVT.  Grr.
	 */
	mov.l	6f, r9
	mov.l	@(r0, r9), r9
	jmp	@r9
	 mov	r15, r8		! trap handlers take saved regs in r8

10:
	jmp	@r9		! Off to do_IRQ() we go.
	 mov	r15, r5		! pass saved registers as arg1

ENTRY(exception_none)
	rts
	 nop

	.align	L1_CACHE_SHIFT
exception_data:
0:	.long	0x000080f0	! FD=1, IMASK=15
1:	.long	0xcfffffff	! RB=0, BL=0
2:	.long	INTEVT
3:	.long	do_IRQ
4:	.long	ret_from_irq
5:	.long	EXPEVT
6:	.long	exception_handling_table
7:	.long	ret_from_exception