cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

entry.S (14811B)


/*
 * linux/arch/nios2/kernel/entry.S
 *
 * Copyright (C) 2013-2014  Altera Corporation
 * Copyright (C) 2009, Wind River Systems Inc
 *
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 *  Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
 *                      Kenneth Albanowski <kjahds@kjahds.com>,
 *  Copyright (C) 2000  Lineo Inc. (www.lineo.com)
 *  Copyright (C) 2004  Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
 * 5307 fixes by David W. Miller
 * linux 2.4 support David McCullough <davidm@snapgear.com>
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asm-macros.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/entry.h>
#include <asm/unistd.h>
#include <asm/processor.h>

.macro GET_THREAD_INFO reg
.if THREAD_SIZE & 0xffff0000
	andhi	\reg, sp, %hi(~(THREAD_SIZE-1))
.else
	addi	\reg, r0, %lo(~(THREAD_SIZE-1))
	and	\reg, \reg, sp
.endif
.endm
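/*
 * thread_info lives at the bottom of the THREAD_SIZE-aligned kernel stack,
 * so masking the current stack pointer with ~(THREAD_SIZE-1) yields its
 * address.
 */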

.macro	kuser_cmpxchg_check
	/*
	 * Make sure our user space atomic helper is restarted if it was
	 * interrupted in a critical region.
	 * ea-4 = address of interrupted insn (ea must be preserved).
	 * sp = saved regs.
	 * cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
	 * If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
	 * cmpxchg_ldw + 4.
	 */
	/* et = cmpxchg_stw + 4 */
	movui   et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
	bgtu	ea, et, 1f

	subi	et, et, (cmpxchg_stw - cmpxchg_ldw) /* et = cmpxchg_ldw + 4 */
	bltu	ea, et, 1f
	stw	et, PT_EA(sp)	/* fix up EA */
	mov	ea, et
1:
.endm
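/*
 * In effect: if user space was interrupted anywhere inside the cmpxchg
 * critical section, the saved return address is moved back so that the
 * whole load/compare/store sequence in __kuser_cmpxchg (see the end of
 * this file) is re-executed from the start, which is what makes the
 * helper appear atomic to user space.
 */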

.section .rodata
.align 4
exception_table:
	.word unhandled_exception	/* 0 - Reset */
	.word unhandled_exception	/* 1 - Processor-only Reset */
	.word external_interrupt	/* 2 - Interrupt */
	.word handle_trap		/* 3 - Trap Instruction */

	.word instruction_trap		/* 4 - Unimplemented instruction */
	.word handle_illegal		/* 5 - Illegal instruction */
	.word handle_unaligned		/* 6 - Misaligned data access */
	.word handle_unaligned		/* 7 - Misaligned destination address */

	.word handle_diverror		/* 8 - Division error */
	.word protection_exception_ba	/* 9 - Supervisor-only instr. address */
	.word protection_exception_instr /* 10 - Supervisor only instruction */
	.word protection_exception_ba	/* 11 - Supervisor only data address */

	.word unhandled_exception	/* 12 - Double TLB miss (data) */
	.word protection_exception_pte	/* 13 - TLB permission violation (x) */
	.word protection_exception_pte	/* 14 - TLB permission violation (r) */
	.word protection_exception_pte	/* 15 - TLB permission violation (w) */

	.word unhandled_exception	/* 16 - MPU region violation */

trap_table:
	.word	handle_system_call	/* 0  */
	.word	handle_trap_1		/* 1  */
	.word	handle_trap_2		/* 2  */
	.word	handle_trap_3		/* 3  */
	.word	handle_trap_reserved	/* 4  */
	.word	handle_trap_reserved	/* 5  */
	.word	handle_trap_reserved	/* 6  */
	.word	handle_trap_reserved	/* 7  */
	.word	handle_trap_reserved	/* 8  */
	.word	handle_trap_reserved	/* 9  */
	.word	handle_trap_reserved	/* 10 */
	.word	handle_trap_reserved	/* 11 */
	.word	handle_trap_reserved	/* 12 */
	.word	handle_trap_reserved	/* 13 */
	.word	handle_trap_reserved	/* 14 */
	.word	handle_trap_reserved	/* 15 */
	.word	handle_trap_reserved	/* 16 */
	.word	handle_trap_reserved	/* 17 */
	.word	handle_trap_reserved	/* 18 */
	.word	handle_trap_reserved	/* 19 */
	.word	handle_trap_reserved	/* 20 */
	.word	handle_trap_reserved	/* 21 */
	.word	handle_trap_reserved	/* 22 */
	.word	handle_trap_reserved	/* 23 */
	.word	handle_trap_reserved	/* 24 */
	.word	handle_trap_reserved	/* 25 */
	.word	handle_trap_reserved	/* 26 */
	.word	handle_trap_reserved	/* 27 */
	.word	handle_trap_reserved	/* 28 */
	.word	handle_trap_reserved	/* 29 */
#ifdef CONFIG_KGDB
	.word	handle_kgdb_breakpoint	/* 30 KGDB breakpoint */
#else
	.word	instruction_trap		/* 30 */
#endif
	.word	handle_breakpoint	/* 31 */

.text
.set noat
.set nobreak

ENTRY(inthandler)
	SAVE_ALL

	kuser_cmpxchg_check

	/* Clear EH bit before we get a new exception in the kernel
	 * and after we have saved it to the exception frame. This is done
	 * whether it's trap, tlb-miss or interrupt. If we don't do this,
	 * estatus is not updated for the next exception.
	 */
	rdctl	r24, status
	movi	r9, %lo(~STATUS_EH)
	and	r24, r24, r9
	wrctl	status, r24

	/* Read cause and vector and branch to the associated handler */
	mov	r4, sp
	rdctl	r5, exception
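	/*
	 * The CAUSE field sits in bits 6:2 of the exception register, so the
	 * value read above is already cause * 4, i.e. a byte offset into the
	 * word-sized exception_table; no extra shift is needed.
	 */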
	movia	r9, exception_table
	add	r24, r9, r5
	ldw	r24, 0(r24)
	jmp	r24


/***********************************************************************
 * Handle traps
 ***********************************************************************
 */
ENTRY(handle_trap)
	ldwio	r24, -4(ea)	/* instruction that caused the exception */
	srli	r24, r24, 4
	andi	r24, r24, 0x7c
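	/*
	 * The shift/mask above extracts the trap instruction's imm5 field
	 * (bits 10:6) still scaled by four, giving a byte offset into the
	 * word-sized trap_table.
	 */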
	movia	r9, trap_table
	add	r24, r24, r9
	ldw	r24, 0(r24)
	jmp	r24


/***********************************************************************
 * Handle system calls
 ***********************************************************************
 */
ENTRY(handle_system_call)
	/* Enable interrupts */
	rdctl	r10, status
	ori	r10, r10, STATUS_PIE
	wrctl	status, r10

	/* Reload registers destroyed by common code. */
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)

local_restart:
	/* Check that the requested system call is within limits */
	movui	r1, __NR_syscalls
	bgeu	r2, r1, ret_invsyscall
	slli	r1, r2, 2
	movhi	r11, %hiadj(sys_call_table)
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)
	beq	r1, r0, ret_invsyscall

	/* Check if we are being traced */
	GET_THREAD_INFO r11
	ldw	r11, TI_FLAGS(r11)
	BTBNZ   r11, r11, TIF_SYSCALL_TRACE, traced_system_call

	/* Execute the system call */
	callr	r1

	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by
	 * adding to register ra, so that the register values they
	 * restore are not destroyed.
	 */
translate_rc_and_ret:
	movi	r1, 0
	bge	r2, zero, 3f
	sub	r2, zero, r2
	movi	r1, 1
3:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret:
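	/*
	 * The user-space half of this convention: a libc-style wrapper is
	 * assumed to turn the (r2, r7) pair back into the usual C return
	 * value. A hedged sketch (illustrative only, not from this tree):
	 *
	 *	long r2 = ...;	// result or positive errno from the kernel
	 *	long r7 = ...;	// error flag set by translate_rc_and_ret
	 *	if (r7) {
	 *		errno = r2;
	 *		return -1;
	 *	}
	 *	return r2;
	 */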

ret_from_exception:
	ldw	r1, PT_ESTATUS(sp)
	/* if returning to kernel, skip resched and signal handling */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

restore_all:
	rdctl	r10, status			/* disable intrs */
	andi	r10, r10, %lo(~STATUS_PIE)
	wrctl	status, r10
	RESTORE_ALL
	eret

	/* If the syscall number was invalid return ENOSYS */
ret_invsyscall:
	movi	r2, -ENOSYS
	br	translate_rc_and_ret

	/* This implements the same as above, except it calls
	 * do_syscall_trace_enter and do_syscall_trace_exit before and after the
	 * syscall in order for utilities like strace and gdb to work.
	 */
traced_system_call:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_enter
	RESTORE_SWITCH_STACK

	/* Create system call register arguments. The 5th and 6th
	   arguments on stack are already in place at the beginning
	   of pt_regs. */
	ldw	r2, PT_R2(sp)
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)

	/* Fetch the syscall function, we don't need to check the boundaries
	 * since this is already done.
	 */
	slli	r1, r2, 2
	movhi	r11, %hiadj(sys_call_table)
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)

	callr	r1

	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by
	 * adding to register ra, so that the register values they
	 * restore are not destroyed.
	 */
translate_rc_and_ret2:
	movi	r1, 0
	bge	r2, zero, 4f
	sub	r2, zero, r2
	movi	r1, 1
4:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret2:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_exit
	RESTORE_SWITCH_STACK
	br	ret_from_exception

Luser_return:
	GET_THREAD_INFO	r11			/* get thread_info pointer */
	ldw	r10, TI_FLAGS(r11)		/* get thread_info->flags */
	ANDI32	r11, r10, _TIF_WORK_MASK
	beq	r11, r0, restore_all		/* Nothing to do */
	BTBZ	r1, r10, TIF_NEED_RESCHED, Lsignal_return

	/* Reschedule work */
	call	schedule
	br	ret_from_exception

Lsignal_return:
	ANDI32	r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
	beq	r1, r0, restore_all
	mov	r4, sp			/* pt_regs */
	SAVE_SWITCH_STACK
	call	do_notify_resume
	beq	r2, r0, no_work_pending
	RESTORE_SWITCH_STACK
	/* prepare restart syscall here without leaving kernel */
	ldw	r2, PT_R2(sp)	/* reload syscall number in r2 */
	ldw	r4, PT_R4(sp)	/* reload syscall arguments r4-r9 */
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)
	ldw	r8, PT_R8(sp)
	ldw	r9, PT_R9(sp)
	br	local_restart	/* restart syscall */

no_work_pending:
	RESTORE_SWITCH_STACK
	br	ret_from_exception

/***********************************************************************
 * Handle external interrupts.
 ***********************************************************************
 */
/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). It figures out the vector number and calls the appropriate
 * interrupt service routine directly.
 */
external_interrupt:
	rdctl	r12, ipending
	rdctl	r9, ienable
	and	r12, r12, r9
	/* skip if no interrupt is pending */
	beq	r12, r0, ret_from_interrupt

	movi	r24, -1
	stw	r24, PT_ORIG_R2(sp)

	/*
	 * Process an external hardware interrupt.
	 */

	addi	ea, ea, -4	/* re-issue the interrupted instruction */
	stw	ea, PT_EA(sp)
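	/*
	 * The loop below scans the pending mask from bit 0 upwards: r4
	 * counts up from -1 while r12 is shifted right, so once the lowest
	 * set bit reaches bit 0, r4 holds the lowest pending IRQ number,
	 * which is passed to do_IRQ() together with the pt_regs pointer
	 * in r5.
	 */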
2:	movi	r4, %lo(-1)	/* Start from bit position 0,
					highest priority */
				/* This is the IRQ # for handler call */
1:	andi	r10, r12, 1	/* Isolate bit we are interested in */
	srli	r12, r12, 1	/* shift count is costly without hardware
					multiplier */
	addi	r4, r4, 1
	beq	r10, r0, 1b
	mov	r5, sp		/* Setup pt_regs pointer for handler call */
	call	do_IRQ
	rdctl	r12, ipending	/* check again if irq still pending */
	rdctl	r9, ienable	/* Isolate possible interrupts */
	and	r12, r12, r9
	bne	r12, r0, 2b
	/* br	ret_from_interrupt */ /* fall through to ret_from_interrupt */

ENTRY(ret_from_interrupt)
	ldw	r1, PT_ESTATUS(sp)	/* check if returning to kernel */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

#ifdef CONFIG_PREEMPTION
	GET_THREAD_INFO	r1
	ldw	r4, TI_PREEMPT_COUNT(r1)
	bne	r4, r0, restore_all
	ldw	r4, TI_FLAGS(r1)		/* ? Need resched set */
	BTBZ	r10, r4, TIF_NEED_RESCHED, restore_all
	ldw	r4, PT_ESTATUS(sp)	/* ? Interrupts off */
	andi	r10, r4, ESTATUS_EPIE
	beq	r10, r0, restore_all
	call	preempt_schedule_irq
#endif
	br	restore_all

/***********************************************************************
 * A few syscall wrappers
 ***********************************************************************
 */
/*
 * int clone(unsigned long clone_flags, unsigned long newsp,
 *		int __user * parent_tidptr, int __user * child_tidptr,
 *		int tls_val)
 */
ENTRY(sys_clone)
	SAVE_SWITCH_STACK
	subi    sp, sp, 4 /* make space for tls pointer */
	stw     r8, 0(sp) /* pass tls pointer (r8) via stack (5th argument) */
	call	nios2_clone
	addi    sp, sp, 4
	RESTORE_SWITCH_STACK
	ret

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	mov	r4, sp
	call	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	addi	ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
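	/*
	 * The addi above makes us return past translate_rc_and_ret in
	 * handle_system_call, so the r2/r7 values just restored from the
	 * signal frame are not clobbered by the error-translation code.
	 */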
	ret

/***********************************************************************
 * A few other wrappers and stubs
 ***********************************************************************
 */
protection_exception_pte:
	rdctl	r6, pteaddr
	slli	r6, r6, 10
	call	do_page_fault
	br	ret_from_exception

protection_exception_ba:
	rdctl	r6, badaddr
	call	do_page_fault
	br	ret_from_exception

protection_exception_instr:
	call	handle_supervisor_instr
	br	ret_from_exception

handle_breakpoint:
	call	breakpoint_c
	br	ret_from_exception

#ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
handle_unaligned:
	SAVE_SWITCH_STACK
	call	handle_unaligned_c
	RESTORE_SWITCH_STACK
	br	ret_from_exception
#else
handle_unaligned:
	call	handle_unaligned_c
	br	ret_from_exception
#endif

handle_illegal:
	call	handle_illegal_c
	br	ret_from_exception

handle_diverror:
	call	handle_diverror_c
	br	ret_from_exception

#ifdef CONFIG_KGDB
handle_kgdb_breakpoint:
	call	kgdb_breakpoint_c
	br	ret_from_exception
#endif

handle_trap_1:
	call	handle_trap_1_c
	br	ret_from_exception

handle_trap_2:
	call	handle_trap_2_c
	br	ret_from_exception

handle_trap_3:
handle_trap_reserved:
	call	handle_trap_3_c
	br	ret_from_exception

/*
 * Beware - when entering resume, prev (the current task) is
 * in r4, next (the new task) is in r5, don't change these
 * registers.
 */
ENTRY(resume)

	rdctl	r7, status			/* save thread status reg */
	stw	r7, TASK_THREAD + THREAD_KPSR(r4)

	andi	r7, r7, %lo(~STATUS_PIE)	/* disable interrupts */
	wrctl	status, r7

	SAVE_SWITCH_STACK
	stw	sp, TASK_THREAD + THREAD_KSP(r4)/* save kernel stack pointer */
	ldw	sp, TASK_THREAD + THREAD_KSP(r5)/* restore new thread stack */
	movia	r24, _current_thread		/* save thread */
	GET_THREAD_INFO r1
	stw	r1, 0(r24)
	RESTORE_SWITCH_STACK
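	/*
	 * Note: SAVE_SWITCH_STACK above spilled prev's callee-saved
	 * registers before sp was switched, and RESTORE_SWITCH_STACK has
	 * just reloaded next's from the new stack, so swapping kernel stack
	 * pointers is what actually switches the register context.
	 */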

	ldw	r7, TASK_THREAD + THREAD_KPSR(r5)/* restore thread status reg */
	wrctl	status, r7
	ret

ENTRY(ret_from_fork)
	call	schedule_tail
	br	ret_from_exception

ENTRY(ret_from_kernel_thread)
	call	schedule_tail
	mov	r4, r17	/* arg */
	callr	r16	/* function */
	br	ret_from_exception

/*
 * Kernel user helpers.
 *
 * Each segment is 64-byte aligned and will be mapped into user space.
 * New segments (if ever needed) must be added after the existing ones.
 * This mechanism should be used only for things that are really small and
 * justified, and not be abused freely.
 *
 */

 /* Filling pads with undefined instructions. */
.macro	kuser_pad sym size
	.if	((. - \sym) & 3)
	.rept	(4 - (. - \sym) & 3)
	.byte	0
	.endr
	.endif
	.rept	((\size - (. - \sym)) / 4)
	.word	0xdeadbeef
	.endr
.endm

	.align	6
	.globl	__kuser_helper_start
__kuser_helper_start:

__kuser_helper_version:				/* @ 0x1000 */
	.word	((__kuser_helper_end - __kuser_helper_start) >> 6)

__kuser_cmpxchg:				/* @ 0x1004 */
	/*
	 * r4 pointer to exchange variable
	 * r5 old value
	 * r6 new value
	 */
cmpxchg_ldw:
	ldw	r2, 0(r4)			/* load current value */
	sub	r2, r2, r5			/* compare with old value */
	bne	r2, zero, cmpxchg_ret

	/* We had a match, store the new value */
cmpxchg_stw:
	stw	r6, 0(r4)
cmpxchg_ret:
	ret
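	/*
	 * A minimal user-space usage sketch, assuming the helper page is
	 * mapped at the fixed address suggested by the "@ 0x1004" note
	 * above (the typedef and address below are illustrative, not
	 * defined anywhere in this tree):
	 *
	 *	typedef int (*kuser_cmpxchg_t)(int *ptr, int old, int new);
	 *	#define KUSER_CMPXCHG ((kuser_cmpxchg_t)0x1004)
	 *
	 *	// returns 0 iff *ptr held 'old' and 'new' was stored
	 *	while (KUSER_CMPXCHG(&lock, 0, 1) != 0)
	 *		;	// spin until the lock is acquired
	 */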

	kuser_pad __kuser_cmpxchg, 64

	.globl	__kuser_sigtramp
__kuser_sigtramp:
	movi	r2, __NR_rt_sigreturn
	trap
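	/*
	 * Signal-return trampoline: when delivering a signal the kernel
	 * points the handler's return address at this fixed location, so
	 * that returning from the handler issues rt_sigreturn via trap.
	 */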

	kuser_pad __kuser_sigtramp, 64

	.globl	__kuser_helper_end
__kuser_helper_end: