cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

entry.S (28973B)


/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002	NEC Corporation
 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

#ifdef DEBUG
/* Create space for syscall counting. */
.section .data
.global syscall_debug_table
.align 4
syscall_debug_table:
	.space	(__NR_syscalls * 4)
#endif /* DEBUG */

#define C_ENTRY(name)	.globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in the flags reg.
 * This is mucky, but necessary on MicroBlaze versions that
 * allow msr ops to write to BIP.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	msrclr	r0, MSR_VMS
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	msrset	r0, MSR_VMS
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	.endm
#endif
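
/* Illustration (not part of the build): with the MSR instructions
 * available, the macros above are single atomic ops, e.g. disable_irq is
 *	msrclr	r0, MSR_IE
 * while the fallback variant is a non-atomic read-modify-write through
 * r11:
 *	mfs	r11, rmsr
 *	andi	r11, r11, ~MSR_IE
 *	mts	rmsr, r11
 * This is why callers such as unaligned_data_trap below must preserve
 * r11 around these macros when the MSR instructions are not available.
 */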

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save */
#define VM_OFF			\
	clear_vms_ums;		\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:

#define SAVE_REGS \
	swi	r2, r1, PT_R2;	/* Save SDA */			\
	swi	r3, r1, PT_R3;					\
	swi	r4, r1, PT_R4;					\
	swi	r5, r1, PT_R5;					\
	swi	r6, r1, PT_R6;					\
	swi	r7, r1, PT_R7;					\
	swi	r8, r1, PT_R8;					\
	swi	r9, r1, PT_R9;					\
	swi	r10, r1, PT_R10;					\
	swi	r11, r1, PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PT_R12;					\
	swi	r13, r1, PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PT_R15;	/* Save LP */			\
	swi	r16, r1, PT_R16;					\
	swi	r17, r1, PT_R17;					\
	swi	r18, r1, PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PT_R19;					\
	swi	r20, r1, PT_R20;					\
	swi	r21, r1, PT_R21;					\
	swi	r22, r1, PT_R22;					\
	swi	r23, r1, PT_R23;					\
	swi	r24, r1, PT_R24;					\
	swi	r25, r1, PT_R25;					\
	swi	r26, r1, PT_R26;					\
	swi	r27, r1, PT_R27;					\
	swi	r28, r1, PT_R28;					\
	swi	r29, r1, PT_R29;					\
	swi	r30, r1, PT_R30;					\
	swi	r31, r1, PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	swi	r11, r1, PT_MSR;

#define RESTORE_REGS_GP \
	lwi	r2, r1, PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PT_R3;					\
	lwi	r4, r1, PT_R4;					\
	lwi	r5, r1, PT_R5;					\
	lwi	r6, r1, PT_R6;					\
	lwi	r7, r1, PT_R7;					\
	lwi	r8, r1, PT_R8;					\
	lwi	r9, r1, PT_R9;					\
	lwi	r10, r1, PT_R10;					\
	lwi	r11, r1, PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PT_R12;					\
	lwi	r13, r1, PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PT_R15;	/* restore LP */		\
	lwi	r16, r1, PT_R16;					\
	lwi	r17, r1, PT_R17;					\
	lwi	r18, r1, PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PT_R19;					\
	lwi	r20, r1, PT_R20;					\
	lwi	r21, r1, PT_R21;					\
	lwi	r22, r1, PT_R22;					\
	lwi	r23, r1, PT_R23;					\
	lwi	r24, r1, PT_R24;					\
	lwi	r25, r1, PT_R25;					\
	lwi	r26, r1, PT_R26;					\
	lwi	r27, r1, PT_R27;					\
	lwi	r28, r1, PT_R28;					\
	lwi	r29, r1, PT_R29;					\
	lwi	r30, r1, PT_R30;					\
	lwi	r31, r1, PT_R31;	/* Restore cur task reg */

#define RESTORE_REGS \
	lwi	r11, r1, PT_MSR;					\
	mts	rmsr, r11;						\
	RESTORE_REGS_GP

#define RESTORE_REGS_RTBD \
	lwi	r11, r1, PT_MSR;					\
	andni	r11, r11, MSR_EIP;          /* clear EIP */             \
	ori	r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */        \
	mts	rmsr, r11;						\
	RESTORE_REGS_GP

#define SAVE_STATE	\
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode. */				\
	mfs	r1, rmsr;						\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;							\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	/* FIXME: I can add these two lines to one */			\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */				\
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PT_MODE;					\
1:	/* User-mode state save.  */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	/* MS: these three instructions could be combined into one */	\
	/* addik	r1, r1, THREAD_SIZE; */				\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */				\
	addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PT_R1; /* Store user SP.  */			\
	swi	r0, r1, PT_MODE; /* Was in user-mode.  */		\
	/* MS: I am clearing UMS even in the case when I come from kernel space */ \
	clear_ums;							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
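
/* A worked reading of the folded addik above, per the commented-out
 * steps it replaces: tophys() adds CONFIG_KERNEL_BASE_ADDR -
 * CONFIG_KERNEL_START, so combining it with the -PT_SIZE frame
 * allocation yields the single constant
 * CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE. */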

.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so the BIP bit is set and interrupts
 * are masked. This is nice: it means we don't have to CLI before the state save.
 */
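/* A sketch of the user side of this protocol (illustrative only;
 * __NR_getpid stands in for any valid syscall number):
 *
 *	addik	r12, r0, __NR_getpid	; syscall number in r12
 *	brki	r14, 0x08		; trap to _user_exception (vector 0x8)
 *	; on return, the result is in r3
 *
 * brki records its own address in r14; the handler adds 4 below so the
 * final rtbd resumes at the instruction after the trap.
 */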
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 bytes after call */

	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct (8k THREAD_SIZE) */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack.  */
	SAVE_REGS
	swi	r0, r1, PT_R3
	swi	r0, r1, PT_R4

	swi	r0, r1, PT_MODE;			/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;		/* Store user SP.  */
	clear_ums;
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number.  */
	swi	r12, r1, PT_R0;
	tovirt(r1,r1)

/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */

	/* Step into virtual mode */
	rtbd	r0, 3f
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PT_R5;
	lwi	r6, r1, PT_R6;
	lwi	r7, r1, PT_R7;
	lwi	r8, r1, PT_R8;
	lwi	r9, r1, PT_R9;
	lwi	r10, r1, PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	blti	r12, 5f
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call.  */
	/* Note: the MicroBlaze barrel shifter is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;
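	/* The two self-adds above compute r12 <<= 2, i.e. they scale the
	 * syscall number into a byte offset into the 32-bit-entry
	 * sys_call_table without using the optional barrel shifter. */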
	addi	r30, r0, 1			/* restarts allowed */

#ifdef DEBUG
	/* Trace syscalls and store counts in syscall_debug_table */
	/* The first table entry holds the total number of syscalls */
	lwi	r3, r0, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r0, syscall_debug_table
	lwi	r3, r12, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r12, syscall_debug_table
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error.  */
5:
	braid	ret_from_trap
	addi	r3, r0, -ENOSYS;

/* Entry point used to return from a syscall/trap */
/* We re-enable the BIP bit before the state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4

	lwi	r11, r1, PT_MODE;
/* See if returning to kernel mode; if so, skip resched &c.  */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r19, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	bri	1b

	/* Maybe handle a signal */
5:
	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;		/* no signals pending, return to user */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	add	r6, r30, r0;		/* Arg 2: int in_syscall */
	add	r30, r0, r0		/* no more restarts */
	bri	1b

/* Finally, return to user state.  */
4:	set_bip;			/*  Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	set_bip;			/*  Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from a trap */
	nop;


/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context).  */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */

C_ENTRY(ret_from_kernel_thread):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brald	r15, r20	/* fn was left in r20 */
	addk	r5, r0, r19	/* ... and argument - in r19 */
	brid	ret_from_trap
	add	r3, r0, r0

C_ENTRY(sys_rt_sigreturn_wrapper):
	addik	r30, r0, 0		/* no restarts */
	brid	sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, 0;		/* add user context as 1st arg */

/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * to find where it is */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* FIXME: this could be stored directly in the PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr
	mfs	r7, rfsr;		/* save FSR */
	mts	rfsr, r0;	/* Clear sticky fsr */
	rted	r0, full_exception
	addik	r5, r1, 0		 /* parameter struct pt_regs * regs */

/*
 * Unaligned data trap.
 *
 * Unaligned data accesses, including those at the end of a 4k page, are
 * handled here.
 *
 * Trap entered via exception, so the EE bit is set, and interrupts
 * are masked.  This is nice: it means we don't have to CLI before the state save.
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save the r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as a temp register if the MSR
	 * instructions are not used. We don't need to do this if the MSR
	 * instructions are used, since they use r0 instead of r11.
	 * I am using ENTRY_SP, which should primarily be used only for
	 * stack pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;        /* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	mfs	r4, rear		/* EAR */
	rtbd	r0, _unaligned_data_exception
	addik	r7, r1, 0		/* parameter struct pt_regs * regs */

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a situation.
 *
 * Trap entered via exception, so the EE bit is set, and interrupts
 * are masked.  This is nice: it means we don't have to CLI before the state save.
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
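/* Per the calling sequences below, those three arguments are passed in
 * r5 (regs), r6 (address, taken from rear) and r7 (error_code, taken
 * from resr, or 0 for instruction faults). */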
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	mfs	r7, resr		/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0		/* parameter struct pt_regs * regs */

C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0		/* parameter struct pt_regs * regs */

/* Entry point used to return from an exception.  */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
1:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r19, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	bri	1b

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;		/* no signals pending, return to user */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_notify_resume returns. */
	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;		/* Arg 2: int in_syscall */
	bri	1b

/* Finally, return to user state.  */
4:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an exception */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual. */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 the physical address where the stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	brid	2f;
	swi	r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */
1:
/* User-mode state save. */
 /* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;
	clear_ums;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r15, r0, irq_call;
irq_call:rtbd	r0, do_IRQ;
	addik	r5, r1, 0;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;

1:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r19, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */
	bri	1b

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addik	r5, r1, 0; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0; /* Arg 2: int in_syscall */
	bri	1b

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, PT_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPTION
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * Debug trap for KGDB. Enter _debug_exception via brki r16, 0x18
 * and call the handling function with saved pt_regs.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
/* MS: Kernel-mode state save - kgdb */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr */

	/* BIP bit is set on entry, no interrupts can occur */
	addik   r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
	SAVE_REGS;
	/* save all regs to pt_reg structure */
	swi	r0, r1, PT_R0;	/* R0 must be saved too */
	swi	r14, r1, PT_R14	/* rewrite saved R14 value */
	swi	r16, r1, PT_PC; /* PC and r16 are the same */
	/* save special purpose registers to pt_regs */
	mfs	r11, rear;
	swi	r11, r1, PT_EAR;
	mfs	r11, resr;
	swi	r11, r1, PT_ESR;
	mfs	r11, rfsr;
	swi	r11, r1, PT_FSR;

	/* the stack pointer is a physical address and has been decreased
	 * by PT_SIZE, but we need the correct R1 value here */
	addik   r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
	swi	r11, r1, PT_R1
	/* MS: r31 - current pointer isn't changed */
	tovirt(r1,r1)
#ifdef CONFIG_KGDB
	addi	r5, r1, 0 /* pass pt_reg address as the first arg */
	addik	r15, r0, dbtrap_call; /* return address */
	rtbd	r0, microblaze_kgdb_break
	nop;
#endif
	/* MS: Place handler for brki from kernel space if KGDB is OFF.
	 * It is very unlikely that another brki instruction is called. */
	bri 0

/* MS: User-mode state save - gdb */
1:	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack.  */
	SAVE_REGS;
	swi	r16, r1, PT_PC;	/* Save LP */
	swi	r0, r1, PT_MODE; /* Was in user-mode.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1; /* Store user SP.  */
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	set_vms;
	addik	r5, r1, 0;
	addik	r15, r0, dbtrap_call;
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
	rtbd	r0, sw_exception
	nop

	/* MS: The first instruction for the second part of the gdb/kgdb */
	set_bip; /* Ints masked for state restore */
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;
/* MS: Return to user space - gdb */
1:
	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r19, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	bri	1b

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;		/* no signals pending, return to user */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;	/* Arg 2: int in_syscall */
	bri	1b

/* Finally, return to user state.  */
4:	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS_RTBD
	addik	r1, r1, PT_SIZE	 /* Clean up stack space */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;

/* MS: Return to kernel state - kgdb */
2:	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS_RTBD
	lwi	r14, r1, PT_R14;
	lwi	r16, r1, PT_PC;
	addik	r1, r1, PT_SIZE; /* MS: Clean up stack space */
	tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;


ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp registers */
	/* point r11 at the start of the previous process' cpu_context */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31 (current) with the pointer to the task which will run next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get the new process' cpu context and restore it */
	/* point r11 at the start of the next task's context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12

	rtsd	r15, 8
	nop

ENTRY(_reset)
	VM_OFF
	brai	0; /* Jump to reset vector */

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
#if CONFIG_MANUAL_RESET_VECTOR
	.org	0x0
	brai	CONFIG_MANUAL_RESET_VECTOR
#endif
	.org	0x8
	brai	TOPHYS(_user_exception); /* syscall handler */
	.org	0x10
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	.org	0x18
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
	.org	0x20
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for the stack unwinder.
	 * Tuples are (start addr, end addr, string).
	 * If the return address lies in [start addr, end addr],
	 * the unwinder displays 'string'.
	 */
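	/* Example: a return address equal to ret_from_trap matches the
	 * first (exact) tuple below and is reported as "SYSCALL"; one
	 * strictly between ret_from_irq and no_intr_resched is caught by
	 * the fuzzy "IRQ (PREEMPTED)" entry. */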

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap   ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq    ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return     ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0               ; .word 0               ; .word 0