cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

entry.S (14538B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
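	/*
	 * Overflow check (a hedged reading of the trick below): with
	 * CONFIG_VMAP_STACK, kernel stacks are THREAD_SIZE bytes but
	 * aligned to 2 * THREAD_SIZE (THREAD_ALIGN), so for any in-bounds
	 * sp bit THREAD_SHIFT of (sp - PT_SIZE_ON_STACK) is clear, and it
	 * becomes set once sp has underflowed into the guard area below
	 * the stack.
	 */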
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
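	/*
	 * Note: csrrc below clears SR_SUM and SR_FS while returning the
	 * pre-trap status in a single atomic read-modify-write, so the
	 * value saved to PT_STATUS still reflects the interrupted context.
	 */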
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If the previous state was user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * The MSB of the cause register differentiates between
	 * interrupts and exceptions.
	 */
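	/*
	 * Worked example (RV64): scause 0x8000000000000005 (supervisor
	 * timer interrupt) has the MSB set, so s4 is negative and we fall
	 * through to the interrupt path; scause 0x8 (ecall from U-mode)
	 * is non-negative, so bge branches to the exception path at 1f.
	 */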
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	la a1, generic_handle_arch_irq
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	/* kprobes, entered via ebreak, must have interrupts disabled. */
	li t0, EXC_BREAKPOINT
	beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
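	/*
	 * The dispatch below is roughly equivalent to the following C
	 * (a sketch for readability; excp_vect_table is an array of
	 * handler function pointers indexed by the exception cause):
	 *
	 *	if (cause < ARRAY_SIZE(excp_vect_table))
	 *		excp_vect_table[cause](regs);
	 *	else
	 *		do_trap_unknown(regs);
	 */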
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if the exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-Mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force-enable interrupts here because
	 * handle_exception did not set SR_IE, as it always sees SR_PIE
	 * being cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* Save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret.
	 */
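	/*
	 * ecall has no compressed encoding, so the trapping instruction
	 * is always 4 bytes and a fixed increment is safe here.
	 */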
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * The syscall number is held in a7.
	 * If it is above the allowed range, redirect to sys_ni_syscall.
	 */
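	/*
	 * A rough C-level sketch of the dispatch below (hedged; argument
	 * passing actually stays in registers a0-a5):
	 *
	 *	table = sys_call_table;
	 *	if (IS_ENABLED(CONFIG_COMPAT) &&
	 *	    ((status & SR_UXL) >> SR_UXL_SHIFT) ==
	 *	    (SR_UXL_32 >> SR_UXL_SHIFT))
	 *		table = compat_sys_call_table; // 32-bit user task
	 *	ret = (a7 < __NR_syscalls ? table[a7] : sys_ni_syscall)();
	 */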
	bgeu a7, t0, 3f
#ifdef CONFIG_COMPAT
	REG_L s0, PT_STATUS(sp)
	srli s0, s0, SR_UXL_SHIFT
	andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
	li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
	sub t0, s0, t0
	bnez t0, 1f

	/* Call compat_syscall */
	la s0, compat_sys_call_table
	j 2f
1:
#endif
	/* Call syscall */
	la s0, sys_call_table
2:
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
3:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We did not execute the actual syscall.
	 * Seccomp already set the return value for the current task's
	 * pt_regs (if it was configured with SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
#ifdef CONFIG_DEBUG_RSEQ
	move a0, sp
	call rseq_syscall
#endif
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* The MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
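	/*
	 * s0 now holds the previous privilege bit(s): nonzero means the
	 * trap came from kernel mode, so the userspace-return work is
	 * skipped.
	 */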
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call __trace_hardirqs_on
	j 2f
1:
	call __trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
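	/*
	 * The SC below stores a2 back over the PT_EPC slot it was just
	 * loaded from, discarding the success flag (rd = x0): whether or
	 * not the store succeeds, the saved epc is unchanged and any
	 * outstanding reservation is cleared.
	 */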
	REG_L  a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x8,  PT_S0(sp)
	REG_L x9,  PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2,  PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
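	/*
	 * Kernel preemption: only reschedule if preempt_count is zero and
	 * TIF_NEED_RESCHED is set; otherwise return straight to the
	 * interrupted kernel context via restore_all.
	 */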
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

#ifdef CONFIG_VMAP_STACK
handle_kernel_stack_overflow:
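	/*
	 * Two-phase recovery, roughly: sp cannot be trusted at all here,
	 * so first switch to a global shadow stack and spill the
	 * caller-saved registers, then call get_overflow_stack to locate
	 * this CPU's overflow stack, and finally re-save the full context
	 * there before tailing into handle_bad_stack.
	 */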
	la sp, shadow_stack
	addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE

	/* Save caller-saved registers to the shadow stack */
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1,  PT_RA(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	la ra, restore_caller_reg
	tail get_overflow_stack

restore_caller_reg:
	/* Stash the per-cpu overflow stack pointer returned in a0 */
	REG_S a0, -8(sp)
	/* Restore caller-saved registers from the shadow stack */
	REG_L x1,  PT_RA(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	/* Switch to the per-cpu overflow stack */
	REG_L sp, -8(sp)
	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Save the full context to the overflow stack */
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
#endif

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
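	/*
	 * For new kernel threads, copy_thread places the thread function
	 * in s0 and its argument in s1; schedule_tail must run first to
	 * finish the context switch from the previous task.
	 */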
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
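/*
 * C-level view (a sketch; the declaration lives in asm/switch_to.h):
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct task_struct *next);
 *
 * Only callee-saved integer state is switched here; caller-saved
 * registers were already spilled by the C code that called us, and FP
 * state is handled separately by the switch_to() wrapper.
 */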
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li    a4,  TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif