scall32-o32.S (5216B)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995-99, 2000-02, 06 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>

	.align	5
NESTED(handle_sys, PT_SIZE, sp)
	.set	noat
	SAVE_SOME
	TRACE_IRQS_ON_RELOAD
	STI
	.set	at

	lw	t1, PT_EPC(sp)		# skip syscall on return

	addiu	t1, 4			# skip to next instruction
	sw	t1, PT_EPC(sp)

	sw	a3, PT_R26(sp)		# save a3 for syscall restarting

	/*
	 * More than four arguments.  Try to deal with it by copying the
	 * stack arguments from the user stack to the kernel stack.
	 * This Sucks (TM).
	 */
	lw	t0, PT_R29(sp)		# get old user stack pointer

	/*
	 * We intentionally keep the kernel stack a little below the top of
	 * userspace so we don't have to do a slower byte-accurate check here.
	 */
	addu	t4, t0, 32
	bltz	t4, bad_stack		# -> sp is bad

	/*
	 * Ok, copy the args from the user stack to the kernel stack.
	 */

	.set	push
	.set	noreorder
	.set	nomacro

load_a4: user_lw(t5, 16(t0))		# argument #5 from usp
load_a5: user_lw(t6, 20(t0))		# argument #6 from usp
load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
loads_done:

	sw	t5, 16(sp)		# argument #5 to ksp
	sw	t6, 20(sp)		# argument #6 to ksp
	sw	t7, 24(sp)		# argument #7 to ksp
	sw	t8, 28(sp)		# argument #8 to ksp
	.set	pop

	.section __ex_table,"a"
	PTR_WD	load_a4, bad_stack_a4
	PTR_WD	load_a5, bad_stack_a5
	PTR_WD	load_a6, bad_stack_a6
	PTR_WD	load_a7, bad_stack_a7
	.previous

	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
	li	t1, _TIF_WORK_SYSCALL_ENTRY
	and	t0, t1
	bnez	t0, syscall_trace_entry	# -> yes
syscall_common:
	subu	v0, v0, __NR_O32_Linux	# check syscall number
	sltiu	t0, v0, __NR_O32_Linux_syscalls
	beqz	t0, illegal_syscall

	sll	t0, v0, 2
	la	t1, sys_call_table
	addu	t1, t0
	lw	t2, (t1)		# syscall routine

	beqz	t2, illegal_syscall

	jalr	t2			# Do The Real Thing (TM)

	li	t0, -EMAXERRNO - 1	# error?
	sltu	t0, t0, v0
	sw	t0, PT_R7(sp)		# set error flag
	beqz	t0, 1f

	lw	t1, PT_R2(sp)		# syscall number
	negu	v0			# error
	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

o32_syscall_exit:
	j	syscall_exit_partial

/* ------------------------------------------------------------------------ */

syscall_trace_entry:
	SAVE_STATIC
	move	a0, sp

	/*
	 * syscall number is in v0 unless we called syscall(__NR_###)
	 * where the real syscall number is in a0
	 */
	move	a1, v0
	subu	t2, v0, __NR_O32_Linux
	bnez	t2, 1f			/* __NR_syscall at offset 0 */
	lw	a1, PT_R4(sp)

1:	jal	syscall_trace_enter

	bltz	v0, 1f			# seccomp failed? Skip syscall

	RESTORE_STATIC
	lw	v0, PT_R2(sp)		# Restore syscall (maybe modified)
	lw	a0, PT_R4(sp)		# Restore argument registers
	lw	a1, PT_R5(sp)
	lw	a2, PT_R6(sp)
	lw	a3, PT_R7(sp)
	j	syscall_common

1:	j	syscall_exit

/* ------------------------------------------------------------------------ */

	/*
	 * Our open-coded access area sanity test for the stack pointer
	 * failed. We should probably handle this case a bit more drastically.
	 */
bad_stack:
	li	v0, EFAULT
	sw	v0, PT_R2(sp)
	li	t0, 1			# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit

bad_stack_a4:
	li	t5, 0
	b	load_a5

bad_stack_a5:
	li	t6, 0
	b	load_a6

bad_stack_a6:
	li	t7, 0
	b	load_a7

bad_stack_a7:
	li	t8, 0
	b	loads_done

	/*
	 * The system call does not exist in this kernel
	 */
illegal_syscall:
	li	v0, ENOSYS		# error
	sw	v0, PT_R2(sp)
	li	t0, 1			# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit
	END(handle_sys)

	LEAF(sys_syscall)
	subu	t0, a0, __NR_O32_Linux	# check syscall number
	sltiu	v0, t0, __NR_O32_Linux_syscalls
	beqz	t0, einval		# do not recurse
	sll	t1, t0, 2
	beqz	v0, einval
	lw	t2, sys_call_table(t1)	# syscall routine

	move	a0, a1			# shift argument registers
	move	a1, a2
	move	a2, a3
	lw	a3, 16(sp)
	lw	t4, 20(sp)
	lw	t5, 24(sp)
	lw	t6, 28(sp)
	sw	t4, 16(sp)
	sw	t5, 20(sp)
	sw	t6, 24(sp)
	jr	t2
	/* Unreached */

einval:	li	v0, -ENOSYS
	jr	ra
	END(sys_syscall)

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * For FPU affinity scheduling on MIPS MT processors, we need to
	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
	 * in kernel/sched/core.c.  Since this is considered only temporary,
	 * we only support these hooks for the 32-bit kernel - there is no
	 * MIPS64 MT processor at the moment.
	 */
#define sys_sched_setaffinity	mipsmt_sys_sched_setaffinity
#define sys_sched_getaffinity	mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */

#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, entry)	PTR_WD entry
	.align	2
	.type	sys_call_table, @object
EXPORT(sys_call_table)
#include <asm/syscall_table_o32.h>
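
For context, a minimal userspace sketch (not part of the file above) of the o32 convention that handle_sys services: arguments 1-4 travel in a0-a3, arguments 5 and up sit at 16(sp) and onward on the caller's stack (which is where load_a4..load_a7 read them from), and the syscall number goes in v0. The syscall number __NR_foo and the argument values below are hypothetical placeholders, not symbols from this file.

	addiu	sp, sp, -32		# argument area: 4 register slots + 2 stack args
	li	a0, 1			# arguments 1-4 in a0-a3
	li	a1, 2
	li	a2, 3
	li	a3, 4
	li	t0, 5
	sw	t0, 16(sp)		# argument #5 (read by load_a4)
	li	t0, 6
	sw	t0, 20(sp)		# argument #6 (read by load_a5)
	li	v0, __NR_foo		# hypothetical syscall number
	syscall
	# On return a3 is the error flag and v0 the result or positive errno,
	# mirroring the PT_R7/PT_R2 stores in handle_sys.
	addiu	sp, sp, 32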