signal.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/uprobes.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/resume_user_mode.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/inst.h>
#include <asm/msa.h>

#include "signal-common.h"

static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);

struct sigframe {
        u32 sf_ass[4];          /* argument save space for o32 */
        u32 sf_pad[2];          /* Was: signal trampoline */

        /* Matches struct ucontext from its uc_mcontext field onwards */
        struct sigcontext sf_sc;
        sigset_t sf_mask;
        unsigned long long sf_extcontext[];
};

struct rt_sigframe {
        u32 rs_ass[4];          /* argument save space for o32 */
        u32 rs_pad[2];          /* Was: signal trampoline */
        struct siginfo rs_info;
        struct ucontext rs_uc;
};

#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
        int i;
        int err = 0;
        int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;

        for (i = 0; i < NUM_FPU_REGS; i += inc) {
                err |=
                    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
                               &fpregs[i]);
        }
        err |= __put_user(current->thread.fpu.fcr31, csr);

        return err;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
        int i;
        int err = 0;
        int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
        u64 fpr_val;

        for (i = 0; i < NUM_FPU_REGS; i += inc) {
                err |= __get_user(fpr_val, &fpregs[i]);
                set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
        }
        err |= __get_user(current->thread.fpu.fcr31, csr);

        return err;
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int copy_fp_to_sigcontext(void __user *sc)
{
        return 0;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
        return 0;
}

#endif /* !CONFIG_MIPS_FP_SUPPORT */
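
/*
 * Note on the copy loops above: when TIF_32BIT_FPREGS is set the task uses
 * the FR=0 register model, in which each even/odd pair of 32-bit FP
 * registers is stored together as a single 64-bit value in the even-indexed
 * fpr[] entry, so the loops step by two rather than one.
 */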

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fp_context(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

        return _save_fp_context(fpregs, csr);
}

static int restore_hw_fp_context(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

        return _restore_fp_context(fpregs, csr);
}

/*
 * Extended context handling.
 */

static inline void __user *sc_to_extcontext(void __user *sc)
{
        struct ucontext __user *uc;

        /*
         * We can just pretend the sigcontext is always embedded in a struct
         * ucontext here, because the offset from sigcontext to extended
         * context is the same in the struct sigframe case.
         */
        uc = container_of(sc, struct ucontext, uc_mcontext);
        return &uc->uc_extcontext;
}

#ifdef CONFIG_CPU_HAS_MSA

static int save_msa_extcontext(void __user *buf)
{
        struct msa_extcontext __user *msa = buf;
        uint64_t val;
        int i, err;

        if (!thread_msa_context_live())
                return 0;

        /*
         * Ensure that we can't lose the live MSA context between checking
         * for it & writing it to memory.
         */
        preempt_disable();

        if (is_msa_enabled()) {
                /*
                 * There are no EVA versions of the vector register load/store
                 * instructions, so MSA context has to be saved to kernel
                 * memory and then copied to user memory. The save to kernel
                 * memory should already have been done when handling scalar
                 * FP context.
                 */
                BUG_ON(IS_ENABLED(CONFIG_EVA));

                err = __put_user(read_msa_csr(), &msa->csr);
                err |= _save_msa_all_upper(&msa->wr);

                preempt_enable();
        } else {
                preempt_enable();

                err = __put_user(current->thread.fpu.msacsr, &msa->csr);

                for (i = 0; i < NUM_FPU_REGS; i++) {
                        val = get_fpr64(&current->thread.fpu.fpr[i], 1);
                        err |= __put_user(val, &msa->wr[i]);
                }
        }

        err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
        err |= __put_user(sizeof(*msa), &msa->ext.size);

        return err ? -EFAULT : sizeof(*msa);
}
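
/*
 * Only the upper 64 bits of each vector register are handled here: the MSA
 * registers are 128 bits wide and their lower halves alias the scalar FP
 * registers, which are already saved and restored with the base sigcontext.
 * Hence get_fpr64()/set_fpr64() with index 1 select the upper word.
 */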

static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
        struct msa_extcontext __user *msa = buf;
        unsigned long long val;
        unsigned int csr;
        int i, err;

        if (size != sizeof(*msa))
                return -EINVAL;

        err = get_user(csr, &msa->csr);
        if (err)
                return err;

        preempt_disable();

        if (is_msa_enabled()) {
                /*
                 * There are no EVA versions of the vector register load/store
                 * instructions, so MSA context has to be copied to kernel
                 * memory and later loaded to registers. The same is true of
                 * scalar FP context, so FPU & MSA should have already been
                 * disabled whilst handling scalar FP context.
                 */
                BUG_ON(IS_ENABLED(CONFIG_EVA));

                write_msa_csr(csr);
                err |= _restore_msa_all_upper(&msa->wr);
                preempt_enable();
        } else {
                preempt_enable();

                current->thread.fpu.msacsr = csr;

                for (i = 0; i < NUM_FPU_REGS; i++) {
                        err |= __get_user(val, &msa->wr[i]);
                        set_fpr64(&current->thread.fpu.fpr[i], 1, val);
                }
        }

        return err;
}

#else /* !CONFIG_CPU_HAS_MSA */

static int save_msa_extcontext(void __user *buf)
{
        return 0;
}

static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
        return SIGSYS;
}

#endif /* !CONFIG_CPU_HAS_MSA */

static int save_extcontext(void __user *buf)
{
        int sz;

        sz = save_msa_extcontext(buf);
        if (sz < 0)
                return sz;
        buf += sz;

        /* If no context was saved then trivially return */
        if (!sz)
                return 0;

        /* Write the end marker */
        if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
                return -EFAULT;

        sz += sizeof(((struct extcontext *)NULL)->magic);
        return sz;
}

static int restore_extcontext(void __user *buf)
{
        struct extcontext ext;
        int err;

        while (1) {
                err = __get_user(ext.magic, (unsigned int *)buf);
                if (err)
                        return err;

                if (ext.magic == END_EXTCONTEXT_MAGIC)
                        return 0;

                err = __get_user(ext.size, (unsigned int *)(buf
                        + offsetof(struct extcontext, size)));
                if (err)
                        return err;

                switch (ext.magic) {
                case MSA_EXTCONTEXT_MAGIC:
                        err = restore_msa_extcontext(buf, ext.size);
                        break;

                default:
                        err = -EINVAL;
                        break;
                }

                if (err)
                        return err;

                buf += ext.size;
        }
}
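
/*
 * A minimal userland sketch (not built here, hence #if 0) of how a signal
 * handler could walk the records that save_extcontext() appends after the
 * ucontext, mirroring the restore_extcontext() loop above. It assumes the
 * uapi <asm/ucontext.h> definitions of struct extcontext and the
 * *_EXTCONTEXT_MAGIC values, relies only on the {magic, size} header so
 * unknown record types can be skipped by size, and is only valid when the
 * sigcontext's used_math word has USED_EXTCONTEXT set (the end marker is
 * written only if at least one record was saved).
 */
#if 0
static void *find_extcontext(struct ucontext *uc, unsigned int magic)
{
        struct extcontext *ext = (struct extcontext *)uc->uc_extcontext;

        while (ext->magic != END_EXTCONTEXT_MAGIC) {
                if (ext->magic == magic)
                        return ext;     /* e.g. MSA_EXTCONTEXT_MAGIC */
                ext = (struct extcontext *)((char *)ext + ext->size);
        }
        return NULL;    /* no such record, e.g. no live MSA context */
}
#endif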

/*
 * Helper routines
 */
int protected_save_fp_context(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
        uint32_t __user *used_math = sc + abi->off_sc_used_math;
        unsigned int used, ext_sz;
        int err;

        used = used_math() ? USED_FP : 0;
        if (!used)
                goto fp_done;

        if (!test_thread_flag(TIF_32BIT_FPREGS))
                used |= USED_FR1;
        if (test_thread_flag(TIF_HYBRID_FPREGS))
                used |= USED_HYBRID_FPRS;

        /*
         * EVA does not have userland equivalents of ldc1 or sdc1, so
         * save to the kernel FP context & copy that to userland below.
         */
        if (IS_ENABLED(CONFIG_EVA))
                lose_fpu(1);

        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = save_fp_context(sc);
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_to_sigcontext(sc);
                }
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
                err = __put_user(0, &fpregs[0]) |
                        __put_user(0, &fpregs[31]) |
                        __put_user(0, csr);
                if (err)
                        return err;     /* really bad sigcontext */
        }

fp_done:
        ext_sz = err = save_extcontext(sc_to_extcontext(sc));
        if (err < 0)
                return err;
        used |= ext_sz ? USED_EXTCONTEXT : 0;

        return __put_user(used, used_math);
}

int protected_restore_fp_context(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
        uint32_t __user *used_math = sc + abi->off_sc_used_math;
        unsigned int used;
        int err, sig = 0, tmp __maybe_unused;

        err = __get_user(used, used_math);
        conditional_used_math(used & USED_FP);

        /*
         * The signal handler may have used FPU; give it up if the program
         * doesn't want it following sigreturn.
         */
        if (err || !(used & USED_FP))
                lose_fpu(0);
        if (err)
                return err;
        if (!(used & USED_FP))
                goto fp_done;

        err = sig = fpcsr_pending(csr);
        if (err < 0)
                return err;

        /*
         * EVA does not have userland equivalents of ldc1 or sdc1, so we
         * disable the FPU here such that the code below simply copies to
         * the kernel FP context.
         */
        if (IS_ENABLED(CONFIG_EVA))
                lose_fpu(0);

        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = restore_fp_context(sc);
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_from_sigcontext(sc);
                }
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
                err = __get_user(tmp, &fpregs[0]) |
                        __get_user(tmp, &fpregs[31]) |
                        __get_user(tmp, csr);
                if (err)
                        break;  /* really bad sigcontext */
        }

fp_done:
        if (!err && (used & USED_EXTCONTEXT))
                err = restore_extcontext(sc_to_extcontext(sc));

        return err ?: sig;
}
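
/*
 * The touch-and-retry loops above exist because save_fp_context() and
 * restore_fp_context() run with the FPU owner lock held, where a page
 * fault on the user stack cannot be serviced. On failure the first and
 * last FP register slots and the CSR word are touched with ordinary
 * __put_user()/__get_user() calls, which may fault and page the frame in,
 * before the operation is retried.
 */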

int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
        int err = 0;
        int i;

        err |= __put_user(regs->cp0_epc, &sc->sc_pc);

        err |= __put_user(0, &sc->sc_regs[0]);
        for (i = 1; i < 32; i++)
                err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        err |= __put_user(regs->acx, &sc->sc_acx);
#endif
        err |= __put_user(regs->hi, &sc->sc_mdhi);
        err |= __put_user(regs->lo, &sc->sc_mdlo);
        if (cpu_has_dsp) {
                err |= __put_user(mfhi1(), &sc->sc_hi1);
                err |= __put_user(mflo1(), &sc->sc_lo1);
                err |= __put_user(mfhi2(), &sc->sc_hi2);
                err |= __put_user(mflo2(), &sc->sc_lo2);
                err |= __put_user(mfhi3(), &sc->sc_hi3);
                err |= __put_user(mflo3(), &sc->sc_lo3);
                err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
        }

        /*
         * Save FPU state to signal context. Signal handler
         * will "inherit" current FPU state.
         */
        err |= protected_save_fp_context(sc);

        return err;
}

static size_t extcontext_max_size(void)
{
        size_t sz = 0;

        /*
         * The assumption here is that between this point & the point at which
         * the extended context is saved the size of the context should only
         * ever be able to shrink (if the task is preempted), but never grow.
         * That is, what this function returns is an upper bound on the size of
         * the extended context for the current task at the current time.
         */

        if (thread_msa_context_live())
                sz += sizeof(struct msa_extcontext);

        /* If any context is saved then we'll append the end marker */
        if (sz)
                sz += sizeof(((struct extcontext *)NULL)->magic);

        return sz;
}

int fpcsr_pending(unsigned int __user *fpcsr)
{
        int err, sig = 0;
        unsigned int csr, enabled;

        err = __get_user(csr, fpcsr);
        enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
        /*
         * If the signal handler set some FPU exceptions, clear them and
         * send SIGFPE.
         */
        if (csr & enabled) {
                csr &= ~enabled;
                err |= __put_user(csr, fpcsr);
                sig = SIGFPE;
        }
        return err ?: sig;
}

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
        unsigned long treg;
        int err = 0;
        int i;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        err |= __get_user(regs->acx, &sc->sc_acx);
#endif
        err |= __get_user(regs->hi, &sc->sc_mdhi);
        err |= __get_user(regs->lo, &sc->sc_mdlo);
        if (cpu_has_dsp) {
                err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
                err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
                err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
                err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
                err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
                err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
                err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
        }

        for (i = 1; i < 32; i++)
                err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

        return err ?: protected_restore_fp_context(sc);
}
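
/*
 * Return convention for restore_sigcontext(): a negative value means the
 * frame itself was unreadable and the caller raises SIGSEGV; a positive
 * value is a signal to deliver (SIGFPE when fpcsr_pending() found enabled
 * FPU exception causes left set by the handler); zero is success.
 */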

#ifdef CONFIG_WAR_ICACHE_REFILLS
#define SIGMASK         ~(cpu_icache_line_size()-1)
#else
#define SIGMASK         ALMASK
#endif

void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
                          size_t frame_size)
{
        unsigned long sp;

        /* Leave space for potential extended context */
        frame_size += extcontext_max_size();

        /* Default to using normal stack */
        sp = regs->regs[29];

        /*
         * If we are on the alternate signal stack and would overflow it, don't.
         * Return an always-bogus address instead so we will die with SIGSEGV.
         */
        if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
                return (void __user __force *)(-1UL);

        /*
         * The FPU emulator may have its own trampoline active just
         * above the user stack, 16 bytes before the next lowest
         * 16-byte boundary. Try to avoid trashing it.
         */
        sp -= 32;

        sp = sigsp(sp, ksig);

        return (void __user *)((sp - frame_size) & SIGMASK);
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
        return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
        struct sigaction __user *, oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;
        int err = 0;

        if (act) {
                old_sigset_t mask;

                if (!access_ok(act, sizeof(*act)))
                        return -EFAULT;
                err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
                err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                err |= __get_user(mask, &act->sa_mask.sig[0]);
                if (err)
                        return -EFAULT;

                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(oact, sizeof(*oact)))
                        return -EFAULT;
                err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
                err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
                err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
                err |= __put_user(0, &oact->sa_mask.sig[1]);
                err |= __put_user(0, &oact->sa_mask.sig[2]);
                err |= __put_user(0, &oact->sa_mask.sig[3]);
                if (err)
                        return -EFAULT;
        }

        return ret;
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(void)
{
        struct sigframe __user *frame;
        struct pt_regs *regs;
        sigset_t blocked;
        int sig;

        regs = current_pt_regs();
        frame = (struct sigframe __user *)regs->regs[29];
        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
                goto badframe;

        set_current_blocked(&blocked);

        sig = restore_sigcontext(regs, &frame->sf_sc);
        if (sig < 0)
                goto badframe;
        else if (sig)
                force_sig(sig);

        /*
         * Don't let your children do this ...
         */
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
                : /* no outputs */
                : "r" (regs));
        /* Unreached */

badframe:
        force_sig(SIGSEGV);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(void)
{
        struct rt_sigframe __user *frame;
        struct pt_regs *regs;
        sigset_t set;
        int sig;

        regs = current_pt_regs();
        frame = (struct rt_sigframe __user *)regs->regs[29];
        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
                goto badframe;

        set_current_blocked(&set);

        sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
        if (sig < 0)
                goto badframe;
        else if (sig)
                force_sig(sig);

        if (restore_altstack(&frame->rs_uc.uc_stack))
                goto badframe;

        /*
         * Don't let your children do this ...
         */
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
                : /* no outputs */
                : "r" (regs));
        /* Unreached */

badframe:
        force_sig(SIGSEGV);
}
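
/*
 * Both sigreturn paths above end in the inline asm rather than returning:
 * $29 is pointed at the saved pt_regs and control jumps to syscall_exit,
 * so the full register set just restored from the frame, not a normal
 * syscall return value, is what reaches userland.
 */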

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
                       struct pt_regs *regs, sigset_t *set)
{
        struct sigframe __user *frame;
        int err = 0;

        frame = get_sigframe(ksig, regs, sizeof(*frame));
        if (!access_ok(frame, sizeof(*frame)))
                return -EFAULT;

        err |= setup_sigcontext(regs, &frame->sf_sc);
        err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
        if (err)
                return -EFAULT;

        /*
         * Arguments to signal handler:
         *
         *   a0 = signal number
         *   a1 = 0 (should be cause)
         *   a2 = pointer to struct sigcontext
         *
         * $25 and c0_epc point to the signal handler, $29 points to the
         * struct sigframe.
         */
        regs->regs[ 4] = ksig->sig;
        regs->regs[ 5] = 0;
        regs->regs[ 6] = (unsigned long) &frame->sf_sc;
        regs->regs[29] = (unsigned long) frame;
        regs->regs[31] = (unsigned long) sig_return;
        regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

        DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
               current->comm, current->pid,
               frame, regs->cp0_epc, regs->regs[31]);
        return 0;
}
#endif

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
                          struct pt_regs *regs, sigset_t *set)
{
        struct rt_sigframe __user *frame;

        frame = get_sigframe(ksig, regs, sizeof(*frame));
        if (!access_ok(frame, sizeof(*frame)))
                return -EFAULT;

        /* Create siginfo. */
        if (copy_siginfo_to_user(&frame->rs_info, &ksig->info))
                return -EFAULT;

        /* Create the ucontext. */
        if (__put_user(0, &frame->rs_uc.uc_flags))
                return -EFAULT;
        if (__put_user(NULL, &frame->rs_uc.uc_link))
                return -EFAULT;
        if (__save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]))
                return -EFAULT;
        if (setup_sigcontext(regs, &frame->rs_uc.uc_mcontext))
                return -EFAULT;
        if (__copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)))
                return -EFAULT;

        /*
         * Arguments to signal handler:
         *
         *   a0 = signal number
         *   a1 = 0 (should be cause)
         *   a2 = pointer to ucontext
         *
         * $25 and c0_epc point to the signal handler, $29 points to
         * the struct rt_sigframe.
         */
        regs->regs[ 4] = ksig->sig;
        regs->regs[ 5] = (unsigned long) &frame->rs_info;
        regs->regs[ 6] = (unsigned long) &frame->rs_uc;
        regs->regs[29] = (unsigned long) frame;
        regs->regs[31] = (unsigned long) sig_return;
        regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

        DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
               current->comm, current->pid,
               frame, regs->cp0_epc, regs->regs[31]);

        return 0;
}
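
/*
 * A hedged userland sketch (not built here) of the handler side of the rt
 * frame above: with SA_SIGINFO, a1 carries &frame->rs_info and a2 carries
 * &frame->rs_uc. It assumes the kernel's <asm/ucontext.h> layout rather
 * than the C library's ucontext_t.
 */
#if 0
static void handler(int sig, siginfo_t *info, void *ctx)
{
        struct ucontext *uc = ctx;

        /* PC at which the signal interrupted the thread */
        unsigned long pc = uc->uc_mcontext.sc_pc;

        (void)pc;
}
#endif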

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
        .setup_frame = setup_frame,
#endif
        .setup_rt_frame = setup_rt_frame,
        .restart = __NR_restart_syscall,

        .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
        .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
        .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),

        .vdso = &vdso_image,
};

static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int ret;
        struct mips_abi *abi = current->thread.abi;
        void *vdso = current->mm->context.vdso;

        /*
         * If we were emulating a delay slot instruction, exit that frame such
         * that addresses in the sigframe are as expected for userland and we
         * don't have a problem if we reuse the thread's frame for an
         * instruction within the signal handler.
         */
        dsemul_thread_rollback(regs);

        if (regs->regs[0]) {
                switch (regs->regs[2]) {
                case ERESTART_RESTARTBLOCK:
                case ERESTARTNOHAND:
                        regs->regs[2] = EINTR;
                        break;
                case ERESTARTSYS:
                        if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
                                regs->regs[2] = EINTR;
                                break;
                        }
                        fallthrough;
                case ERESTARTNOINTR:
                        regs->regs[7] = regs->regs[26];
                        regs->regs[2] = regs->regs[0];
                        regs->cp0_epc -= 4;
                }

                regs->regs[0] = 0;      /* Don't deal with this again. */
        }

        rseq_signal_deliver(ksig, regs);

        if (sig_uses_siginfo(&ksig->ka, abi))
                ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
                                          ksig, regs, oldset);
        else
                ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn,
                                       ksig, regs, oldset);

        signal_setup_done(ret, ksig, 0);
}

static void do_signal(struct pt_regs *regs)
{
        struct ksignal ksig;

        if (get_signal(&ksig)) {
                /* Whee! Actually deliver the signal. */
                handle_signal(&ksig, regs);
                return;
        }

        if (regs->regs[0]) {
                switch (regs->regs[2]) {
                case ERESTARTNOHAND:
                case ERESTARTSYS:
                case ERESTARTNOINTR:
                        regs->regs[2] = regs->regs[0];
                        regs->regs[7] = regs->regs[26];
                        regs->cp0_epc -= 4;
                        break;

                case ERESTART_RESTARTBLOCK:
                        regs->regs[2] = current->thread.abi->restart;
                        regs->regs[7] = regs->regs[26];
                        regs->cp0_epc -= 4;
                        break;
                }
                regs->regs[0] = 0;      /* Don't deal with this again. */
        }

        /*
         * If there's no signal to deliver, we just put the saved sigmask
         * back
         */
        restore_saved_sigmask();
}
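
/*
 * Syscall restart in the two functions above relies on the syscall entry
 * path having stashed the original syscall number in regs[0] and the
 * caller's a3 in regs[26]: a non-zero regs[0] marks an interrupted syscall,
 * and replaying it is a matter of restoring v0/a3 and winding cp0_epc back
 * by one instruction.
 */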

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        __u32 thread_info_flags)
{
        local_irq_enable();

        user_exit();

        if (thread_info_flags & _TIF_UPROBE)
                uprobe_notify_resume(regs);

        /* deal with pending signal delivery */
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                do_signal(regs);

        if (thread_info_flags & _TIF_NOTIFY_RESUME)
                resume_user_mode_work(regs);

        user_enter();
}

#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
static int smp_save_fp_context(void __user *sc)
{
        return raw_cpu_has_fpu
               ? save_hw_fp_context(sc)
               : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(void __user *sc)
{
        return raw_cpu_has_fpu
               ? restore_hw_fp_context(sc)
               : copy_fp_from_sigcontext(sc);
}
#endif

static int signal_setup(void)
{
        /*
         * The offset from sigcontext to extended context should be the same
         * regardless of the type of signal, such that userland can always know
         * where to look if it wishes to find the extended context structures.
         */
        BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
                      offsetof(struct sigframe, sf_sc)) !=
                     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
                      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));

#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
        /* For now just do the cpu_has_fpu check when the functions are invoked */
        save_fp_context = smp_save_fp_context;
        restore_fp_context = smp_restore_fp_context;
#else
        if (cpu_has_fpu) {
                save_fp_context = save_hw_fp_context;
                restore_fp_context = restore_hw_fp_context;
        } else {
                save_fp_context = copy_fp_to_sigcontext;
                restore_fp_context = copy_fp_from_sigcontext;
        }
#endif /* CONFIG_SMP && CONFIG_MIPS_FP_SUPPORT */

        return 0;
}

arch_initcall(signal_setup);
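
/*
 * signal_setup() runs as an arch_initcall, i.e. early in boot and well
 * before the first userland process exists, so the save_fp_context and
 * restore_fp_context hooks are always valid by the time a signal can be
 * delivered. On SMP the FPU check is deferred to call time because CPUs
 * may differ in FPU presence; on UP it is resolved once here.
 */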