signal.c (27808B)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *
 *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
 *  2000-2002   x86-64 support by Andi Kleen
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/syscalls.h>

#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/xstate.h>
#include <asm/vdso.h>
#include <asm/mce.h>
#include <asm/sighandling.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <linux/compat.h>
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
#include <asm/fpu/xstate.h>
#endif /* CONFIG_X86_64 */

#include <asm/syscall.h>
#include <asm/sigframe.h>
#include <asm/signal.h>

#ifdef CONFIG_X86_64
/*
 * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
 * alone.  Using this generally makes no sense unless
 * user_64bit_mode(regs) would return true.
 */
static void force_valid_ss(struct pt_regs *regs)
{
	u32 ar;
	asm volatile ("lar %[old_ss], %[ar]\n\t"
		      "jz 1f\n\t"		/* If invalid: */
		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
		      "1:"
		      : [ar] "=r" (ar)
		      : [old_ss] "rm" ((u16)regs->ss));

	/*
	 * For a valid 64-bit user context, we need DPL 3, type
	 * read-write data or read-write exp-down data, and S and P
	 * set.  We can't use VERW because VERW doesn't check the
	 * P bit.
	 */
	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
		regs->ss = __USER_DS;
}
# define CONTEXT_COPY_SIZE	offsetof(struct sigcontext, reserved1)
#else
# define CONTEXT_COPY_SIZE	sizeof(struct sigcontext)
#endif

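/*
 * Restore CPU state from a user-space sigcontext.  Only CONTEXT_COPY_SIZE
 * bytes are copied in; CS/SS get their RPL forced to 3 and only the
 * FIX_EFLAGS bits of EFLAGS are taken from user space, so a forged
 * context cannot be used to gain privileges.  Returns true on success.
 */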
static bool restore_sigcontext(struct pt_regs *regs,
			       struct sigcontext __user *usc,
			       unsigned long uc_flags)
{
	struct sigcontext sc;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (copy_from_user(&sc, usc, CONTEXT_COPY_SIZE))
		return false;

#ifdef CONFIG_X86_32
	loadsegment(gs, sc.gs);
	regs->fs = sc.fs;
	regs->es = sc.es;
	regs->ds = sc.ds;
#endif /* CONFIG_X86_32 */

	regs->bx = sc.bx;
	regs->cx = sc.cx;
	regs->dx = sc.dx;
	regs->si = sc.si;
	regs->di = sc.di;
	regs->bp = sc.bp;
	regs->ax = sc.ax;
	regs->sp = sc.sp;
	regs->ip = sc.ip;

#ifdef CONFIG_X86_64
	regs->r8 = sc.r8;
	regs->r9 = sc.r9;
	regs->r10 = sc.r10;
	regs->r11 = sc.r11;
	regs->r12 = sc.r12;
	regs->r13 = sc.r13;
	regs->r14 = sc.r14;
	regs->r15 = sc.r15;
#endif /* CONFIG_X86_64 */

	/* Get CS/SS and force CPL3 */
	regs->cs = sc.cs | 0x03;
	regs->ss = sc.ss | 0x03;

	regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS);
	/* disable syscall checks */
	regs->orig_ax = -1;

#ifdef CONFIG_X86_64
	/*
	 * Fix up SS if needed for the benefit of old DOSEMU and
	 * CRIU.
	 */
	if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
		force_valid_ss(regs);
#endif

	return fpu__restore_sig((void __user *)sc.fpstate,
				IS_ENABLED(CONFIG_X86_32));
}

static __always_inline int
__unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
			  struct pt_regs *regs, unsigned long mask)
{
#ifdef CONFIG_X86_32
	unsigned int gs;
	savesegment(gs, gs);

	unsafe_put_user(gs,	  (unsigned int __user *)&sc->gs, Efault);
	unsafe_put_user(regs->fs, (unsigned int __user *)&sc->fs, Efault);
	unsafe_put_user(regs->es, (unsigned int __user *)&sc->es, Efault);
	unsafe_put_user(regs->ds, (unsigned int __user *)&sc->ds, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(regs->di, &sc->di, Efault);
	unsafe_put_user(regs->si, &sc->si, Efault);
	unsafe_put_user(regs->bp, &sc->bp, Efault);
	unsafe_put_user(regs->sp, &sc->sp, Efault);
	unsafe_put_user(regs->bx, &sc->bx, Efault);
	unsafe_put_user(regs->dx, &sc->dx, Efault);
	unsafe_put_user(regs->cx, &sc->cx, Efault);
	unsafe_put_user(regs->ax, &sc->ax, Efault);
#ifdef CONFIG_X86_64
	unsafe_put_user(regs->r8, &sc->r8, Efault);
	unsafe_put_user(regs->r9, &sc->r9, Efault);
	unsafe_put_user(regs->r10, &sc->r10, Efault);
	unsafe_put_user(regs->r11, &sc->r11, Efault);
	unsafe_put_user(regs->r12, &sc->r12, Efault);
	unsafe_put_user(regs->r13, &sc->r13, Efault);
	unsafe_put_user(regs->r14, &sc->r14, Efault);
	unsafe_put_user(regs->r15, &sc->r15, Efault);
#endif /* CONFIG_X86_64 */

	unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
	unsafe_put_user(current->thread.error_code, &sc->err, Efault);
	unsafe_put_user(regs->ip, &sc->ip, Efault);
#ifdef CONFIG_X86_32
	unsafe_put_user(regs->cs, (unsigned int __user *)&sc->cs, Efault);
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->sp, &sc->sp_at_signal, Efault);
	unsafe_put_user(regs->ss, (unsigned int __user *)&sc->ss, Efault);
#else /* !CONFIG_X86_32 */
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->cs, &sc->cs, Efault);
	unsafe_put_user(0, &sc->gs, Efault);
	unsafe_put_user(0, &sc->fs, Efault);
	unsafe_put_user(regs->ss, &sc->ss, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(fpstate, (unsigned long __user *)&sc->fpstate, Efault);

	/* non-iBCS2 extensions.. */
	unsafe_put_user(mask, &sc->oldmask, Efault);
	unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
	return 0;
Efault:
	return -EFAULT;
}

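/*
 * The unsafe_put_*() helpers used below may only be called between
 * user_access_begin() and user_access_end(); on a fault they jump to the
 * supplied label instead of returning an error code.
 */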
#define unsafe_put_sigcontext(sc, fp, regs, set, label)			\
do {									\
	if (__unsafe_setup_sigcontext(sc, fp, regs, set->sig[0]))	\
		goto label;						\
} while(0);

#define unsafe_put_sigmask(set, frame, label) \
	unsafe_put_user(*(__u64 *)(set), \
			(__u64 __user *)&(frame)->uc.uc_sigmask, \
			label)

/*
 * Set up a signal frame.
 */

/* x86 ABI requires 16-byte alignment */
#define FRAME_ALIGNMENT	16UL

#define MAX_FRAME_PADDING	(FRAME_ALIGNMENT - 1)

/*
 * Determine which stack to use..
 */
static unsigned long align_sigframe(unsigned long sp)
{
#ifdef CONFIG_X86_32
	/*
	 * Align the stack pointer according to the i386 ABI,
	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
	 */
	sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
#else /* !CONFIG_X86_32 */
	sp = round_down(sp, FRAME_ALIGNMENT) - 8;
#endif
	return sp;
}

static void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
	     void __user **fpstate)
{
	/* Default to using normal stack */
	bool nested_altstack = on_sig_stack(regs->sp);
	bool entering_altstack = false;
	unsigned long math_size = 0;
	unsigned long sp = regs->sp;
	unsigned long buf_fx = 0;

	/* redzone */
	if (IS_ENABLED(CONFIG_X86_64))
		sp -= 128;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		/*
		 * This checks nested_altstack via sas_ss_flags(). Sensible
		 * programs use SS_AUTODISARM, which disables that check, and
		 * programs that don't use SS_AUTODISARM get compatible
		 * behavior.
		 */
		if (sas_ss_flags(sp) == 0) {
			sp = current->sas_ss_sp + current->sas_ss_size;
			entering_altstack = true;
		}
	} else if (IS_ENABLED(CONFIG_X86_32) &&
		   !nested_altstack &&
		   regs->ss != __USER_DS &&
		   !(ka->sa.sa_flags & SA_RESTORER) &&
		   ka->sa.sa_restorer) {
		/* This is the legacy signal stack switching. */
		sp = (unsigned long) ka->sa.sa_restorer;
		entering_altstack = true;
	}

	sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
				  &buf_fx, &math_size);
	*fpstate = (void __user *)sp;

	sp = align_sigframe(sp - frame_size);

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (unlikely((nested_altstack || entering_altstack) &&
		     !__on_sig_stack(sp))) {

		if (show_unhandled_signals && printk_ratelimit())
			pr_info("%s[%d] overflowed sigaltstack\n",
				current->comm, task_pid_nr(current));

		return (void __user *)-1L;
	}

	/* save i387 and extended state */
	if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size))
		return (void __user *)-1L;

	return (void __user *)sp;
}

#ifdef CONFIG_X86_32
static const struct {
	u16 poplmovl;
	u32 val;
	u16 int80;
} __attribute__((packed)) retcode = {
	0xb858,		/* popl %eax; movl $..., %eax */
	__NR_sigreturn,
	0x80cd,		/* int $0x80 */
};

static const struct {
	u8  movl;
	u32 val;
	u16 int80;
	u8  pad;
} __attribute__((packed)) rt_retcode = {
	0xb8,		/* movl $..., %eax */
	__NR_rt_sigreturn,
	0x80cd,		/* int $0x80 */
	0
};

static int
__setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
	      struct pt_regs *regs)
{
	struct sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_sigcontext(&frame->sc, fp, regs, set, Efault);
	unsafe_put_user(set->sig[1], &frame->extramask[0], Efault);
	if (current->mm->context.vdso)
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_sigreturn;
	else
		restorer = &frame->retcode;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;

	/* Set up to return from userspace.  */
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&retcode), (u64 *)frame->retcode, Efault);
	user_access_end();

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = 0;
	regs->cx = 0;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}

static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_user(&frame->info, &frame->pinfo, Efault);
	unsafe_put_user(&frame->uc, &frame->puc, Efault);

	/* Create the ucontext.  */
	if (static_cpu_has(X86_FEATURE_XSAVE))
		unsafe_put_user(UC_FP_XSTATE, &frame->uc.uc_flags, Efault);
	else
		unsafe_put_user(0, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/* Set up to return from userspace.  */
	restorer = current->mm->context.vdso +
		vdso_image_32.sym___kernel_rt_sigreturn;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is movl $__NR_rt_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = (unsigned long)&frame->info;
	regs->cx = (unsigned long)&frame->uc;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}
#else /* !CONFIG_X86_32 */
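/*
 * uc_flags advertised to userspace (see uapi asm/ucontext.h):
 * UC_FP_XSTATE: the FP area of the frame is in XSAVE format.
 * UC_SIGCONTEXT_SS: the kernel saves and restores SS in the sigcontext.
 * UC_STRICT_RESTORE_SS: set when 64-bit code was interrupted; tells
 * sigreturn to restore SS verbatim instead of applying the
 * force_valid_ss() fixup.
 */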
static unsigned long frame_uc_flags(struct pt_regs *regs)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_XSAVE))
		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
	else
		flags = UC_SIGCONTEXT_SS;

	if (likely(user_64bit_mode(regs)))
		flags |= UC_STRICT_RESTORE_SS;

	return flags;
}

static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *fp = NULL;
	unsigned long uc_flags;

	/* x86-64 should always use SA_RESTORER. */
	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	unsafe_put_user(ksig->ka.sa.sa_restorer, &frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->di = sig;
	/* In case the signal handler was declared without prototypes */
	regs->ax = 0;

	/* This also works for non SA_SIGINFO handlers because they expect the
	   next argument after the signal number on the stack. */
	regs->si = (unsigned long)&frame->info;
	regs->dx = (unsigned long)&frame->uc;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	regs->sp = (unsigned long)frame;

	/*
	 * Set up the CS and SS registers to run signal handlers in
	 * 64-bit mode, even if the handler happens to be interrupting
	 * 32-bit or 16-bit code.
	 *
	 * SS is subtle.  In 64-bit mode, we don't need any particular
	 * SS descriptor, but we do need SS to be valid.  It's possible
	 * that the old SS is entirely bogus -- this can happen if the
	 * signal we're trying to deliver is #GP or #SS caused by a bad
	 * SS value.  We also have a compatibility issue here: DOSEMU
	 * relies on the contents of the SS register indicating the
	 * SS value at the time of the signal, even though that code in
	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
	 * avoids relying on sigreturn to restore SS; instead it uses
	 * a trampoline.)  So we do our best: if the old SS was valid,
	 * we keep it.  Otherwise we replace it.
	 */
	regs->cs = __USER_CS;

	if (unlikely(regs->ss != __USER_DS))
		force_valid_ss(regs);

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}
#endif /* CONFIG_X86_32 */

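/*
 * x32 siginfo differs from the generic compat layout in that SIGCHLD
 * carries 64-bit utime/stime fields, hence the fixup below on top of
 * copy_siginfo_to_external32().
 */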
#ifdef CONFIG_X86_X32_ABI
static int x32_copy_siginfo_to_user(struct compat_siginfo __user *to,
		const struct kernel_siginfo *from)
{
	struct compat_siginfo new;

	copy_siginfo_to_external32(&new, from);
	if (from->si_signo == SIGCHLD) {
		new._sifields._sigchld_x32._utime = from->si_utime;
		new._sifields._sigchld_x32._stime = from->si_stime;
	}
	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;
	return 0;
}

int copy_siginfo_to_user32(struct compat_siginfo __user *to,
			   const struct kernel_siginfo *from)
{
	if (in_x32_syscall())
		return x32_copy_siginfo_to_user(to, from);
	return __copy_siginfo_to_user32(to, from);
}
#endif /* CONFIG_X86_X32_ABI */

static int x32_setup_rt_frame(struct ksignal *ksig,
			      compat_sigset_t *set,
			      struct pt_regs *regs)
{
#ifdef CONFIG_X86_X32_ABI
	struct rt_sigframe_x32 __user *frame;
	unsigned long uc_flags;
	void __user *restorer;
	void __user *fp = NULL;

	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
	unsafe_put_user(0, &frame->uc.uc__pad0, Efault);
	restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, (unsigned long __user *)&frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (x32_copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* We use the x32 calling convention here... */
	regs->di = ksig->sig;
	regs->si = (unsigned long) &frame->info;
	regs->dx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
#endif	/* CONFIG_X86_X32_ABI */

	return 0;
#ifdef CONFIG_X86_X32_ABI
Efault:
	user_access_end();
	return -EFAULT;
#endif
}

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_X86_32
SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *frame;
	sigset_t set;

	frame = (struct sigframe __user *)(regs->sp - 8);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask) ||
	    __get_user(set.sig[1], &frame->extramask[0]))
		goto badframe;

	set_current_blocked(&set);

	/*
	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
	 * Save a few cycles by skipping the __get_user.
	 */
	if (!restore_sigcontext(regs, &frame->sc, 0))
		goto badframe;
	return regs->ax;

badframe:
	signal_fault(regs, frame, "sigreturn");

	return 0;
}
#endif /* CONFIG_X86_32 */

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(*(__u64 *)&set, (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "rt_sigreturn");
	return 0;
}

/*
 * There are four different struct types for signal frame: sigframe_ia32,
 * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
 * -- the largest size. It means the size for 64-bit apps is a bit more
 * than needed, but this keeps the code simple.
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct sigframe_ia32)
#else
# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct rt_sigframe)
#endif

/*
 * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
 * If a signal frame starts at an unaligned address, extra space is required.
 * This is the max alignment padding, conservatively.
 */
#define MAX_XSAVE_PADDING	63UL

/*
 * The frame data is composed of the following areas and laid out as:
 *
 * -------------------------
 * | alignment padding     |
 * -------------------------
 * | (f)xsave frame        |
 * -------------------------
 * | fsave header          |
 * -------------------------
 * | alignment padding     |
 * -------------------------
 * | siginfo + ucontext    |
 * -------------------------
 */

/* max_frame_size tells userspace the worst case signal stack size. */
static unsigned long __ro_after_init max_frame_size;
static unsigned int __ro_after_init fpu_default_state_size;

void __init init_sigframe_size(void)
{
	fpu_default_state_size = fpu__get_fpstate_size();

	max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;

	max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING;

	/* Userspace expects an aligned size. */
	max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);

	pr_info("max sigframe size: %lu\n", max_frame_size);
}

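/*
 * Note: this value is also exported to userspace through the AT_MINSIGSTKSZ
 * auxiliary vector entry (see the ARCH_DLINFO definitions in asm/elf.h), so
 * programs can size alternate signal stacks to fit the worst case frame.
 */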
unsigned long get_sigframe_size(void)
{
	return max_frame_size;
}

static inline int is_ia32_compat_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
		ksig->ka.sa.sa_flags & SA_IA32_ABI;
}

static inline int is_ia32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
}

static inline int is_x32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
		ksig->ka.sa.sa_flags & SA_X32_ABI;
}

static int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	int usig = ksig->sig;
	sigset_t *set = sigmask_to_save();
	compat_sigset_t *cset = (compat_sigset_t *) set;

	/* Perform fixup for the pre-signal frame. */
	rseq_signal_deliver(ksig, regs);

	/* Set up the stack frame */
	if (is_ia32_frame(ksig)) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			return ia32_setup_rt_frame(usig, ksig, cset, regs);
		else
			return ia32_setup_frame(usig, ksig, cset, regs);
	} else if (is_x32_frame(ksig)) {
		return x32_setup_rt_frame(ksig, cset, regs);
	} else {
		return __setup_rt_frame(ksig->sig, ksig, set, regs);
	}
}

static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	bool stepping, failed;
	struct fpu *fpu = &current->thread.fpu;

	if (v8086_mode(regs))
		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) != -1) {
		/* If so, check system call restarting.. */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
	 * so that register information in the sigcontext is correct and
	 * then notify the tracer before entering the signal handler.
	 */
	stepping = test_thread_flag(TIF_SINGLESTEP);
	if (stepping)
		user_disable_single_step(current);

	failed = (setup_rt_frame(ksig, regs) < 0);
	if (!failed) {
		/*
		 * Clear the direction flag as per the ABI for function entry.
		 *
		 * Clear RF when entering the signal handler, because it
		 * could otherwise suppress a pending debug exception in
		 * the signal handler.
		 *
		 * Clear TF for the case when it wasn't set by debugger to
		 * avoid the recursive send_sigtrap() in SIGTRAP handler.
		 */
		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
		/*
		 * Ensure the signal handler starts with the new fpu state.
		 */
		fpu__clear_user_states(fpu);
	}
	signal_setup_done(failed, ksig, stepping);
}

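/*
 * Pick the syscall number used to restart an interrupted syscall after
 * -ERESTART_RESTARTBLOCK.  IA32 tasks get the ia32 number (TS_COMPAT is
 * kept in restart_block.arch_data); for x32, orig_ax still carries
 * __X32_SYSCALL_BIT, so OR-ing it back in restarts through the same ABI.
 */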
841 */ 842 fpu__clear_user_states(fpu); 843 } 844 signal_setup_done(failed, ksig, stepping); 845} 846 847static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) 848{ 849#ifdef CONFIG_IA32_EMULATION 850 if (current->restart_block.arch_data & TS_COMPAT) 851 return __NR_ia32_restart_syscall; 852#endif 853#ifdef CONFIG_X86_X32_ABI 854 return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT); 855#else 856 return __NR_restart_syscall; 857#endif 858} 859 860/* 861 * Note that 'init' is a special process: it doesn't get signals it doesn't 862 * want to handle. Thus you cannot kill init even with a SIGKILL even by 863 * mistake. 864 */ 865void arch_do_signal_or_restart(struct pt_regs *regs) 866{ 867 struct ksignal ksig; 868 869 if (get_signal(&ksig)) { 870 /* Whee! Actually deliver the signal. */ 871 handle_signal(&ksig, regs); 872 return; 873 } 874 875 /* Did we come from a system call? */ 876 if (syscall_get_nr(current, regs) != -1) { 877 /* Restart the system call - no handlers present */ 878 switch (syscall_get_error(current, regs)) { 879 case -ERESTARTNOHAND: 880 case -ERESTARTSYS: 881 case -ERESTARTNOINTR: 882 regs->ax = regs->orig_ax; 883 regs->ip -= 2; 884 break; 885 886 case -ERESTART_RESTARTBLOCK: 887 regs->ax = get_nr_restart_syscall(regs); 888 regs->ip -= 2; 889 break; 890 } 891 } 892 893 /* 894 * If there's no signal to deliver, we just put the saved sigmask 895 * back. 896 */ 897 restore_saved_sigmask(); 898} 899 900void signal_fault(struct pt_regs *regs, void __user *frame, char *where) 901{ 902 struct task_struct *me = current; 903 904 if (show_unhandled_signals && printk_ratelimit()) { 905 printk("%s" 906 "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", 907 task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, 908 me->comm, me->pid, where, frame, 909 regs->ip, regs->sp, regs->orig_ax); 910 print_vma_addr(KERN_CONT " in ", regs->ip); 911 pr_cont("\n"); 912 } 913 914 force_sig(SIGSEGV); 915} 916 917#ifdef CONFIG_DYNAMIC_SIGFRAME 918#ifdef CONFIG_STRICT_SIGALTSTACK_SIZE 919static bool strict_sigaltstack_size __ro_after_init = true; 920#else 921static bool strict_sigaltstack_size __ro_after_init = false; 922#endif 923 924static int __init strict_sas_size(char *arg) 925{ 926 return kstrtobool(arg, &strict_sigaltstack_size); 927} 928__setup("strict_sas_size", strict_sas_size); 929 930/* 931 * MINSIGSTKSZ is 2048 and can't be changed despite the fact that AVX512 932 * exceeds that size already. As such programs might never use the 933 * sigaltstack they just continued to work. While always checking against 934 * the real size would be correct, this might be considered a regression. 935 * 936 * Therefore avoid the sanity check, unless enforced by kernel 937 * configuration or command line option. 938 * 939 * When dynamic FPU features are supported, the check is also enforced when 940 * the task has permissions to use dynamic features. Tasks which have no 941 * permission are checked against the size of the non-dynamic feature set 942 * if strict checking is enabled. This avoids forcing all tasks on the 943 * system to allocate large sigaltstacks even if they are never going 944 * to use a dynamic feature. As this is serialized via sighand::siglock 945 * any permission request for a dynamic feature either happened already 946 * or will see the newly install sigaltstack size in the permission checks. 
947 */ 948bool sigaltstack_size_valid(size_t ss_size) 949{ 950 unsigned long fsize = max_frame_size - fpu_default_state_size; 951 u64 mask; 952 953 lockdep_assert_held(¤t->sighand->siglock); 954 955 if (!fpu_state_size_dynamic() && !strict_sigaltstack_size) 956 return true; 957 958 fsize += current->group_leader->thread.fpu.perm.__user_state_size; 959 if (likely(ss_size > fsize)) 960 return true; 961 962 if (strict_sigaltstack_size) 963 return ss_size > fsize; 964 965 mask = current->group_leader->thread.fpu.perm.__state_perm; 966 if (mask & XFEATURE_MASK_USER_DYNAMIC) 967 return ss_size > fsize; 968 969 return true; 970} 971#endif /* CONFIG_DYNAMIC_SIGFRAME */ 972 973#ifdef CONFIG_X86_X32_ABI 974COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn) 975{ 976 struct pt_regs *regs = current_pt_regs(); 977 struct rt_sigframe_x32 __user *frame; 978 sigset_t set; 979 unsigned long uc_flags; 980 981 frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); 982 983 if (!access_ok(frame, sizeof(*frame))) 984 goto badframe; 985 if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask)) 986 goto badframe; 987 if (__get_user(uc_flags, &frame->uc.uc_flags)) 988 goto badframe; 989 990 set_current_blocked(&set); 991 992 if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) 993 goto badframe; 994 995 if (compat_restore_altstack(&frame->uc.uc_stack)) 996 goto badframe; 997 998 return regs->ax; 999 1000badframe: 1001 signal_fault(regs, frame, "x32 rt_sigreturn"); 1002 return 0; 1003} 1004#endif