interrupt.h (20386B)
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT	0x100

/* BookE */
#define INTERRUPT_DEBUG			0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON		0x260
#define INTERRUPT_DOORBELL		0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK		0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET		0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT		0x380
#define INTERRUPT_INST_SEGMENT		0x480
#define INTERRUPT_TRACE			0xd00
#define INTERRUPT_H_DATA_STORAGE	0xe00
#define INTERRUPT_HMI			0xe60
#define INTERRUPT_H_FAC_UNAVAIL		0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL		0xa00
#define INTERRUPT_PERFMON		0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL	0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE		0x300
#define INTERRUPT_INST_STORAGE		0x400
#define INTERRUPT_EXTERNAL		0x500
#define INTERRUPT_ALIGNMENT		0x600
#define INTERRUPT_PROGRAM		0x700
#define INTERRUPT_SYSCALL		0xc00
#define INTERRUPT_TRACE			0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL		0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER		0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON		0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx		0x1000
#define INTERRUPT_INST_TLB_MISS_8xx	0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx	0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx	0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx	0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx	0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx	0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603		0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603	0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603	0x1200
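
/*
 * These vector offsets are compared against TRAP(regs) to identify which
 * interrupt was taken. A minimal sketch of the pattern, for illustration
 * only; nmi_disables_ftrace() below is a real in-tree user:
 *
 *	if (TRAP(regs) == INTERRUPT_DECREMENTER)
 *		... it was a decrementer interrupt ...
 */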

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return false;

	if (regs->nip >= (unsigned long)__end_soft_masked)
		return false;

	return search_kernel_soft_mask_table(regs->nip);
}

static inline void srr_regs_clobbered(void)
{
	local_paca->srr_valid = 0;
	local_paca->hsrr_valid = 0;
}
#else
static inline unsigned long search_kernel_restart_table(unsigned long addr)
{
	return 0;
}

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	return false;
}

static inline void srr_regs_clobbered(void)
{
}
#endif

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
	}
#endif
}

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs))
		kuap_lock();
	else
		kuap_save_and_lock(regs);

	if (user_mode(regs))
		account_cpu_user_entry();
#endif

#ifdef CONFIG_PPC64
	bool trace_enable = false;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
		if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
			trace_enable = true;
	} else {
		irq_soft_mask_set(IRQS_ALL_DISABLED);
	}

	/*
	 * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
	 * Asynchronous interrupts get here with HARD_DIS set (see below), so
	 * this enables MSR[EE] for synchronous interrupts. IRQs remain
	 * soft-masked. The interrupt handler may later call
	 * interrupt_cond_local_irq_enable() to achieve a regular process
	 * context.
	 */
	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			BUG_ON(!(regs->msr & MSR_EE));
		__hard_irq_enable();
	} else {
		__hard_RI_enable();
	}

	/* Do this when RI=1 because it can cause SLB faults */
	if (trace_enable)
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuap_lock();
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		kuap_save_and_lock(regs);
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM) {
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
			if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
				BUG_ON(is_implicit_soft_masked(regs));
		}

		/* Move this under a debugging check */
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&
				arch_irq_disabled_regs(regs))
			BUG_ON(search_kernel_restart_table(regs->nip));
	}
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif

	booke_restore_dbcr0();
}
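
/*
 * A minimal sketch of the pattern described above, for illustration only
 * ("example_handler" is a hypothetical name; DEFINE_INTERRUPT_HANDLER is
 * defined later in this file). A synchronous handler that wants a regular
 * process context unmasks interrupts only if the interrupted context had
 * them enabled:
 *
 *	DEFINE_INTERRUPT_HANDLER(example_handler)
 *	{
 *		interrupt_cond_local_irq_enable(regs);
 *		... work that may require enabled interrupts ...
 *	}
 */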

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
	/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
	interrupt_enter_prepare(regs);
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * RI=1 is set by interrupt_enter_prepare, so this thread flags access
	 * has to come afterward (it can cause SLB faults).
	 */
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to idle nap return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
	u64 softe;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == INTERRUPT_DECREMENTER)
			return false;
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;
	state->softe = regs->softe;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
		/*
		 * Adjust regs->softe to be soft-masked if it had not been
		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
		 * not yet set disabled), or if it was in an implicit soft
		 * masked state. This makes arch_irq_disabled_regs(regs)
		 * behave as expected.
		 */
		regs->softe = IRQS_ALL_DISABLED;
	}

	__hard_RI_enable();

	/* Don't do any per-CPU operations until interrupt state is fixed */

	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/* If data relocations are enabled, it's safe to use nmi_enter() */
	if (mfmsr() & MSR_DR) {
		nmi_enter();
		return;
	}

	/*
	 * But do not use nmi_enter() for a pseries hash guest taking a
	 * real-mode NMI because not everything it touches is within the
	 * RMA limit.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    !radix_enabled())
		return;

	/*
	 * Likewise, don't use it if we have some form of instrumentation
	 * (like KASAN shadow) that is not safe to access in real mode
	 * (even on radix).
	 */
	if (IS_ENABLED(CONFIG_KASAN))
		return;

	/* Otherwise, it should be safe to call it */
	nmi_enter();
}
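
/*
 * Summary of the nmi_enter() decision above, for reference: nmi_enter() is
 * used whenever the MMU is on (MSR[DR] set). In real mode it is skipped for
 * a pseries hash guest (RMA limit) and for KASAN builds (shadow memory is
 * not accessible in real mode). interrupt_nmi_exit_prepare() below applies
 * the same tests so every nmi_enter() is paired with an nmi_exit().
 */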

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (mfmsr() & MSR_DR) {
		// nmi_exit if relocations are on
		nmi_exit();
	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
		   firmware_has_feature(FW_FEATURE_LPAR) &&
		   !radix_enabled()) {
		// no nmi_exit for a pseries hash guest taking a real mode exception
	} else if (IS_ENABLED(CONFIG_KASAN)) {
		// no nmi_exit for KASAN in real mode
	} else {
		nmi_exit();
	}

	/*
	 * nmi does not call nap_adjust_return because nmi should not create
	 * new work to do (must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
	if (arch_irq_disabled_regs(regs)) {
		unsigned long rst = search_kernel_restart_table(regs->nip);
		if (rst)
			regs_set_return_ip(regs, rst);
	}
#endif

	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	regs->softe = state->softe;
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) are not
 * advisable either; they may be possible in a pinch, but the trace will
 * look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	__hard_RI_enable();						\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)
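
/*
 * A minimal usage sketch, for illustration only. do_slb_fault, declared
 * with DECLARE_INTERRUPT_HANDLER_RAW below, is defined in this style:
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault)
 *	{
 *		long err;
 *		... fill the SLB entry without tracing or scheduling ...
 *		return err;
 *	}
 */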

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_enter_prepare(regs);					\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs);					\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	interrupt_enter_prepare(regs);					\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
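
/*
 * A minimal usage sketch, for illustration only ("example_fault" and
 * "example_ret" are hypothetical names). The macros supply everything up to
 * the function body, so a definition reads like a plain function:
 *
 *	DEFINE_INTERRUPT_HANDLER(example_fault)
 *	{
 *		... handle the interrupt ...
 *	}
 *
 *	DEFINE_INTERRUPT_HANDLER_RET(example_ret)
 *	{
 *		return 0;
 *	}
 *
 * The _RET variant's return value is passed back to the asm caller.
 */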

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_async_enter_prepare(regs);				\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)
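
/*
 * A minimal usage sketch, for illustration only. timer_interrupt and
 * machine_check_early, declared below, are defined with these wrappers
 * (in time.c and mce.c respectively) in this style:
 *
 *	DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 *	{
 *		... async work, bracketed by irq_enter()/irq_exit() ...
 *	}
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
 *	{
 *		long handled = 0;
 *		... NMI-safe work only; no exit work runs on return ...
 *		return handled;
 *	}
 */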

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8,
			   unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
#ifdef CONFIG_PPC64
unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */