Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 #ifndef _ASM_POWERPC_INTERRUPT_H
0003 #define _ASM_POWERPC_INTERRUPT_H
0004 
/*
 * Interrupt (trap) vector numbers as recorded in TRAP(regs). Which vectors
 * exist, and what number a given event uses, depends on the CPU family;
 * the section comments list the families each group applies to.
 */

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
/* BookE places PERFMON and DOORBELL at different vectors than BookS */
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI           0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL   0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL      0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE         0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

/*
 * Platforms with no performance monitor vector get a 0x0 placeholder so
 * comparisons like TRAP(regs) == INTERRUPT_PERFMON compile but never match.
 */
#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif
0052 
/*
 * Vectors specific to 8xx and 603 cores, which take interrupts for
 * software-assisted TLB handling and (on 8xx) breakpoints/emulation.
 */

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx      0x1000
#define INTERRUPT_INST_TLB_MISS_8xx 0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx 0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx    0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx    0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx   0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx   0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603     0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603    0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603   0x1200
0066 
0067 #ifndef __ASSEMBLY__
0068 
0069 #include <linux/context_tracking.h>
0070 #include <linux/hardirq.h>
0071 #include <asm/cputime.h>
0072 #include <asm/firmware.h>
0073 #include <asm/ftrace.h>
0074 #include <asm/kprobes.h>
0075 #include <asm/runlatch.h>
0076 
0077 #ifdef CONFIG_PPC_BOOK3S_64
0078 extern char __end_soft_masked[];
0079 bool search_kernel_soft_mask_table(unsigned long addr);
0080 unsigned long search_kernel_restart_table(unsigned long addr);
0081 
0082 DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
0083 
0084 static inline bool is_implicit_soft_masked(struct pt_regs *regs)
0085 {
0086     if (regs->msr & MSR_PR)
0087         return false;
0088 
0089     if (regs->nip >= (unsigned long)__end_soft_masked)
0090         return false;
0091 
0092     return search_kernel_soft_mask_table(regs->nip);
0093 }
0094 
/*
 * Mark the SRR and HSRR register contents cached in the paca as stale so
 * the interrupt return path reloads them from regs instead of trusting
 * the cached values.
 */
static inline void srr_regs_clobbered(void)
{
    local_paca->srr_valid = 0;
    local_paca->hsrr_valid = 0;
}
0100 #else
/* No kernel restart table outside Book3S-64: never matches. */
static inline unsigned long search_kernel_restart_table(unsigned long addr)
{
    return 0;
}

/* Implicit soft masking only exists on Book3S-64. */
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
    return false;
}

/* No SRR validity tracking outside Book3S-64. */
static inline void srr_regs_clobbered(void)
{
}
0114 #endif
0115 
/*
 * If this interrupt was taken while the CPU was napping (970-style idle),
 * clear the napping flag and redirect the return NIP to the nap-return
 * sequence so the idle loop unwinds properly. No-op without
 * CONFIG_PPC_970_NAP.
 */
static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
    if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
        /* Can avoid a test-and-clear because NMIs do not call this */
        clear_thread_local_flags(_TLF_NAPPING);
        regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
    }
#endif
}
0126 
/*
 * Restore this CPU's debug control state when the current task is using
 * the debug facilities (DBCR0_IDM set). Only acts on 32-bit
 * CONFIG_PPC_ADV_DEBUG_REGS (BookE-style debug) configurations; a no-op
 * elsewhere.
 */
static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
    unsigned long dbcr0 = current->thread.debug.dbcr0;

    if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
        mtspr(SPRN_DBSR, -1);
        mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
    }
#endif
}
0138 
/*
 * Common entry bookkeeping for synchronous interrupt handlers: reconcile
 * irq tracing and the soft-mask state, lock KUAP, switch context tracking
 * out of user mode, and account user/stolen time. The statement order
 * below is deliberate (e.g. RI must be enabled before anything that can
 * take an SLB fault) — do not reorder.
 */
static inline void interrupt_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
    if (!arch_irq_disabled_regs(regs))
        trace_hardirqs_off();

    if (user_mode(regs))
        kuap_lock();
    else
        kuap_save_and_lock(regs);

    if (user_mode(regs))
        account_cpu_user_entry();
#endif

#ifdef CONFIG_PPC64
    bool trace_enable = false;

    /* Soft-disable irqs; remember whether they were enabled for tracing. */
    if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
        if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
            trace_enable = true;
    } else {
        irq_soft_mask_set(IRQS_ALL_DISABLED);
    }

    /*
     * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
     * Asynchronous interrupts get here with HARD_DIS set (see below), so
     * this enables MSR[EE] for synchronous interrupts. IRQs remain
     * soft-masked. The interrupt handler may later call
     * interrupt_cond_local_irq_enable() to achieve a regular process
     * context.
     */
    if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
            BUG_ON(!(regs->msr & MSR_EE));
        __hard_irq_enable();
    } else {
        __hard_RI_enable();
    }

    /* Do this when RI=1 because it can cause SLB faults */
    if (trace_enable)
        trace_hardirqs_off();

    if (user_mode(regs)) {
        kuap_lock();
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();

        account_cpu_user_entry();
        account_stolen_time();
    } else {
        kuap_save_and_lock(regs);
        /*
         * CT_WARN_ON comes here via program_check_exception,
         * so avoid recursion.
         */
        if (TRAP(regs) != INTERRUPT_PROGRAM) {
            CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
            if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                BUG_ON(is_implicit_soft_masked(regs));
        }

        /* Move this under a debugging check */
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&
                arch_irq_disabled_regs(regs))
            BUG_ON(search_kernel_restart_table(regs->nip));
    }
    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
        BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif

    booke_restore_dbcr0();
}
0214 
0215 /*
0216  * Care should be taken to note that interrupt_exit_prepare and
0217  * interrupt_async_exit_prepare do not necessarily return immediately to
0218  * regs context (e.g., if regs is usermode, we don't necessarily return to
0219  * user mode). Other interrupts might be taken between here and return,
0220  * context switch / preemption may occur in the exit path after this, or a
0221  * signal may be delivered, etc.
0222  *
0223  * The real interrupt exit code is platform specific, e.g.,
0224  * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
0225  *
0226  * However interrupt_nmi_exit_prepare does return directly to regs, because
0227  * NMIs do not do "exit work" or replay soft-masked interrupts.
0228  */
static inline void interrupt_exit_prepare(struct pt_regs *regs)
{
    /* No common exit work for synchronous interrupts at present. */
}
0232 
/*
 * Entry bookkeeping for asynchronous interrupt handlers. Sets HARD_DIS
 * first so interrupt_enter_prepare() keeps MSR[EE] disabled, does the
 * common entry work, turns the runlatch on, and enters irq context.
 */
static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
    /* Ensure interrupt_enter_prepare does not enable MSR[EE] */
    local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
    interrupt_enter_prepare(regs);
#ifdef CONFIG_PPC_BOOK3S_64
    /*
     * RI=1 is set by interrupt_enter_prepare, so this thread flags access
     * has to come afterward (it can cause SLB faults).
     */
    if (cpu_has_feature(CPU_FTR_CTRL) &&
        !test_thread_local_flags(_TLF_RUNLATCH))
        __ppc64_runlatch_on();
#endif
    irq_enter();
}
0251 
/*
 * Exit bookkeeping for asynchronous interrupt handlers: undo any idle-nap
 * return redirection, leave irq context, then do the common exit work.
 */
static inline void interrupt_async_exit_prepare(struct pt_regs *regs)
{
    /*
     * Adjust at exit so the main handler sees the true NIA. This must
     * come before irq_exit() because irq_exit can enable interrupts, and
     * if another interrupt is taken before nap_adjust_return has run
     * here, then that interrupt would return directly to idle nap return.
     */
    nap_adjust_return(regs);

    irq_exit();
    interrupt_exit_prepare(regs);
}
0265 
/*
 * Per-CPU interrupt state captured by interrupt_nmi_enter_prepare() so
 * that interrupt_nmi_exit_prepare() can restore it exactly — an NMI must
 * not perturb the interrupted context's soft-mask bookkeeping.
 */
struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
    u8 irq_soft_mask;   /* saved local_paca->irq_soft_mask */
    u8 irq_happened;    /* saved local_paca->irq_happened */
    u8 ftrace_enabled;  /* saved per-CPU ftrace enable state */
    u64 softe;          /* saved regs->softe */
#endif
};
0274 
0275 static inline bool nmi_disables_ftrace(struct pt_regs *regs)
0276 {
0277     /* Allow DEC and PMI to be traced when they are soft-NMI */
0278     if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
0279         if (TRAP(regs) == INTERRUPT_DECREMENTER)
0280                return false;
0281         if (TRAP(regs) == INTERRUPT_PERFMON)
0282                return false;
0283     }
0284     if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
0285         if (TRAP(regs) == INTERRUPT_PERFMON)
0286             return false;
0287     }
0288 
0289     return true;
0290 }
0291 
/*
 * Entry bookkeeping for NMI handlers. Saves the interrupted context's
 * soft-mask/irq state into @state, forces the soft-mask state to disabled
 * without going through irq tracing, enables RI, optionally disables
 * ftrace, and calls nmi_enter() only when it is safe to do so in the
 * current (possibly real-mode) environment.
 */
static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
    state->irq_soft_mask = local_paca->irq_soft_mask;
    state->irq_happened = local_paca->irq_happened;
    state->softe = regs->softe;

    /*
     * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
     * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
     * because that goes through irq tracing which we don't want in NMI.
     */
    local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
    local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

    if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
        /*
         * Adjust regs->softe to be soft-masked if it had not been
         * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
         * not yet set disabled), or if it was in an implicit soft
         * masked state. This makes arch_irq_disabled_regs(regs)
         * behave as expected.
         */
        regs->softe = IRQS_ALL_DISABLED;
    }

    __hard_RI_enable();

    /* Don't do any per-CPU operations until interrupt state is fixed */

    if (nmi_disables_ftrace(regs)) {
        state->ftrace_enabled = this_cpu_get_ftrace_enabled();
        this_cpu_set_ftrace_enabled(0);
    }
#endif

    /* If data relocations are enabled, it's safe to use nmi_enter() */
    if (mfmsr() & MSR_DR) {
        nmi_enter();
        return;
    }

    /*
     * But do not use nmi_enter() for pseries hash guest taking a real-mode
     * NMI because not everything it touches is within the RMA limit.
     */
    if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
        firmware_has_feature(FW_FEATURE_LPAR) &&
        !radix_enabled())
        return;

    /*
     * Likewise, don't use it if we have some form of instrumentation (like
     * KASAN shadow) that is not safe to access in real mode (even on radix)
     */
    if (IS_ENABLED(CONFIG_KASAN))
        return;

    /* Otherwise, it should be safe to call it */
    nmi_enter();
}
0353 
/*
 * Exit bookkeeping for NMI handlers. Mirrors the nmi_enter() decision made
 * in interrupt_nmi_enter_prepare() (same conditions must skip nmi_exit()),
 * applies any restart-table fixup, re-enables ftrace if it was disabled,
 * and restores the saved soft-mask/irq state from @state.
 */
static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
    if (mfmsr() & MSR_DR) {
        // nmi_exit if relocations are on
        nmi_exit();
    } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
           firmware_has_feature(FW_FEATURE_LPAR) &&
           !radix_enabled()) {
        // no nmi_exit for a pseries hash guest taking a real mode exception
    } else if (IS_ENABLED(CONFIG_KASAN)) {
        // no nmi_exit for KASAN in real mode
    } else {
        nmi_exit();
    }

    /*
     * nmi does not call nap_adjust_return because nmi should not create
     * new work to do (must use irq_work for that).
     */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
    if (arch_irq_disabled_regs(regs)) {
        unsigned long rst = search_kernel_restart_table(regs->nip);
        if (rst)
            regs_set_return_ip(regs, rst);
    }
#endif

    if (nmi_disables_ftrace(regs))
        this_cpu_set_ftrace_enabled(state->ftrace_enabled);

    /* Check we didn't change the pending interrupt mask. */
    WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
    regs->softe = state->softe;
    local_paca->irq_happened = state->irq_happened;
    local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}
0393 
/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 *
 * The attributes exclude handlers from inlining, ftrace, KCSAN and KASAN
 * instrumentation.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address
0402 
/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:   Function name of the entry point
 * @returns:    Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)             \
    __visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:   Function name of the entry point
 * @returns:    Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * Unlike the other DEFINE_INTERRUPT_HANDLER* variants, no
 * interrupt_enter/exit_prepare() is performed; only RI is enabled.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * raw interrupt handlers must not enable or disable interrupts, or
 * schedule, tracing and instrumentation (ftrace, lockdep, etc) would
 * not be advisable either, although may be possible in a pinch, the
 * trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)              \
static __always_inline __no_sanitize_address __no_kcsan long        \
____##func(struct pt_regs *regs);                   \
                                    \
interrupt_handler long func(struct pt_regs *regs)           \
{                                   \
    long ret;                           \
                                    \
    __hard_RI_enable();                     \
                                    \
    ret = ____##func (regs);                    \
                                    \
    return ret;                         \
}                                   \
NOKPROBE_SYMBOL(func);                          \
                                    \
static __always_inline __no_sanitize_address __no_kcsan long        \
____##func(struct pt_regs *regs)
0452 
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:   Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)                 \
    __visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:   Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The body is wrapped with interrupt_enter_prepare() and
 * interrupt_exit_prepare().
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)                  \
static __always_inline void ____##func(struct pt_regs *regs);       \
                                    \
interrupt_handler void func(struct pt_regs *regs)           \
{                                   \
    interrupt_enter_prepare(regs);                  \
                                    \
    ____##func (regs);                      \
                                    \
    interrupt_exit_prepare(regs);                   \
}                                   \
NOKPROBE_SYMBOL(func);                          \
                                    \
static __always_inline void ____##func(struct pt_regs *regs)
0483 
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:   Function name of the entry point
 * @returns:    Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)             \
    __visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:   Function name of the entry point
 * @returns:    Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * Like DEFINE_INTERRUPT_HANDLER but the body's return value is passed
 * back to the asm caller.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)              \
static __always_inline long ____##func(struct pt_regs *regs);       \
                                    \
interrupt_handler long func(struct pt_regs *regs)           \
{                                   \
    long ret;                           \
                                    \
    interrupt_enter_prepare(regs);                  \
                                    \
    ret = ____##func (regs);                    \
                                    \
    interrupt_exit_prepare(regs);                   \
                                    \
    return ret;                         \
}                                   \
NOKPROBE_SYMBOL(func);                          \
                                    \
static __always_inline long ____##func(struct pt_regs *regs)
0520 
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:   Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)               \
    __visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:   Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The body is wrapped with interrupt_async_enter_prepare() and
 * interrupt_async_exit_prepare(), which enter/leave irq context.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)                \
static __always_inline void ____##func(struct pt_regs *regs);       \
                                    \
interrupt_handler void func(struct pt_regs *regs)           \
{                                   \
    interrupt_async_enter_prepare(regs);                \
                                    \
    ____##func (regs);                      \
                                    \
    interrupt_async_exit_prepare(regs);             \
}                                   \
NOKPROBE_SYMBOL(func);                          \
                                    \
static __always_inline void ____##func(struct pt_regs *regs)
0551 
/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:   Function name of the entry point
 * @returns:    Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)             \
    __visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:   Function name of the entry point
 * @returns:    Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The body is wrapped with interrupt_nmi_enter_prepare() and
 * interrupt_nmi_exit_prepare(), with the saved state kept in a local
 * struct interrupt_nmi_state.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)              \
static __always_inline __no_sanitize_address __no_kcsan long        \
____##func(struct pt_regs *regs);                   \
                                    \
interrupt_handler long func(struct pt_regs *regs)           \
{                                   \
    struct interrupt_nmi_state state;               \
    long ret;                           \
                                    \
    interrupt_nmi_enter_prepare(regs, &state);          \
                                    \
    ret = ____##func (regs);                    \
                                    \
    interrupt_nmi_exit_prepare(regs, &state);           \
                                    \
    return ret;                         \
}                                   \
NOKPROBE_SYMBOL(func);                          \
                                    \
static __always_inline  __no_sanitize_address __no_kcsan long       \
____##func(struct pt_regs *regs)
0591 
0592 
/*
 * Interrupt handler declarations, grouped by the source file that defines
 * each handler. The DECLARE_* variant indicates which wrapper the handler
 * is defined with (RAW/plain/ASYNC/NMI).
 */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);
0661 
/*
 * Enable local interrupts for the handler body, but only if the
 * interrupted context (per regs) did not have interrupts disabled.
 */
static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
    if (!arch_irq_disabled_regs(regs))
        local_irq_enable();
}
0667 
/* Syscall entry and syscall/interrupt exit preparation entry points. */
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8,
               unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
#ifdef CONFIG_PPC64
/* Restartable exit paths (64-bit only). */
unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
#endif
0678 
0679 #endif /* __ASSEMBLY__ */
0680 
0681 #endif /* _ASM_POWERPC_INTERRUPT_H */