/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay.
 *
 * PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
 * always in sync with the MSR[EE] state, except:
 * - A window in interrupt entry, where hardware disables MSR[EE] and that
 *   must be "reconciled" with the soft mask state.
 * - NMI interrupts that hit in awkward places, until they fix the state.
 * - When local irqs are being enabled and state is being fixed up.
 * - When returning from an interrupt there are some windows where this
 *   can become out of sync, but it gets fixed before the RFI or before
 *   executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
 */
#define PACA_IRQ_HARD_DIS   0x01
#define PACA_IRQ_DBELL      0x02
#define PACA_IRQ_EE         0x04
#define PACA_IRQ_DEC        0x08 /* Or FIT */
#define PACA_IRQ_HMI        0x10
#define PACA_IRQ_PMI        0x20
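
/*
 * Illustrative sketch (not the actual replay code, which lives under
 * arch/powerpc/kernel/): once the soft mask is lifted, the replay path
 * consumes these bits roughly like this:
 *
 *    u8 happened = local_paca->irq_happened;
 *
 *    if (happened & PACA_IRQ_DEC)
 *        replay the decrementer interrupt;
 *    if (happened & PACA_IRQ_EE)
 *        replay the external interrupt;
 *    (and similarly for DBELL, HMI and PMI)
 */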

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
#endif

#endif /* CONFIG_PPC64 */

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED        0
#define IRQS_DISABLED       1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED   2
#define IRQS_ALL_DISABLED   (IRQS_DISABLED | IRQS_PMI_DISABLED)
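
/*
 * Example (illustrative): IRQS_ALL_DISABLED is just both mask bits at
 * once, so on PPC64 a section that must keep both standard interrupts
 * and PMIs out looks like:
 *
 *    irq_soft_mask_set(IRQS_ALL_DISABLED);    (defined below)
 *    ... nothing is replayed here ...
 *    arch_local_irq_restore(IRQS_ENABLED);
 */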

#ifndef __ASSEMBLY__

static inline void __hard_irq_enable(void)
{
    if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
        wrtee(MSR_EE);
    else if (IS_ENABLED(CONFIG_PPC_8xx))
        wrtspr(SPRN_EIE);
    else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
        __mtmsrd(MSR_EE | MSR_RI, 1);
    else
        mtmsr(mfmsr() | MSR_EE);
}

static inline void __hard_irq_disable(void)
{
    if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
        wrtee(0);
    else if (IS_ENABLED(CONFIG_PPC_8xx))
        wrtspr(SPRN_EID);
    else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
        __mtmsrd(MSR_RI, 1);
    else
        mtmsr(mfmsr() & ~MSR_EE);
}

static inline void __hard_EE_RI_disable(void)
{
    if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
        wrtee(0);
    else if (IS_ENABLED(CONFIG_PPC_8xx))
        wrtspr(SPRN_NRI);
    else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
        __mtmsrd(0, 1);
    else
        mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

static inline void __hard_RI_enable(void)
{
    if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
        return;

    if (IS_ENABLED(CONFIG_PPC_8xx))
        wrtspr(SPRN_EID);
    else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
        __mtmsrd(MSR_RI, 1);
    else
        mtmsr(mfmsr() | MSR_RI);
}
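
/*
 * Example (illustrative): the helpers above pair up around windows that
 * must not take any hardware interrupt at all:
 *
 *    __hard_irq_disable();        MSR[EE] cleared, MSR[RI] kept
 *    ... window that must not be interrupted ...
 *    __hard_irq_enable();         MSR[EE] set again
 *
 * __hard_EE_RI_disable() additionally clears MSR[RI], marking the window
 * as unrecoverable should a machine check or similar hit there;
 * __hard_RI_enable() ends such a window.
 */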

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
    unsigned long flags;

    /* r13 holds the PACA pointer; load the irq_soft_mask byte from it. */
    asm volatile(
        "lbz %0,%1(13)"
        : "=r" (flags)
        : "i" (offsetof(struct paca_struct, irq_soft_mask)));

    return flags;
}

/*
 * The "memory" clobber acts both as a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
    /*
     * The irq mask must always include the STD bit if any are set,
     * and interrupts don't get replayed until the standard
     * interrupt (local_irq_disable()) is unmasked.
     *
     * Other masks must only provide additional masking beyond
     * the standard, and they are also not replayed until the
     * standard interrupt becomes unmasked.
     *
     * This could be changed, but it will require partial
     * unmasks to be replayed, among other things. For now, take
     * the simple approach.
     */
    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
        WARN_ON(mask && !(mask & IRQS_DISABLED));

    asm volatile(
        "stb %0,%1(13)"
        :
        : "r" (mask),
          "i" (offsetof(struct paca_struct, irq_soft_mask))
        : "memory");
}
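
/*
 * Example (illustrative): any non-zero mask must include IRQS_DISABLED,
 * so with CONFIG_PPC_IRQ_SOFT_MASK_DEBUG:
 *
 *    irq_soft_mask_set(IRQS_ALL_DISABLED);    fine
 *    irq_soft_mask_set(IRQS_PMI_DISABLED);    triggers the WARN_ON above
 */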

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
    unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
    WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

    asm volatile(
        "lbz %0,%1(13); stb %2,%1(13)"
        : "=&r" (flags)
        : "i" (offsetof(struct paca_struct, irq_soft_mask)),
          "r" (mask)
        : "memory");

    return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
    unsigned long flags, tmp;

    asm volatile(
        "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
        : "=&r" (flags), "=r" (tmp)
        : "i" (offsetof(struct paca_struct, irq_soft_mask)),
          "r" (mask)
        : "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
    WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

    return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
    return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
    irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
    arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
    return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
    return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
    return arch_irqs_disabled_flags(arch_local_save_flags());
}
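
/*
 * Example (illustrative): these accessors back the generic irqflags API,
 * so the usual pattern works unchanged on PPC64; it only flips the soft
 * mask rather than MSR[EE]:
 *
 *    unsigned long flags = arch_local_irq_save();
 *    ... critical section, interrupts soft-masked ...
 *    arch_local_irq_restore(flags);    may replay anything that fired
 */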

static inline void set_pmi_irq_pending(void)
{
    /*
     * Invoked from PMU callback functions to set the PMI bit in the paca.
     * This has to be called with irqs disabled (via hard_irq_disable()).
     */
    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
        WARN_ON_ONCE(mfmsr() & MSR_EE);

    get_paca()->irq_happened |= PACA_IRQ_PMI;
}

static inline void clear_pmi_irq_pending(void)
{
    /*
     * Invoked from PMU callback functions to clear the pending PMI bit
     * in the paca.
     */
    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
        WARN_ON_ONCE(mfmsr() & MSR_EE);

    get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}

static inline bool pmi_irq_pending(void)
{
    /*
     * Invoked from PMU callback functions to check if there is a pending
     * PMI bit in the paca.
     */
    if (get_paca()->irq_happened & PACA_IRQ_PMI)
        return true;

    return false;
}
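
/*
 * Example (illustrative sketch, not actual perf code): a PMU callback
 * that must run hard-disabled and tidy up a stale PMI:
 *
 *    hard_irq_disable();
 *    if (pmi_irq_pending())
 *        clear_pmi_irq_pending();
 */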

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irqs together with PMIs, a new
 * set of powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * functions is added. These macros are implemented using the generic
 * Linux local_irq_* code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)                   \
    do {                                \
        typecheck(unsigned long, flags);            \
        flags = irq_soft_mask_or_return(IRQS_DISABLED | \
                IRQS_PMI_DISABLED);         \
    } while (0)

#define raw_local_irq_pmu_restore(flags)                \
    do {                                \
        typecheck(unsigned long, flags);            \
        arch_local_irq_restore(flags);              \
    } while (0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)           \
    do {                            \
        raw_local_irq_pmu_save(flags);          \
        if (!raw_irqs_disabled_flags(flags))        \
            trace_hardirqs_off();           \
    } while (0)
#define powerpc_local_irq_pmu_restore(flags)            \
    do {                            \
        if (!raw_irqs_disabled_flags(flags))        \
            trace_hardirqs_on();            \
        raw_local_irq_pmu_restore(flags);       \
    } while (0)
#else
#define powerpc_local_irq_pmu_save(flags)           \
    do {                            \
        raw_local_irq_pmu_save(flags);          \
    } while (0)
#define powerpc_local_irq_pmu_restore(flags)            \
    do {                            \
        raw_local_irq_pmu_restore(flags);       \
    } while (0)
#endif  /* CONFIG_TRACE_IRQFLAGS */

#endif /* CONFIG_PPC_BOOK3S */
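
/*
 * Example (illustrative): protecting PMU state against both normal
 * interrupts and PMIs:
 *
 *    unsigned long flags;
 *
 *    powerpc_local_irq_pmu_save(flags);
 *    ... read or modify PMU registers ...
 *    powerpc_local_irq_pmu_restore(flags);
 */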

#define hard_irq_disable()  do {                    \
    unsigned long flags;                        \
    __hard_irq_disable();                       \
    flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);        \
    local_paca->irq_happened |= PACA_IRQ_HARD_DIS;          \
    if (!arch_irqs_disabled_flags(flags)) {             \
        asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
                        : "r" (current_stack_pointer)); \
        trace_hardirqs_off();                   \
    }                               \
} while (0)

static inline bool __lazy_irq_pending(u8 irq_happened)
{
    return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
    return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
    return __lazy_irq_pending(local_paca->irq_happened);
}
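
/*
 * Example (illustrative, loosely modelled on the idle entry path): after
 * hard-disabling, bail out if an interrupt already fired and is waiting
 * to be replayed:
 *
 *    hard_irq_disable();
 *    if (lazy_irq_pending())
 *        return;    handle the pending interrupt instead of idling
 *    ... enter a low power state ...
 */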

bool power_pmu_wants_prompt_pmi(void);

/*
 * This is called by asynchronous interrupts to check whether to
 * conditionally re-enable hard interrupts after having cleared
 * the source of the interrupt. They are kept disabled if there
 * is a different soft-masked interrupt pending that requires hard
 * masking.
 */
static inline bool should_hard_irq_enable(void)
{
    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
        WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
        WARN_ON(mfmsr() & MSR_EE);
    }

    if (!IS_ENABLED(CONFIG_PERF_EVENTS))
        return false;
    /*
     * If the PMU is not running, there is not much reason to enable
     * MSR[EE] in irq handlers because any interrupts would just be
     * soft-masked.
     *
     * TODO: Add test for 64e
     */
    if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
        return false;

    if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
        return false;

    return true;
}

/*
 * Do the hard enabling; only call this if should_hard_irq_enable() is true.
 */
static inline void do_hard_irq_enable(void)
{
    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
        WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
        WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
        WARN_ON(mfmsr() & MSR_EE);
    }
    /*
     * This allows PMI interrupts (and watchdog soft-NMIs) through.
     * There is no other reason to enable this way.
     */
    get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
    __hard_irq_enable();
}
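
/*
 * Example (illustrative): the intended pairing in an asynchronous
 * interrupt handler, after the interrupt source has been cleared:
 *
 *    if (should_hard_irq_enable())
 *        do_hard_irq_enable();
 */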

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
    return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
    regs->softe = val;
}
#else /* CONFIG_PPC64 */

static inline notrace unsigned long irq_soft_mask_return(void)
{
    return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
    return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
    if (IS_ENABLED(CONFIG_BOOKE))
        wrtee(flags);
    else
        mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
    unsigned long flags = arch_local_save_flags();

    if (IS_ENABLED(CONFIG_BOOKE))
        wrtee(0);
    else if (IS_ENABLED(CONFIG_PPC_8xx))
        wrtspr(SPRN_EID);
    else
        mtmsr(flags & ~MSR_EE);

    return flags;
}

static inline void arch_local_irq_disable(void)
{
    __hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
    __hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
    return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
    return arch_irqs_disabled_flags(arch_local_save_flags());
}
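
/*
 * Example (illustrative): on 32-bit there is no soft mask; the "flags"
 * cookie is simply the saved MSR:
 *
 *    unsigned long flags = arch_local_irq_save();    previous MSR
 *    arch_irqs_disabled_flags(flags);                tests MSR[EE]
 *    arch_local_irq_restore(flags);                  restores MSR[EE]
 */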

#define hard_irq_disable()      arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
    return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(void)
{
    return false;
}

static inline void do_hard_irq_enable(void)
{
    BUILD_BUG();
}

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST

#endif  /* __ASSEMBLY__ */
#endif  /* __KERNEL__ */
#endif  /* _ASM_POWERPC_HW_IRQ_H */