#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64
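
/*
 * PACA flags kept in paca->irq_happened.
 *
 * These bits record interrupts that arrived while soft-masked so they
 * can be replayed later, plus whether MSR[EE] has been hard-disabled.
 */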
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20
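
/*
 * Interrupt sources that, once they have fired while soft-masked, must
 * be kept hard-disabled (MSR[EE] clear) until they are replayed.
 */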
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

#endif /* CONFIG_PPC64 */
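
/*
 * Soft-mask flag values used in paca->irq_soft_mask and regs->softe.
 */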
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

#ifndef __ASSEMBLY__

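/*
 * Low-level helpers that flip MSR[EE]/MSR[RI] (or the platform
 * equivalent) directly, bypassing the lazy soft-mask accounting.
 */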
static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}

#ifdef CONFIG_PPC64
#include <asm/paca.h>

/*
 * Read the current soft-mask state. r13 holds the PACA pointer, so this
 * is a single byte load from paca->irq_soft_mask.
 */
static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

static inline notrace void irq_soft_mask_set(unsigned long mask)
{
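	/*
	 * Any non-zero mask is expected to include IRQS_DISABLED: partial
	 * masks (e.g. PMI-only) are not replayed until the standard
	 * interrupt disable is lifted, so the debug build warns if a mask
	 * sets other bits without it.
	 */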
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON(mask && !(mask & IRQS_DISABLED));

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline void set_pmi_irq_pending(void)
{
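	/*
	 * Record a pending PMI in the PACA. Must be called with
	 * interrupts hard-disabled, which the debug build checks below.
	 */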
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened |= PACA_IRQ_PMI;
}

static inline void clear_pmi_irq_pending(void)
{
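	/*
	 * Clear the pending PMI bit in the PACA. Like set_pmi_irq_pending(),
	 * this must be called with interrupts hard-disabled.
	 */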
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}

static inline bool pmi_irq_pending(void)
{
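	/*
	 * Report whether a PMI has been recorded as pending in the PACA.
	 */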
	if (get_paca()->irq_happened & PACA_IRQ_PMI)
		return true;

	return false;
}

#ifdef CONFIG_PPC_BOOK3S
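
/*
 * raw_local_irq_pmu_save/restore() and their powerpc_local_irq_pmu_*
 * wrappers disable and restore performance monitor interrupts (PMIs)
 * together with ordinary interrupts, mirroring the generic
 * local_irq_save/restore() pattern.
 */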
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |		\
				IRQS_PMI_DISABLED);			\
	} while(0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while(0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)				\
	do {								\
		raw_local_irq_pmu_save(flags);				\
		if (!raw_irqs_disabled_flags(flags))			\
			trace_hardirqs_off();				\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)				\
	do {								\
		if (!raw_irqs_disabled_flags(flags))			\
			trace_hardirqs_on();				\
		raw_local_irq_pmu_restore(flags);			\
	} while(0)
#else
#define powerpc_local_irq_pmu_save(flags)				\
	do {								\
		raw_local_irq_pmu_save(flags);				\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)				\
	do {								\
		raw_local_irq_pmu_restore(flags);			\
	} while (0)
#endif
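
/*
 * Typical (illustrative) use from PMU code, assuming a caller that must
 * block PMIs as well as ordinary interrupts around a critical section:
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	...	// touch state that a PMI handler could also touch
 *	powerpc_local_irq_pmu_restore(flags);
 */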

#endif /* CONFIG_PPC_BOOK3S */

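/*
 * hard_irq_disable() fully disables interrupts: MSR[EE] is cleared, the
 * soft mask is set to IRQS_ALL_DISABLED, PACA_IRQ_HARD_DIS is recorded,
 * and the current stack pointer is saved in paca->saved_r1 before the
 * tracing state is updated.
 */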
#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
			     : "r" (current_stack_pointer));		\
		trace_hardirqs_off();					\
	}								\
} while(0)

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

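/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard
 * disabled.
 */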
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

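/*
 * As above, but reads local_paca directly to avoid the debug checks
 * hidden in get_paca(); intended for low-level paths where those checks
 * could misfire.
 */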
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}

bool power_pmu_wants_prompt_pmi(void);
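
/*
 * Called by asynchronous interrupt handlers to decide whether MSR[EE]
 * should be re-enabled after the interrupt source has been cleared, so
 * that, for example, perf interrupts can be taken while the handler runs.
 */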
static inline bool should_hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(mfmsr() & MSR_EE);
	}

	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
		return false;

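	/*
	 * If the PMU does not want prompt PMIs, there is little point in
	 * enabling MSR[EE] here: any interrupt taken would only be
	 * soft-masked and replayed later anyway.
	 */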
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
		return false;

	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
		return false;

	return true;
}

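/*
 * Do the hard enabling; only call this when should_hard_irq_enable()
 * returned true.
 */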
static inline void do_hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
		WARN_ON(mfmsr() & MSR_EE);
	}

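	/*
	 * PACA_IRQ_HARD_DIS is cleared to match the MSR[EE] state being
	 * set below; interrupts remain soft-masked, so the main effect is
	 * to let PMIs be taken promptly.
	 */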
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	__hard_irq_enable();
}
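
/*
 * Illustrative pattern (not taken verbatim from a particular handler):
 * an asynchronous interrupt handler that has cleared its source may do
 *
 *	if (should_hard_irq_enable())
 *		do_hard_irq_enable();
 *
 * before running the bulk of its work, so profiling interrupts are not
 * held off for the duration of the handler.
 */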

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */
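
/*
 * Non-PPC64 builds have no lazy soft-mask state: the flags value is the
 * MSR, and enabling/disabling toggles MSR[EE] (or the Book E wrtee
 * equivalent) directly.
 */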

static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(void)
{
	return false;
}

static inline void do_hard_irq_enable(void)
{
	BUILD_BUG();
}

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */