0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/jiffies.h>
0009 #include <linux/irq.h>
0010 #include <linux/module.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/moduleparam.h>
0013 #include <linux/timer.h>
0014
0015 #include "internals.h"
0016
/*
 * Boot-time fixup mode: 0 = off, 1 = poll other lines when an irq is
 * unhandled ("irqfixup"), 2 = additionally poll on handled events for
 * IRQF_IRQPOLL/irq0 ("irqpoll").  Set by the __setup handlers below.
 */
static int irqfixup __read_mostly;

/* Re-poll period for interrupt lines disabled as spurious. */
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
/* CPU currently running a poll pass; checked to catch recursion. */
static int irq_poll_cpu;
/* Gate so only one poller (timer or misrouted_irq()) runs at a time. */
static atomic_t irq_poll_active;
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
/*
 * Wait until the interrupt is no longer in progress on another CPU so
 * the poller may safely invoke its handlers.
 *
 * Called with desc->lock held; the lock is dropped and re-acquired
 * while busy-waiting, so callers must revalidate any state they
 * cached before the call.
 *
 * Returns true when the handlers can be run, false when polling must
 * be abandoned (recursive poll on this CPU, UP kernel, or the line
 * was disabled / lost its action while we waited).
 */
bool irq_wait_for_poll(struct irq_desc *desc)
	__must_hold(&desc->lock)
{
	/*
	 * Polling the line we are currently handling on this very CPU
	 * would spin forever below — warn once and bail out.
	 */
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
	do {
		/* Drop the lock so the handling CPU can finish. */
		raw_spin_unlock(&desc->lock);
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();
		raw_spin_lock(&desc->lock);
		/* Re-check under the lock: handling may have restarted. */
	} while (irqd_irq_inprogress(&desc->irq_data));
	/* Might have been disabled or freed in the meantime. */
	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
	/* On UP nobody else can be in progress; nothing to wait for. */
	return false;
#endif
}
0057
0058
0059
0060
0061
/*
 * Try to invoke the handlers of one interrupt line from polling
 * context.  @force makes the poller run handlers even when the line
 * is disabled (used for lines shut down as spurious).
 *
 * Returns 1 when some handler reported IRQ_HANDLED, 0 otherwise.
 */
static int try_one_irq(struct irq_desc *desc, bool force)
{
	irqreturn_t ret = IRQ_NONE;
	struct irqaction *action;

	raw_spin_lock(&desc->lock);

	/*
	 * PER_CPU, nested-thread and explicitly polled interrupts are
	 * excluded from polling.
	 */
	if (irq_settings_is_per_cpu(desc) ||
	    irq_settings_is_nested_thread(desc) ||
	    irq_settings_is_polled(desc))
		goto out;

	/*
	 * Do not poll disabled interrupts unless the spurious-disabled
	 * poller asks for it explicitly via @force.
	 */
	if (irqd_irq_disabled(&desc->irq_data) && !force)
		goto out;

	/*
	 * All handlers on a line must agree on IRQF_SHARED, so testing
	 * the first action suffices.  Non-shared handlers (and timer
	 * interrupts) must not be invoked speculatively.
	 */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER))
		goto out;

	/* Already running on another processor. */
	if (irqd_irq_inprogress(&desc->irq_data)) {
		/*
		 * Leave a pending mark so the CPU currently handling
		 * the line loops once more and picks up our poll
		 * attempt too.
		 */
		desc->istate |= IRQS_PENDING;
		goto out;
	}

	/* Mark poll in progress so note_interrupt() ignores these events. */
	desc->istate |= IRQS_POLL_INPROGRESS;
	do {
		if (handle_irq_event(desc) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
		/* Reload: the action list may have changed under us. */
		action = desc->action;
	} while ((desc->istate & IRQS_PENDING) && action);
	desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
	raw_spin_unlock(&desc->lock);
	return ret == IRQ_HANDLED;
}
0117
0118 static int misrouted_irq(int irq)
0119 {
0120 struct irq_desc *desc;
0121 int i, ok = 0;
0122
0123 if (atomic_inc_return(&irq_poll_active) != 1)
0124 goto out;
0125
0126 irq_poll_cpu = smp_processor_id();
0127
0128 for_each_irq_desc(i, desc) {
0129 if (!i)
0130 continue;
0131
0132 if (i == irq)
0133 continue;
0134
0135 if (try_one_irq(desc, false))
0136 ok = 1;
0137 }
0138 out:
0139 atomic_dec(&irq_poll_active);
0140
0141 return ok;
0142 }
0143
/*
 * Timer callback: periodically force-poll every interrupt line that
 * was disabled as spurious so the underlying device keeps getting
 * serviced.  Re-arms itself every POLL_SPURIOUS_IRQ_INTERVAL jiffies.
 */
static void poll_spurious_irqs(struct timer_list *unused)
{
	struct irq_desc *desc;
	int i;

	/* Shares the single-poller gate with misrouted_irq(). */
	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int state;

		if (!i)
			continue;

		/* Racy lockless snapshot, but it doesn't matter:
		 * try_one_irq() revalidates under desc->lock. */
		state = desc->istate;
		barrier();
		if (!(state & IRQS_SPURIOUS_DISABLED))
			continue;

		/* Handlers expect hard-irq context: interrupts off. */
		local_irq_disable();
		try_one_irq(desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
0174
0175 static inline int bad_action_ret(irqreturn_t action_ret)
0176 {
0177 unsigned int r = action_ret;
0178
0179 if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
0180 return 0;
0181 return 1;
0182 }
0183
0184
0185
0186
0187
0188
0189
0190
0191
/*
 * Print a diagnostic for an interrupt that returned a bogus value or
 * that nobody cared about, followed by a stack trace and the list of
 * installed handlers (primary and threaded).
 */
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irqaction *action;
	unsigned long flags;

	if (bad_action_ret(action_ret)) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
		       irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
		       "the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need desc->lock here: note_interrupt() runs without it,
	 * so we might race with someone removing an action.  Walking
	 * the action list is only safe under the lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler);
		if (action->thread_fn)
			printk(KERN_CONT " threaded [<%p>] %ps",
			       action->thread_fn, action->thread_fn);
		printk(KERN_CONT "\n");
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
0224
0225 static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
0226 {
0227 static int count = 100;
0228
0229 if (count > 0) {
0230 count--;
0231 __report_bad_irq(desc, action_ret);
0232 }
0233 }
0234
/*
 * Decide whether note_interrupt() should run a misrouted-irq poll
 * pass for this event, based on the irqfixup boot mode.
 *
 * Returns nonzero when misrouted_irq() should be called.
 */
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * Mode 2 ("irqpoll") additionally polls on *handled* events,
	 * but only for irq 0 (the legacy PC timer) or lines whose
	 * first action is flagged IRQF_IRQPOLL.
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't hold the descriptor lock, "action" can change
	 * under us.  We don't really care, but we don't want to follow
	 * a NULL pointer, so force the compiler to load it exactly
	 * once with the barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}
0269
/*
 * Marker bit kept in desc->threads_handled_last: the verdict for a
 * previous IRQ_WAKE_THREAD-only event is still outstanding.
 */
#define SPURIOUS_DEFERRED 0x80000000

/*
 * Unhandled-interrupt accounting, called after each delivery with the
 * combined return value of the primary handlers.  When ~99.9% of the
 * last 100000 counted events went unhandled, the line is reported,
 * disabled, and handed over to the spurious poll timer.
 */
void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq;

	/* Events generated by the pollers themselves are not counted. */
	if (desc->istate & IRQS_POLL_INPROGRESS ||
	    irq_settings_is_polled(desc))
		return;

	if (bad_action_ret(action_ret)) {
		report_bad_irq(desc, action_ret);
		return;
	}

	/*
	 * Threaded handlers complete asynchronously: a primary handler
	 * returning only IRQ_WAKE_THREAD tells us nothing yet about
	 * whether the event will be handled.  The verdict is therefore
	 * deferred by one hardware interrupt: on the next event we
	 * compare desc->threads_handled (incremented by the irq
	 * thread(s) on IRQ_HANDLED) against the snapshot cached in
	 * threads_handled_last, whose top bit (SPURIOUS_DEFERRED)
	 * marks that a verdict is pending.
	 */
	if (action_ret & IRQ_WAKE_THREAD) {
		/*
		 * If a primary handler also returned IRQ_HANDLED the
		 * event is handled regardless of the thread outcome;
		 * only the pure IRQ_WAKE_THREAD case is deferred.
		 */
		if (action_ret == IRQ_WAKE_THREAD) {
			int handled;

			/*
			 * No verdict pending yet: set the marker and
			 * wait for the next interrupt to judge this
			 * one.  This event itself is not counted.
			 */
			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
				desc->threads_handled_last |= SPURIOUS_DEFERRED;
				return;
			}

			/*
			 * A verdict is pending.  OR the marker into
			 * the current counter value so it compares
			 * like-for-like with the cached snapshot; a
			 * difference means the irq thread(s) handled
			 * something since the deferred event.
			 */
			handled = atomic_read(&desc->threads_handled);
			handled |= SPURIOUS_DEFERRED;
			if (handled != desc->threads_handled_last) {
				action_ret = IRQ_HANDLED;

				/*
				 * Keep the marker set in the new
				 * snapshot: the next wake-thread-only
				 * event is judged directly against
				 * this value, so each event's verdict
				 * lags by exactly one interrupt.
				 */
				desc->threads_handled_last = handled;
			} else {
				/*
				 * No thread progress since the
				 * deferred event: account it as
				 * unhandled below.
				 */
				action_ret = IRQ_NONE;
			}
		} else {
			/*
			 * A primary handler took care of this event,
			 * so any pending deferred verdict is moot:
			 * drop the marker.  The counter snapshot is
			 * deliberately left alone; the next pure
			 * wake-thread event restarts the deferral.
			 */
			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
		}
	}

	if (unlikely(action_ret == IRQ_NONE)) {
		/*
		 * If it has been more than 100ms (HZ/10) since the
		 * last unhandled event, treat this as an isolated
		 * glitch and restart the count at 1; otherwise keep
		 * accumulating.
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
	}

	irq = irq_desc_get_irq(desc);
	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		/* A polled handler claimed it: forgive one unhandled event. */
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	if (likely(!desc->irqs_unhandled))
		return;

	/*
	 * irq_count only accumulates while unhandled events are
	 * outstanding; it establishes the 100000-event window the
	 * unhandled ratio is judged over.
	 */
	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * ~99.9% of the window went unhandled: the interrupt
		 * is stuck.  Report it once with full handler info.
		 */
		__report_bad_irq(desc, action_ret);

		/*
		 * Disable the line and let the periodic spurious
		 * poller keep the device serviced from now on.
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->istate |= IRQS_SPURIOUS_DISABLED;
		desc->depth++;
		irq_disable(desc);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}
0433
/* True when unhandled-irq ("lockup") detection is disabled. */
bool noirqdebug __read_mostly;

/*
 * "noirqdebug" boot option: switch the detector off.  @str is unused;
 * returning 1 tells the boot-option parser the option was consumed.
 * Non-static: also declared for use outside this file.
 */
int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
0447
0448 static int __init irqfixup_setup(char *str)
0449 {
0450 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
0451 pr_warn("irqfixup boot option not supported with PREEMPT_RT\n");
0452 return 1;
0453 }
0454 irqfixup = 1;
0455 printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
0456 printk(KERN_WARNING "This may impact system performance.\n");
0457
0458 return 1;
0459 }
0460
0461 __setup("irqfixup", irqfixup_setup);
0462 module_param(irqfixup, int, 0644);
0463
0464 static int __init irqpoll_setup(char *str)
0465 {
0466 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
0467 pr_warn("irqpoll boot option not supported with PREEMPT_RT\n");
0468 return 1;
0469 }
0470 irqfixup = 2;
0471 printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
0472 "enabled\n");
0473 printk(KERN_WARNING "This may significantly impact system "
0474 "performance\n");
0475 return 1;
0476 }
0477
0478 __setup("irqpoll", irqpoll_setup);