/*
 * This file contains driver APIs to the generic irq subsystem
 * (interrupt management: request/free, enable/disable, affinity, wakeup).
 */
0009 #define pr_fmt(fmt) "genirq: " fmt
0010
0011 #include <linux/irq.h>
0012 #include <linux/kthread.h>
0013 #include <linux/module.h>
0014 #include <linux/random.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/irqdomain.h>
0017 #include <linux/slab.h>
0018 #include <linux/sched.h>
0019 #include <linux/sched/rt.h>
0020 #include <linux/sched/task.h>
0021 #include <linux/sched/isolation.h>
0022 #include <uapi/linux/sched/types.h>
0023 #include <linux/task_work.h>
0024
0025 #include "internals.h"
0026
0027 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
0028 DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
0029
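/* Enable forced interrupt threading when the "threadirqs" boot option is given */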
0030 static int __init setup_forced_irqthreads(char *arg)
0031 {
0032 static_branch_enable(&force_irqthreads_key);
0033 return 0;
0034 }
0035 early_param("threadirqs", setup_forced_irqthreads);
0036 #endif
0037
0038 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
0039 {
0040 struct irq_data *irqd = irq_desc_get_irq_data(desc);
0041 bool inprogress;
0042
0043 do {
0044 unsigned long flags;
0045
0046
0047
0048
0049
0050 while (irqd_irq_inprogress(&desc->irq_data))
0051 cpu_relax();
0052
0053
0054 raw_spin_lock_irqsave(&desc->lock, flags);
0055 inprogress = irqd_irq_inprogress(&desc->irq_data);
0056
0057
0058
0059
0060
0061
0062 if (!inprogress && sync_chip) {
0063
0064
0065
0066
0067 __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
0068 &inprogress);
0069 }
0070 raw_spin_unlock_irqrestore(&desc->lock, flags);
0071
0072
0073 } while (inprogress);
0074 }
0075
/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this interrupt
 * to complete before returning. If you use this function while holding a
 * resource the IRQ handler may need you will deadlock. It does not take
 * associated threaded handlers into account.
 *
 * Do not use this for shutdown scenarios where you must be sure that all
 * parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
0098 bool synchronize_hardirq(unsigned int irq)
0099 {
0100 struct irq_desc *desc = irq_to_desc(irq);
0101
0102 if (desc) {
0103 __synchronize_hardirq(desc, false);
0104 return !atomic_read(&desc->threads_active);
0105 }
0106
0107 return true;
0108 }
0109 EXPORT_SYMBOL(synchronize_hardirq);
0110
/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt to
 * complete before returning. If you use this function while holding a
 * resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when waiting
 * for threaded handlers to complete.
 *
 * It optionally makes sure (when the irq chip supports that method) that
 * the interrupt is not pending in any CPU and waiting for service.
 */
0126 void synchronize_irq(unsigned int irq)
0127 {
0128 struct irq_desc *desc = irq_to_desc(irq);
0129
0130 if (desc) {
0131 __synchronize_hardirq(desc, true);
0132
0133
0134
0135
0136
0137 wait_event(desc->wait_for_threads,
0138 !atomic_read(&desc->threads_active));
0139 }
0140 }
0141 EXPORT_SYMBOL(synchronize_irq);
0142
0143 #ifdef CONFIG_SMP
0144 cpumask_var_t irq_default_affinity;
0145
0146 static bool __irq_can_set_affinity(struct irq_desc *desc)
0147 {
0148 if (!desc || !irqd_can_balance(&desc->irq_data) ||
0149 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
0150 return false;
0151 return true;
0152 }
0153
/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 */
0159 int irq_can_set_affinity(unsigned int irq)
0160 {
0161 return __irq_can_set_affinity(irq_to_desc(irq));
0162 }
0163
/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks that the
 * interrupt is not affinity managed.
 */
0171 bool irq_can_set_affinity_usr(unsigned int irq)
0172 {
0173 struct irq_desc *desc = irq_to_desc(irq);
0174
0175 return __irq_can_set_affinity(desc) &&
0176 !irqd_affinity_is_managed(&desc->irq_data);
0177 }
0178
/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * Just set IRQTF_AFFINITY and delegate the affinity setting to the
 * interrupt thread itself. set_cpus_allowed_ptr() cannot be called here
 * because desc->lock is held and this code can be called from hard
 * interrupt context.
 */
0188 void irq_set_thread_affinity(struct irq_desc *desc)
0189 {
0190 struct irqaction *action;
0191
0192 for_each_action_of_desc(desc, action)
0193 if (action->thread)
0194 set_bit(IRQTF_AFFINITY, &action->thread_flags);
0195 }
0196
0197 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
0198 static void irq_validate_effective_affinity(struct irq_data *data)
0199 {
0200 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
0201 struct irq_chip *chip = irq_data_get_irq_chip(data);
0202
0203 if (!cpumask_empty(m))
0204 return;
0205 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
0206 chip->name, data->irq);
0207 }
0208 #else
0209 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
0210 #endif
0211
0212 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
0213 bool force)
0214 {
0215 struct irq_desc *desc = irq_data_to_desc(data);
0216 struct irq_chip *chip = irq_data_get_irq_chip(data);
0217 const struct cpumask *prog_mask;
0218 int ret;
0219
0220 static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
0221 static struct cpumask tmp_mask;
0222
0223 if (!chip || !chip->irq_set_affinity)
0224 return -EINVAL;
0225
0226 raw_spin_lock(&tmp_mask_lock);
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246 if (irqd_affinity_is_managed(data) &&
0247 housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
0248 const struct cpumask *hk_mask;
0249
0250 hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
0251
0252 cpumask_and(&tmp_mask, mask, hk_mask);
0253 if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
0254 prog_mask = mask;
0255 else
0256 prog_mask = &tmp_mask;
0257 } else {
0258 prog_mask = mask;
0259 }
0260
0261
0262
0263
0264
0265
0266 cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
0267 if (!force && !cpumask_empty(&tmp_mask))
0268 ret = chip->irq_set_affinity(data, &tmp_mask, force);
0269 else if (force)
0270 ret = chip->irq_set_affinity(data, mask, force);
0271 else
0272 ret = -EINVAL;
0273
0274 raw_spin_unlock(&tmp_mask_lock);
0275
0276 switch (ret) {
0277 case IRQ_SET_MASK_OK:
0278 case IRQ_SET_MASK_OK_DONE:
0279 cpumask_copy(desc->irq_common_data.affinity, mask);
0280 fallthrough;
0281 case IRQ_SET_MASK_OK_NOCOPY:
0282 irq_validate_effective_affinity(data);
0283 irq_set_thread_affinity(desc);
0284 ret = 0;
0285 }
0286
0287 return ret;
0288 }
0289
0290 #ifdef CONFIG_GENERIC_PENDING_IRQ
0291 static inline int irq_set_affinity_pending(struct irq_data *data,
0292 const struct cpumask *dest)
0293 {
0294 struct irq_desc *desc = irq_data_to_desc(data);
0295
0296 irqd_set_move_pending(data);
0297 irq_copy_pending(desc, dest);
0298 return 0;
0299 }
0300 #else
0301 static inline int irq_set_affinity_pending(struct irq_data *data,
0302 const struct cpumask *dest)
0303 {
0304 return -EBUSY;
0305 }
0306 #endif
0307
0308 static int irq_try_set_affinity(struct irq_data *data,
0309 const struct cpumask *dest, bool force)
0310 {
0311 int ret = irq_do_set_affinity(data, dest, force);
0312
0313
0314
0315
0316
0317
0318 if (ret == -EBUSY && !force)
0319 ret = irq_set_affinity_pending(data, dest);
0320 return ret;
0321 }
0322
0323 static bool irq_set_affinity_deactivated(struct irq_data *data,
0324 const struct cpumask *mask, bool force)
0325 {
0326 struct irq_desc *desc = irq_data_to_desc(data);
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337 if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
0338 irqd_is_activated(data) || !irqd_affinity_on_activate(data))
0339 return false;
0340
0341 cpumask_copy(desc->irq_common_data.affinity, mask);
0342 irq_data_update_effective_affinity(data, mask);
0343 irqd_set(data, IRQD_AFFINITY_SET);
0344 return true;
0345 }
0346
0347 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
0348 bool force)
0349 {
0350 struct irq_chip *chip = irq_data_get_irq_chip(data);
0351 struct irq_desc *desc = irq_data_to_desc(data);
0352 int ret = 0;
0353
0354 if (!chip || !chip->irq_set_affinity)
0355 return -EINVAL;
0356
0357 if (irq_set_affinity_deactivated(data, mask, force))
0358 return 0;
0359
0360 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
0361 ret = irq_try_set_affinity(data, mask, force);
0362 } else {
0363 irqd_set_move_pending(data);
0364 irq_copy_pending(desc, mask);
0365 }
0366
0367 if (desc->affinity_notify) {
0368 kref_get(&desc->affinity_notify->kref);
0369 if (!schedule_work(&desc->affinity_notify->work)) {
0370
0371 kref_put(&desc->affinity_notify->kref,
0372 desc->affinity_notify->release);
0373 }
0374 }
0375 irqd_set(data, IRQD_AFFINITY_SET);
0376
0377 return ret;
0378 }
0379
/**
 * irq_update_affinity_desc - Update affinity management for an interrupt
 * @irq:	The interrupt number to update
 * @affinity:	Pointer to the affinity descriptor
 *
 * This interface can be used to configure the affinity management of
 * interrupts which have been allocated already.
 *
 * There are certain limitations on when it may be used: it fails when the
 * kernel is configured for generic IRQ reservation mode
 * (CONFIG_GENERIC_IRQ_RESERVATION_MODE), because that may conflict with
 * managed/non-managed interrupt accounting, and it fails when the
 * interrupt is already started or already configured as managed.
 */
0395 int irq_update_affinity_desc(unsigned int irq,
0396 struct irq_affinity_desc *affinity)
0397 {
0398 struct irq_desc *desc;
0399 unsigned long flags;
0400 bool activated;
0401 int ret = 0;
0402
0403
0404
0405
0406
0407 if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
0408 return -EOPNOTSUPP;
0409
0410 desc = irq_get_desc_buslock(irq, &flags, 0);
0411 if (!desc)
0412 return -EINVAL;
0413
0414
0415 if (irqd_is_started(&desc->irq_data)) {
0416 ret = -EBUSY;
0417 goto out_unlock;
0418 }
0419
0420
0421 if (irqd_affinity_is_managed(&desc->irq_data)) {
0422 ret = -EBUSY;
0423 goto out_unlock;
0424 }
0425
0426
0427
0428
0429
0430 activated = irqd_is_activated(&desc->irq_data);
0431 if (activated)
0432 irq_domain_deactivate_irq(&desc->irq_data);
0433
0434 if (affinity->is_managed) {
0435 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
0436 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
0437 }
0438
0439 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
0440
0441
0442 if (activated)
0443 irq_domain_activate_irq(&desc->irq_data, false);
0444
0445 out_unlock:
0446 irq_put_desc_busunlock(desc, flags);
0447 return ret;
0448 }
0449
0450 static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
0451 bool force)
0452 {
0453 struct irq_desc *desc = irq_to_desc(irq);
0454 unsigned long flags;
0455 int ret;
0456
0457 if (!desc)
0458 return -EINVAL;
0459
0460 raw_spin_lock_irqsave(&desc->lock, flags);
0461 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
0462 raw_spin_unlock_irqrestore(&desc->lock, flags);
0463 return ret;
0464 }
0465
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU.
 */
0473 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
0474 {
0475 return __irq_set_affinity(irq, cpumask, false);
0476 }
0477 EXPORT_SYMBOL_GPL(irq_set_affinity);
0478
/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
0490 int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
0491 {
0492 return __irq_set_affinity(irq, cpumask, true);
0493 }
0494 EXPORT_SYMBOL_GPL(irq_force_affinity);
0495
0496 int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
0497 bool setaffinity)
0498 {
0499 unsigned long flags;
0500 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
0501
0502 if (!desc)
0503 return -EINVAL;
0504 desc->affinity_hint = m;
0505 irq_put_desc_unlock(desc, flags);
0506 if (m && setaffinity)
0507 __irq_set_affinity(irq, m, false);
0508 return 0;
0509 }
0510 EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
0511
0512 static void irq_affinity_notify(struct work_struct *work)
0513 {
0514 struct irq_affinity_notify *notify =
0515 container_of(work, struct irq_affinity_notify, work);
0516 struct irq_desc *desc = irq_to_desc(notify->irq);
0517 cpumask_var_t cpumask;
0518 unsigned long flags;
0519
0520 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
0521 goto out;
0522
0523 raw_spin_lock_irqsave(&desc->lock, flags);
0524 if (irq_move_pending(&desc->irq_data))
0525 irq_get_pending(cpumask, desc);
0526 else
0527 cpumask_copy(cpumask, desc->irq_common_data.affinity);
0528 raw_spin_unlock_irqrestore(&desc->lock, flags);
0529
0530 notify->notify(notify, cpumask);
0531
0532 free_cpumask_var(cpumask);
0533 out:
0534 kref_put(&notify->kref, notify->release);
0535 }
0536
/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification.  Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context.  Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
0548 int
0549 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
0550 {
0551 struct irq_desc *desc = irq_to_desc(irq);
0552 struct irq_affinity_notify *old_notify;
0553 unsigned long flags;
0554
0555
0556 might_sleep();
0557
0558 if (!desc || desc->istate & IRQS_NMI)
0559 return -EINVAL;
0560
0561
0562 if (notify) {
0563 notify->irq = irq;
0564 kref_init(&notify->kref);
0565 INIT_WORK(&notify->work, irq_affinity_notify);
0566 }
0567
0568 raw_spin_lock_irqsave(&desc->lock, flags);
0569 old_notify = desc->affinity_notify;
0570 desc->affinity_notify = notify;
0571 raw_spin_unlock_irqrestore(&desc->lock, flags);
0572
0573 if (old_notify) {
0574 if (cancel_work_sync(&old_notify->work)) {
0575
0576 kref_put(&old_notify->kref, old_notify->release);
0577 }
0578 kref_put(&old_notify->kref, old_notify->release);
0579 }
0580
0581 return 0;
0582 }
0583 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
0584
0585 #ifndef CONFIG_AUTO_IRQ_AFFINITY
0586
0587
0588
0589 int irq_setup_affinity(struct irq_desc *desc)
0590 {
0591 struct cpumask *set = irq_default_affinity;
0592 int ret, node = irq_desc_get_node(desc);
0593 static DEFINE_RAW_SPINLOCK(mask_lock);
0594 static struct cpumask mask;
0595
0596
0597 if (!__irq_can_set_affinity(desc))
0598 return 0;
0599
0600 raw_spin_lock(&mask_lock);
0601
0602
0603
0604
0605 if (irqd_affinity_is_managed(&desc->irq_data) ||
0606 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
0607 if (cpumask_intersects(desc->irq_common_data.affinity,
0608 cpu_online_mask))
0609 set = desc->irq_common_data.affinity;
0610 else
0611 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
0612 }
0613
0614 cpumask_and(&mask, cpu_online_mask, set);
0615 if (cpumask_empty(&mask))
0616 cpumask_copy(&mask, cpu_online_mask);
0617
0618 if (node != NUMA_NO_NODE) {
0619 const struct cpumask *nodemask = cpumask_of_node(node);
0620
0621
0622 if (cpumask_intersects(&mask, nodemask))
0623 cpumask_and(&mask, &mask, nodemask);
0624 }
0625 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
0626 raw_spin_unlock(&mask_lock);
0627 return ret;
0628 }
0629 #else
0630
0631 int irq_setup_affinity(struct irq_desc *desc)
0632 {
0633 return irq_select_affinity(irq_desc_get_irq(desc));
0634 }
0635 #endif
0636 #endif
0637
/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq:	interrupt number to set affinity
 * @vcpu_info:	vCPU specific data or pointer to a percpu array of vCPU
 *		specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU affinity for
 * an irq. The vCPU specific data is passed from outside, such as KVM. One
 * example code path is as below: KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
0650 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
0651 {
0652 unsigned long flags;
0653 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
0654 struct irq_data *data;
0655 struct irq_chip *chip;
0656 int ret = -ENOSYS;
0657
0658 if (!desc)
0659 return -EINVAL;
0660
0661 data = irq_desc_get_irq_data(desc);
0662 do {
0663 chip = irq_data_get_irq_chip(data);
0664 if (chip && chip->irq_set_vcpu_affinity)
0665 break;
0666 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
0667 data = data->parent_data;
0668 #else
0669 data = NULL;
0670 #endif
0671 } while (data);
0672
0673 if (data)
0674 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
0675 irq_put_desc_unlock(desc, flags);
0676
0677 return ret;
0678 }
0679 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
0680
0681 void __disable_irq(struct irq_desc *desc)
0682 {
0683 if (!desc->depth++)
0684 irq_disable(desc);
0685 }
0686
0687 static int __disable_irq_nosync(unsigned int irq)
0688 {
0689 unsigned long flags;
0690 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
0691
0692 if (!desc)
0693 return -EINVAL;
0694 __disable_irq(desc);
0695 irq_put_desc_busunlock(desc, flags);
0696 return 0;
0697 }
0698
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Disables and Enables are nested.
 * Unlike disable_irq(), this function does not ensure existing instances
 * of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
0710 void disable_irq_nosync(unsigned int irq)
0711 {
0712 __disable_irq_nosync(irq);
0713 }
0714 EXPORT_SYMBOL(disable_irq_nosync);
0715
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and Disables are nested.
 * This function waits for any pending IRQ handlers for this interrupt to
 * complete before returning. If you use this function while holding a
 * resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when an
 * interrupt thread is associated to @irq.
 */
0728 void disable_irq(unsigned int irq)
0729 {
0730 if (!__disable_irq_nosync(irq))
0731 synchronize_irq(irq);
0732 }
0733 EXPORT_SYMBOL(disable_irq);
0734
/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and Disables are nested.
 * This function waits for any pending hard IRQ handlers for this interrupt
 * to complete before returning. If you use this function while holding a
 * resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context the
 * return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
0752 bool disable_hardirq(unsigned int irq)
0753 {
0754 if (!__disable_irq_nosync(irq))
0755 return synchronize_hardirq(irq);
0756
0757 return false;
0758 }
0759 EXPORT_SYMBOL_GPL(disable_hardirq);
0760
0761
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771 void disable_nmi_nosync(unsigned int irq)
0772 {
0773 disable_irq_nosync(irq);
0774 }
0775
0776 void __enable_irq(struct irq_desc *desc)
0777 {
0778 switch (desc->depth) {
0779 case 0:
0780 err_out:
0781 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
0782 irq_desc_get_irq(desc));
0783 break;
0784 case 1: {
0785 if (desc->istate & IRQS_SUSPENDED)
0786 goto err_out;
0787
0788 irq_settings_set_noprobe(desc);
0789
0790
0791
0792
0793
0794
0795
0796 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
0797 break;
0798 }
0799 default:
0800 desc->depth--;
0801 }
0802 }
0803
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq().  If this matches the
 * last disable, processing of interrupts on this IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
0815 void enable_irq(unsigned int irq)
0816 {
0817 unsigned long flags;
0818 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
0819
0820 if (!desc)
0821 return;
0822 if (WARN(!desc->irq_data.chip,
0823 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
0824 goto out;
0825
0826 __enable_irq(desc);
0827 out:
0828 irq_put_desc_busunlock(desc, flags);
0829 }
0830 EXPORT_SYMBOL(enable_irq);
0831
0832
0833
0834
0835
0836
0837
0838
0839
0840
0841 void enable_nmi(unsigned int irq)
0842 {
0843 enable_irq(irq);
0844 }
0845
0846 static int set_irq_wake_real(unsigned int irq, unsigned int on)
0847 {
0848 struct irq_desc *desc = irq_to_desc(irq);
0849 int ret = -ENXIO;
0850
0851 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
0852 return 0;
0853
0854 if (desc->irq_data.chip->irq_set_wake)
0855 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
0856
0857 return ret;
0858 }
0859
/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is disabled by
 * default. Enables and disables must match, just as they match for
 * non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep states like
 * "suspend to RAM".
 *
 * Note: irq enable/disable state is completely orthogonal to the
 * enable/disable state of irq wake. An irq can be disabled with
 * disable_irq() and still wake the system as long as the irq has wake
 * enabled.
 */
0879 int irq_set_irq_wake(unsigned int irq, unsigned int on)
0880 {
0881 unsigned long flags;
0882 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
0883 int ret = 0;
0884
0885 if (!desc)
0886 return -EINVAL;
0887
0888
0889 if (desc->istate & IRQS_NMI) {
0890 ret = -EINVAL;
0891 goto out_unlock;
0892 }
0893
0894
0895
0896
0897 if (on) {
0898 if (desc->wake_depth++ == 0) {
0899 ret = set_irq_wake_real(irq, on);
0900 if (ret)
0901 desc->wake_depth = 0;
0902 else
0903 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
0904 }
0905 } else {
0906 if (desc->wake_depth == 0) {
0907 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
0908 } else if (--desc->wake_depth == 0) {
0909 ret = set_irq_wake_real(irq, on);
0910 if (ret)
0911 desc->wake_depth = 1;
0912 else
0913 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
0914 }
0915 }
0916
0917 out_unlock:
0918 irq_put_desc_busunlock(desc, flags);
0919 return ret;
0920 }
0921 EXPORT_SYMBOL(irq_set_irq_wake);
0922
0923
0924
0925
0926
0927
0928 int can_request_irq(unsigned int irq, unsigned long irqflags)
0929 {
0930 unsigned long flags;
0931 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
0932 int canrequest = 0;
0933
0934 if (!desc)
0935 return 0;
0936
0937 if (irq_settings_can_request(desc)) {
0938 if (!desc->action ||
0939 irqflags & desc->action->flags & IRQF_SHARED)
0940 canrequest = 1;
0941 }
0942 irq_put_desc_unlock(desc, flags);
0943 return canrequest;
0944 }
0945
0946 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
0947 {
0948 struct irq_chip *chip = desc->irq_data.chip;
0949 int ret, unmask = 0;
0950
0951 if (!chip || !chip->irq_set_type) {
0952
0953
0954
0955
0956 pr_debug("No set_type function for IRQ %d (%s)\n",
0957 irq_desc_get_irq(desc),
0958 chip ? (chip->name ? : "unknown") : "unknown");
0959 return 0;
0960 }
0961
0962 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
0963 if (!irqd_irq_masked(&desc->irq_data))
0964 mask_irq(desc);
0965 if (!irqd_irq_disabled(&desc->irq_data))
0966 unmask = 1;
0967 }
0968
0969
0970 flags &= IRQ_TYPE_SENSE_MASK;
0971 ret = chip->irq_set_type(&desc->irq_data, flags);
0972
0973 switch (ret) {
0974 case IRQ_SET_MASK_OK:
0975 case IRQ_SET_MASK_OK_DONE:
0976 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
0977 irqd_set(&desc->irq_data, flags);
0978 fallthrough;
0979
0980 case IRQ_SET_MASK_OK_NOCOPY:
0981 flags = irqd_get_trigger_type(&desc->irq_data);
0982 irq_settings_set_trigger_mask(desc, flags);
0983 irqd_clear(&desc->irq_data, IRQD_LEVEL);
0984 irq_settings_clr_level(desc);
0985 if (flags & IRQ_TYPE_LEVEL_MASK) {
0986 irq_settings_set_level(desc);
0987 irqd_set(&desc->irq_data, IRQD_LEVEL);
0988 }
0989
0990 ret = 0;
0991 break;
0992 default:
0993 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
0994 flags, irq_desc_get_irq(desc), chip->irq_set_type);
0995 }
0996 if (unmask)
0997 unmask_irq(desc);
0998 return ret;
0999 }
1000
1001 #ifdef CONFIG_HARDIRQS_SW_RESEND
1002 int irq_set_parent(int irq, int parent_irq)
1003 {
1004 unsigned long flags;
1005 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1006
1007 if (!desc)
1008 return -EINVAL;
1009
1010 desc->parent_irq = parent_irq;
1011
1012 irq_put_desc_unlock(desc, flags);
1013 return 0;
1014 }
1015 EXPORT_SYMBOL_GPL(irq_set_parent);
1016 #endif
1017
1018
1019
1020
1021
1022
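/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */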
1023 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
1024 {
1025 return IRQ_WAKE_THREAD;
1026 }
1027
1028
1029
1030
1031
1032 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
1033 {
1034 WARN(1, "Primary handler called for nested irq %d\n", irq);
1035 return IRQ_NONE;
1036 }
1037
1038 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
1039 {
1040 WARN(1, "Secondary action handler called for irq %d\n", irq);
1041 return IRQ_NONE;
1042 }
1043
1044 static int irq_wait_for_interrupt(struct irqaction *action)
1045 {
1046 for (;;) {
1047 set_current_state(TASK_INTERRUPTIBLE);
1048
1049 if (kthread_should_stop()) {
1050
1051 if (test_and_clear_bit(IRQTF_RUNTHREAD,
1052 &action->thread_flags)) {
1053 __set_current_state(TASK_RUNNING);
1054 return 0;
1055 }
1056 __set_current_state(TASK_RUNNING);
1057 return -1;
1058 }
1059
1060 if (test_and_clear_bit(IRQTF_RUNTHREAD,
1061 &action->thread_flags)) {
1062 __set_current_state(TASK_RUNNING);
1063 return 0;
1064 }
1065 schedule();
1066 }
1067 }
1068
1069
1070
1071
1072
1073
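/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */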
1074 static void irq_finalize_oneshot(struct irq_desc *desc,
1075 struct irqaction *action)
1076 {
1077 if (!(desc->istate & IRQS_ONESHOT) ||
1078 action->handler == irq_forced_secondary_handler)
1079 return;
1080 again:
1081 chip_bus_lock(desc);
1082 raw_spin_lock_irq(&desc->lock);
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1099 raw_spin_unlock_irq(&desc->lock);
1100 chip_bus_sync_unlock(desc);
1101 cpu_relax();
1102 goto again;
1103 }
1104
1105
1106
1107
1108
1109
1110 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1111 goto out_unlock;
1112
1113 desc->threads_oneshot &= ~action->thread_mask;
1114
1115 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1116 irqd_irq_masked(&desc->irq_data))
1117 unmask_threaded_irq(desc);
1118
1119 out_unlock:
1120 raw_spin_unlock_irq(&desc->lock);
1121 chip_bus_sync_unlock(desc);
1122 }
1123
1124 #ifdef CONFIG_SMP
1125
1126
1127
1128 static void
1129 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1130 {
1131 cpumask_var_t mask;
1132 bool valid = true;
1133
1134 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1135 return;
1136
1137
1138
1139
1140
1141 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1142 set_bit(IRQTF_AFFINITY, &action->thread_flags);
1143 return;
1144 }
1145
1146 raw_spin_lock_irq(&desc->lock);
1147
1148
1149
1150
1151 if (cpumask_available(desc->irq_common_data.affinity)) {
1152 const struct cpumask *m;
1153
1154 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1155 cpumask_copy(mask, m);
1156 } else {
1157 valid = false;
1158 }
1159 raw_spin_unlock_irq(&desc->lock);
1160
1161 if (valid)
1162 set_cpus_allowed_ptr(current, mask);
1163 free_cpumask_var(mask);
1164 }
1165 #else
1166 static inline void
1167 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1168 #endif
1169
1170
1171
1172
1173
1174
1175
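/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */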
1176 static irqreturn_t
1177 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1178 {
1179 irqreturn_t ret;
1180
1181 local_bh_disable();
1182 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1183 local_irq_disable();
1184 ret = action->thread_fn(action->irq, action->dev_id);
1185 if (ret == IRQ_HANDLED)
1186 atomic_inc(&desc->threads_handled);
1187
1188 irq_finalize_oneshot(desc, action);
1189 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1190 local_irq_enable();
1191 local_bh_enable();
1192 return ret;
1193 }
1194
1195
1196
1197
1198
1199
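/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */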
1200 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1201 struct irqaction *action)
1202 {
1203 irqreturn_t ret;
1204
1205 ret = action->thread_fn(action->irq, action->dev_id);
1206 if (ret == IRQ_HANDLED)
1207 atomic_inc(&desc->threads_handled);
1208
1209 irq_finalize_oneshot(desc, action);
1210 return ret;
1211 }
1212
1213 static void wake_threads_waitq(struct irq_desc *desc)
1214 {
1215 if (atomic_dec_and_test(&desc->threads_active))
1216 wake_up(&desc->wait_for_threads);
1217 }
1218
1219 static void irq_thread_dtor(struct callback_head *unused)
1220 {
1221 struct task_struct *tsk = current;
1222 struct irq_desc *desc;
1223 struct irqaction *action;
1224
1225 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1226 return;
1227
1228 action = kthread_data(tsk);
1229
1230 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1231 tsk->comm, tsk->pid, action->irq);
1232
1233
1234 desc = irq_to_desc(action->irq);
1235
1236
1237
1238
1239 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1240 wake_threads_waitq(desc);
1241
1242
1243 irq_finalize_oneshot(desc, action);
1244 }
1245
1246 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1247 {
1248 struct irqaction *secondary = action->secondary;
1249
1250 if (WARN_ON_ONCE(!secondary))
1251 return;
1252
1253 raw_spin_lock_irq(&desc->lock);
1254 __irq_wake_thread(desc, secondary);
1255 raw_spin_unlock_irq(&desc->lock);
1256 }
1257
1258
1259
1260
1261 static void irq_thread_set_ready(struct irq_desc *desc,
1262 struct irqaction *action)
1263 {
1264 set_bit(IRQTF_READY, &action->thread_flags);
1265 wake_up(&desc->wait_for_threads);
1266 }
1267
1268
1269
1270
1271
1272 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1273 struct irqaction *action)
1274 {
1275 if (!action || !action->thread)
1276 return;
1277
1278 wake_up_process(action->thread);
1279 wait_event(desc->wait_for_threads,
1280 test_bit(IRQTF_READY, &action->thread_flags));
1281 }
1282
1283
1284
1285
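/*
 * Interrupt handler thread
 */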
1286 static int irq_thread(void *data)
1287 {
1288 struct callback_head on_exit_work;
1289 struct irqaction *action = data;
1290 struct irq_desc *desc = irq_to_desc(action->irq);
1291 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1292 struct irqaction *action);
1293
1294 irq_thread_set_ready(desc, action);
1295
1296 sched_set_fifo(current);
1297
1298 if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
1299 &action->thread_flags))
1300 handler_fn = irq_forced_thread_fn;
1301 else
1302 handler_fn = irq_thread_fn;
1303
1304 init_task_work(&on_exit_work, irq_thread_dtor);
1305 task_work_add(current, &on_exit_work, TWA_NONE);
1306
1307 irq_thread_check_affinity(desc, action);
1308
1309 while (!irq_wait_for_interrupt(action)) {
1310 irqreturn_t action_ret;
1311
1312 irq_thread_check_affinity(desc, action);
1313
1314 action_ret = handler_fn(desc, action);
1315 if (action_ret == IRQ_WAKE_THREAD)
1316 irq_wake_secondary(desc, action);
1317
1318 wake_threads_waitq(desc);
1319 }
1320
1321
1322
1323
1324
1325
1326
1327 task_work_cancel(current, irq_thread_dtor);
1328 return 0;
1329 }
1330
1331
1332
1333
1334
1335
1336
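/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 */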
1337 void irq_wake_thread(unsigned int irq, void *dev_id)
1338 {
1339 struct irq_desc *desc = irq_to_desc(irq);
1340 struct irqaction *action;
1341 unsigned long flags;
1342
1343 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1344 return;
1345
1346 raw_spin_lock_irqsave(&desc->lock, flags);
1347 for_each_action_of_desc(desc, action) {
1348 if (action->dev_id == dev_id) {
1349 if (action->thread)
1350 __irq_wake_thread(desc, action);
1351 break;
1352 }
1353 }
1354 raw_spin_unlock_irqrestore(&desc->lock, flags);
1355 }
1356 EXPORT_SYMBOL_GPL(irq_wake_thread);
1357
1358 static int irq_setup_forced_threading(struct irqaction *new)
1359 {
1360 if (!force_irqthreads())
1361 return 0;
1362 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1363 return 0;
1364
1365
1366
1367
1368
1369 if (new->handler == irq_default_primary_handler)
1370 return 0;
1371
1372 new->flags |= IRQF_ONESHOT;
1373
1374
1375
1376
1377
1378
1379 if (new->handler && new->thread_fn) {
1380
1381 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1382 if (!new->secondary)
1383 return -ENOMEM;
1384 new->secondary->handler = irq_forced_secondary_handler;
1385 new->secondary->thread_fn = new->thread_fn;
1386 new->secondary->dev_id = new->dev_id;
1387 new->secondary->irq = new->irq;
1388 new->secondary->name = new->name;
1389 }
1390
1391 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1392 new->thread_fn = new->handler;
1393 new->handler = irq_default_primary_handler;
1394 return 0;
1395 }
1396
1397 static int irq_request_resources(struct irq_desc *desc)
1398 {
1399 struct irq_data *d = &desc->irq_data;
1400 struct irq_chip *c = d->chip;
1401
1402 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1403 }
1404
1405 static void irq_release_resources(struct irq_desc *desc)
1406 {
1407 struct irq_data *d = &desc->irq_data;
1408 struct irq_chip *c = d->chip;
1409
1410 if (c->irq_release_resources)
1411 c->irq_release_resources(d);
1412 }
1413
1414 static bool irq_supports_nmi(struct irq_desc *desc)
1415 {
1416 struct irq_data *d = irq_desc_get_irq_data(desc);
1417
1418 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1419
1420 if (d->parent_data)
1421 return false;
1422 #endif
1423
1424 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1425 return false;
1426
1427 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1428 }
1429
1430 static int irq_nmi_setup(struct irq_desc *desc)
1431 {
1432 struct irq_data *d = irq_desc_get_irq_data(desc);
1433 struct irq_chip *c = d->chip;
1434
1435 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1436 }
1437
1438 static void irq_nmi_teardown(struct irq_desc *desc)
1439 {
1440 struct irq_data *d = irq_desc_get_irq_data(desc);
1441 struct irq_chip *c = d->chip;
1442
1443 if (c->irq_nmi_teardown)
1444 c->irq_nmi_teardown(d);
1445 }
1446
1447 static int
1448 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1449 {
1450 struct task_struct *t;
1451
1452 if (!secondary) {
1453 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1454 new->name);
1455 } else {
1456 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1457 new->name);
1458 }
1459
1460 if (IS_ERR(t))
1461 return PTR_ERR(t);
1462
1463
1464
1465
1466
1467
1468 new->thread = get_task_struct(t);
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1479 return 0;
1480 }
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
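/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */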
1496 static int
1497 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1498 {
1499 struct irqaction *old, **old_ptr;
1500 unsigned long flags, thread_mask = 0;
1501 int ret, nested, shared = 0;
1502
1503 if (!desc)
1504 return -EINVAL;
1505
1506 if (desc->irq_data.chip == &no_irq_chip)
1507 return -ENOSYS;
1508 if (!try_module_get(desc->owner))
1509 return -ENODEV;
1510
1511 new->irq = irq;
1512
1513
1514
1515
1516
1517 if (!(new->flags & IRQF_TRIGGER_MASK))
1518 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1519
1520
1521
1522
1523
1524 nested = irq_settings_is_nested_thread(desc);
1525 if (nested) {
1526 if (!new->thread_fn) {
1527 ret = -EINVAL;
1528 goto out_mput;
1529 }
1530
1531
1532
1533
1534
1535 new->handler = irq_nested_primary_handler;
1536 } else {
1537 if (irq_settings_can_thread(desc)) {
1538 ret = irq_setup_forced_threading(new);
1539 if (ret)
1540 goto out_mput;
1541 }
1542 }
1543
1544
1545
1546
1547
1548
1549 if (new->thread_fn && !nested) {
1550 ret = setup_irq_thread(new, irq, false);
1551 if (ret)
1552 goto out_mput;
1553 if (new->secondary) {
1554 ret = setup_irq_thread(new->secondary, irq, true);
1555 if (ret)
1556 goto out_thread;
1557 }
1558 }
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1570 new->flags &= ~IRQF_ONESHOT;
1571
1572
1573
1574
1575
1576
1577
1578
1579 mutex_lock(&desc->request_mutex);
1580
1581
1582
1583
1584
1585
1586 chip_bus_lock(desc);
1587
1588
1589 if (!desc->action) {
1590 ret = irq_request_resources(desc);
1591 if (ret) {
1592 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1593 new->name, irq, desc->irq_data.chip->name);
1594 goto out_bus_unlock;
1595 }
1596 }
1597
1598
1599
1600
1601
1602
1603
1604 raw_spin_lock_irqsave(&desc->lock, flags);
1605 old_ptr = &desc->action;
1606 old = *old_ptr;
1607 if (old) {
1608
1609
1610
1611
1612
1613
1614
1615
1616 unsigned int oldtype;
1617
1618 if (desc->istate & IRQS_NMI) {
1619 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1620 new->name, irq, desc->irq_data.chip->name);
1621 ret = -EINVAL;
1622 goto out_unlock;
1623 }
1624
1625
1626
1627
1628
1629 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1630 oldtype = irqd_get_trigger_type(&desc->irq_data);
1631 } else {
1632 oldtype = new->flags & IRQF_TRIGGER_MASK;
1633 irqd_set_trigger_type(&desc->irq_data, oldtype);
1634 }
1635
1636 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1637 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1638 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1639 goto mismatch;
1640
1641
1642 if ((old->flags & IRQF_PERCPU) !=
1643 (new->flags & IRQF_PERCPU))
1644 goto mismatch;
1645
1646
1647 do {
1648
1649
1650
1651
1652
1653 thread_mask |= old->thread_mask;
1654 old_ptr = &old->next;
1655 old = *old_ptr;
1656 } while (old);
1657 shared = 1;
1658 }
1659
1660
1661
1662
1663
1664
1665 if (new->flags & IRQF_ONESHOT) {
1666
1667
1668
1669
1670 if (thread_mask == ~0UL) {
1671 ret = -EBUSY;
1672 goto out_unlock;
1673 }
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694 new->thread_mask = 1UL << ffz(thread_mask);
1695
1696 } else if (new->handler == irq_default_primary_handler &&
1697 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1714 new->name, irq);
1715 ret = -EINVAL;
1716 goto out_unlock;
1717 }
1718
1719 if (!shared) {
1720
1721 if (new->flags & IRQF_TRIGGER_MASK) {
1722 ret = __irq_set_trigger(desc,
1723 new->flags & IRQF_TRIGGER_MASK);
1724
1725 if (ret)
1726 goto out_unlock;
1727 }
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740 ret = irq_activate(desc);
1741 if (ret)
1742 goto out_unlock;
1743
1744 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1745 IRQS_ONESHOT | IRQS_WAITING);
1746 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1747
1748 if (new->flags & IRQF_PERCPU) {
1749 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1750 irq_settings_set_per_cpu(desc);
1751 if (new->flags & IRQF_NO_DEBUG)
1752 irq_settings_set_no_debug(desc);
1753 }
1754
1755 if (noirqdebug)
1756 irq_settings_set_no_debug(desc);
1757
1758 if (new->flags & IRQF_ONESHOT)
1759 desc->istate |= IRQS_ONESHOT;
1760
1761
1762 if (new->flags & IRQF_NOBALANCING) {
1763 irq_settings_set_no_balancing(desc);
1764 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1765 }
1766
1767 if (!(new->flags & IRQF_NO_AUTOEN) &&
1768 irq_settings_can_autoenable(desc)) {
1769 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1770 } else {
1771
1772
1773
1774
1775
1776
1777 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1778
1779 desc->depth = 1;
1780 }
1781
1782 } else if (new->flags & IRQF_TRIGGER_MASK) {
1783 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1784 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1785
1786 if (nmsk != omsk)
1787
1788 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1789 irq, omsk, nmsk);
1790 }
1791
1792 *old_ptr = new;
1793
1794 irq_pm_install_action(desc, new);
1795
1796
1797 desc->irq_count = 0;
1798 desc->irqs_unhandled = 0;
1799
1800
1801
1802
1803
1804 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1805 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1806 __enable_irq(desc);
1807 }
1808
1809 raw_spin_unlock_irqrestore(&desc->lock, flags);
1810 chip_bus_sync_unlock(desc);
1811 mutex_unlock(&desc->request_mutex);
1812
1813 irq_setup_timings(desc, new);
1814
1815 wake_up_and_wait_for_irq_thread_ready(desc, new);
1816 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1817
1818 register_irq_proc(irq, desc);
1819 new->dir = NULL;
1820 register_handler_proc(irq, new);
1821 return 0;
1822
1823 mismatch:
1824 if (!(new->flags & IRQF_PROBE_SHARED)) {
1825 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1826 irq, new->flags, new->name, old->flags, old->name);
1827 #ifdef CONFIG_DEBUG_SHIRQ
1828 dump_stack();
1829 #endif
1830 }
1831 ret = -EBUSY;
1832
1833 out_unlock:
1834 raw_spin_unlock_irqrestore(&desc->lock, flags);
1835
1836 if (!desc->action)
1837 irq_release_resources(desc);
1838 out_bus_unlock:
1839 chip_bus_sync_unlock(desc);
1840 mutex_unlock(&desc->request_mutex);
1841
1842 out_thread:
1843 if (new->thread) {
1844 struct task_struct *t = new->thread;
1845
1846 new->thread = NULL;
1847 kthread_stop(t);
1848 put_task_struct(t);
1849 }
1850 if (new->secondary && new->secondary->thread) {
1851 struct task_struct *t = new->secondary->thread;
1852
1853 new->secondary->thread = NULL;
1854 kthread_stop(t);
1855 put_task_struct(t);
1856 }
1857 out_mput:
1858 module_put(desc->owner);
1859 return ret;
1860 }
1861
1862
1863
1864
1865
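/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */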
1866 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1867 {
1868 unsigned irq = desc->irq_data.irq;
1869 struct irqaction *action, **action_ptr;
1870 unsigned long flags;
1871
1872 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1873
1874 mutex_lock(&desc->request_mutex);
1875 chip_bus_lock(desc);
1876 raw_spin_lock_irqsave(&desc->lock, flags);
1877
1878
1879
1880
1881
1882 action_ptr = &desc->action;
1883 for (;;) {
1884 action = *action_ptr;
1885
1886 if (!action) {
1887 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1888 raw_spin_unlock_irqrestore(&desc->lock, flags);
1889 chip_bus_sync_unlock(desc);
1890 mutex_unlock(&desc->request_mutex);
1891 return NULL;
1892 }
1893
1894 if (action->dev_id == dev_id)
1895 break;
1896 action_ptr = &action->next;
1897 }
1898
1899
1900 *action_ptr = action->next;
1901
1902 irq_pm_remove_action(desc, action);
1903
1904
1905 if (!desc->action) {
1906 irq_settings_clr_disable_unlazy(desc);
1907
1908 irq_shutdown(desc);
1909 }
1910
1911 #ifdef CONFIG_SMP
1912
1913 if (WARN_ON_ONCE(desc->affinity_hint))
1914 desc->affinity_hint = NULL;
1915 #endif
1916
1917 raw_spin_unlock_irqrestore(&desc->lock, flags);
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932 chip_bus_sync_unlock(desc);
1933
1934 unregister_handler_proc(irq, action);
1935
1936
1937
1938
1939
1940
1941 __synchronize_hardirq(desc, true);
1942
1943 #ifdef CONFIG_DEBUG_SHIRQ
1944
1945
1946
1947
1948
1949
1950
1951
1952 if (action->flags & IRQF_SHARED) {
1953 local_irq_save(flags);
1954 action->handler(irq, dev_id);
1955 local_irq_restore(flags);
1956 }
1957 #endif
1958
1959
1960
1961
1962
1963
1964
1965 if (action->thread) {
1966 kthread_stop(action->thread);
1967 put_task_struct(action->thread);
1968 if (action->secondary && action->secondary->thread) {
1969 kthread_stop(action->secondary->thread);
1970 put_task_struct(action->secondary->thread);
1971 }
1972 }
1973
1974
1975 if (!desc->action) {
1976
1977
1978
1979
1980 chip_bus_lock(desc);
1981
1982
1983
1984
1985 raw_spin_lock_irqsave(&desc->lock, flags);
1986 irq_domain_deactivate_irq(&desc->irq_data);
1987 raw_spin_unlock_irqrestore(&desc->lock, flags);
1988
1989 irq_release_resources(desc);
1990 chip_bus_sync_unlock(desc);
1991 irq_remove_timings(desc);
1992 }
1993
1994 mutex_unlock(&desc->request_mutex);
1995
1996 irq_chip_pm_put(&desc->irq_data);
1997 module_put(desc->owner);
1998 kfree(action->secondary);
1999 return action;
2000 }
2001
/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ have
 * completed.
 *
 * This function must not be called from interrupt context.
 *
 * Returns the devname argument passed to request_irq.
 */
2018 const void *free_irq(unsigned int irq, void *dev_id)
2019 {
2020 struct irq_desc *desc = irq_to_desc(irq);
2021 struct irqaction *action;
2022 const char *devname;
2023
2024 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2025 return NULL;
2026
2027 #ifdef CONFIG_SMP
2028 if (WARN_ON(desc->affinity_notify))
2029 desc->affinity_notify = NULL;
2030 #endif
2031
2032 action = __free_irq(desc, dev_id);
2033
2034 if (!action)
2035 return NULL;
2036
2037 devname = action->name;
2038 kfree(action);
2039 return devname;
2040 }
2041 EXPORT_SYMBOL(free_irq);
2042
2043
2044 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2045 {
2046 const char *devname = NULL;
2047
2048 desc->istate &= ~IRQS_NMI;
2049
2050 if (!WARN_ON(desc->action == NULL)) {
2051 irq_pm_remove_action(desc, desc->action);
2052 devname = desc->action->name;
2053 unregister_handler_proc(irq, desc->action);
2054
2055 kfree(desc->action);
2056 desc->action = NULL;
2057 }
2058
2059 irq_settings_clr_disable_unlazy(desc);
2060 irq_shutdown_and_deactivate(desc);
2061
2062 irq_release_resources(desc);
2063
2064 irq_chip_pm_put(&desc->irq_data);
2065 module_put(desc->owner);
2066
2067 return devname;
2068 }
2069
2070 const void *free_nmi(unsigned int irq, void *dev_id)
2071 {
2072 struct irq_desc *desc = irq_to_desc(irq);
2073 unsigned long flags;
2074 const void *devname;
2075
2076 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
2077 return NULL;
2078
2079 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2080 return NULL;
2081
2082
2083 if (WARN_ON(desc->depth == 0))
2084 disable_nmi_nosync(irq);
2085
2086 raw_spin_lock_irqsave(&desc->lock, flags);
2087
2088 irq_nmi_teardown(desc);
2089 devname = __cleanup_nmi(irq, desc);
2090
2091 raw_spin_unlock_irqrestore(&desc->lock, flags);
2092
2093 return devname;
2094 }
2095
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	  Primary handler for threaded interrupts.
 *	  If handler is NULL and thread_fn != NULL
 *	  the default primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	    If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the interrupt line
 * and IRQ handling. From the point this call is made your handler function
 * may be invoked. Since your handler function must clear any interrupt the
 * board raises, you must take care both to initialise your hardware and to
 * set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device then you
 * need to supply @handler and @thread_fn. @handler is still called in hard
 * interrupt context and has to check whether the interrupt originates from
 * the device. If yes, it needs to disable the interrupt on the device and
 * return IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support shared
 * interrupts.
 *
 * @dev_id must be globally unique. Normally the address of the device data
 * structure is used as the cookie. Since the handler receives this value
 * it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id as this is
 * required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *	IRQF_ONESHOT		Run thread_fn with interrupt line masked
 */
2138 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2139 irq_handler_t thread_fn, unsigned long irqflags,
2140 const char *devname, void *dev_id)
2141 {
2142 struct irqaction *action;
2143 struct irq_desc *desc;
2144 int retval;
2145
2146 if (irq == IRQ_NOTCONNECTED)
2147 return -ENOTCONN;
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162 if (((irqflags & IRQF_SHARED) && !dev_id) ||
2163 ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
2164 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2165 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2166 return -EINVAL;
2167
2168 desc = irq_to_desc(irq);
2169 if (!desc)
2170 return -EINVAL;
2171
2172 if (!irq_settings_can_request(desc) ||
2173 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2174 return -EINVAL;
2175
2176 if (!handler) {
2177 if (!thread_fn)
2178 return -EINVAL;
2179 handler = irq_default_primary_handler;
2180 }
2181
2182 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2183 if (!action)
2184 return -ENOMEM;
2185
2186 action->handler = handler;
2187 action->thread_fn = thread_fn;
2188 action->flags = irqflags;
2189 action->name = devname;
2190 action->dev_id = dev_id;
2191
2192 retval = irq_chip_pm_get(&desc->irq_data);
2193 if (retval < 0) {
2194 kfree(action);
2195 return retval;
2196 }
2197
2198 retval = __setup_irq(irq, desc, action);
2199
2200 if (retval) {
2201 irq_chip_pm_put(&desc->irq_data);
2202 kfree(action->secondary);
2203 kfree(action);
2204 }
2205
2206 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2207 if (!retval && (irqflags & IRQF_SHARED)) {
2208
2209
2210
2211
2212
2213
2214 unsigned long flags;
2215
2216 disable_irq(irq);
2217 local_irq_save(flags);
2218
2219 handler(irq, dev_id);
2220
2221 local_irq_restore(flags);
2222 enable_irq(irq);
2223 }
2224 #endif
2225 return retval;
2226 }
2227 EXPORT_SYMBOL(request_threaded_irq);
2228
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	  Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the interrupt line
 * and IRQ handling. It selects either a hardirq or threaded handling
 * method depending on the context.
 *
 * Returns: On failure, it returns a negative value. On success, it returns
 * either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
2246 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2247 unsigned long flags, const char *name, void *dev_id)
2248 {
2249 struct irq_desc *desc;
2250 int ret;
2251
2252 if (irq == IRQ_NOTCONNECTED)
2253 return -ENOTCONN;
2254
2255 desc = irq_to_desc(irq);
2256 if (!desc)
2257 return -EINVAL;
2258
2259 if (irq_settings_is_nested_thread(desc)) {
2260 ret = request_threaded_irq(irq, NULL, handler,
2261 flags, name, dev_id);
2262 return !ret ? IRQC_IS_NESTED : ret;
2263 }
2264
2265 ret = request_irq(irq, handler, flags, name, dev_id);
2266 return !ret ? IRQC_IS_HARDIRQ : ret;
2267 }
2268 EXPORT_SYMBOL_GPL(request_any_context_irq);
2269
/**
 * request_nmi - allocate an interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	  Threaded handler for threaded interrupts.
 * @irqflags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the interrupt line
 * and IRQ handling. It sets up the IRQ line to be handled as an NMI.
 *
 * An interrupt line delivering NMIs cannot be shared and IRQ handling
 * cannot be threaded.
 *
 * Interrupt lines requested for NMI delivering must produce per cpu
 * interrupts and have auto enabling setting disabled.
 *
 * @dev_id must be globally unique. Normally the address of the device data
 * structure is used as the cookie. Since the handler receives this value
 * it makes sense to use it.
 *
 * If the interrupt line cannot be used to deliver NMIs, this function will
 * fail and return a negative value.
 */
2296 int request_nmi(unsigned int irq, irq_handler_t handler,
2297 unsigned long irqflags, const char *name, void *dev_id)
2298 {
2299 struct irqaction *action;
2300 struct irq_desc *desc;
2301 unsigned long flags;
2302 int retval;
2303
2304 if (irq == IRQ_NOTCONNECTED)
2305 return -ENOTCONN;
2306
2307
2308 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2309 return -EINVAL;
2310
2311 if (!(irqflags & IRQF_PERCPU))
2312 return -EINVAL;
2313
2314 if (!handler)
2315 return -EINVAL;
2316
2317 desc = irq_to_desc(irq);
2318
2319 if (!desc || (irq_settings_can_autoenable(desc) &&
2320 !(irqflags & IRQF_NO_AUTOEN)) ||
2321 !irq_settings_can_request(desc) ||
2322 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2323 !irq_supports_nmi(desc))
2324 return -EINVAL;
2325
2326 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2327 if (!action)
2328 return -ENOMEM;
2329
2330 action->handler = handler;
2331 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2332 action->name = name;
2333 action->dev_id = dev_id;
2334
2335 retval = irq_chip_pm_get(&desc->irq_data);
2336 if (retval < 0)
2337 goto err_out;
2338
2339 retval = __setup_irq(irq, desc, action);
2340 if (retval)
2341 goto err_irq_setup;
2342
2343 raw_spin_lock_irqsave(&desc->lock, flags);
2344
2345
2346 desc->istate |= IRQS_NMI;
2347 retval = irq_nmi_setup(desc);
2348 if (retval) {
2349 __cleanup_nmi(irq, desc);
2350 raw_spin_unlock_irqrestore(&desc->lock, flags);
2351 return -EINVAL;
2352 }
2353
2354 raw_spin_unlock_irqrestore(&desc->lock, flags);
2355
2356 return 0;
2357
2358 err_irq_setup:
2359 irq_chip_pm_put(&desc->irq_data);
2360 err_out:
2361 kfree(action);
2362
2363 return retval;
2364 }
2365
2366 void enable_percpu_irq(unsigned int irq, unsigned int type)
2367 {
2368 unsigned int cpu = smp_processor_id();
2369 unsigned long flags;
2370 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2371
2372 if (!desc)
2373 return;
2374
2375
2376
2377
2378
2379 type &= IRQ_TYPE_SENSE_MASK;
2380 if (type == IRQ_TYPE_NONE)
2381 type = irqd_get_trigger_type(&desc->irq_data);
2382
2383 if (type != IRQ_TYPE_NONE) {
2384 int ret;
2385
2386 ret = __irq_set_trigger(desc, type);
2387
2388 if (ret) {
2389 WARN(1, "failed to set type for IRQ%d\n", irq);
2390 goto out;
2391 }
2392 }
2393
2394 irq_percpu_enable(desc, cpu);
2395 out:
2396 irq_put_desc_unlock(desc, flags);
2397 }
2398 EXPORT_SYMBOL_GPL(enable_percpu_irq);
2399
2400 void enable_percpu_nmi(unsigned int irq, unsigned int type)
2401 {
2402 enable_percpu_irq(irq, type);
2403 }
2404
2405
2406
2407
2408
2409
2410
2411
2412 bool irq_percpu_is_enabled(unsigned int irq)
2413 {
2414 unsigned int cpu = smp_processor_id();
2415 struct irq_desc *desc;
2416 unsigned long flags;
2417 bool is_enabled;
2418
2419 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2420 if (!desc)
2421 return false;
2422
2423 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2424 irq_put_desc_unlock(desc, flags);
2425
2426 return is_enabled;
2427 }
2428 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2429
2430 void disable_percpu_irq(unsigned int irq)
2431 {
2432 unsigned int cpu = smp_processor_id();
2433 unsigned long flags;
2434 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2435
2436 if (!desc)
2437 return;
2438
2439 irq_percpu_disable(desc, cpu);
2440 irq_put_desc_unlock(desc, flags);
2441 }
2442 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2443
2444 void disable_percpu_nmi(unsigned int irq)
2445 {
2446 disable_percpu_irq(irq);
2447 }
2448
2449
2450
2451
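/*
 * Internal function to unregister a percpu irqaction.
 */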
2452 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2453 {
2454 struct irq_desc *desc = irq_to_desc(irq);
2455 struct irqaction *action;
2456 unsigned long flags;
2457
2458 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2459
2460 if (!desc)
2461 return NULL;
2462
2463 raw_spin_lock_irqsave(&desc->lock, flags);
2464
2465 action = desc->action;
2466 if (!action || action->percpu_dev_id != dev_id) {
2467 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2468 goto bad;
2469 }
2470
2471 if (!cpumask_empty(desc->percpu_enabled)) {
2472 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2473 irq, cpumask_first(desc->percpu_enabled));
2474 goto bad;
2475 }
2476
2477
2478 desc->action = NULL;
2479
2480 desc->istate &= ~IRQS_NMI;
2481
2482 raw_spin_unlock_irqrestore(&desc->lock, flags);
2483
2484 unregister_handler_proc(irq, action);
2485
2486 irq_chip_pm_put(&desc->irq_data);
2487 module_put(desc->owner);
2488 return action;
2489
2490 bad:
2491 raw_spin_unlock_irqrestore(&desc->lock, flags);
2492 return NULL;
2493 }
2494
2495
2496
2497
2498
2499
2500
2501
2502 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2503 {
2504 struct irq_desc *desc = irq_to_desc(irq);
2505
2506 if (desc && irq_settings_is_per_cpu_devid(desc))
2507 __free_percpu_irq(irq, act->percpu_dev_id);
2508 }
2509
/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but the
 * interrupt line is not disabled. This must be done on each CPU before
 * calling this function. The function does not return until any executing
 * interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
2522 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2523 {
2524 struct irq_desc *desc = irq_to_desc(irq);
2525
2526 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2527 return;
2528
2529 chip_bus_lock(desc);
2530 kfree(__free_percpu_irq(irq, dev_id));
2531 chip_bus_sync_unlock(desc);
2532 }
2533 EXPORT_SYMBOL_GPL(free_percpu_irq);
2534
2535 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2536 {
2537 struct irq_desc *desc = irq_to_desc(irq);
2538
2539 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2540 return;
2541
2542 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2543 return;
2544
2545 kfree(__free_percpu_irq(irq, dev_id));
2546 }
2547
2548
2549
2550
2551
2552
2553
2554
2555 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2556 {
2557 struct irq_desc *desc = irq_to_desc(irq);
2558 int retval;
2559
2560 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2561 return -EINVAL;
2562
2563 retval = irq_chip_pm_get(&desc->irq_data);
2564 if (retval < 0)
2565 return retval;
2566
2567 retval = __setup_irq(irq, desc, act);
2568
2569 if (retval)
2570 irq_chip_pm_put(&desc->irq_data);
2571
2572 return retval;
2573 }
2574
/**
 * __request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @flags: Interrupt type flags (IRQF_TIMER only)
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the interrupt on the
 * local CPU. If the interrupt is supposed to be enabled on other CPUs, it
 * has to be done on each CPU using enable_percpu_irq().
 *
 * @dev_id must be globally unique. It is a per-cpu variable, and the
 * handler gets called with the interrupted CPU's instance of that
 * variable.
 */
2592 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2593 unsigned long flags, const char *devname,
2594 void __percpu *dev_id)
2595 {
2596 struct irqaction *action;
2597 struct irq_desc *desc;
2598 int retval;
2599
2600 if (!dev_id)
2601 return -EINVAL;
2602
2603 desc = irq_to_desc(irq);
2604 if (!desc || !irq_settings_can_request(desc) ||
2605 !irq_settings_is_per_cpu_devid(desc))
2606 return -EINVAL;
2607
2608 if (flags && flags != IRQF_TIMER)
2609 return -EINVAL;
2610
2611 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2612 if (!action)
2613 return -ENOMEM;
2614
2615 action->handler = handler;
2616 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2617 action->name = devname;
2618 action->percpu_dev_id = dev_id;
2619
2620 retval = irq_chip_pm_get(&desc->irq_data);
2621 if (retval < 0) {
2622 kfree(action);
2623 return retval;
2624 }
2625
2626 retval = __setup_irq(irq, desc, action);
2627
2628 if (retval) {
2629 irq_chip_pm_put(&desc->irq_data);
2630 kfree(action);
2631 }
2632
2633 return retval;
2634 }
2635 EXPORT_SYMBOL_GPL(__request_percpu_irq);
2636
/**
 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @name: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
 * have to be setup on each CPU by calling prepare_percpu_nmi() before
 * being enabled on the same CPU by using enable_percpu_nmi().
 *
 * @dev_id must be globally unique. It is a per-cpu variable, and the
 * handler gets called with the interrupted CPU's instance of that
 * variable.
 *
 * Interrupt lines requested for NMI delivering should have auto enabling
 * setting disabled.
 *
 * If the interrupt line cannot be used to deliver NMIs, this function will
 * fail returning a negative value.
 */
2658 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2659 const char *name, void __percpu *dev_id)
2660 {
2661 struct irqaction *action;
2662 struct irq_desc *desc;
2663 unsigned long flags;
2664 int retval;
2665
2666 if (!handler)
2667 return -EINVAL;
2668
2669 desc = irq_to_desc(irq);
2670
2671 if (!desc || !irq_settings_can_request(desc) ||
2672 !irq_settings_is_per_cpu_devid(desc) ||
2673 irq_settings_can_autoenable(desc) ||
2674 !irq_supports_nmi(desc))
2675 return -EINVAL;
2676
2677
2678 if (desc->istate & IRQS_NMI)
2679 return -EINVAL;
2680
2681 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2682 if (!action)
2683 return -ENOMEM;
2684
2685 action->handler = handler;
2686 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2687 | IRQF_NOBALANCING;
2688 action->name = name;
2689 action->percpu_dev_id = dev_id;
2690
2691 retval = irq_chip_pm_get(&desc->irq_data);
2692 if (retval < 0)
2693 goto err_out;
2694
2695 retval = __setup_irq(irq, desc, action);
2696 if (retval)
2697 goto err_irq_setup;
2698
2699 raw_spin_lock_irqsave(&desc->lock, flags);
2700 desc->istate |= IRQS_NMI;
2701 raw_spin_unlock_irqrestore(&desc->lock, flags);
2702
2703 return 0;
2704
2705 err_irq_setup:
2706 irq_chip_pm_put(&desc->irq_data);
2707 err_out:
2708 kfree(action);
2709
2710 return retval;
2711 }
2712
/**
 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
 * @irq: Interrupt line to prepare for NMI delivery
 *
 * This call prepares an interrupt line to deliver NMI on the current CPU,
 * before that interrupt line gets enabled with enable_percpu_nmi().
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 *
 * If the interrupt line cannot be used to deliver NMIs, this function will
 * fail returning a negative value.
 */
2726 int prepare_percpu_nmi(unsigned int irq)
2727 {
2728 unsigned long flags;
2729 struct irq_desc *desc;
2730 int ret = 0;
2731
2732 WARN_ON(preemptible());
2733
2734 desc = irq_get_desc_lock(irq, &flags,
2735 IRQ_GET_DESC_CHECK_PERCPU);
2736 if (!desc)
2737 return -EINVAL;
2738
2739 if (WARN(!(desc->istate & IRQS_NMI),
2740 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2741 irq)) {
2742 ret = -EINVAL;
2743 goto out;
2744 }
2745
2746 ret = irq_nmi_setup(desc);
2747 if (ret) {
2748 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2749 goto out;
2750 }
2751
2752 out:
2753 irq_put_desc_unlock(desc, flags);
2754 return ret;
2755 }
2756
/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should be
 *	 removed
 *
 * This call undoes the setup done by prepare_percpu_nmi().
 *
 * IRQ line should not be enabled for the current CPU.
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 */
2769 void teardown_percpu_nmi(unsigned int irq)
2770 {
2771 unsigned long flags;
2772 struct irq_desc *desc;
2773
2774 WARN_ON(preemptible());
2775
2776 desc = irq_get_desc_lock(irq, &flags,
2777 IRQ_GET_DESC_CHECK_PERCPU);
2778 if (!desc)
2779 return;
2780
2781 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2782 goto out;
2783
2784 irq_nmi_teardown(desc);
2785 out:
2786 irq_put_desc_unlock(desc, flags);
2787 }
2788
2789 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2790 bool *state)
2791 {
2792 struct irq_chip *chip;
2793 int err = -EINVAL;
2794
2795 do {
2796 chip = irq_data_get_irq_chip(data);
2797 if (WARN_ON_ONCE(!chip))
2798 return -ENODEV;
2799 if (chip->irq_get_irqchip_state)
2800 break;
2801 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2802 data = data->parent_data;
2803 #else
2804 data = NULL;
2805 #endif
2806 } while (data);
2807
2808 if (data)
2809 err = chip->irq_get_irqchip_state(data, which, state);
2810 return err;
2811 }
2812
/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an interrupt,
 * returning into @state the bit corresponding to state @which.
 *
 * This function should be called with preemption disabled if the interrupt
 * controller has per-cpu registers.
 */
2826 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2827 bool *state)
2828 {
2829 struct irq_desc *desc;
2830 struct irq_data *data;
2831 unsigned long flags;
2832 int err = -EINVAL;
2833
2834 desc = irq_get_desc_buslock(irq, &flags, 0);
2835 if (!desc)
2836 return err;
2837
2838 data = irq_desc_get_irq_data(desc);
2839
2840 err = __irq_get_irqchip_state(data, which, state);
2841
2842 irq_put_desc_busunlock(desc, flags);
2843 return err;
2844 }
2845 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
2846
/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt, depending on
 * the value of @which.
 *
 * This function should be called with preemption disabled if the interrupt
 * controller has per-cpu registers.
 */
2859 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2860 bool val)
2861 {
2862 struct irq_desc *desc;
2863 struct irq_data *data;
2864 struct irq_chip *chip;
2865 unsigned long flags;
2866 int err = -EINVAL;
2867
2868 desc = irq_get_desc_buslock(irq, &flags, 0);
2869 if (!desc)
2870 return err;
2871
2872 data = irq_desc_get_irq_data(desc);
2873
2874 do {
2875 chip = irq_data_get_irq_chip(data);
2876 if (WARN_ON_ONCE(!chip)) {
2877 err = -ENODEV;
2878 goto out_unlock;
2879 }
2880 if (chip->irq_set_irqchip_state)
2881 break;
2882 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2883 data = data->parent_data;
2884 #else
2885 data = NULL;
2886 #endif
2887 } while (data);
2888
2889 if (data)
2890 err = chip->irq_set_irqchip_state(data, which, val);
2891
2892 out_unlock:
2893 irq_put_desc_busunlock(desc, flags);
2894 return err;
2895 }
2896 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
2897
2898
2899
2900
2901
2902
2903
2904 bool irq_has_action(unsigned int irq)
2905 {
2906 bool res;
2907
2908 rcu_read_lock();
2909 res = irq_desc_has_action(irq_to_desc(irq));
2910 rcu_read_unlock();
2911 return res;
2912 }
2913 EXPORT_SYMBOL_GPL(irq_has_action);
2914
2915
2916
2917
2918
2919
2920
2921
2922 bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
2923 {
2924 struct irq_desc *desc;
2925 bool res = false;
2926
2927 rcu_read_lock();
2928 desc = irq_to_desc(irq);
2929 if (desc)
2930 res = !!(desc->status_use_accessors & bitmask);
2931 rcu_read_unlock();
2932 return res;
2933 }
2934 EXPORT_SYMBOL_GPL(irq_check_status_bit);