#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Clean up a pending interrupt affinity change
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when none of the CPUs in
 *			the pending mask is online.
 *
 * Returns true if the pending bit was set and the pending mask still
 * contains at least one online CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * If none of the CPUs in the pending mask is online anymore, the
	 * pending affinity change is stale. Clear the pending bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

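/*
 * irq_move_masked_irq - Carry out a pending affinity change for an
 * interrupt which the caller has masked. Must be called with
 * desc->lock held.
 */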
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: per-CPU interrupts cannot be migrated and should
	 * never end up here.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * Only reprogram the affinity if the pending mask contains at
	 * least one online CPU. The reprogramming has to happen while
	 * the interrupt is masked: changing the routing of an edge
	 * triggered interrupt while it fires can misbehave on some
	 * interrupt controllers, so this relies on the caller having
	 * masked the interrupt before invoking this function.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If the affinity change cannot be carried out right now
		 * (-EBUSY), mark the move as pending again so it is
		 * retried on the next interrupt. Leave desc->pending_mask
		 * intact for that retry.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}

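/*
 * __irq_move_irq - Mask the interrupt if necessary, carry out a pending
 * affinity change and restore the original mask state afterwards.
 */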
void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Work on the top level irq_data of the descriptor. With
	 * hierarchical irq domains the caller may hand in irq_data from
	 * a lower level of the hierarchy; without hierarchy support this
	 * lookup is effectively a no-op.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * The interrupt has to be masked while its affinity is changed.
	 * If it is already masked, e.g. by an IRQF_ONESHOT threaded
	 * handler, keep the mask state as is and only perform the move.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}