// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * All Rights Reserved, Russell King
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips which
	 * do not implement effective affinity, but the architecture has
	 * enabled the config switch. Use the general affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
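	/*
	 * Note: cpumask_any_but(m, cpu) < nr_cpu_ids reads as "m contains a
	 * CPU other than cpu"; cpumask_any_and(m, cpu_online_mask) >=
	 * nr_cpu_ids reads as "m contains no online CPU".
	 */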
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

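/*
 * Move one interrupt away from the outgoing CPU. Returns true if the
 * affinity setting had to be broken, i.e. the interrupt was forced onto a
 * CPU outside its configured affinity mask; false otherwise, including the
 * "nothing to do" and error cases.
 */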
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is the
		 * sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete an eventually pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, then the vectors need
	 * to be cleaned up. It can't wait until this interrupt actually
	 * happens and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain any
	 * online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}

	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from an offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					     irq, smp_processor_id());
		}
	}
}
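
/*
 * Usage sketch (illustrative, not part of this file): architectures invoke
 * this from their CPU teardown path after clearing the CPU from the online
 * mask, along the lines of:
 *
 *	int __cpu_disable(void)
 *	{
 *		set_cpu_online(smp_processor_id(), false);
 *		...
 *		irq_migrate_all_off_this_cpu();
 *		return 0;
 *	}
 */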
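/*
 * Decide whether a managed interrupt should be moved to the upcoming
 * housekeeping CPU @cpu: only if managed IRQ housekeeping is enabled, the
 * effective affinity is not already a subset of the housekeeping set, and
 * @cpu itself is a housekeeping CPU.
 */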
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
	const struct cpumask *hk_mask;

	if (!housekeeping_enabled(HK_TYPE_MANAGED_IRQ))
		return false;

	hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
	if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
		return false;

	return cpumask_test_cpu(cpu, hk_mask);
}

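/*
 * Restore the affinity of a managed interrupt for the upcoming CPU @cpu:
 * restart interrupts which were shut down because all CPUs in their
 * affinity mask went offline, otherwise re-evaluate the affinity setting.
 */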
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target CPU then
	 * it is already assigned to a CPU in the affinity mask. No point in
	 * trying to move it around unless the isolation mechanism requests
	 * to move it to an upcoming housekeeping CPU.
	 */
	if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
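
/*
 * Registration sketch (illustrative; the actual hookup lives in the CPU
 * hotplug core's state table): this callback runs for the
 * CPUHP_AP_IRQ_AFFINITY_ONLINE state when a CPU comes up, roughly
 * equivalent to:
 *
 *	cpuhp_setup_state(CPUHP_AP_IRQ_AFFINITY_ONLINE, "irq/affinity:online",
 *			  irq_affinity_online_cpu, NULL);
 */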