// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
    const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
    unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
    /*
     * The cpumask_empty() check is a workaround for interrupt chips,
     * which do not implement effective affinity, but the architecture has
     * enabled the config switch. Use the general affinity mask instead.
     */
    if (cpumask_empty(m))
        m = irq_data_get_affinity_mask(d);

    /*
     * Sanity check. If the mask is not empty when excluding the outgoing
     * CPU then it must contain at least one online CPU. The outgoing CPU
     * has been removed from the online mask already.
     */
    if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
        cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
        /*
         * If this happens then there was a missed IRQ fixup at some
         * point. Warn about it and enforce fixup.
         */
        pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
            cpumask_pr_args(m), d->irq, cpu);
        return true;
    }
#endif
    return cpumask_test_cpu(cpu, m);
}

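/*
 * migrate_one_irq - Move a single interrupt away from the outgoing CPU
 *
 * Called with desc->lock held. Returns true if the interrupt had to be
 * broken out of its configured affinity, i.e. it was retargeted to
 * cpu_online_mask because no CPU of its affinity mask is online anymore.
 * Returns false if no move was required or if a managed interrupt was
 * shut down instead.
 */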
static bool migrate_one_irq(struct irq_desc *desc)
{
    struct irq_data *d = irq_desc_get_irq_data(desc);
    struct irq_chip *chip = irq_data_get_irq_chip(d);
    bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
    const struct cpumask *affinity;
    bool brokeaff = false;
    int err;

    /*
     * IRQ chip might be already torn down, but the irq descriptor is
     * still in the radix tree. Also if the chip has no affinity setter,
     * nothing can be done here.
     */
    if (!chip || !chip->irq_set_affinity) {
        pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
        return false;
    }

    /*
     * No move required, if:
     * - Interrupt is per cpu
     * - Interrupt is not started
     * - Affinity mask does not include this CPU.
     *
     * Note: Do not check desc->action as this might be a chained
     * interrupt.
     */
    if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
        /*
         * If an irq move is pending, abort it if the dying CPU is
         * the sole target.
         */
        irq_fixup_move_pending(desc, false);
        return false;
    }

    /*
     * Complete an eventually pending irq move cleanup. If this
     * interrupt was moved in hard irq context, then the vectors need
     * to be cleaned up. It can't wait until this interrupt actually
     * happens and this CPU was involved.
     */
    irq_force_complete_move(desc);

    /*
     * If there is a setaffinity pending, then try to reuse the pending
     * mask, so the last change of the affinity does not get lost. If
     * there is no move pending or the pending mask does not contain
     * any online CPU, use the current affinity mask.
     */
    if (irq_fixup_move_pending(desc, true))
        affinity = irq_desc_get_pending_mask(desc);
    else
        affinity = irq_data_get_affinity_mask(d);

    /* Mask the chip for interrupts which cannot move in process context */
    if (maskchip && chip->irq_mask)
        chip->irq_mask(d);

    if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
        /*
         * If the interrupt is managed, then shut it down and leave
         * the affinity untouched.
         */
        if (irqd_affinity_is_managed(d)) {
            irqd_set_managed_shutdown(d);
            irq_shutdown_and_deactivate(desc);
            return false;
        }
        affinity = cpu_online_mask;
        brokeaff = true;
    }
    /*
     * Do not set the force argument of irq_do_set_affinity() as this
     * disables the masking of offline CPUs from the supplied affinity
     * mask and therefore might keep/reassign the irq to the outgoing
     * CPU.
     */
    err = irq_do_set_affinity(d, affinity, false);
    if (err) {
        pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
                    d->irq, err);
        brokeaff = false;
    }

    if (maskchip && chip->irq_unmask)
        chip->irq_unmask(d);

    return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
    struct irq_desc *desc;
    unsigned int irq;

    for_each_active_irq(irq) {
        bool affinity_broken;

        desc = irq_to_desc(irq);
        raw_spin_lock(&desc->lock);
        affinity_broken = migrate_one_irq(desc);
        raw_spin_unlock(&desc->lock);

        if (affinity_broken) {
            pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
                        irq, smp_processor_id());
        }
    }
}
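
/*
 * Illustrative sketch, not part of this file: architecture code calls
 * irq_migrate_all_off_this_cpu() from its CPU offline path after the
 * dying CPU has been cleared from cpu_online_mask, roughly as the
 * arm/arm64 __cpu_disable() implementations do. The function name below
 * is hypothetical.
 */
#if 0	/* example only, not compiled */
int example_arch_cpu_disable(void)
{
    unsigned int cpu = smp_processor_id();

    /* Take the CPU out of the online mask first ... */
    set_cpu_online(cpu, false);

    /* ... then move all interrupts targeting it to the remaining CPUs. */
    irq_migrate_all_off_this_cpu();

    return 0;
}
#endif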
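/*
 * hk_should_isolate - Check whether an interrupt should be pulled onto
 * the upcoming housekeeping CPU @cpu
 *
 * Returns true if managed interrupt isolation is enabled, the effective
 * affinity mask of the interrupt is not yet fully contained in the
 * housekeeping mask, and @cpu is a housekeeping CPU.
 */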
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
    const struct cpumask *hk_mask;

    if (!housekeeping_enabled(HK_TYPE_MANAGED_IRQ))
        return false;

    hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
    if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
        return false;

    return cpumask_test_cpu(cpu, hk_mask);
}

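/*
 * irq_restore_affinity_of_irq - Re-evaluate the affinity of a managed
 * interrupt when @cpu, which is part of its affinity mask, comes online
 *
 * Managed interrupts which were shut down because all CPUs of their
 * affinity mask went offline are started up again. Interrupts which kept
 * running have their affinity re-applied so they can spread onto @cpu,
 * unless they are single target and not subject to housekeeping isolation.
 */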
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
    struct irq_data *data = irq_desc_get_irq_data(desc);
    const struct cpumask *affinity = irq_data_get_affinity_mask(data);

    if (!irqd_affinity_is_managed(data) || !desc->action ||
        !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
        return;

    if (irqd_is_managed_and_shutdown(data)) {
        irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
        return;
    }

    /*
     * If the interrupt can only be directed to a single target
     * CPU then it is already assigned to a CPU in the affinity
     * mask. No point in trying to move it around unless the
     * isolation mechanism requests to move it to an upcoming
     * housekeeping CPU.
     */
    if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
        irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:    Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
    struct irq_desc *desc;
    unsigned int irq;

    irq_lock_sparse();
    for_each_active_irq(irq) {
        desc = irq_to_desc(irq);
        raw_spin_lock_irq(&desc->lock);
        irq_restore_affinity_of_irq(desc, cpu);
        raw_spin_unlock_irq(&desc->lock);
    }
    irq_unlock_sparse();

    return 0;
}
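
/*
 * Note, not part of this file: in mainline kernels irq_affinity_online_cpu()
 * is registered in kernel/cpu.c as the startup callback of the
 * CPUHP_AP_IRQ_AFFINITY_ONLINE hotplug state, so it runs on each CPU
 * shortly after that CPU has been brought online.
 */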