0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
0004  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
0005  *
0006  * This file contains the core interrupt handling code, for irq-chip based
0007  * architectures. Detailed information is available in
0008  * Documentation/core-api/genericirq.rst
0009  */
0010 
0011 #include <linux/irq.h>
0012 #include <linux/msi.h>
0013 #include <linux/module.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/kernel_stat.h>
0016 #include <linux/irqdomain.h>
0017 
0018 #include <trace/events/irq.h>
0019 
0020 #include "internals.h"
0021 
0022 static irqreturn_t bad_chained_irq(int irq, void *dev_id)
0023 {
0024     WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
0025     return IRQ_NONE;
0026 }
0027 
0028 /*
0029  * Chained handlers should never call action on their IRQ. This default
0030  * action will emit a warning if such a thing happens.
0031  */
0032 struct irqaction chained_action = {
0033     .handler = bad_chained_irq,
0034 };
0035 
0036 /**
0037  *  irq_set_chip - set the irq chip for an irq
0038  *  @irq:   irq number
0039  *  @chip:  pointer to irq chip description structure
0040  */
0041 int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
0042 {
0043     unsigned long flags;
0044     struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
0045 
0046     if (!desc)
0047         return -EINVAL;
0048 
0049     desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
0050     irq_put_desc_unlock(desc, flags);
0051     /*
0052      * For !CONFIG_SPARSE_IRQ make the irq show up in
0053      * allocated_irqs.
0054      */
0055     irq_mark_irq(irq);
0056     return 0;
0057 }
0058 EXPORT_SYMBOL(irq_set_chip);
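
/*
 * A minimal usage sketch (not part of this file; "virq" is a
 * hypothetical, already mapped Linux interrupt number): attach the
 * core-provided dummy_irq_chip to a line that needs no hardware
 * control. Passing a NULL chip would select no_irq_chip instead, as
 * the code above shows.
 */
static void example_attach_dummy_chip(unsigned int virq)
{
    if (irq_set_chip(virq, &dummy_irq_chip))
        pr_warn("irq%u: cannot set chip\n", virq);
}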
0059 
0060 /**
0061  *  irq_set_irq_type - set the irq trigger type for an irq
0062  *  @irq:   irq number
0063  *  @type:  IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
0064  */
0065 int irq_set_irq_type(unsigned int irq, unsigned int type)
0066 {
0067     unsigned long flags;
0068     struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
0069     int ret = 0;
0070 
0071     if (!desc)
0072         return -EINVAL;
0073 
0074     ret = __irq_set_trigger(desc, type);
0075     irq_put_desc_busunlock(desc, flags);
0076     return ret;
0077 }
0078 EXPORT_SYMBOL(irq_set_irq_type);
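
/*
 * A minimal usage sketch (hypothetical call site): select rising-edge
 * triggering for a line before it is requested. IRQ_TYPE_EDGE_RISING
 * comes from <linux/irq.h>.
 */
static int example_config_edge(unsigned int virq)
{
    return irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
}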
0079 
0080 /**
0081  *  irq_set_handler_data - set irq handler data for an irq
0082  *  @irq:   Interrupt number
0083  *  @data:  Pointer to interrupt specific data
0084  *
0085  *  Set the hardware irq controller data for an irq
0086  */
0087 int irq_set_handler_data(unsigned int irq, void *data)
0088 {
0089     unsigned long flags;
0090     struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
0091 
0092     if (!desc)
0093         return -EINVAL;
0094     desc->irq_common_data.handler_data = data;
0095     irq_put_desc_unlock(desc, flags);
0096     return 0;
0097 }
0098 EXPORT_SYMBOL(irq_set_handler_data);
0099 
0100 /**
0101  *  irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
0102  *  @irq_base:  Interrupt number base
0103  *  @irq_offset:    Interrupt number offset
0104  *  @entry:     Pointer to MSI descriptor data
0105  *
0106  *  Set the MSI descriptor entry for an irq at offset
0107  */
0108 int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
0109              struct msi_desc *entry)
0110 {
0111     unsigned long flags;
0112     struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
0113 
0114     if (!desc)
0115         return -EINVAL;
0116     desc->irq_common_data.msi_desc = entry;
0117     if (entry && !irq_offset)
0118         entry->irq = irq_base;
0119     irq_put_desc_unlock(desc, flags);
0120     return 0;
0121 }
0122 
0123 /**
0124  *  irq_set_msi_desc - set MSI descriptor data for an irq
0125  *  @irq:   Interrupt number
0126  *  @entry: Pointer to MSI descriptor data
0127  *
0128  *  Set the MSI descriptor entry for an irq
0129  */
0130 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
0131 {
0132     return irq_set_msi_desc_off(irq, 0, entry);
0133 }
0134 
0135 /**
0136  *  irq_set_chip_data - set irq chip data for an irq
0137  *  @irq:   Interrupt number
0138  *  @data:  Pointer to chip specific data
0139  *
0140  *  Set the hardware irq chip data for an irq
0141  */
0142 int irq_set_chip_data(unsigned int irq, void *data)
0143 {
0144     unsigned long flags;
0145     struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
0146 
0147     if (!desc)
0148         return -EINVAL;
0149     desc->irq_data.chip_data = data;
0150     irq_put_desc_unlock(desc, flags);
0151     return 0;
0152 }
0153 EXPORT_SYMBOL(irq_set_chip_data);
0154 
0155 struct irq_data *irq_get_irq_data(unsigned int irq)
0156 {
0157     struct irq_desc *desc = irq_to_desc(irq);
0158 
0159     return desc ? &desc->irq_data : NULL;
0160 }
0161 EXPORT_SYMBOL_GPL(irq_get_irq_data);
0162 
0163 static void irq_state_clr_disabled(struct irq_desc *desc)
0164 {
0165     irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
0166 }
0167 
0168 static void irq_state_clr_masked(struct irq_desc *desc)
0169 {
0170     irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
0171 }
0172 
0173 static void irq_state_clr_started(struct irq_desc *desc)
0174 {
0175     irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
0176 }
0177 
0178 static void irq_state_set_started(struct irq_desc *desc)
0179 {
0180     irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
0181 }
0182 
0183 enum {
0184     IRQ_STARTUP_NORMAL,
0185     IRQ_STARTUP_MANAGED,
0186     IRQ_STARTUP_ABORT,
0187 };
0188 
0189 #ifdef CONFIG_SMP
0190 static int
0191 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
0192               bool force)
0193 {
0194     struct irq_data *d = irq_desc_get_irq_data(desc);
0195 
0196     if (!irqd_affinity_is_managed(d))
0197         return IRQ_STARTUP_NORMAL;
0198 
0199     irqd_clr_managed_shutdown(d);
0200 
0201     if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
0202         /*
0203          * Catch code which fiddles with enable_irq() on a managed
0204          * and potentially shutdown IRQ. Chained interrupt
0205          * installation or irq auto probing should not happen on
0206          * managed irqs either.
0207          */
0208         if (WARN_ON_ONCE(force))
0209             return IRQ_STARTUP_ABORT;
0210         /*
0211          * The interrupt was requested, but there is no online CPU
0212          * in its affinity mask. Put it into managed shutdown
0213          * state and let the cpu hotplug mechanism start it up once
0214          * a CPU in the mask becomes available.
0215          */
0216         return IRQ_STARTUP_ABORT;
0217     }
0218     /*
0219      * Managed interrupts have reserved resources, so this should not
0220      * happen.
0221      */
0222     if (WARN_ON(irq_domain_activate_irq(d, false)))
0223         return IRQ_STARTUP_ABORT;
0224     return IRQ_STARTUP_MANAGED;
0225 }
0226 #else
0227 static __always_inline int
0228 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
0229               bool force)
0230 {
0231     return IRQ_STARTUP_NORMAL;
0232 }
0233 #endif
0234 
0235 static int __irq_startup(struct irq_desc *desc)
0236 {
0237     struct irq_data *d = irq_desc_get_irq_data(desc);
0238     int ret = 0;
0239 
0240     /* Warn if this interrupt is not activated but try nevertheless */
0241     WARN_ON_ONCE(!irqd_is_activated(d));
0242 
0243     if (d->chip->irq_startup) {
0244         ret = d->chip->irq_startup(d);
0245         irq_state_clr_disabled(desc);
0246         irq_state_clr_masked(desc);
0247     } else {
0248         irq_enable(desc);
0249     }
0250     irq_state_set_started(desc);
0251     return ret;
0252 }
0253 
0254 int irq_startup(struct irq_desc *desc, bool resend, bool force)
0255 {
0256     struct irq_data *d = irq_desc_get_irq_data(desc);
0257     const struct cpumask *aff = irq_data_get_affinity_mask(d);
0258     int ret = 0;
0259 
0260     desc->depth = 0;
0261 
0262     if (irqd_is_started(d)) {
0263         irq_enable(desc);
0264     } else {
0265         switch (__irq_startup_managed(desc, aff, force)) {
0266         case IRQ_STARTUP_NORMAL:
0267             if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
0268                 irq_setup_affinity(desc);
0269             ret = __irq_startup(desc);
0270             if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
0271                 irq_setup_affinity(desc);
0272             break;
0273         case IRQ_STARTUP_MANAGED:
0274             irq_do_set_affinity(d, aff, false);
0275             ret = __irq_startup(desc);
0276             break;
0277         case IRQ_STARTUP_ABORT:
0278             irqd_set_managed_shutdown(d);
0279             return 0;
0280         }
0281     }
0282     if (resend)
0283         check_irq_resend(desc, false);
0284 
0285     return ret;
0286 }
0287 
0288 int irq_activate(struct irq_desc *desc)
0289 {
0290     struct irq_data *d = irq_desc_get_irq_data(desc);
0291 
0292     if (!irqd_affinity_is_managed(d))
0293         return irq_domain_activate_irq(d, false);
0294     return 0;
0295 }
0296 
0297 int irq_activate_and_startup(struct irq_desc *desc, bool resend)
0298 {
0299     if (WARN_ON(irq_activate(desc)))
0300         return 0;
0301     return irq_startup(desc, resend, IRQ_START_FORCE);
0302 }
0303 
0304 static void __irq_disable(struct irq_desc *desc, bool mask);
0305 
0306 void irq_shutdown(struct irq_desc *desc)
0307 {
0308     if (irqd_is_started(&desc->irq_data)) {
0309         desc->depth = 1;
0310         if (desc->irq_data.chip->irq_shutdown) {
0311             desc->irq_data.chip->irq_shutdown(&desc->irq_data);
0312             irq_state_set_disabled(desc);
0313             irq_state_set_masked(desc);
0314         } else {
0315             __irq_disable(desc, true);
0316         }
0317         irq_state_clr_started(desc);
0318     }
0319 }
0320 
0321 
0322 void irq_shutdown_and_deactivate(struct irq_desc *desc)
0323 {
0324     irq_shutdown(desc);
0325     /*
0326      * This must be called even if the interrupt was never started up,
0327      * because the activation can happen before the interrupt is
0328  * available for request/startup. It has its own state tracking so
0329      * it's safe to call it unconditionally.
0330      */
0331     irq_domain_deactivate_irq(&desc->irq_data);
0332 }
0333 
0334 void irq_enable(struct irq_desc *desc)
0335 {
0336     if (!irqd_irq_disabled(&desc->irq_data)) {
0337         unmask_irq(desc);
0338     } else {
0339         irq_state_clr_disabled(desc);
0340         if (desc->irq_data.chip->irq_enable) {
0341             desc->irq_data.chip->irq_enable(&desc->irq_data);
0342             irq_state_clr_masked(desc);
0343         } else {
0344             unmask_irq(desc);
0345         }
0346     }
0347 }
0348 
0349 static void __irq_disable(struct irq_desc *desc, bool mask)
0350 {
0351     if (irqd_irq_disabled(&desc->irq_data)) {
0352         if (mask)
0353             mask_irq(desc);
0354     } else {
0355         irq_state_set_disabled(desc);
0356         if (desc->irq_data.chip->irq_disable) {
0357             desc->irq_data.chip->irq_disable(&desc->irq_data);
0358             irq_state_set_masked(desc);
0359         } else if (mask) {
0360             mask_irq(desc);
0361         }
0362     }
0363 }
0364 
0365 /**
0366  * irq_disable - Mark interrupt disabled
0367  * @desc:   irq descriptor which should be disabled
0368  *
0369  * If the chip does not implement the irq_disable callback, we
0370  * use a lazy disable approach. That means we mark the interrupt
0371  * disabled, but leave the hardware unmasked. That's an
0372  * optimization because we avoid the hardware access for the
0373  * common case where no interrupt happens after we marked it
0374  * disabled. If an interrupt happens, then the interrupt flow
0375  * handler masks the line at the hardware level and marks it
0376  * pending.
0377  *
0378  * If the interrupt chip does not implement the irq_disable callback,
0379  * a driver can disable the lazy approach for a particular irq line by
0380  * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
0381  * be used for devices which cannot disable the interrupt at the
0382  * device level under certain circumstances and have to use
0383  * disable_irq[_nosync] instead.
0384  */
0385 void irq_disable(struct irq_desc *desc)
0386 {
0387     __irq_disable(desc, irq_settings_disable_unlazy(desc));
0388 }
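
/*
 * A sketch of the unlazy opt-out described above (hypothetical call
 * site): for a device that cannot gate its interrupt at the device
 * level, force an immediate hardware mask on disable.
 */
static void example_disable_unlazy(unsigned int virq)
{
    irq_set_status_flags(virq, IRQ_DISABLE_UNLAZY);
    disable_irq(virq);    /* masks at the irq chip right away */
}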
0389 
0390 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
0391 {
0392     if (desc->irq_data.chip->irq_enable)
0393         desc->irq_data.chip->irq_enable(&desc->irq_data);
0394     else
0395         desc->irq_data.chip->irq_unmask(&desc->irq_data);
0396     cpumask_set_cpu(cpu, desc->percpu_enabled);
0397 }
0398 
0399 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
0400 {
0401     if (desc->irq_data.chip->irq_disable)
0402         desc->irq_data.chip->irq_disable(&desc->irq_data);
0403     else
0404         desc->irq_data.chip->irq_mask(&desc->irq_data);
0405     cpumask_clear_cpu(cpu, desc->percpu_enabled);
0406 }
0407 
0408 static inline void mask_ack_irq(struct irq_desc *desc)
0409 {
0410     if (desc->irq_data.chip->irq_mask_ack) {
0411         desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
0412         irq_state_set_masked(desc);
0413     } else {
0414         mask_irq(desc);
0415         if (desc->irq_data.chip->irq_ack)
0416             desc->irq_data.chip->irq_ack(&desc->irq_data);
0417     }
0418 }
0419 
0420 void mask_irq(struct irq_desc *desc)
0421 {
0422     if (irqd_irq_masked(&desc->irq_data))
0423         return;
0424 
0425     if (desc->irq_data.chip->irq_mask) {
0426         desc->irq_data.chip->irq_mask(&desc->irq_data);
0427         irq_state_set_masked(desc);
0428     }
0429 }
0430 
0431 void unmask_irq(struct irq_desc *desc)
0432 {
0433     if (!irqd_irq_masked(&desc->irq_data))
0434         return;
0435 
0436     if (desc->irq_data.chip->irq_unmask) {
0437         desc->irq_data.chip->irq_unmask(&desc->irq_data);
0438         irq_state_clr_masked(desc);
0439     }
0440 }
0441 
0442 void unmask_threaded_irq(struct irq_desc *desc)
0443 {
0444     struct irq_chip *chip = desc->irq_data.chip;
0445 
0446     if (chip->flags & IRQCHIP_EOI_THREADED)
0447         chip->irq_eoi(&desc->irq_data);
0448 
0449     unmask_irq(desc);
0450 }
0451 
0452 /*
0453  *  handle_nested_irq - Handle a nested irq from an irq thread
0454  *  @irq:   the interrupt number
0455  *
0456  *  Handle interrupts which are nested into a threaded interrupt
0457  *  handler. The handler function is called inside the calling
0458  *  thread's context.
0459  */
0460 void handle_nested_irq(unsigned int irq)
0461 {
0462     struct irq_desc *desc = irq_to_desc(irq);
0463     struct irqaction *action;
0464     irqreturn_t action_ret;
0465 
0466     might_sleep();
0467 
0468     raw_spin_lock_irq(&desc->lock);
0469 
0470     desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
0471 
0472     action = desc->action;
0473     if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
0474         desc->istate |= IRQS_PENDING;
0475         goto out_unlock;
0476     }
0477 
0478     kstat_incr_irqs_this_cpu(desc);
0479     irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
0480     raw_spin_unlock_irq(&desc->lock);
0481 
0482     action_ret = IRQ_NONE;
0483     for_each_action_of_desc(desc, action)
0484         action_ret |= action->thread_fn(action->irq, action->dev_id);
0485 
0486     if (!irq_settings_no_debug(desc))
0487         note_interrupt(desc, action_ret);
0488 
0489     raw_spin_lock_irq(&desc->lock);
0490     irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
0491 
0492 out_unlock:
0493     raw_spin_unlock_irq(&desc->lock);
0494 }
0495 EXPORT_SYMBOL_GPL(handle_nested_irq);
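
/*
 * A sketch of the intended call site (all "example_*" names are
 * hypothetical): the threaded handler of a slow-bus irq expander
 * demultiplexes into nested child interrupts, whose thread functions
 * then run in this thread's context. The children would have been
 * marked with irq_set_nested_thread().
 */
static irqreturn_t example_expander_thread_fn(int irq, void *dev_id)
{
    struct example_expander *exp = dev_id;
    unsigned long pending = example_read_pending(exp); /* bus access, may sleep */
    int hwirq;

    for_each_set_bit(hwirq, &pending, exp->nr_irqs)
        handle_nested_irq(irq_find_mapping(exp->domain, hwirq));

    return IRQ_HANDLED;
}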
0496 
0497 static bool irq_check_poll(struct irq_desc *desc)
0498 {
0499     if (!(desc->istate & IRQS_POLL_INPROGRESS))
0500         return false;
0501     return irq_wait_for_poll(desc);
0502 }
0503 
0504 static bool irq_may_run(struct irq_desc *desc)
0505 {
0506     unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
0507 
0508     /*
0509      * If the interrupt is not in progress and is not an armed
0510      * wakeup interrupt, proceed.
0511      */
0512     if (!irqd_has_set(&desc->irq_data, mask))
0513         return true;
0514 
0515     /*
0516      * If the interrupt is an armed wakeup source, mark it pending
0517      * and suspended, disable it and notify the pm core about the
0518      * event.
0519      */
0520     if (irq_pm_check_wakeup(desc))
0521         return false;
0522 
0523     /*
0524      * Handle a potential concurrent poll on a different core.
0525      */
0526     return irq_check_poll(desc);
0527 }
0528 
0529 /**
0530  *  handle_simple_irq - Simple and software-decoded IRQs.
0531  *  @desc:  the interrupt description structure for this irq
0532  *
0533  *  Simple interrupts are either sent from a demultiplexing interrupt
0534  *  handler or come from hardware, where no interrupt hardware control
0535  *  is necessary.
0536  *
0537  *  Note: The caller is expected to handle the ack, clear, mask and
0538  *  unmask issues if necessary.
0539  */
0540 void handle_simple_irq(struct irq_desc *desc)
0541 {
0542     raw_spin_lock(&desc->lock);
0543 
0544     if (!irq_may_run(desc))
0545         goto out_unlock;
0546 
0547     desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
0548 
0549     if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
0550         desc->istate |= IRQS_PENDING;
0551         goto out_unlock;
0552     }
0553 
0554     kstat_incr_irqs_this_cpu(desc);
0555     handle_irq_event(desc);
0556 
0557 out_unlock:
0558     raw_spin_unlock(&desc->lock);
0559 }
0560 EXPORT_SYMBOL_GPL(handle_simple_irq);
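
/*
 * A sketch of one common pairing (hypothetical call site): child lines
 * of a demultiplexer get handle_simple_irq and a chip with no flow
 * control, leaving all ack/clear/mask duties to the demultiplexing
 * caller, as the comment above requires.
 */
static void example_setup_child(unsigned int child_virq)
{
    irq_set_chip_and_handler(child_virq, &dummy_irq_chip,
                 handle_simple_irq);
}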
0561 
0562 /**
0563  *  handle_untracked_irq - Simple and software-decoded IRQs.
0564  *  @desc:  the interrupt description structure for this irq
0565  *
0566  *  Untracked interrupts are sent from a demultiplexing interrupt
0567  *  handler when the demultiplexer does not know which device in its
0568  *  multiplexed irq domain generated the interrupt. IRQs handled
0569  *  through here are not subjected to stats tracking, randomness, or
0570  *  spurious interrupt detection.
0571  *
0572  *  Note: Like handle_simple_irq, the caller is expected to handle
0573  *  the ack, clear, mask and unmask issues if necessary.
0574  */
0575 void handle_untracked_irq(struct irq_desc *desc)
0576 {
0577     raw_spin_lock(&desc->lock);
0578 
0579     if (!irq_may_run(desc))
0580         goto out_unlock;
0581 
0582     desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
0583 
0584     if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
0585         desc->istate |= IRQS_PENDING;
0586         goto out_unlock;
0587     }
0588 
0589     desc->istate &= ~IRQS_PENDING;
0590     irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
0591     raw_spin_unlock(&desc->lock);
0592 
0593     __handle_irq_event_percpu(desc);
0594 
0595     raw_spin_lock(&desc->lock);
0596     irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
0597 
0598 out_unlock:
0599     raw_spin_unlock(&desc->lock);
0600 }
0601 EXPORT_SYMBOL_GPL(handle_untracked_irq);
0602 
0603 /*
0604  * Called unconditionally from handle_level_irq() and only for oneshot
0605  * interrupts from handle_fasteoi_irq()
0606  */
0607 static void cond_unmask_irq(struct irq_desc *desc)
0608 {
0609     /*
0610      * We need to unmask in the following cases:
0611      * - Standard level irq (IRQF_ONESHOT is not set)
0612      * - Oneshot irq which did not wake the thread (caused by a
0613      *   spurious interrupt or a primary handler handling it
0614      *   completely).
0615      */
0616     if (!irqd_irq_disabled(&desc->irq_data) &&
0617         irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
0618         unmask_irq(desc);
0619 }
0620 
0621 /**
0622  *  handle_level_irq - Level type irq handler
0623  *  @desc:  the interrupt description structure for this irq
0624  *
0625  *  Level type interrupts are active as long as the hardware line has
0626  *  the active level. This may require masking the interrupt and unmasking
0627  *  it after the associated handler has acknowledged the device, so that
0628  *  the interrupt line is back to inactive.
0629  */
0630 void handle_level_irq(struct irq_desc *desc)
0631 {
0632     raw_spin_lock(&desc->lock);
0633     mask_ack_irq(desc);
0634 
0635     if (!irq_may_run(desc))
0636         goto out_unlock;
0637 
0638     desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
0639 
0640     /*
0641      * If it is disabled or no action is available,
0642      * keep it masked and get out of here.
0643      */
0644     if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
0645         desc->istate |= IRQS_PENDING;
0646         goto out_unlock;
0647     }
0648 
0649     kstat_incr_irqs_this_cpu(desc);
0650     handle_irq_event(desc);
0651 
0652     cond_unmask_irq(desc);
0653 
0654 out_unlock:
0655     raw_spin_unlock(&desc->lock);
0656 }
0657 EXPORT_SYMBOL_GPL(handle_level_irq);
0658 
0659 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
0660 {
0661     if (!(desc->istate & IRQS_ONESHOT)) {
0662         chip->irq_eoi(&desc->irq_data);
0663         return;
0664     }
0665     /*
0666      * We need to unmask in the following cases:
0667      * - Oneshot irq which did not wake the thread (caused by a
0668      *   spurious interrupt or a primary handler handling it
0669      *   completely).
0670      */
0671     if (!irqd_irq_disabled(&desc->irq_data) &&
0672         irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
0673         chip->irq_eoi(&desc->irq_data);
0674         unmask_irq(desc);
0675     } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
0676         chip->irq_eoi(&desc->irq_data);
0677     }
0678 }
0679 
0680 /**
0681  *  handle_fasteoi_irq - irq handler for transparent controllers
0682  *  @desc:  the interrupt description structure for this irq
0683  *
0684  *  Only a single callback will be issued to the chip: an ->eoi()
0685  *  call when the interrupt has been serviced. This enables support
0686  *  for modern forms of interrupt handlers, which handle the flow
0687  *  details in hardware, transparently.
0688  */
0689 void handle_fasteoi_irq(struct irq_desc *desc)
0690 {
0691     struct irq_chip *chip = desc->irq_data.chip;
0692 
0693     raw_spin_lock(&desc->lock);
0694 
0695     if (!irq_may_run(desc))
0696         goto out;
0697 
0698     desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
0699 
0700     /*
0701      * If it is disabled or no action is available
0702      * then mask it and get out of here:
0703      */
0704     if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
0705         desc->istate |= IRQS_PENDING;
0706         mask_irq(desc);
0707         goto out;
0708     }
0709 
0710     kstat_incr_irqs_this_cpu(desc);
0711     if (desc->istate & IRQS_ONESHOT)
0712         mask_irq(desc);
0713 
0714     handle_irq_event(desc);
0715 
0716     cond_unmask_eoi_irq(desc, chip);
0717 
0718     raw_spin_unlock(&desc->lock);
0719     return;
0720 out:
0721     if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
0722         chip->irq_eoi(&desc->irq_data);
0723     raw_spin_unlock(&desc->lock);
0724 }
0725 EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
0726 
0727 /**
0728  *  handle_fasteoi_nmi - irq handler for NMI interrupt lines
0729  *  @desc:  the interrupt description structure for this irq
0730  *
0731  *  A simple NMI-safe handler, considering the restrictions
0732  *  from request_nmi.
0733  *
0734  *  Only a single callback will be issued to the chip: an ->eoi()
0735  *  call when the interrupt has been serviced. This enables support
0736  *  for modern forms of interrupt handlers, which handle the flow
0737  *  details in hardware, transparently.
0738  */
0739 void handle_fasteoi_nmi(struct irq_desc *desc)
0740 {
0741     struct irq_chip *chip = irq_desc_get_chip(desc);
0742     struct irqaction *action = desc->action;
0743     unsigned int irq = irq_desc_get_irq(desc);
0744     irqreturn_t res;
0745 
0746     __kstat_incr_irqs_this_cpu(desc);
0747 
0748     trace_irq_handler_entry(irq, action);
0749     /*
0750      * NMIs cannot be shared, there is only one action.
0751      */
0752     res = action->handler(irq, action->dev_id);
0753     trace_irq_handler_exit(irq, action, res);
0754 
0755     if (chip->irq_eoi)
0756         chip->irq_eoi(&desc->irq_data);
0757 }
0758 EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
0759 
0760 /**
0761  *  handle_edge_irq - edge type IRQ handler
0762  *  @desc:  the interrupt description structure for this irq
0763  *
0764  *  Interrupt occurs on the falling and/or rising edge of a hardware
0765  *  signal. The occurrence is latched into the irq controller hardware
0766  *  and must be acked in order to be reenabled. After the ack another
0767  *  interrupt can happen on the same source even before the first one
0768  *  is handled by the associated event handler. If this happens it
0769  *  might be necessary to disable (mask) the interrupt depending on the
0770  *  controller hardware. This requires re-enabling the interrupt inside
0771  *  the loop which handles the interrupts that arrived while
0772  *  the handler was running. If all pending interrupts are handled, the
0773  *  loop is left.
0774  */
0775 void handle_edge_irq(struct irq_desc *desc)
0776 {
0777     raw_spin_lock(&desc->lock);
0778 
0779     desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
0780 
0781     if (!irq_may_run(desc)) {
0782         desc->istate |= IRQS_PENDING;
0783         mask_ack_irq(desc);
0784         goto out_unlock;
0785     }
0786 
0787     /*
0788      * If it is disabled or no action is available then mask it and get
0789      * out of here.
0790      */
0791     if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
0792         desc->istate |= IRQS_PENDING;
0793         mask_ack_irq(desc);
0794         goto out_unlock;
0795     }
0796 
0797     kstat_incr_irqs_this_cpu(desc);
0798 
0799     /* Start handling the irq */
0800     desc->irq_data.chip->irq_ack(&desc->irq_data);
0801 
0802     do {
0803         if (unlikely(!desc->action)) {
0804             mask_irq(desc);
0805             goto out_unlock;
0806         }
0807 
0808         /*
0809          * When another irq arrived while we were handling
0810          * one, we could have masked the irq.
0811          * Re-enable it if it was not disabled in the meantime.
0812          */
0813         if (unlikely(desc->istate & IRQS_PENDING)) {
0814             if (!irqd_irq_disabled(&desc->irq_data) &&
0815                 irqd_irq_masked(&desc->irq_data))
0816                 unmask_irq(desc);
0817         }
0818 
0819         handle_irq_event(desc);
0820 
0821     } while ((desc->istate & IRQS_PENDING) &&
0822          !irqd_irq_disabled(&desc->irq_data));
0823 
0824 out_unlock:
0825     raw_spin_unlock(&desc->lock);
0826 }
0827 EXPORT_SYMBOL(handle_edge_irq);
0828 
0829 #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
0830 /**
0831  *  handle_edge_eoi_irq - edge eoi type IRQ handler
0832  *  @desc:  the interrupt description structure for this irq
0833  *
0834  * Similar to handle_edge_irq above, but using eoi and without the
0835  * mask/unmask logic.
0836  */
0837 void handle_edge_eoi_irq(struct irq_desc *desc)
0838 {
0839     struct irq_chip *chip = irq_desc_get_chip(desc);
0840 
0841     raw_spin_lock(&desc->lock);
0842 
0843     desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
0844 
0845     if (!irq_may_run(desc)) {
0846         desc->istate |= IRQS_PENDING;
0847         goto out_eoi;
0848     }
0849 
0850     /*
0851      * If it is disabled or no action is available, mark it pending
0852      * and bail out; the eoi below still runs.
0853      */
0854     if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
0855         desc->istate |= IRQS_PENDING;
0856         goto out_eoi;
0857     }
0858 
0859     kstat_incr_irqs_this_cpu(desc);
0860 
0861     do {
0862         if (unlikely(!desc->action))
0863             goto out_eoi;
0864 
0865         handle_irq_event(desc);
0866 
0867     } while ((desc->istate & IRQS_PENDING) &&
0868          !irqd_irq_disabled(&desc->irq_data));
0869 
0870 out_eoi:
0871     chip->irq_eoi(&desc->irq_data);
0872     raw_spin_unlock(&desc->lock);
0873 }
0874 #endif
0875 
0876 /**
0877  *  handle_percpu_irq - Per CPU local irq handler
0878  *  @desc:  the interrupt description structure for this irq
0879  *
0880  *  Per CPU interrupts on SMP machines without locking requirements
0881  */
0882 void handle_percpu_irq(struct irq_desc *desc)
0883 {
0884     struct irq_chip *chip = irq_desc_get_chip(desc);
0885 
0886     /*
0887      * PER CPU interrupts are not serialized. Do not touch
0888      * desc->tot_count.
0889      */
0890     __kstat_incr_irqs_this_cpu(desc);
0891 
0892     if (chip->irq_ack)
0893         chip->irq_ack(&desc->irq_data);
0894 
0895     handle_irq_event_percpu(desc);
0896 
0897     if (chip->irq_eoi)
0898         chip->irq_eoi(&desc->irq_data);
0899 }
0900 
0901 /**
0902  * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
0903  * @desc:   the interrupt description structure for this irq
0904  *
0905  * Per CPU interrupts on SMP machines without locking requirements. Same as
0906  * handle_percpu_irq() above but with the following extras:
0907  *
0908  * action->percpu_dev_id is a pointer to percpu variables which
0909  * contain the real device id for the cpu on which this handler is
0910  * called
0911  */
0912 void handle_percpu_devid_irq(struct irq_desc *desc)
0913 {
0914     struct irq_chip *chip = irq_desc_get_chip(desc);
0915     struct irqaction *action = desc->action;
0916     unsigned int irq = irq_desc_get_irq(desc);
0917     irqreturn_t res;
0918 
0919     /*
0920      * PER CPU interrupts are not serialized. Do not touch
0921      * desc->tot_count.
0922      */
0923     __kstat_incr_irqs_this_cpu(desc);
0924 
0925     if (chip->irq_ack)
0926         chip->irq_ack(&desc->irq_data);
0927 
0928     if (likely(action)) {
0929         trace_irq_handler_entry(irq, action);
0930         res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
0931         trace_irq_handler_exit(irq, action, res);
0932     } else {
0933         unsigned int cpu = smp_processor_id();
0934         bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
0935 
0936         if (enabled)
0937             irq_percpu_disable(desc, cpu);
0938 
0939         pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
0940                 enabled ? " and unmasked" : "", irq, cpu);
0941     }
0942 
0943     if (chip->irq_eoi)
0944         chip->irq_eoi(&desc->irq_data);
0945 }
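
/*
 * A sketch of the matching request side (all "example_*" names are
 * hypothetical): per-CPU device-id interrupts are requested once with
 * request_percpu_irq() and must then be enabled on each CPU that
 * should receive them.
 */
static DEFINE_PER_CPU(struct example_pcpu_data, example_pcpu_data);

static int example_setup_percpu(unsigned int virq)
{
    int ret;

    ret = request_percpu_irq(virq, example_percpu_handler, "example",
                 &example_pcpu_data);
    if (ret)
        return ret;

    /* Typically run on each CPU, e.g. from a hotplug online callback */
    enable_percpu_irq(virq, IRQ_TYPE_NONE);
    return 0;
}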
0946 
0947 /**
0948  * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
0949  *                   dev ids
0950  * @desc:   the interrupt description structure for this irq
0951  *
0952  * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
0953  * as a percpu pointer.
0954  */
0955 void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
0956 {
0957     struct irq_chip *chip = irq_desc_get_chip(desc);
0958     struct irqaction *action = desc->action;
0959     unsigned int irq = irq_desc_get_irq(desc);
0960     irqreturn_t res;
0961 
0962     __kstat_incr_irqs_this_cpu(desc);
0963 
0964     trace_irq_handler_entry(irq, action);
0965     res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
0966     trace_irq_handler_exit(irq, action, res);
0967 
0968     if (chip->irq_eoi)
0969         chip->irq_eoi(&desc->irq_data);
0970 }
0971 
0972 static void
0973 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
0974              int is_chained, const char *name)
0975 {
0976     if (!handle) {
0977         handle = handle_bad_irq;
0978     } else {
0979         struct irq_data *irq_data = &desc->irq_data;
0980 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
0981         /*
0982          * With hierarchical domains we might run into a
0983          * situation where the outermost chip is not yet set
0984          * up, but the inner chips are there.  Instead of
0985          * bailing we install the handler, but obviously we
0986          * cannot enable/startup the interrupt at this point.
0987          */
0988         while (irq_data) {
0989             if (irq_data->chip != &no_irq_chip)
0990                 break;
0991             /*
0992              * Bail out if the outer chip is not set up
0993              * and the interrupt is supposed to be started
0994              * right away.
0995              */
0996             if (WARN_ON(is_chained))
0997                 return;
0998             /* Try the parent */
0999             irq_data = irq_data->parent_data;
1000         }
1001 #endif
1002         if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
1003             return;
1004     }
1005 
1006     /* Uninstall? */
1007     if (handle == handle_bad_irq) {
1008         if (desc->irq_data.chip != &no_irq_chip)
1009             mask_ack_irq(desc);
1010         irq_state_set_disabled(desc);
1011         if (is_chained) {
1012             desc->action = NULL;
1013             WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
1014         }
1015         desc->depth = 1;
1016     }
1017     desc->handle_irq = handle;
1018     desc->name = name;
1019 
1020     if (handle != handle_bad_irq && is_chained) {
1021         unsigned int type = irqd_get_trigger_type(&desc->irq_data);
1022 
1023         /*
1024          * We're about to start this interrupt immediately,
1025          * hence the need to set the trigger configuration.
1026          * But the .set_type callback may have overridden the
1027          * flow handler, ignoring that we're dealing with a
1028          * chained interrupt. Reset it immediately because we
1029          * do know better.
1030          */
1031         if (type != IRQ_TYPE_NONE) {
1032             __irq_set_trigger(desc, type);
1033             desc->handle_irq = handle;
1034         }
1035 
1036         irq_settings_set_noprobe(desc);
1037         irq_settings_set_norequest(desc);
1038         irq_settings_set_nothread(desc);
1039         desc->action = &chained_action;
1040         WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
1041         irq_activate_and_startup(desc, IRQ_RESEND);
1042     }
1043 }
1044 
1045 void
1046 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
1047           const char *name)
1048 {
1049     unsigned long flags;
1050     struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1051 
1052     if (!desc)
1053         return;
1054 
1055     __irq_do_set_handler(desc, handle, is_chained, name);
1056     irq_put_desc_busunlock(desc, flags);
1057 }
1058 EXPORT_SYMBOL_GPL(__irq_set_handler);
1059 
1060 void
1061 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
1062                  void *data)
1063 {
1064     unsigned long flags;
1065     struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1066 
1067     if (!desc)
1068         return;
1069 
1070     desc->irq_common_data.handler_data = data;
1071     __irq_do_set_handler(desc, handle, 1, NULL);
1072 
1073     irq_put_desc_busunlock(desc, flags);
1074 }
1075 EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
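
/*
 * A sketch of a chained demultiplexer (all "example_*" names are
 * hypothetical), the usual consumer of this function. The
 * chained_irq_enter()/chained_irq_exit() helpers from
 * <linux/irqchip/chained_irq.h> drive the parent chip's mask/ack/eoi
 * around the demux loop.
 */
static void example_gpio_demux(struct irq_desc *desc)
{
    struct example_gpio *gc = irq_desc_get_handler_data(desc);
    struct irq_chip *chip = irq_desc_get_chip(desc);
    unsigned long pending;
    int bit;

    chained_irq_enter(chip, desc);
    pending = example_gpio_pending(gc);    /* hypothetical readout */
    for_each_set_bit(bit, &pending, gc->ngpio)
        generic_handle_irq(irq_find_mapping(gc->domain, bit));
    chained_irq_exit(chip, desc);
}

/*
 * Installed with:
 *    irq_set_chained_handler_and_data(parent_irq, example_gpio_demux, gc);
 */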
1076 
1077 void
1078 irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
1079                   irq_flow_handler_t handle, const char *name)
1080 {
1081     irq_set_chip(irq, chip);
1082     __irq_set_handler(irq, handle, 0, name);
1083 }
1084 EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
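
/*
 * A sketch of a typical call site (all "example_*" names are
 * hypothetical): an irq_domain .map() callback wiring up chip, flow
 * handler and chip data for a freshly mapped line.
 */
static int example_irq_map(struct irq_domain *d, unsigned int virq,
               irq_hw_number_t hwirq)
{
    struct example_ctrl *ctrl = d->host_data;

    irq_set_chip_and_handler_name(virq, &ctrl->chip, handle_level_irq,
                      "example");
    irq_set_chip_data(virq, ctrl);
    return 0;
}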
1085 
1086 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
1087 {
1088     unsigned long flags, trigger, tmp;
1089     struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1090 
1091     if (!desc)
1092         return;
1093 
1094     /*
1095      * Warn when a driver sets the no autoenable flag on an already
1096      * active interrupt.
1097      */
1098     WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
1099 
1100     irq_settings_clr_and_set(desc, clr, set);
1101 
1102     trigger = irqd_get_trigger_type(&desc->irq_data);
1103 
1104     irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
1105            IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
1106     if (irq_settings_has_no_balance_set(desc))
1107         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1108     if (irq_settings_is_per_cpu(desc))
1109         irqd_set(&desc->irq_data, IRQD_PER_CPU);
1110     if (irq_settings_can_move_pcntxt(desc))
1111         irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
1112     if (irq_settings_is_level(desc))
1113         irqd_set(&desc->irq_data, IRQD_LEVEL);
1114 
1115     tmp = irq_settings_get_trigger_mask(desc);
1116     if (tmp != IRQ_TYPE_NONE)
1117         trigger = tmp;
1118 
1119     irqd_set(&desc->irq_data, trigger);
1120 
1121     irq_put_desc_unlock(desc, flags);
1122 }
1123 EXPORT_SYMBOL_GPL(irq_modify_status);
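
/*
 * A minimal usage sketch (hypothetical policy): reserve a line for
 * internal driver use so it can be neither autoprobed nor requested,
 * without disturbing its other settings.
 */
static void example_reserve_line(unsigned int virq)
{
    irq_modify_status(virq, 0, IRQ_NOPROBE | IRQ_NOREQUEST);
}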
1124 
1125 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
1126 /**
1127  *  irq_cpu_online - Invoke all irq_cpu_online functions.
1128  *
1129  *  Iterate through all irqs and invoke the chip.irq_cpu_online()
1130  *  for each.
1131  */
1132 void irq_cpu_online(void)
1133 {
1134     struct irq_desc *desc;
1135     struct irq_chip *chip;
1136     unsigned long flags;
1137     unsigned int irq;
1138 
1139     for_each_active_irq(irq) {
1140         desc = irq_to_desc(irq);
1141         if (!desc)
1142             continue;
1143 
1144         raw_spin_lock_irqsave(&desc->lock, flags);
1145 
1146         chip = irq_data_get_irq_chip(&desc->irq_data);
1147         if (chip && chip->irq_cpu_online &&
1148             (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1149              !irqd_irq_disabled(&desc->irq_data)))
1150             chip->irq_cpu_online(&desc->irq_data);
1151 
1152         raw_spin_unlock_irqrestore(&desc->lock, flags);
1153     }
1154 }
1155 
1156 /**
1157  *  irq_cpu_offline - Invoke all irq_cpu_offline functions.
1158  *
1159  *  Iterate through all irqs and invoke the chip.irq_cpu_offline()
1160  *  for each.
1161  */
1162 void irq_cpu_offline(void)
1163 {
1164     struct irq_desc *desc;
1165     struct irq_chip *chip;
1166     unsigned long flags;
1167     unsigned int irq;
1168 
1169     for_each_active_irq(irq) {
1170         desc = irq_to_desc(irq);
1171         if (!desc)
1172             continue;
1173 
1174         raw_spin_lock_irqsave(&desc->lock, flags);
1175 
1176         chip = irq_data_get_irq_chip(&desc->irq_data);
1177         if (chip && chip->irq_cpu_offline &&
1178             (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1179              !irqd_irq_disabled(&desc->irq_data)))
1180             chip->irq_cpu_offline(&desc->irq_data);
1181 
1182         raw_spin_unlock_irqrestore(&desc->lock, flags);
1183     }
1184 }
1185 #endif
1186 
1187 #ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1188 
1189 #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
1190 /**
1191  *  handle_fasteoi_ack_irq - irq handler for edge hierarchy
1192  *  stacked on transparent controllers
1193  *
1194  *  @desc:  the interrupt description structure for this irq
1195  *
1196  *  Like handle_fasteoi_irq(), but for use with hierarchy where
1197  *  the irq_chip also needs to have its ->irq_ack() function
1198  *  called.
1199  */
1200 void handle_fasteoi_ack_irq(struct irq_desc *desc)
1201 {
1202     struct irq_chip *chip = desc->irq_data.chip;
1203 
1204     raw_spin_lock(&desc->lock);
1205 
1206     if (!irq_may_run(desc))
1207         goto out;
1208 
1209     desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1210 
1211     /*
1212      * If it is disabled or no action is available
1213      * then mask it and get out of here:
1214      */
1215     if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1216         desc->istate |= IRQS_PENDING;
1217         mask_irq(desc);
1218         goto out;
1219     }
1220 
1221     kstat_incr_irqs_this_cpu(desc);
1222     if (desc->istate & IRQS_ONESHOT)
1223         mask_irq(desc);
1224 
1225     /* Start handling the irq */
1226     desc->irq_data.chip->irq_ack(&desc->irq_data);
1227 
1228     handle_irq_event(desc);
1229 
1230     cond_unmask_eoi_irq(desc, chip);
1231 
1232     raw_spin_unlock(&desc->lock);
1233     return;
1234 out:
1235     if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1236         chip->irq_eoi(&desc->irq_data);
1237     raw_spin_unlock(&desc->lock);
1238 }
1239 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
1240 
1241 /**
1242  *  handle_fasteoi_mask_irq - irq handler for level hierarchy
1243  *  stacked on transparent controllers
1244  *
1245  *  @desc:  the interrupt description structure for this irq
1246  *
1247  *  Like handle_fasteoi_irq(), but for use with hierarchy where
1248  *  the irq_chip also needs to have its ->irq_mask_ack() function
1249  *  called.
1250  */
1251 void handle_fasteoi_mask_irq(struct irq_desc *desc)
1252 {
1253     struct irq_chip *chip = desc->irq_data.chip;
1254 
1255     raw_spin_lock(&desc->lock);
1256     mask_ack_irq(desc);
1257 
1258     if (!irq_may_run(desc))
1259         goto out;
1260 
1261     desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1262 
1263     /*
1264      * If it is disabled or no action is available
1265      * then mask it and get out of here:
1266      */
1267     if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1268         desc->istate |= IRQS_PENDING;
1269         mask_irq(desc);
1270         goto out;
1271     }
1272 
1273     kstat_incr_irqs_this_cpu(desc);
1274     if (desc->istate & IRQS_ONESHOT)
1275         mask_irq(desc);
1276 
1277     handle_irq_event(desc);
1278 
1279     cond_unmask_eoi_irq(desc, chip);
1280 
1281     raw_spin_unlock(&desc->lock);
1282     return;
1283 out:
1284     if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1285         chip->irq_eoi(&desc->irq_data);
1286     raw_spin_unlock(&desc->lock);
1287 }
1288 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
1289 
1290 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
1291 
1292 /**
1293  * irq_chip_set_parent_state - set the state of a parent interrupt.
1294  *
1295  * @data: Pointer to interrupt specific data
1296  * @which: State to be restored (one of IRQCHIP_STATE_*)
1297  * @val: Value corresponding to @which
1298  *
1299  * Conditionally successful: returns 0 if the underlying irqchip does not implement it.
1300  */
1301 int irq_chip_set_parent_state(struct irq_data *data,
1302                   enum irqchip_irq_state which,
1303                   bool val)
1304 {
1305     data = data->parent_data;
1306 
1307     if (!data || !data->chip->irq_set_irqchip_state)
1308         return 0;
1309 
1310     return data->chip->irq_set_irqchip_state(data, which, val);
1311 }
1312 EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
1313 
1314 /**
1315  * irq_chip_get_parent_state - get the state of a parent interrupt.
1316  *
1317  * @data: Pointer to interrupt specific data
1318  * @which: one of IRQCHIP_STATE_* the caller wants to know
1319  * @state: a pointer to a boolean where the state is to be stored
1320  *
1321  * Conditionally successful: returns 0 if the underlying irqchip does not implement it.
1322  */
1323 int irq_chip_get_parent_state(struct irq_data *data,
1324                   enum irqchip_irq_state which,
1325                   bool *state)
1326 {
1327     data = data->parent_data;
1328 
1329     if (!data || !data->chip->irq_get_irqchip_state)
1330         return 0;
1331 
1332     return data->chip->irq_get_irqchip_state(data, which, state);
1333 }
1334 EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
1335 
1336 /**
1337  * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask
1338  * if the parent chip has no irq_enable callback)
1339  * @data:   Pointer to interrupt specific data
1340  */
1341 void irq_chip_enable_parent(struct irq_data *data)
1342 {
1343     data = data->parent_data;
1344     if (data->chip->irq_enable)
1345         data->chip->irq_enable(data);
1346     else
1347         data->chip->irq_unmask(data);
1348 }
1349 EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
1350 
1351 /**
1352  * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask
1353  * if the parent chip has no irq_disable callback)
1354  * @data:   Pointer to interrupt specific data
1355  */
1356 void irq_chip_disable_parent(struct irq_data *data)
1357 {
1358     data = data->parent_data;
1359     if (data->chip->irq_disable)
1360         data->chip->irq_disable(data);
1361     else
1362         data->chip->irq_mask(data);
1363 }
1364 EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
1365 
1366 /**
1367  * irq_chip_ack_parent - Acknowledge the parent interrupt
1368  * @data:   Pointer to interrupt specific data
1369  */
1370 void irq_chip_ack_parent(struct irq_data *data)
1371 {
1372     data = data->parent_data;
1373     data->chip->irq_ack(data);
1374 }
1375 EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1376 
1377 /**
1378  * irq_chip_mask_parent - Mask the parent interrupt
1379  * @data:   Pointer to interrupt specific data
1380  */
1381 void irq_chip_mask_parent(struct irq_data *data)
1382 {
1383     data = data->parent_data;
1384     data->chip->irq_mask(data);
1385 }
1386 EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1387 
1388 /**
1389  * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1390  * @data:   Pointer to interrupt specific data
1391  */
1392 void irq_chip_mask_ack_parent(struct irq_data *data)
1393 {
1394     data = data->parent_data;
1395     data->chip->irq_mask_ack(data);
1396 }
1397 EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
1398 
1399 /**
1400  * irq_chip_unmask_parent - Unmask the parent interrupt
1401  * @data:   Pointer to interrupt specific data
1402  */
1403 void irq_chip_unmask_parent(struct irq_data *data)
1404 {
1405     data = data->parent_data;
1406     data->chip->irq_unmask(data);
1407 }
1408 EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1409 
1410 /**
1411  * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1412  * @data:   Pointer to interrupt specific data
1413  */
1414 void irq_chip_eoi_parent(struct irq_data *data)
1415 {
1416     data = data->parent_data;
1417     data->chip->irq_eoi(data);
1418 }
1419 EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
1420 
1421 /**
1422  * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1423  * @data:   Pointer to interrupt specific data
1424  * @dest:   The affinity mask to set
1425  * @force:  Flag to enforce setting (disable online checks)
1426  *
1427  * Conditional, as the underlying parent chip might not implement it.
1428  */
1429 int irq_chip_set_affinity_parent(struct irq_data *data,
1430                  const struct cpumask *dest, bool force)
1431 {
1432     data = data->parent_data;
1433     if (data->chip->irq_set_affinity)
1434         return data->chip->irq_set_affinity(data, dest, force);
1435 
1436     return -ENOSYS;
1437 }
1438 EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
1439 
1440 /**
1441  * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1442  * @data:   Pointer to interrupt specific data
1443  * @type:   IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1444  *
1445  * Conditional, as the underlying parent chip might not implement it.
1446  */
1447 int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1448 {
1449     data = data->parent_data;
1450 
1451     if (data->chip->irq_set_type)
1452         return data->chip->irq_set_type(data, type);
1453 
1454     return -ENOSYS;
1455 }
1456 EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1457 
1458 /**
1459  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1460  * @data:   Pointer to interrupt specific data
1461  *
1462  * Iterate through the domain hierarchy of the interrupt and check
1463  * whether a hw retrigger function exists. If yes, invoke it.
1464  */
1465 int irq_chip_retrigger_hierarchy(struct irq_data *data)
1466 {
1467     for (data = data->parent_data; data; data = data->parent_data)
1468         if (data->chip && data->chip->irq_retrigger)
1469             return data->chip->irq_retrigger(data);
1470 
1471     return 0;
1472 }
1473 EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);
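
/*
 * A sketch of how these *_parent helpers compose (hypothetical chip):
 * a stacked irq_chip that owns no registers of its own and forwards
 * every operation to its parent in the domain hierarchy.
 */
static struct irq_chip example_stacked_chip = {
    .name            = "example-stacked",
    .irq_mask        = irq_chip_mask_parent,
    .irq_unmask        = irq_chip_unmask_parent,
    .irq_eoi        = irq_chip_eoi_parent,
    .irq_set_type        = irq_chip_set_type_parent,
    .irq_set_affinity    = irq_chip_set_affinity_parent,
    .irq_retrigger        = irq_chip_retrigger_hierarchy,
};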
1474 
1475 /**
1476  * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1477  * @data:   Pointer to interrupt specific data
1478  * @vcpu_info:  The vcpu affinity information
1479  */
1480 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1481 {
1482     data = data->parent_data;
1483     if (data->chip->irq_set_vcpu_affinity)
1484         return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1485 
1486     return -ENOSYS;
1487 }
1488 EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);
1489 /**
1490  * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1491  * @data:   Pointer to interrupt specific data
1492  * @on:     Whether to set or reset the wake-up capability of this irq
1493  *
1494  * Conditional, as the underlying parent chip might not implement it.
1495  */
1496 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1497 {
1498     data = data->parent_data;
1499 
1500     if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
1501         return 0;
1502 
1503     if (data->chip->irq_set_wake)
1504         return data->chip->irq_set_wake(data, on);
1505 
1506     return -ENOSYS;
1507 }
1508 EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
1509 
1510 /**
1511  * irq_chip_request_resources_parent - Request resources on the parent interrupt
1512  * @data:   Pointer to interrupt specific data
1513  */
1514 int irq_chip_request_resources_parent(struct irq_data *data)
1515 {
1516     data = data->parent_data;
1517 
1518     if (data->chip->irq_request_resources)
1519         return data->chip->irq_request_resources(data);
1520 
1521     /* no error on missing optional irq_chip::irq_request_resources */
1522     return 0;
1523 }
1524 EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
1525 
1526 /**
1527  * irq_chip_release_resources_parent - Release resources on the parent interrupt
1528  * @data:   Pointer to interrupt specific data
1529  */
1530 void irq_chip_release_resources_parent(struct irq_data *data)
1531 {
1532     data = data->parent_data;
1533     if (data->chip->irq_release_resources)
1534         data->chip->irq_release_resources(data);
1535 }
1536 EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
1537 #endif
1538 
1539 /**
1540  * irq_chip_compose_msi_msg - Compose msi message for an irq chip
1541  * @data:   Pointer to interrupt specific data
1542  * @msg:    Pointer to the MSI message
1543  *
1544  * For hierarchical domains we find the first chip in the hierarchy
1545  * which implements the irq_compose_msi_msg callback. For
1546  * non-hierarchical domains we use the top level chip.
1547  */
1548 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1549 {
1550     struct irq_data *pos;
1551 
1552     for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
1553         if (data->chip && data->chip->irq_compose_msi_msg)
1554             pos = data;
1555     }
1556 
1557     if (!pos)
1558         return -ENOSYS;
1559 
1560     pos->chip->irq_compose_msi_msg(pos, msg);
1561     return 0;
1562 }
1563 
1564 static struct device *irq_get_parent_device(struct irq_data *data)
1565 {
1566     if (data->domain)
1567         return data->domain->dev;
1568 
1569     return NULL;
1570 }
1571 
1572 /**
1573  * irq_chip_pm_get - Enable power for an IRQ chip
1574  * @data:   Pointer to interrupt specific data
1575  *
1576  * Enable the power to the IRQ chip referenced by the interrupt data
1577  * structure.
1578  */
1579 int irq_chip_pm_get(struct irq_data *data)
1580 {
1581     struct device *dev = irq_get_parent_device(data);
1582     int retval = 0;
1583 
1584     if (IS_ENABLED(CONFIG_PM) && dev)
1585         retval = pm_runtime_resume_and_get(dev);
1586 
1587     return retval;
1588 }
1589 
1590 /**
1591  * irq_chip_pm_put - Disable power for an IRQ chip
1592  * @data:   Pointer to interrupt specific data
1593  *
1594  * Disable the power to the IRQ chip referenced by the interrupt data
1595  * structure. Note that power will only be disabled once this
1596  * function has been called for all IRQs that have called irq_chip_pm_get().
1597  */
1598 int irq_chip_pm_put(struct irq_data *data)
1599 {
1600     struct device *dev = irq_get_parent_device(data);
1601     int retval = 0;
1602 
1603     if (IS_ENABLED(CONFIG_PM) && dev)
1604         retval = pm_runtime_put(dev);
1605 
1606     return (retval < 0) ? retval : 0;
1607 }