0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_IRQ_H
0003 #define _LINUX_IRQ_H
0004 
0005 /*
0006  * Please do not include this file in generic code.  There is currently
0007  * no requirement for any architecture to implement anything held
0008  * within this file.
0009  *
0010  * Thanks. --rmk
0011  */
0012 
0013 #include <linux/cache.h>
0014 #include <linux/spinlock.h>
0015 #include <linux/cpumask.h>
0016 #include <linux/irqhandler.h>
0017 #include <linux/irqreturn.h>
0018 #include <linux/irqnr.h>
0019 #include <linux/topology.h>
0020 #include <linux/io.h>
0021 #include <linux/slab.h>
0022 
0023 #include <asm/irq.h>
0024 #include <asm/ptrace.h>
0025 #include <asm/irq_regs.h>
0026 
0027 struct seq_file;
0028 struct module;
0029 struct msi_msg;
0030 struct irq_affinity_desc;
0031 enum irqchip_irq_state;
0032 
0033 /*
0034  * IRQ line status.
0035  *
0036  * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
0037  *
0038  * IRQ_TYPE_NONE        - default, unspecified type
0039  * IRQ_TYPE_EDGE_RISING     - rising edge triggered
0040  * IRQ_TYPE_EDGE_FALLING    - falling edge triggered
0041  * IRQ_TYPE_EDGE_BOTH       - rising and falling edge triggered
0042  * IRQ_TYPE_LEVEL_HIGH      - high level triggered
0043  * IRQ_TYPE_LEVEL_LOW       - low level triggered
0044  * IRQ_TYPE_LEVEL_MASK      - Mask to filter out the level bits
0045  * IRQ_TYPE_SENSE_MASK      - Mask for all the above bits
0046  * IRQ_TYPE_DEFAULT     - For use by some PICs to ask irq_set_type
0047  *                to set up the HW to a sane default (used
0048  *                by irqdomain map() callbacks to synchronize
0049  *                the HW state and SW flags for a newly
0050  *                allocated descriptor).
0051  *
0052  * IRQ_TYPE_PROBE       - Special flag for probing in progress
0053  *
0054  * Bits which can be modified via irq_set/clear/modify_status_flags()
0055  * IRQ_LEVEL            - Interrupt is level type. Will be also
0056  *                updated in the code when the above trigger
0057  *                bits are modified via irq_set_irq_type()
0058  * IRQ_PER_CPU          - Mark an interrupt PER_CPU. Will protect
0059  *                it from affinity setting
0060  * IRQ_NOPROBE          - Interrupt cannot be probed by autoprobing
0061  * IRQ_NOREQUEST        - Interrupt cannot be requested via
0062  *                request_irq()
0063  * IRQ_NOTHREAD         - Interrupt cannot be threaded
0064  * IRQ_NOAUTOEN         - Interrupt is not automatically enabled in
0065  *                request/setup_irq()
0066  * IRQ_NO_BALANCING     - Interrupt cannot be balanced (affinity set)
0067  * IRQ_MOVE_PCNTXT      - Interrupt can be migrated from process context
0068  * IRQ_NESTED_THREAD        - Interrupt nests into another thread
0069  * IRQ_PER_CPU_DEVID        - Dev_id is a per-cpu variable
0070  * IRQ_IS_POLLED        - Always polled by another interrupt. Exclude
0071  *                it from the spurious interrupt detection
0072  *                mechanism and from core side polling.
0073  * IRQ_DISABLE_UNLAZY       - Disable lazy irq disable
0074  * IRQ_HIDDEN           - Don't show up in /proc/interrupts
0075  * IRQ_NO_DEBUG         - Exclude from note_interrupt() debugging
0076  */
0077 enum {
0078     IRQ_TYPE_NONE       = 0x00000000,
0079     IRQ_TYPE_EDGE_RISING    = 0x00000001,
0080     IRQ_TYPE_EDGE_FALLING   = 0x00000002,
0081     IRQ_TYPE_EDGE_BOTH  = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
0082     IRQ_TYPE_LEVEL_HIGH = 0x00000004,
0083     IRQ_TYPE_LEVEL_LOW  = 0x00000008,
0084     IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
0085     IRQ_TYPE_SENSE_MASK = 0x0000000f,
0086     IRQ_TYPE_DEFAULT    = IRQ_TYPE_SENSE_MASK,
0087 
0088     IRQ_TYPE_PROBE      = 0x00000010,
0089 
0090     IRQ_LEVEL       = (1 <<  8),
0091     IRQ_PER_CPU     = (1 <<  9),
0092     IRQ_NOPROBE     = (1 << 10),
0093     IRQ_NOREQUEST       = (1 << 11),
0094     IRQ_NOAUTOEN        = (1 << 12),
0095     IRQ_NO_BALANCING    = (1 << 13),
0096     IRQ_MOVE_PCNTXT     = (1 << 14),
0097     IRQ_NESTED_THREAD   = (1 << 15),
0098     IRQ_NOTHREAD        = (1 << 16),
0099     IRQ_PER_CPU_DEVID   = (1 << 17),
0100     IRQ_IS_POLLED       = (1 << 18),
0101     IRQ_DISABLE_UNLAZY  = (1 << 19),
0102     IRQ_HIDDEN      = (1 << 20),
0103     IRQ_NO_DEBUG        = (1 << 21),
0104 };
0105 
0106 #define IRQF_MODIFY_MASK    \
0107     (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
0108      IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
0109      IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
0110      IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)
0111 
0112 #define IRQ_NO_BALANCING_MASK   (IRQ_PER_CPU | IRQ_NO_BALANCING)
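
/*
 * Example (editor's sketch, not part of the upstream header): the flags
 * above are normally applied through helpers declared further down in this
 * file rather than written into the descriptor directly.  The irq number
 * is hypothetical.
 *
 *	unsigned int irq = 42;
 *
 *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_NOPROBE);
 */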
0113 
0114 /*
0115  * Return value for chip->irq_set_affinity()
0116  *
0117  * IRQ_SET_MASK_OK  - OK, core updates irq_common_data.affinity
0118  * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_common_data.affinity
0119  * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
0120  *            support stacked irqchips, which indicates skipping
0121  *            all descendant irqchips.
0122  */
0123 enum {
0124     IRQ_SET_MASK_OK = 0,
0125     IRQ_SET_MASK_OK_NOCOPY,
0126     IRQ_SET_MASK_OK_DONE,
0127 };
0128 
0129 struct msi_desc;
0130 struct irq_domain;
0131 
0132 /**
0133  * struct irq_common_data - per irq data shared by all irqchips
0134  * @state_use_accessors: status information for irq chip functions.
0135  *          Use accessor functions to deal with it
0136  * @node:       node index useful for balancing
0137  * @handler_data:   per-IRQ data for the irq_chip methods
0138  * @affinity:       IRQ affinity on SMP. If this is an IPI
0139  *          related irq, then this is the mask of the
0140  *          CPUs to which an IPI can be sent.
0141  * @effective_affinity: The effective IRQ affinity on SMP as some irq
0142  *          chips do not allow multi CPU destinations.
0143  *          A subset of @affinity.
0144  * @msi_desc:       MSI descriptor
0145  * @ipi_offset:     Offset of first IPI target cpu in @affinity. Optional.
0146  */
0147 struct irq_common_data {
0148     unsigned int        __private state_use_accessors;
0149 #ifdef CONFIG_NUMA
0150     unsigned int        node;
0151 #endif
0152     void            *handler_data;
0153     struct msi_desc     *msi_desc;
0154 #ifdef CONFIG_SMP
0155     cpumask_var_t       affinity;
0156 #endif
0157 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
0158     cpumask_var_t       effective_affinity;
0159 #endif
0160 #ifdef CONFIG_GENERIC_IRQ_IPI
0161     unsigned int        ipi_offset;
0162 #endif
0163 };
0164 
0165 /**
0166  * struct irq_data - per irq chip data passed down to chip functions
0167  * @mask:       precomputed bitmask for accessing the chip registers
0168  * @irq:        interrupt number
0169  * @hwirq:      hardware interrupt number, local to the interrupt domain
0170  * @common:     points to data shared by all irqchips
0171  * @chip:       low level interrupt hardware access
0172  * @domain:     Interrupt translation domain; responsible for mapping
0173  *          between hwirq number and linux irq number.
0174  * @parent_data:    pointer to parent struct irq_data to support hierarchy
0175  *          irq_domain
0176  * @chip_data:      platform-specific per-chip private data for the chip
0177  *          methods, to allow shared chip implementations
0178  */
0179 struct irq_data {
0180     u32         mask;
0181     unsigned int        irq;
0182     unsigned long       hwirq;
0183     struct irq_common_data  *common;
0184     struct irq_chip     *chip;
0185     struct irq_domain   *domain;
0186 #ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
0187     struct irq_data     *parent_data;
0188 #endif
0189     void            *chip_data;
0190 };
0191 
0192 /*
0193  * Bit masks for irq_common_data.state_use_accessors
0194  *
0195  * IRQD_TRIGGER_MASK        - Mask for the trigger type bits
0196  * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
0197  * IRQD_ACTIVATED       - Interrupt has already been activated
0198  * IRQD_NO_BALANCING        - Balancing disabled for this IRQ
0199  * IRQD_PER_CPU         - Interrupt is per cpu
0200  * IRQD_AFFINITY_SET        - Interrupt affinity was set
0201  * IRQD_LEVEL           - Interrupt is level triggered
0202  * IRQD_WAKEUP_STATE        - Interrupt is configured for wakeup
0203  *                from suspend
0204  * IRQD_MOVE_PCNTXT     - Interrupt can be moved in process
0205  *                context
0206  * IRQD_IRQ_DISABLED        - Disabled state of the interrupt
0207  * IRQD_IRQ_MASKED      - Masked state of the interrupt
0208  * IRQD_IRQ_INPROGRESS      - In progress state of the interrupt
0209  * IRQD_WAKEUP_ARMED        - Wakeup mode armed
0210  * IRQD_FORWARDED_TO_VCPU   - The interrupt is forwarded to a VCPU
0211  * IRQD_AFFINITY_MANAGED    - Affinity is auto-managed by the kernel
0212  * IRQD_IRQ_STARTED     - Startup state of the interrupt
0213  * IRQD_MANAGED_SHUTDOWN    - Interrupt was shutdown due to empty affinity
0214  *                mask. Applies only to affinity managed irqs.
0215  * IRQD_SINGLE_TARGET       - IRQ allows only a single affinity target
0216  * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
0217  * IRQD_CAN_RESERVE     - Can use reservation mode
0218  * IRQD_MSI_NOMASK_QUIRK    - Non-maskable MSI quirk for affinity change
0219  *                required
0220  * IRQD_HANDLE_ENFORCE_IRQCTX   - Enforce that handle_irq_*() is only invoked
0221  *                from actual interrupt context.
0222  * IRQD_AFFINITY_ON_ACTIVATE    - Affinity is set on activation. Don't call
0223  *                irq_chip::irq_set_affinity() when deactivated.
0224  * IRQD_IRQ_ENABLED_ON_SUSPEND  - Interrupt is enabled on suspend by irq pm if
0225  *                irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
0226  */
0227 enum {
0228     IRQD_TRIGGER_MASK       = 0xf,
0229     IRQD_SETAFFINITY_PENDING    = (1 <<  8),
0230     IRQD_ACTIVATED          = (1 <<  9),
0231     IRQD_NO_BALANCING       = (1 << 10),
0232     IRQD_PER_CPU            = (1 << 11),
0233     IRQD_AFFINITY_SET       = (1 << 12),
0234     IRQD_LEVEL          = (1 << 13),
0235     IRQD_WAKEUP_STATE       = (1 << 14),
0236     IRQD_MOVE_PCNTXT        = (1 << 15),
0237     IRQD_IRQ_DISABLED       = (1 << 16),
0238     IRQD_IRQ_MASKED         = (1 << 17),
0239     IRQD_IRQ_INPROGRESS     = (1 << 18),
0240     IRQD_WAKEUP_ARMED       = (1 << 19),
0241     IRQD_FORWARDED_TO_VCPU      = (1 << 20),
0242     IRQD_AFFINITY_MANAGED       = (1 << 21),
0243     IRQD_IRQ_STARTED        = (1 << 22),
0244     IRQD_MANAGED_SHUTDOWN       = (1 << 23),
0245     IRQD_SINGLE_TARGET      = (1 << 24),
0246     IRQD_DEFAULT_TRIGGER_SET    = (1 << 25),
0247     IRQD_CAN_RESERVE        = (1 << 26),
0248     IRQD_MSI_NOMASK_QUIRK       = (1 << 27),
0249     IRQD_HANDLE_ENFORCE_IRQCTX  = (1 << 28),
0250     IRQD_AFFINITY_ON_ACTIVATE   = (1 << 29),
0251     IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30),
0252 };
0253 
0254 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
0255 
0256 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
0257 {
0258     return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
0259 }
0260 
0261 static inline bool irqd_is_per_cpu(struct irq_data *d)
0262 {
0263     return __irqd_to_state(d) & IRQD_PER_CPU;
0264 }
0265 
0266 static inline bool irqd_can_balance(struct irq_data *d)
0267 {
0268     return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
0269 }
0270 
0271 static inline bool irqd_affinity_was_set(struct irq_data *d)
0272 {
0273     return __irqd_to_state(d) & IRQD_AFFINITY_SET;
0274 }
0275 
0276 static inline void irqd_mark_affinity_was_set(struct irq_data *d)
0277 {
0278     __irqd_to_state(d) |= IRQD_AFFINITY_SET;
0279 }
0280 
0281 static inline bool irqd_trigger_type_was_set(struct irq_data *d)
0282 {
0283     return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
0284 }
0285 
0286 static inline u32 irqd_get_trigger_type(struct irq_data *d)
0287 {
0288     return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
0289 }
0290 
0291 /*
0292  * Must only be called inside irq_chip.irq_set_type() functions or
0293  * from the DT/ACPI setup code.
0294  */
0295 static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
0296 {
0297     __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
0298     __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
0299     __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
0300 }
0301 
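/*
 * Example (editor's sketch): a typical irq_chip::irq_set_type() callback
 * records the trigger type with irqd_set_trigger_type() after programming
 * the hardware and then switches the flow handler.  example_hw_set_trigger()
 * is a hypothetical helper; handle_level_irq()/handle_edge_irq() are
 * declared later in this file and irq_set_handler_locked() comes from
 * <linux/irqdesc.h>.
 *
 *	static int example_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		example_hw_set_trigger(d->hwirq, type);
 *		irqd_set_trigger_type(d, type);
 *		irq_set_handler_locked(d, (type & IRQ_TYPE_LEVEL_MASK) ?
 *				       handle_level_irq : handle_edge_irq);
 *		return 0;
 *	}
 */
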
0302 static inline bool irqd_is_level_type(struct irq_data *d)
0303 {
0304     return __irqd_to_state(d) & IRQD_LEVEL;
0305 }
0306 
0307 /*
0308  * Must only be called from irq_chip.irq_set_affinity() callbacks or from
0309  * low level hierarchy domain allocation functions.
0310  */
0311 static inline void irqd_set_single_target(struct irq_data *d)
0312 {
0313     __irqd_to_state(d) |= IRQD_SINGLE_TARGET;
0314 }
0315 
0316 static inline bool irqd_is_single_target(struct irq_data *d)
0317 {
0318     return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
0319 }
0320 
0321 static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
0322 {
0323     __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
0324 }
0325 
0326 static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
0327 {
0328     return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
0329 }
0330 
0331 static inline bool irqd_is_enabled_on_suspend(struct irq_data *d)
0332 {
0333     return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND;
0334 }
0335 
0336 static inline bool irqd_is_wakeup_set(struct irq_data *d)
0337 {
0338     return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
0339 }
0340 
0341 static inline bool irqd_can_move_in_process_context(struct irq_data *d)
0342 {
0343     return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
0344 }
0345 
0346 static inline bool irqd_irq_disabled(struct irq_data *d)
0347 {
0348     return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
0349 }
0350 
0351 static inline bool irqd_irq_masked(struct irq_data *d)
0352 {
0353     return __irqd_to_state(d) & IRQD_IRQ_MASKED;
0354 }
0355 
0356 static inline bool irqd_irq_inprogress(struct irq_data *d)
0357 {
0358     return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
0359 }
0360 
0361 static inline bool irqd_is_wakeup_armed(struct irq_data *d)
0362 {
0363     return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
0364 }
0365 
0366 static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
0367 {
0368     return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
0369 }
0370 
0371 static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
0372 {
0373     __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
0374 }
0375 
0376 static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
0377 {
0378     __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
0379 }
0380 
0381 static inline bool irqd_affinity_is_managed(struct irq_data *d)
0382 {
0383     return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
0384 }
0385 
0386 static inline bool irqd_is_activated(struct irq_data *d)
0387 {
0388     return __irqd_to_state(d) & IRQD_ACTIVATED;
0389 }
0390 
0391 static inline void irqd_set_activated(struct irq_data *d)
0392 {
0393     __irqd_to_state(d) |= IRQD_ACTIVATED;
0394 }
0395 
0396 static inline void irqd_clr_activated(struct irq_data *d)
0397 {
0398     __irqd_to_state(d) &= ~IRQD_ACTIVATED;
0399 }
0400 
0401 static inline bool irqd_is_started(struct irq_data *d)
0402 {
0403     return __irqd_to_state(d) & IRQD_IRQ_STARTED;
0404 }
0405 
0406 static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
0407 {
0408     return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
0409 }
0410 
0411 static inline void irqd_set_can_reserve(struct irq_data *d)
0412 {
0413     __irqd_to_state(d) |= IRQD_CAN_RESERVE;
0414 }
0415 
0416 static inline void irqd_clr_can_reserve(struct irq_data *d)
0417 {
0418     __irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
0419 }
0420 
0421 static inline bool irqd_can_reserve(struct irq_data *d)
0422 {
0423     return __irqd_to_state(d) & IRQD_CAN_RESERVE;
0424 }
0425 
0426 static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
0427 {
0428     __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
0429 }
0430 
0431 static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
0432 {
0433     __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
0434 }
0435 
0436 static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
0437 {
0438     return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
0439 }
0440 
0441 static inline void irqd_set_affinity_on_activate(struct irq_data *d)
0442 {
0443     __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
0444 }
0445 
0446 static inline bool irqd_affinity_on_activate(struct irq_data *d)
0447 {
0448     return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
0449 }
0450 
0451 #undef __irqd_to_state
0452 
0453 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
0454 {
0455     return d->hwirq;
0456 }
0457 
0458 /**
0459  * struct irq_chip - hardware interrupt chip descriptor
0460  *
0461  * @name:       name for /proc/interrupts
0462  * @irq_startup:    start up the interrupt (defaults to ->enable if NULL)
0463  * @irq_shutdown:   shut down the interrupt (defaults to ->disable if NULL)
0464  * @irq_enable:     enable the interrupt (defaults to chip->unmask if NULL)
0465  * @irq_disable:    disable the interrupt
0466  * @irq_ack:        start of a new interrupt
0467  * @irq_mask:       mask an interrupt source
0468  * @irq_mask_ack:   ack and mask an interrupt source
0469  * @irq_unmask:     unmask an interrupt source
0470  * @irq_eoi:        end of interrupt
0471  * @irq_set_affinity:   Set the CPU affinity on SMP machines. If the force
0472  *          argument is true, it tells the driver to
0473  *          unconditionally apply the affinity setting. Sanity
0474  *          checks against the supplied affinity mask are not
0475  *          required. This is used for CPU hotplug where the
0476  *          target CPU is not yet set in the cpu_online_mask.
0477  * @irq_retrigger:  resend an IRQ to the CPU
0478  * @irq_set_type:   set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
0479  * @irq_set_wake:   enable/disable power-management wake-on of an IRQ
0480  * @irq_bus_lock:   function to lock access to slow bus (i2c) chips
0481  * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
0482  * @irq_cpu_online: configure an interrupt source for a secondary CPU
0483  * @irq_cpu_offline:    un-configure an interrupt source for a secondary CPU
0484  * @irq_suspend:    function called from core code on suspend once per
0485  *          chip, when one or more interrupts are installed
0486  * @irq_resume:     function called from core code on resume once per chip,
0487  *          when one or more interrupts are installed
0488  * @irq_pm_shutdown:    function called from core code on shutdown once per chip
0489  * @irq_calc_mask:  Optional function to set irq_data.mask for special cases
0490  * @irq_print_chip: optional to print special chip info in show_interrupts
0491  * @irq_request_resources:  optional to request resources before calling
0492  *              any other callback related to this irq
0493  * @irq_release_resources:  optional to release resources acquired with
0494  *              irq_request_resources
0495  * @irq_compose_msi_msg:    optional to compose message content for MSI
0496  * @irq_write_msi_msg:  optional to write message content for MSI
0497  * @irq_get_irqchip_state:  return the internal state of an interrupt
0498  * @irq_set_irqchip_state:  set the internal state of an interrupt
0499  * @irq_set_vcpu_affinity:  optional to target a vCPU in a virtual machine
0500  * @ipi_send_single:    send a single IPI to the destination cpu
0501  * @ipi_send_mask:  send an IPI to destination cpus in cpumask
0502  * @irq_nmi_setup:  function called from core code before enabling an NMI
0503  * @irq_nmi_teardown:   function called from core code after disabling an NMI
0504  * @flags:      chip specific flags
0505  */
0506 struct irq_chip {
0507     const char  *name;
0508     unsigned int    (*irq_startup)(struct irq_data *data);
0509     void        (*irq_shutdown)(struct irq_data *data);
0510     void        (*irq_enable)(struct irq_data *data);
0511     void        (*irq_disable)(struct irq_data *data);
0512 
0513     void        (*irq_ack)(struct irq_data *data);
0514     void        (*irq_mask)(struct irq_data *data);
0515     void        (*irq_mask_ack)(struct irq_data *data);
0516     void        (*irq_unmask)(struct irq_data *data);
0517     void        (*irq_eoi)(struct irq_data *data);
0518 
0519     int     (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
0520     int     (*irq_retrigger)(struct irq_data *data);
0521     int     (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
0522     int     (*irq_set_wake)(struct irq_data *data, unsigned int on);
0523 
0524     void        (*irq_bus_lock)(struct irq_data *data);
0525     void        (*irq_bus_sync_unlock)(struct irq_data *data);
0526 
0527 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
0528     void        (*irq_cpu_online)(struct irq_data *data);
0529     void        (*irq_cpu_offline)(struct irq_data *data);
0530 #endif
0531     void        (*irq_suspend)(struct irq_data *data);
0532     void        (*irq_resume)(struct irq_data *data);
0533     void        (*irq_pm_shutdown)(struct irq_data *data);
0534 
0535     void        (*irq_calc_mask)(struct irq_data *data);
0536 
0537     void        (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
0538     int     (*irq_request_resources)(struct irq_data *data);
0539     void        (*irq_release_resources)(struct irq_data *data);
0540 
0541     void        (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
0542     void        (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
0543 
0544     int     (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
0545     int     (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);
0546 
0547     int     (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
0548 
0549     void        (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
0550     void        (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
0551 
0552     int     (*irq_nmi_setup)(struct irq_data *data);
0553     void        (*irq_nmi_teardown)(struct irq_data *data);
0554 
0555     unsigned long   flags;
0556 };
0557 
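/*
 * Example (editor's sketch, hypothetical hardware): a minimal irq_chip for
 * a memory-mapped controller with set/clear style mask registers.  The
 * struct example_intc, the register offsets and the way chip_data gets
 * populated are invented for illustration; the callback signatures and
 * irq_data_get_irq_chip_data() (defined later in this file) match the
 * real API.
 *
 *	#define EXAMPLE_MASK_SET	0x00
 *	#define EXAMPLE_MASK_CLR	0x04
 *	#define EXAMPLE_ACK		0x08
 *
 *	struct example_intc {
 *		void __iomem *base;
 *	};
 *
 *	static void example_irq_mask(struct irq_data *d)
 *	{
 *		struct example_intc *intc = irq_data_get_irq_chip_data(d);
 *
 *		writel_relaxed(BIT(d->hwirq), intc->base + EXAMPLE_MASK_SET);
 *	}
 *
 *	static void example_irq_unmask(struct irq_data *d)
 *	{
 *		struct example_intc *intc = irq_data_get_irq_chip_data(d);
 *
 *		writel_relaxed(BIT(d->hwirq), intc->base + EXAMPLE_MASK_CLR);
 *	}
 *
 *	static void example_irq_ack(struct irq_data *d)
 *	{
 *		struct example_intc *intc = irq_data_get_irq_chip_data(d);
 *
 *		writel_relaxed(BIT(d->hwirq), intc->base + EXAMPLE_ACK);
 *	}
 *
 *	static struct irq_chip example_irq_chip = {
 *		.name		= "example-intc",
 *		.irq_mask	= example_irq_mask,
 *		.irq_unmask	= example_irq_unmask,
 *		.irq_ack	= example_irq_ack,
 *	};
 */
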
0558 /*
0559  * irq_chip specific flags
0560  *
0561  * IRQCHIP_SET_TYPE_MASKED:           Mask before calling chip.irq_set_type()
0562  * IRQCHIP_EOI_IF_HANDLED:            Only issue irq_eoi() when irq was handled
0563  * IRQCHIP_MASK_ON_SUSPEND:           Mask non wake irqs in the suspend path
0564  * IRQCHIP_ONOFFLINE_ENABLED:         Only call irq_on/off_line callbacks
0565  *                                    when irq enabled
0566  * IRQCHIP_SKIP_SET_WAKE:             Skip chip.irq_set_wake(), for this irq chip
0567  * IRQCHIP_ONESHOT_SAFE:              One shot does not require mask/unmask
0568  * IRQCHIP_EOI_THREADED:              Chip requires eoi() on unmask in threaded mode
0569  * IRQCHIP_SUPPORTS_LEVEL_MSI:        Chip can provide two doorbells for Level MSIs
0570  * IRQCHIP_SUPPORTS_NMI:              Chip can deliver NMIs, only for root irqchips
0571  * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND:  Invokes __enable_irq()/__disable_irq() for wake irqs
0572  *                                    in the suspend path if they are in disabled state
0573  * IRQCHIP_AFFINITY_PRE_STARTUP:      Default affinity update before startup
0574  * IRQCHIP_IMMUTABLE:             Don't ever change anything in this chip
0575  */
0576 enum {
0577     IRQCHIP_SET_TYPE_MASKED         = (1 <<  0),
0578     IRQCHIP_EOI_IF_HANDLED          = (1 <<  1),
0579     IRQCHIP_MASK_ON_SUSPEND         = (1 <<  2),
0580     IRQCHIP_ONOFFLINE_ENABLED       = (1 <<  3),
0581     IRQCHIP_SKIP_SET_WAKE           = (1 <<  4),
0582     IRQCHIP_ONESHOT_SAFE            = (1 <<  5),
0583     IRQCHIP_EOI_THREADED            = (1 <<  6),
0584     IRQCHIP_SUPPORTS_LEVEL_MSI      = (1 <<  7),
0585     IRQCHIP_SUPPORTS_NMI            = (1 <<  8),
0586     IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND    = (1 <<  9),
0587     IRQCHIP_AFFINITY_PRE_STARTUP        = (1 << 10),
0588     IRQCHIP_IMMUTABLE           = (1 << 11),
0589 };
0590 
0591 #include <linux/irqdesc.h>
0592 
0593 /*
0594  * Pick up the arch-dependent methods:
0595  */
0596 #include <asm/hw_irq.h>
0597 
0598 #ifndef NR_IRQS_LEGACY
0599 # define NR_IRQS_LEGACY 0
0600 #endif
0601 
0602 #ifndef ARCH_IRQ_INIT_FLAGS
0603 # define ARCH_IRQ_INIT_FLAGS    0
0604 #endif
0605 
0606 #define IRQ_DEFAULT_INIT_FLAGS  ARCH_IRQ_INIT_FLAGS
0607 
0608 struct irqaction;
0609 extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
0610 extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
0611 
0612 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
0613 extern void irq_cpu_online(void);
0614 extern void irq_cpu_offline(void);
0615 #endif
0616 extern int irq_set_affinity_locked(struct irq_data *data,
0617                    const struct cpumask *cpumask, bool force);
0618 extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
0619 
0620 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION)
0621 extern void irq_migrate_all_off_this_cpu(void);
0622 extern int irq_affinity_online_cpu(unsigned int cpu);
0623 #else
0624 # define irq_affinity_online_cpu    NULL
0625 #endif
0626 
0627 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
0628 void __irq_move_irq(struct irq_data *data);
0629 static inline void irq_move_irq(struct irq_data *data)
0630 {
0631     if (unlikely(irqd_is_setaffinity_pending(data)))
0632         __irq_move_irq(data);
0633 }
0634 void irq_move_masked_irq(struct irq_data *data);
0635 void irq_force_complete_move(struct irq_desc *desc);
0636 #else
0637 static inline void irq_move_irq(struct irq_data *data) { }
0638 static inline void irq_move_masked_irq(struct irq_data *data) { }
0639 static inline void irq_force_complete_move(struct irq_desc *desc) { }
0640 #endif
0641 
0642 extern int no_irq_affinity;
0643 
0644 #ifdef CONFIG_HARDIRQS_SW_RESEND
0645 int irq_set_parent(int irq, int parent_irq);
0646 #else
0647 static inline int irq_set_parent(int irq, int parent_irq)
0648 {
0649     return 0;
0650 }
0651 #endif
0652 
0653 /*
0654  * Built-in IRQ handlers for various IRQ types,
0655  * callable via desc->handle_irq()
0656  */
0657 extern void handle_level_irq(struct irq_desc *desc);
0658 extern void handle_fasteoi_irq(struct irq_desc *desc);
0659 extern void handle_edge_irq(struct irq_desc *desc);
0660 extern void handle_edge_eoi_irq(struct irq_desc *desc);
0661 extern void handle_simple_irq(struct irq_desc *desc);
0662 extern void handle_untracked_irq(struct irq_desc *desc);
0663 extern void handle_percpu_irq(struct irq_desc *desc);
0664 extern void handle_percpu_devid_irq(struct irq_desc *desc);
0665 extern void handle_bad_irq(struct irq_desc *desc);
0666 extern void handle_nested_irq(unsigned int irq);
0667 
0668 extern void handle_fasteoi_nmi(struct irq_desc *desc);
0669 extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
0670 
0671 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
0672 extern int irq_chip_pm_get(struct irq_data *data);
0673 extern int irq_chip_pm_put(struct irq_data *data);
0674 #ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
0675 extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
0676 extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
0677 extern int irq_chip_set_parent_state(struct irq_data *data,
0678                      enum irqchip_irq_state which,
0679                      bool val);
0680 extern int irq_chip_get_parent_state(struct irq_data *data,
0681                      enum irqchip_irq_state which,
0682                      bool *state);
0683 extern void irq_chip_enable_parent(struct irq_data *data);
0684 extern void irq_chip_disable_parent(struct irq_data *data);
0685 extern void irq_chip_ack_parent(struct irq_data *data);
0686 extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
0687 extern void irq_chip_mask_parent(struct irq_data *data);
0688 extern void irq_chip_mask_ack_parent(struct irq_data *data);
0689 extern void irq_chip_unmask_parent(struct irq_data *data);
0690 extern void irq_chip_eoi_parent(struct irq_data *data);
0691 extern int irq_chip_set_affinity_parent(struct irq_data *data,
0692                     const struct cpumask *dest,
0693                     bool force);
0694 extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
0695 extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
0696                          void *vcpu_info);
0697 extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
0698 extern int irq_chip_request_resources_parent(struct irq_data *data);
0699 extern void irq_chip_release_resources_parent(struct irq_data *data);
0700 #endif
0701 
0702 /* Handling of unhandled and spurious interrupts: */
0703 extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
0704 
0705 
0706 /* Enable/disable irq debugging output: */
0707 extern int noirqdebug_setup(char *str);
0708 
0709 /* Checks whether the interrupt can be requested by request_irq(): */
0710 extern int can_request_irq(unsigned int irq, unsigned long irqflags);
0711 
0712 /* Dummy irq-chip implementations: */
0713 extern struct irq_chip no_irq_chip;
0714 extern struct irq_chip dummy_irq_chip;
0715 
0716 extern void
0717 irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
0718                   irq_flow_handler_t handle, const char *name);
0719 
0720 static inline void irq_set_chip_and_handler(unsigned int irq,
0721                         const struct irq_chip *chip,
0722                         irq_flow_handler_t handle)
0723 {
0724     irq_set_chip_and_handler_name(irq, chip, handle, NULL);
0725 }
0726 
0727 extern int irq_set_percpu_devid(unsigned int irq);
0728 extern int irq_set_percpu_devid_partition(unsigned int irq,
0729                       const struct cpumask *affinity);
0730 extern int irq_get_percpu_devid_partition(unsigned int irq,
0731                       struct cpumask *affinity);
0732 
0733 extern void
0734 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
0735           const char *name);
0736 
0737 static inline void
0738 irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
0739 {
0740     __irq_set_handler(irq, handle, 0, NULL);
0741 }
0742 
0743 /*
0744  * Set a highlevel chained flow handler for a given IRQ.
0745  * (a chained handler is automatically enabled and set to
0746  *  IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
0747  */
0748 static inline void
0749 irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
0750 {
0751     __irq_set_handler(irq, handle, 1, NULL);
0752 }
0753 
0754 /*
0755  * Set a highlevel chained flow handler and its data for a given IRQ.
0756  * (a chained handler is automatically enabled and set to
0757  *  IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
0758  */
0759 void
0760 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
0761                  void *data);
0762 
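/*
 * Example (editor's sketch): installing a chained flow handler for a
 * hypothetical GPIO bank whose interrupts are demultiplexed from a single
 * parent line.  struct example_bank, EXAMPLE_PENDING and the irq domain are
 * invented; chained_irq_enter()/chained_irq_exit() come from
 * <linux/irqchip/chained_irq.h>, while irq_desc_get_chip(),
 * irq_desc_get_handler_data() and generic_handle_domain_irq() come from
 * <linux/irqdesc.h>.
 *
 *	static void example_bank_demux(struct irq_desc *desc)
 *	{
 *		struct example_bank *bank = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		unsigned int hwirq;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl_relaxed(bank->base + EXAMPLE_PENDING);
 *		for_each_set_bit(hwirq, &pending, 32)
 *			generic_handle_domain_irq(bank->domain, hwirq);
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	At probe time, with parent_irq being the upstream interrupt of the bank:
 *
 *	irq_set_chained_handler_and_data(parent_irq, example_bank_demux, bank);
 */
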
0763 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
0764 
0765 static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
0766 {
0767     irq_modify_status(irq, 0, set);
0768 }
0769 
0770 static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
0771 {
0772     irq_modify_status(irq, clr, 0);
0773 }
0774 
0775 static inline void irq_set_noprobe(unsigned int irq)
0776 {
0777     irq_modify_status(irq, 0, IRQ_NOPROBE);
0778 }
0779 
0780 static inline void irq_set_probe(unsigned int irq)
0781 {
0782     irq_modify_status(irq, IRQ_NOPROBE, 0);
0783 }
0784 
0785 static inline void irq_set_nothread(unsigned int irq)
0786 {
0787     irq_modify_status(irq, 0, IRQ_NOTHREAD);
0788 }
0789 
0790 static inline void irq_set_thread(unsigned int irq)
0791 {
0792     irq_modify_status(irq, IRQ_NOTHREAD, 0);
0793 }
0794 
0795 static inline void irq_set_nested_thread(unsigned int irq, bool nest)
0796 {
0797     if (nest)
0798         irq_set_status_flags(irq, IRQ_NESTED_THREAD);
0799     else
0800         irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
0801 }
0802 
0803 static inline void irq_set_percpu_devid_flags(unsigned int irq)
0804 {
0805     irq_set_status_flags(irq,
0806                  IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
0807                  IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
0808 }
0809 
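/*
 * Example (editor's sketch): a per-CPU interrupt (e.g. a per-CPU timer) is
 * marked by the irqchip/domain code and then requested by the driver with
 * request_percpu_irq() from <linux/interrupt.h>.  The chip, handler and
 * per-CPU device pointer below are hypothetical.
 *
 *	In the irq domain's map() path:
 *
 *	irq_set_percpu_devid(virq);
 *	irq_set_chip_and_handler(virq, &example_irq_chip,
 *				 handle_percpu_devid_irq);
 *
 *	In the driver:
 *
 *	err = request_percpu_irq(virq, example_timer_handler, "example-timer",
 *				 example_percpu_dev);
 */
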
0810 /* Set/get chip/data for an IRQ: */
0811 extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip);
0812 extern int irq_set_handler_data(unsigned int irq, void *data);
0813 extern int irq_set_chip_data(unsigned int irq, void *data);
0814 extern int irq_set_irq_type(unsigned int irq, unsigned int type);
0815 extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
0816 extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
0817                 struct msi_desc *entry);
0818 extern struct irq_data *irq_get_irq_data(unsigned int irq);
0819 
0820 static inline struct irq_chip *irq_get_chip(unsigned int irq)
0821 {
0822     struct irq_data *d = irq_get_irq_data(irq);
0823     return d ? d->chip : NULL;
0824 }
0825 
0826 static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
0827 {
0828     return d->chip;
0829 }
0830 
0831 static inline void *irq_get_chip_data(unsigned int irq)
0832 {
0833     struct irq_data *d = irq_get_irq_data(irq);
0834     return d ? d->chip_data : NULL;
0835 }
0836 
0837 static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
0838 {
0839     return d->chip_data;
0840 }
0841 
0842 static inline void *irq_get_handler_data(unsigned int irq)
0843 {
0844     struct irq_data *d = irq_get_irq_data(irq);
0845     return d ? d->common->handler_data : NULL;
0846 }
0847 
0848 static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
0849 {
0850     return d->common->handler_data;
0851 }
0852 
0853 static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
0854 {
0855     struct irq_data *d = irq_get_irq_data(irq);
0856     return d ? d->common->msi_desc : NULL;
0857 }
0858 
0859 static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
0860 {
0861     return d->common->msi_desc;
0862 }
0863 
0864 static inline u32 irq_get_trigger_type(unsigned int irq)
0865 {
0866     struct irq_data *d = irq_get_irq_data(irq);
0867     return d ? irqd_get_trigger_type(d) : 0;
0868 }
0869 
0870 static inline int irq_common_data_get_node(struct irq_common_data *d)
0871 {
0872 #ifdef CONFIG_NUMA
0873     return d->node;
0874 #else
0875     return 0;
0876 #endif
0877 }
0878 
0879 static inline int irq_data_get_node(struct irq_data *d)
0880 {
0881     return irq_common_data_get_node(d->common);
0882 }
0883 
0884 static inline
0885 const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
0886 {
0887 #ifdef CONFIG_SMP
0888     return d->common->affinity;
0889 #else
0890     return cpumask_of(0);
0891 #endif
0892 }
0893 
0894 static inline void irq_data_update_affinity(struct irq_data *d,
0895                         const struct cpumask *m)
0896 {
0897 #ifdef CONFIG_SMP
0898     cpumask_copy(d->common->affinity, m);
0899 #endif
0900 }
0901 
0902 static inline const struct cpumask *irq_get_affinity_mask(int irq)
0903 {
0904     struct irq_data *d = irq_get_irq_data(irq);
0905 
0906     return d ? irq_data_get_affinity_mask(d) : NULL;
0907 }
0908 
0909 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
0910 static inline
0911 const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
0912 {
0913     return d->common->effective_affinity;
0914 }
0915 static inline void irq_data_update_effective_affinity(struct irq_data *d,
0916                               const struct cpumask *m)
0917 {
0918     cpumask_copy(d->common->effective_affinity, m);
0919 }
0920 #else
0921 static inline void irq_data_update_effective_affinity(struct irq_data *d,
0922                               const struct cpumask *m)
0923 {
0924 }
0925 static inline
0926 const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
0927 {
0928     return irq_data_get_affinity_mask(d);
0929 }
0930 #endif
0931 
0932 static inline
0933 const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
0934 {
0935     struct irq_data *d = irq_get_irq_data(irq);
0936 
0937     return d ? irq_data_get_effective_affinity_mask(d) : NULL;
0938 }
0939 
0940 unsigned int arch_dynirq_lower_bound(unsigned int from);
0941 
0942 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
0943               struct module *owner,
0944               const struct irq_affinity_desc *affinity);
0945 
0946 int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
0947                unsigned int cnt, int node, struct module *owner,
0948                const struct irq_affinity_desc *affinity);
0949 
0950 /* use macros to avoid needing export.h for THIS_MODULE */
0951 #define irq_alloc_descs(irq, from, cnt, node)   \
0952     __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
0953 
0954 #define irq_alloc_desc(node)            \
0955     irq_alloc_descs(-1, 1, 1, node)
0956 
0957 #define irq_alloc_desc_at(at, node)     \
0958     irq_alloc_descs(at, at, 1, node)
0959 
0960 #define irq_alloc_desc_from(from, node)     \
0961     irq_alloc_descs(-1, from, 1, node)
0962 
0963 #define irq_alloc_descs_from(from, cnt, node)   \
0964     irq_alloc_descs(-1, from, cnt, node)
0965 
0966 #define devm_irq_alloc_descs(dev, irq, from, cnt, node)     \
0967     __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
0968 
0969 #define devm_irq_alloc_desc(dev, node)              \
0970     devm_irq_alloc_descs(dev, -1, 1, 1, node)
0971 
0972 #define devm_irq_alloc_desc_at(dev, at, node)           \
0973     devm_irq_alloc_descs(dev, at, at, 1, node)
0974 
0975 #define devm_irq_alloc_desc_from(dev, from, node)       \
0976     devm_irq_alloc_descs(dev, -1, from, 1, node)
0977 
0978 #define devm_irq_alloc_descs_from(dev, from, cnt, node)     \
0979     devm_irq_alloc_descs(dev, -1, from, cnt, node)
0980 
0981 void irq_free_descs(unsigned int irq, unsigned int cnt);
0982 static inline void irq_free_desc(unsigned int irq)
0983 {
0984     irq_free_descs(irq, 1);
0985 }
0986 
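/*
 * Example (editor's sketch): allocating a contiguous block of interrupt
 * descriptors and releasing it again.  The start hint, count and use of
 * NUMA_NO_NODE are arbitrary.
 *
 *	int virq = irq_alloc_descs_from(64, 16, NUMA_NO_NODE);
 *
 *	if (virq < 0)
 *		return virq;
 *
 *	...
 *
 *	irq_free_descs(virq, 16);
 */
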
0987 #ifdef CONFIG_GENERIC_IRQ_LEGACY
0988 void irq_init_desc(unsigned int irq);
0989 #endif
0990 
0991 /**
0992  * struct irq_chip_regs - register offsets for struct irq_chip_generic
0993  * @enable: Enable register offset to reg_base
0994  * @disable:    Disable register offset to reg_base
0995  * @mask:   Mask register offset to reg_base
0996  * @ack:    Ack register offset to reg_base
0997  * @eoi:    Eoi register offset to reg_base
0998  * @type:   Type configuration register offset to reg_base
0999  * @polarity:   Polarity configuration register offset to reg_base
1000  */
1001 struct irq_chip_regs {
1002     unsigned long       enable;
1003     unsigned long       disable;
1004     unsigned long       mask;
1005     unsigned long       ack;
1006     unsigned long       eoi;
1007     unsigned long       type;
1008     unsigned long       polarity;
1009 };
1010 
1011 /**
1012  * struct irq_chip_type - Generic interrupt chip instance for a flow type
1013  * @chip:       The real interrupt chip which provides the callbacks
1014  * @regs:       Register offsets for this chip
1015  * @handler:        Flow handler associated with this chip
1016  * @type:       Chip can handle these flow types
1017  * @mask_cache_priv:    Cached mask register private to the chip type
1018  * @mask_cache:     Pointer to cached mask register
1019  *
1020  * An irq_chip_generic can have several instances of irq_chip_type when
1021  * it requires different functions and register offsets for different
1022  * flow types.
1023  */
1024 struct irq_chip_type {
1025     struct irq_chip     chip;
1026     struct irq_chip_regs    regs;
1027     irq_flow_handler_t  handler;
1028     u32         type;
1029     u32         mask_cache_priv;
1030     u32         *mask_cache;
1031 };
1032 
1033 /**
1034  * struct irq_chip_generic - Generic irq chip data structure
1035  * @lock:       Lock to protect register and cache data access
1036  * @reg_base:       Register base address (virtual)
1037  * @reg_readl:      Alternate I/O accessor (defaults to readl if NULL)
1038  * @reg_writel:     Alternate I/O accessor (defaults to writel if NULL)
1039  * @suspend:        Function called from core code on suspend once per
1040  *          chip; can be useful instead of irq_chip::irq_suspend to
1041  *          handle chip details even when no interrupts are in use
1042  * @resume:     Function called from core code on resume once per chip;
1043  *          can be useful instead of irq_chip::irq_resume to handle
1044  *          chip details even when no interrupts are in use
1045  * @irq_base:       Interrupt base nr for this chip
1046  * @irq_cnt:        Number of interrupts handled by this chip
1047  * @mask_cache:     Cached mask register shared between all chip types
1048  * @type_cache:     Cached type register
1049  * @polarity_cache: Cached polarity register
1050  * @wake_enabled:   Interrupt can wakeup from suspend
1051  * @wake_active:    Interrupt is marked as a wakeup from suspend source
1052  * @num_ct:     Number of available irq_chip_type instances (usually 1)
1053  * @private:        Private data for non generic chip callbacks
1054  * @installed:      bitfield to denote installed interrupts
1055  * @unused:     bitfield to denote unused interrupts
1056  * @domain:     irq domain pointer
1057  * @list:       List head for keeping track of instances
1058  * @chip_types:     Array of interrupt irq_chip_types
1059  *
1060  * Note that irq_chip_generic can have multiple irq_chip_type
1061  * implementations which can be associated with a particular irq line of
1062  * an irq_chip_generic instance. That allows sharing and protecting state
1063  * in an irq_chip_generic instance when different flow mechanisms
1064  * (level/edge) need to be implemented for it.
1065  */
1066 struct irq_chip_generic {
1067     raw_spinlock_t      lock;
1068     void __iomem        *reg_base;
1069     u32         (*reg_readl)(void __iomem *addr);
1070     void            (*reg_writel)(u32 val, void __iomem *addr);
1071     void            (*suspend)(struct irq_chip_generic *gc);
1072     void            (*resume)(struct irq_chip_generic *gc);
1073     unsigned int        irq_base;
1074     unsigned int        irq_cnt;
1075     u32         mask_cache;
1076     u32         type_cache;
1077     u32         polarity_cache;
1078     u32         wake_enabled;
1079     u32         wake_active;
1080     unsigned int        num_ct;
1081     void            *private;
1082     unsigned long       installed;
1083     unsigned long       unused;
1084     struct irq_domain   *domain;
1085     struct list_head    list;
1086     struct irq_chip_type    chip_types[];
1087 };
1088 
1089 /**
1090  * enum irq_gc_flags - Initialization flags for generic irq chips
1091  * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg
1092  * @IRQ_GC_INIT_NESTED_LOCK:    Set the lock class of the irqs to nested for
1093  *              irq chips which need to call irq_set_wake() on
1094  *              the parent irq. Usually GPIO implementations
1095  * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private
1096  * @IRQ_GC_NO_MASK:     Do not calculate irq_data->mask
1097  * @IRQ_GC_BE_IO:       Use big-endian register accesses (default: LE)
1098  */
1099 enum irq_gc_flags {
1100     IRQ_GC_INIT_MASK_CACHE      = 1 << 0,
1101     IRQ_GC_INIT_NESTED_LOCK     = 1 << 1,
1102     IRQ_GC_MASK_CACHE_PER_TYPE  = 1 << 2,
1103     IRQ_GC_NO_MASK          = 1 << 3,
1104     IRQ_GC_BE_IO            = 1 << 4,
1105 };
1106 
1107 /*
1108  * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains
1109  * @irqs_per_chip:  Number of interrupts per chip
1110  * @num_chips:      Number of chips
1111  * @irq_flags_to_set:   IRQ* flags to set on irq setup
1112  * @irq_flags_to_clear: IRQ* flags to clear on irq setup
1113  * @gc_flags:       Generic chip specific setup flags
1114  * @gc:         Array of pointers to generic interrupt chips
1115  */
1116 struct irq_domain_chip_generic {
1117     unsigned int        irqs_per_chip;
1118     unsigned int        num_chips;
1119     unsigned int        irq_flags_to_clear;
1120     unsigned int        irq_flags_to_set;
1121     enum irq_gc_flags   gc_flags;
1122     struct irq_chip_generic *gc[];
1123 };
1124 
1125 /* Generic chip callback functions */
1126 void irq_gc_noop(struct irq_data *d);
1127 void irq_gc_mask_disable_reg(struct irq_data *d);
1128 void irq_gc_mask_set_bit(struct irq_data *d);
1129 void irq_gc_mask_clr_bit(struct irq_data *d);
1130 void irq_gc_unmask_enable_reg(struct irq_data *d);
1131 void irq_gc_ack_set_bit(struct irq_data *d);
1132 void irq_gc_ack_clr_bit(struct irq_data *d);
1133 void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
1134 void irq_gc_eoi(struct irq_data *d);
1135 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
1136 
1137 /* Setup functions for irq_chip_generic */
1138 int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
1139              irq_hw_number_t hw_irq);
1140 void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq);
1141 struct irq_chip_generic *
1142 irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
1143                void __iomem *reg_base, irq_flow_handler_t handler);
1144 void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
1145                 enum irq_gc_flags flags, unsigned int clr,
1146                 unsigned int set);
1147 int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
1148 void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
1149                  unsigned int clr, unsigned int set);
1150 
1151 struct irq_chip_generic *
1152 devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
1153                 unsigned int irq_base, void __iomem *reg_base,
1154                 irq_flow_handler_t handler);
1155 int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
1156                 u32 msk, enum irq_gc_flags flags,
1157                 unsigned int clr, unsigned int set);
1158 
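/*
 * Example (editor's sketch): setting up a single irq_chip_generic instance
 * for a hypothetical 32-input controller, reusing the stock irq_gc_*
 * callbacks declared above.  irq_base, reg_base and the register offsets
 * are invented; the helper signatures match the declarations in this file.
 *
 *	struct irq_chip_generic *gc;
 *	struct irq_chip_type *ct;
 *
 *	gc = irq_alloc_generic_chip("example-intc", 1, irq_base, reg_base,
 *				    handle_level_irq);
 *	if (!gc)
 *		return -ENOMEM;
 *
 *	ct = gc->chip_types;
 *	ct->chip.irq_mask	= irq_gc_mask_set_bit;
 *	ct->chip.irq_unmask	= irq_gc_mask_clr_bit;
 *	ct->chip.irq_ack	= irq_gc_ack_set_bit;
 *	ct->regs.mask		= 0x04;
 *	ct->regs.ack		= 0x08;
 *
 *	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST, 0);
 */
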
1159 struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
1160 
1161 int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
1162                      int num_ct, const char *name,
1163                      irq_flow_handler_t handler,
1164                      unsigned int clr, unsigned int set,
1165                      enum irq_gc_flags flags);
1166 
1167 #define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,  \
1168                        handler, clr, set, flags)    \
1169 ({                                  \
1170     MAYBE_BUILD_BUG_ON(irqs_per_chip > 32);             \
1171     __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\
1172                      handler, clr, set, flags); \
1173 })
1174 
1175 static inline void irq_free_generic_chip(struct irq_chip_generic *gc)
1176 {
1177     kfree(gc);
1178 }
1179 
1180 static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc,
1181                         u32 msk, unsigned int clr,
1182                         unsigned int set)
1183 {
1184     irq_remove_generic_chip(gc, msk, clr, set);
1185     irq_free_generic_chip(gc);
1186 }
1187 
1188 static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
1189 {
1190     return container_of(d->chip, struct irq_chip_type, chip);
1191 }
1192 
1193 #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
1194 
1195 #ifdef CONFIG_SMP
1196 static inline void irq_gc_lock(struct irq_chip_generic *gc)
1197 {
1198     raw_spin_lock(&gc->lock);
1199 }
1200 
1201 static inline void irq_gc_unlock(struct irq_chip_generic *gc)
1202 {
1203     raw_spin_unlock(&gc->lock);
1204 }
1205 #else
1206 static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
1207 static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
1208 #endif
1209 
1210 /*
1211  * The irqsave variants are for use in non-interrupt code. Do not use
1212  * them in irq_chip callbacks. Use irq_gc_lock() instead.
1213  */
1214 #define irq_gc_lock_irqsave(gc, flags)  \
1215     raw_spin_lock_irqsave(&(gc)->lock, flags)
1216 
1217 #define irq_gc_unlock_irqrestore(gc, flags) \
1218     raw_spin_unlock_irqrestore(&(gc)->lock, flags)
1219 
1220 static inline void irq_reg_writel(struct irq_chip_generic *gc,
1221                   u32 val, int reg_offset)
1222 {
1223     if (gc->reg_writel)
1224         gc->reg_writel(val, gc->reg_base + reg_offset);
1225     else
1226         writel(val, gc->reg_base + reg_offset);
1227 }
1228 
1229 static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
1230                 int reg_offset)
1231 {
1232     if (gc->reg_readl)
1233         return gc->reg_readl(gc->reg_base + reg_offset);
1234     else
1235         return readl(gc->reg_base + reg_offset);
1236 }
1237 
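/*
 * Example (editor's sketch): a custom callback for an irq_chip_generic that
 * updates the cached mask under the chip lock and writes it back through
 * irq_reg_writel().  For generic chips, chip_data points to the
 * irq_chip_generic instance and irq_data_get_chip_type() (defined above)
 * returns the active irq_chip_type; this mirrors what the stock
 * irq_gc_mask_set_bit() helper does.
 *
 *	static void example_gc_mask(struct irq_data *d)
 *	{
 *		struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 *		struct irq_chip_type *ct = irq_data_get_chip_type(d);
 *
 *		irq_gc_lock(gc);
 *		*ct->mask_cache |= d->mask;
 *		irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
 *		irq_gc_unlock(gc);
 *	}
 */
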
1238 struct irq_matrix;
1239 struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
1240                     unsigned int alloc_start,
1241                     unsigned int alloc_end);
1242 void irq_matrix_online(struct irq_matrix *m);
1243 void irq_matrix_offline(struct irq_matrix *m);
1244 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
1245 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
1246 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
1247 int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
1248                 unsigned int *mapped_cpu);
1249 void irq_matrix_reserve(struct irq_matrix *m);
1250 void irq_matrix_remove_reserved(struct irq_matrix *m);
1251 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
1252              bool reserved, unsigned int *mapped_cpu);
1253 void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
1254              unsigned int bit, bool managed);
1255 void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
1256 unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
1257 unsigned int irq_matrix_allocated(struct irq_matrix *m);
1258 unsigned int irq_matrix_reserved(struct irq_matrix *m);
1259 void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
1260 
1261 /* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
1262 #define INVALID_HWIRQ   (~0UL)
1263 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
1264 int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
1265 int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
1266 int ipi_send_single(unsigned int virq, unsigned int cpu);
1267 int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
1268 
1269 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
1270 /*
1271  * Registers a generic IRQ handling function as the top-level IRQ handler in
1272  * the system, which is generally the first C code called from an assembly
1273  * architecture-specific interrupt handler.
1274  *
1275  * Returns 0 on success, or -EBUSY if an IRQ handler has already been
1276  * registered.
1277  */
1278 int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
1279 
1280 /*
1281  * Allows interrupt handlers to find the irqchip that's been registered as the
1282  * top-level IRQ handler.
1283  */
1284 extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
1285 asmlinkage void generic_handle_arch_irq(struct pt_regs *regs);
1286 #else
1287 #ifndef set_handle_irq
1288 #define set_handle_irq(handle_irq)      \
1289     do {                    \
1290         (void)handle_irq;       \
1291         WARN_ON(1);         \
1292     } while (0)
1293 #endif
1294 #endif
1295 
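/*
 * Example (editor's sketch): on architectures that select
 * CONFIG_GENERIC_IRQ_MULTI_HANDLER, a root irqchip driver registers its
 * top-level handler once at init time.  example_base, EXAMPLE_PENDING_REG
 * and example_domain are invented; set_handle_irq() and
 * generic_handle_domain_irq() are the real entry points.
 *
 *	static void example_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl_relaxed(example_base + EXAMPLE_PENDING_REG);
 *
 *		generic_handle_domain_irq(example_domain, hwirq);
 *	}
 *
 *	In the driver's init function (e.g. one registered with
 *	IRQCHIP_DECLARE()):
 *
 *	set_handle_irq(example_handle_irq);
 */
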
1296 #endif /* _LINUX_IRQ_H */