0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /* interrupt.h */
0003 #ifndef _LINUX_INTERRUPT_H
0004 #define _LINUX_INTERRUPT_H
0005 
0006 #include <linux/kernel.h>
0007 #include <linux/bitops.h>
0008 #include <linux/cpumask.h>
0009 #include <linux/irqreturn.h>
0010 #include <linux/irqnr.h>
0011 #include <linux/hardirq.h>
0012 #include <linux/irqflags.h>
0013 #include <linux/hrtimer.h>
0014 #include <linux/kref.h>
0015 #include <linux/workqueue.h>
0016 #include <linux/jump_label.h>
0017 
0018 #include <linux/atomic.h>
0019 #include <asm/ptrace.h>
0020 #include <asm/irq.h>
0021 #include <asm/sections.h>
0022 
0023 /*
0024  * These correspond to the IORESOURCE_IRQ_* defines in
0025  * linux/ioport.h to select the interrupt line behaviour.  When
0026  * requesting an interrupt without specifying an IRQF_TRIGGER, the
0027  * setting should be assumed to be "as already configured", which
0028  * may be as per machine or firmware initialisation.
0029  */
0030 #define IRQF_TRIGGER_NONE   0x00000000
0031 #define IRQF_TRIGGER_RISING 0x00000001
0032 #define IRQF_TRIGGER_FALLING    0x00000002
0033 #define IRQF_TRIGGER_HIGH   0x00000004
0034 #define IRQF_TRIGGER_LOW    0x00000008
0035 #define IRQF_TRIGGER_MASK   (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
0036                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
0037 #define IRQF_TRIGGER_PROBE  0x00000010
0038 
0039 /*
0040  * These flags are used only by the kernel as part of the
0041  * irq handling routines.
0042  *
0043  * IRQF_SHARED - allow sharing the irq among several devices
0044  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
0045  * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
0046  * IRQF_PERCPU - Interrupt is per cpu
0047  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
0048  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
0049  *                registered first in a shared interrupt is considered for
0050  *                performance reasons)
0051  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
0052  *                Used by threaded interrupts which need to keep the
0053  *                irq line disabled until the threaded handler has been run.
0054  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
0055  *                   that this interrupt will wake the system from a suspended
0056  *                   state.  See Documentation/power/suspend-and-interrupts.rst
0057  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
0058  * IRQF_NO_THREAD - Interrupt cannot be threaded
0059  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
0060  *                resume time.
0061  * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
0062  *                interrupt handler after suspending interrupts. For system
0063  *                wakeup devices users need to implement wakeup detection in
0064  *                their interrupt handlers.
0065  * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
0066  *                Users will enable it explicitly by enable_irq() or enable_nmi()
0067  *                later.
0068  * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
0069  *         depends on IRQF_PERCPU.
0070  */
0071 #define IRQF_SHARED     0x00000080
0072 #define IRQF_PROBE_SHARED   0x00000100
0073 #define __IRQF_TIMER        0x00000200
0074 #define IRQF_PERCPU     0x00000400
0075 #define IRQF_NOBALANCING    0x00000800
0076 #define IRQF_IRQPOLL        0x00001000
0077 #define IRQF_ONESHOT        0x00002000
0078 #define IRQF_NO_SUSPEND     0x00004000
0079 #define IRQF_FORCE_RESUME   0x00008000
0080 #define IRQF_NO_THREAD      0x00010000
0081 #define IRQF_EARLY_RESUME   0x00020000
0082 #define IRQF_COND_SUSPEND   0x00040000
0083 #define IRQF_NO_AUTOEN      0x00080000
0084 #define IRQF_NO_DEBUG       0x00100000
0085 
0086 #define IRQF_TIMER      (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
0087 
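
As an illustration of how these trigger and behaviour flags are typically combined when passed to the request_*irq() helpers declared further down, a minimal sketch; the device situations in the comments are hypothetical examples, not requirements.

/* illustrative flag combinations only -- not definitions from this header */
/* a level-triggered line shared by several devices (e.g. a legacy PCI INTx) */
unsigned long shared_flags  = IRQF_SHARED | IRQF_TRIGGER_LOW;
/* a threaded handler that must keep the line masked until the thread has run */
unsigned long oneshot_flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING;
/* a timekeeping interrupt: never threaded, never disabled across suspend */
unsigned long timer_flags   = IRQF_TIMER;
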
0088 /*
0089  * These values can be returned by request_any_context_irq() and
0090  * describe the context the interrupt will be run in.
0091  *
0092  * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
0093  * IRQC_IS_NESTED - interrupt runs in a nested threaded context
0094  */
0095 enum {
0096     IRQC_IS_HARDIRQ = 0,
0097     IRQC_IS_NESTED,
0098 };
0099 
0100 typedef irqreturn_t (*irq_handler_t)(int, void *);
0101 
0102 /**
0103  * struct irqaction - per interrupt action descriptor
0104  * @handler:    interrupt handler function
0105  * @name:   name of the device
0106  * @dev_id: cookie to identify the device
0107  * @percpu_dev_id:  cookie to identify the device
0108  * @next:   pointer to the next irqaction for shared interrupts
0109  * @irq:    interrupt number
0110  * @flags:  flags (see IRQF_* above)
0111  * @thread_fn:  interrupt handler function for threaded interrupts
0112  * @thread: thread pointer for threaded interrupts
0113  * @secondary:  pointer to secondary irqaction (force threading)
0114  * @thread_flags:   flags related to @thread
0115  * @thread_mask:    bitmask for keeping track of @thread activity
0116  * @dir:    pointer to the proc/irq/NN/name entry
0117  */
0118 struct irqaction {
0119     irq_handler_t       handler;
0120     void            *dev_id;
0121     void __percpu       *percpu_dev_id;
0122     struct irqaction    *next;
0123     irq_handler_t       thread_fn;
0124     struct task_struct  *thread;
0125     struct irqaction    *secondary;
0126     unsigned int        irq;
0127     unsigned int        flags;
0128     unsigned long       thread_flags;
0129     unsigned long       thread_mask;
0130     const char      *name;
0131     struct proc_dir_entry   *dir;
0132 } ____cacheline_internodealigned_in_smp;
0133 
0134 extern irqreturn_t no_action(int cpl, void *dev_id);
0135 
0136 /*
0137  * If a (PCI) device interrupt is not connected we set dev->irq to
0138  * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
0139  * can distinguish that case from other error returns.
0140  *
0141  * 0x80000000 is guaranteed to be outside the available range of interrupts
0142  * and easy to distinguish from other possible incorrect values.
0143  */
0144 #define IRQ_NOTCONNECTED    (1U << 31)
0145 
0146 extern int __must_check
0147 request_threaded_irq(unsigned int irq, irq_handler_t handler,
0148              irq_handler_t thread_fn,
0149              unsigned long flags, const char *name, void *dev);
0150 
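
A minimal sketch of a threaded request, assuming a hypothetical foo_dev driver: the primary handler only checks and acknowledges the hardware, the sleeping work happens in the thread function, and IRQF_ONESHOT keeps the line masked until the thread has finished.

/* struct foo_dev, foo_ack_irq() and foo_handle_event() are hypothetical
 * driver helpers used for illustration; they are not part of this header. */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        if (!foo_ack_irq(foo))          /* not ours (the line may be shared) */
                return IRQ_NONE;
        return IRQ_WAKE_THREAD;         /* defer the real work to the thread */
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        foo_handle_event(foo);          /* process context: may sleep here */
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
        return request_threaded_irq(foo->irq, foo_hardirq, foo_thread,
                                    IRQF_ONESHOT, "foo", foo);
}
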
0151 /**
0152  * request_irq - Add a handler for an interrupt line
0153  * @irq:    The interrupt line to allocate
0154  * @handler:    Function to be called when the IRQ occurs.
0155  *      Primary handler for threaded interrupts
0156  *      If NULL, the default primary handler is installed
0157  * @flags:  Handling flags
0158  * @name:   Name of the device generating this interrupt
0159  * @dev:    A cookie passed to the handler function
0160  *
0161  * This call allocates an interrupt and establishes a handler; see
0162  * the documentation for request_threaded_irq() for details.
0163  */
0164 static inline int __must_check
0165 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
0166         const char *name, void *dev)
0167 {
0168     return request_threaded_irq(irq, handler, NULL, flags, name, dev);
0169 }
0170 
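
For comparison, a sketch of the plain (non-threaded) form on a shared, level-triggered line; struct foo_dev and foo_pending() are again hypothetical.

/* hypothetical example driver code, for illustration only */
static irqreturn_t foo_isr(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        if (!foo_pending(foo))          /* another device raised the shared line */
                return IRQ_NONE;
        /* acknowledge and handle the event here, without sleeping */
        return IRQ_HANDLED;
}

static int foo_init_irq(struct foo_dev *foo)
{
        /* dev_id doubles as the cookie that must be passed back to free_irq() */
        return request_irq(foo->irq, foo_isr, IRQF_SHARED | IRQF_TRIGGER_LOW,
                           "foo", foo);
}

static void foo_exit_irq(struct foo_dev *foo)
{
        free_irq(foo->irq, foo);
}
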
0171 extern int __must_check
0172 request_any_context_irq(unsigned int irq, irq_handler_t handler,
0173             unsigned long flags, const char *name, void *dev_id);
0174 
0175 extern int __must_check
0176 __request_percpu_irq(unsigned int irq, irq_handler_t handler,
0177              unsigned long flags, const char *devname,
0178              void __percpu *percpu_dev_id);
0179 
0180 extern int __must_check
0181 request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
0182         const char *name, void *dev);
0183 
0184 static inline int __must_check
0185 request_percpu_irq(unsigned int irq, irq_handler_t handler,
0186            const char *devname, void __percpu *percpu_dev_id)
0187 {
0188     return __request_percpu_irq(irq, handler, 0,
0189                     devname, percpu_dev_id);
0190 }
0191 
0192 extern int __must_check
0193 request_percpu_nmi(unsigned int irq, irq_handler_t handler,
0194            const char *devname, void __percpu *dev);
0195 
0196 extern const void *free_irq(unsigned int, void *);
0197 extern void free_percpu_irq(unsigned int, void __percpu *);
0198 
0199 extern const void *free_nmi(unsigned int irq, void *dev_id);
0200 extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
0201 
0202 struct device;
0203 
0204 extern int __must_check
0205 devm_request_threaded_irq(struct device *dev, unsigned int irq,
0206               irq_handler_t handler, irq_handler_t thread_fn,
0207               unsigned long irqflags, const char *devname,
0208               void *dev_id);
0209 
0210 static inline int __must_check
0211 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
0212          unsigned long irqflags, const char *devname, void *dev_id)
0213 {
0214     return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
0215                      devname, dev_id);
0216 }
0217 
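
With the device-managed variant the interrupt is released automatically when the device is detached, so a sketch of a probe helper needs no explicit free_irq(); foo_isr() and struct foo_dev are the hypothetical names from the example above.

static int foo_probe_irq(struct device *dev, struct foo_dev *foo)
{
        /* released automatically on driver detach; devm_free_irq() is only
         * needed if the interrupt must go away earlier than that */
        return devm_request_irq(dev, foo->irq, foo_isr, IRQF_SHARED,
                                "foo", foo);
}
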
0218 extern int __must_check
0219 devm_request_any_context_irq(struct device *dev, unsigned int irq,
0220          irq_handler_t handler, unsigned long irqflags,
0221          const char *devname, void *dev_id);
0222 
0223 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
0224 
0225 bool irq_has_action(unsigned int irq);
0226 extern void disable_irq_nosync(unsigned int irq);
0227 extern bool disable_hardirq(unsigned int irq);
0228 extern void disable_irq(unsigned int irq);
0229 extern void disable_percpu_irq(unsigned int irq);
0230 extern void enable_irq(unsigned int irq);
0231 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
0232 extern bool irq_percpu_is_enabled(unsigned int irq);
0233 extern void irq_wake_thread(unsigned int irq, void *dev_id);
0234 
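
A small sketch of how the synchronous disable/enable pair typically brackets a reconfiguration that must not race with the handler; foo_reprogram() is a hypothetical helper.

static void foo_reconfigure(struct foo_dev *foo)
{
        disable_irq(foo->irq);          /* waits for a running handler to finish */
        foo_reprogram(foo);             /* hardware is quiet while we touch it */
        enable_irq(foo->irq);
}
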
0235 extern void disable_nmi_nosync(unsigned int irq);
0236 extern void disable_percpu_nmi(unsigned int irq);
0237 extern void enable_nmi(unsigned int irq);
0238 extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
0239 extern int prepare_percpu_nmi(unsigned int irq);
0240 extern void teardown_percpu_nmi(unsigned int irq);
0241 
0242 extern int irq_inject_interrupt(unsigned int irq);
0243 
0244 /* The following three functions are for core kernel use only. */
0245 extern void suspend_device_irqs(void);
0246 extern void resume_device_irqs(void);
0247 extern void rearm_wake_irq(unsigned int irq);
0248 
0249 /**
0250  * struct irq_affinity_notify - context for notification of IRQ affinity changes
0251  * @irq:        Interrupt to which notification applies
0252  * @kref:       Reference count, for internal use
0253  * @work:       Work item, for internal use
0254  * @notify:     Function to be called on change.  This will be
0255  *          called in process context.
0256  * @release:        Function to be called on release.  This will be
0257  *          called in process context.  Once registered, the
0258  *          structure must only be freed when this function is
0259  *          called or later.
0260  */
0261 struct irq_affinity_notify {
0262     unsigned int irq;
0263     struct kref kref;
0264     struct work_struct work;
0265     void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
0266     void (*release)(struct kref *ref);
0267 };
0268 
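
A sketch of filling in and registering such a notifier with irq_set_affinity_notifier(), which is declared further down for CONFIG_SMP; the foo_* names are hypothetical.

static void foo_affinity_notify(struct irq_affinity_notify *notify,
                                const cpumask_t *mask)
{
        /* process context: e.g. re-point per-queue data at the new CPUs */
}

static void foo_affinity_release(struct kref *ref)
{
        /* last reference dropped: the structure may be freed from here on */
}

static struct irq_affinity_notify foo_notify = {
        .notify  = foo_affinity_notify,
        .release = foo_affinity_release,
};

static int foo_watch_affinity(unsigned int irq)
{
        /* @irq, @kref and @work are filled in by the core */
        return irq_set_affinity_notifier(irq, &foo_notify);
}
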
0269 #define IRQ_AFFINITY_MAX_SETS  4
0270 
0271 /**
0272  * struct irq_affinity - Description for automatic irq affinity assignments
0273  * @pre_vectors:    Don't apply affinity to @pre_vectors at beginning of
0274  *          the MSI(-X) vector space
0275  * @post_vectors:   Don't apply affinity to @post_vectors at end of
0276  *          the MSI(-X) vector space
0277  * @nr_sets:        The number of interrupt sets for which affinity
0278  *          spreading is required
0279  * @set_size:       Array holding the size of each interrupt set
0280  * @calc_sets:      Callback for calculating the number and size
0281  *          of interrupt sets
0282  * @priv:       Private data for usage by @calc_sets, usually a
0283  *          pointer to driver/device specific data.
0284  */
0285 struct irq_affinity {
0286     unsigned int    pre_vectors;
0287     unsigned int    post_vectors;
0288     unsigned int    nr_sets;
0289     unsigned int    set_size[IRQ_AFFINITY_MAX_SETS];
0290     void        (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
0291     void        *priv;
0292 };
0293 
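
A sketch of describing a vector layout, assuming a single control vector at the start that should not be spread, and handing the description to irq_create_affinity_masks() (declared below for CONFIG_SMP):

static struct irq_affinity_desc *foo_spread_vectors(unsigned int nvec)
{
        struct irq_affinity affd = {
                .pre_vectors = 1,       /* vector 0: config/admin, not spread */
        };

        /* returns an allocated array of nvec descriptors (to be kfree()d
         * by the caller), or NULL */
        return irq_create_affinity_masks(nvec, &affd);
}
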
0294 /**
0295  * struct irq_affinity_desc - Interrupt affinity descriptor
0296  * @mask:   cpumask to hold the affinity assignment
0297  * @is_managed: 1 if the interrupt is managed internally
0298  */
0299 struct irq_affinity_desc {
0300     struct cpumask  mask;
0301     unsigned int    is_managed : 1;
0302 };
0303 
0304 #if defined(CONFIG_SMP)
0305 
0306 extern cpumask_var_t irq_default_affinity;
0307 
0308 extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
0309 extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
0310 
0311 extern int irq_can_set_affinity(unsigned int irq);
0312 extern int irq_select_affinity(unsigned int irq);
0313 
0314 extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
0315                      bool setaffinity);
0316 
0317 /**
0318  * irq_update_affinity_hint - Update the affinity hint
0319  * @irq:    Interrupt to update
0320  * @m:      cpumask pointer (NULL to clear the hint)
0321  *
0322  * Updates the affinity hint, but does not change the affinity of the interrupt.
0323  */
0324 static inline int
0325 irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
0326 {
0327     return __irq_apply_affinity_hint(irq, m, false);
0328 }
0329 
0330 /**
0331  * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
0332  *               cpumask to the interrupt
0333  * @irq:    Interrupt to update
0334  * @m:      cpumask pointer (NULL to clear the hint)
0335  *
0336  * Updates the affinity hint and if @m is not NULL it applies it as the
0337  * affinity of that interrupt.
0338  */
0339 static inline int
0340 irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
0341 {
0342     return __irq_apply_affinity_hint(irq, m, true);
0343 }
0344 
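
A sketch of how a multi-queue driver might pin each queue interrupt to a CPU and publish the same mask as the hint shown in /proc/irq/N/affinity_hint; struct foo_dev and its queue_irq[] array are hypothetical.

static void foo_pin_queue_irqs(struct foo_dev *foo, unsigned int nr_queues)
{
        unsigned int q;

        for (q = 0; q < nr_queues; q++)
                /* apply the mask as the affinity and record it as the hint */
                irq_set_affinity_and_hint(foo->queue_irq[q],
                                          cpumask_of(q % num_online_cpus()));
}
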
0345 /*
0346  * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
0347  * instead.
0348  */
0349 static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
0350 {
0351     return irq_set_affinity_and_hint(irq, m);
0352 }
0353 
0354 extern int irq_update_affinity_desc(unsigned int irq,
0355                     struct irq_affinity_desc *affinity);
0356 
0357 extern int
0358 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
0359 
0360 struct irq_affinity_desc *
0361 irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
0362 
0363 unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
0364                        const struct irq_affinity *affd);
0365 
0366 #else /* CONFIG_SMP */
0367 
0368 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
0369 {
0370     return -EINVAL;
0371 }
0372 
0373 static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
0374 {
0375     return 0;
0376 }
0377 
0378 static inline int irq_can_set_affinity(unsigned int irq)
0379 {
0380     return 0;
0381 }
0382 
0383 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
0384 
0385 static inline int irq_update_affinity_hint(unsigned int irq,
0386                        const struct cpumask *m)
0387 {
0388     return -EINVAL;
0389 }
0390 
0391 static inline int irq_set_affinity_and_hint(unsigned int irq,
0392                         const struct cpumask *m)
0393 {
0394     return -EINVAL;
0395 }
0396 
0397 static inline int irq_set_affinity_hint(unsigned int irq,
0398                     const struct cpumask *m)
0399 {
0400     return -EINVAL;
0401 }
0402 
0403 static inline int irq_update_affinity_desc(unsigned int irq,
0404                        struct irq_affinity_desc *affinity)
0405 {
0406     return -EINVAL;
0407 }
0408 
0409 static inline int
0410 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
0411 {
0412     return 0;
0413 }
0414 
0415 static inline struct irq_affinity_desc *
0416 irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
0417 {
0418     return NULL;
0419 }
0420 
0421 static inline unsigned int
0422 irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
0423               const struct irq_affinity *affd)
0424 {
0425     return maxvec;
0426 }
0427 
0428 #endif /* CONFIG_SMP */
0429 
0430 /*
0431  * Special lockdep variants of irq disabling/enabling.
0432  * These should be used for locking constructs which know
0433  * that a particular irq context is disabled and is the
0434  * only irq-context user of a lock, so that it is safe
0435  * to take the lock in the irq-disabled section without
0436  * disabling hardirqs.
0437  *
0438  * On !CONFIG_LOCKDEP they are equivalent to the normal
0439  * irq disable/enable methods.
0440  */
0441 static inline void disable_irq_nosync_lockdep(unsigned int irq)
0442 {
0443     disable_irq_nosync(irq);
0444 #ifdef CONFIG_LOCKDEP
0445     local_irq_disable();
0446 #endif
0447 }
0448 
0449 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
0450 {
0451     disable_irq_nosync(irq);
0452 #ifdef CONFIG_LOCKDEP
0453     local_irq_save(*flags);
0454 #endif
0455 }
0456 
0457 static inline void disable_irq_lockdep(unsigned int irq)
0458 {
0459     disable_irq(irq);
0460 #ifdef CONFIG_LOCKDEP
0461     local_irq_disable();
0462 #endif
0463 }
0464 
0465 static inline void enable_irq_lockdep(unsigned int irq)
0466 {
0467 #ifdef CONFIG_LOCKDEP
0468     local_irq_enable();
0469 #endif
0470     enable_irq(irq);
0471 }
0472 
0473 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
0474 {
0475 #ifdef CONFIG_LOCKDEP
0476     local_irq_restore(*flags);
0477 #endif
0478     enable_irq(irq);
0479 }
0480 
0481 /* IRQ wakeup (PM) control: */
0482 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
0483 
0484 static inline int enable_irq_wake(unsigned int irq)
0485 {
0486     return irq_set_irq_wake(irq, 1);
0487 }
0488 
0489 static inline int disable_irq_wake(unsigned int irq)
0490 {
0491     return irq_set_irq_wake(irq, 0);
0492 }
0493 
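
A sketch of marking a wakeup-capable line around system suspend, for instance from dev_pm_ops callbacks; device_may_wakeup() is the usual gate, and struct foo_dev is hypothetical.

static int foo_suspend(struct device *dev)
{
        struct foo_dev *foo = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
                enable_irq_wake(foo->irq);      /* let this line wake the system */
        return 0;
}

static int foo_resume(struct device *dev)
{
        struct foo_dev *foo = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
                disable_irq_wake(foo->irq);
        return 0;
}
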
0494 /*
0495  * irq_get_irqchip_state/irq_set_irqchip_state specific flags
0496  */
0497 enum irqchip_irq_state {
0498     IRQCHIP_STATE_PENDING,      /* Is interrupt pending? */
0499     IRQCHIP_STATE_ACTIVE,       /* Is interrupt in progress? */
0500     IRQCHIP_STATE_MASKED,       /* Is interrupt masked? */
0501     IRQCHIP_STATE_LINE_LEVEL,   /* Is IRQ line high? */
0502 };
0503 
0504 extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
0505                  bool *state);
0506 extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
0507                  bool state);
0508 
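
A small sketch of querying the chip-level state, for example to check whether a line is still pending before handing it to a polling loop or a guest:

static bool foo_irq_is_pending(unsigned int irq)
{
        bool pending = false;

        /* fails (non-zero return) if the underlying irqchip cannot report it */
        if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
                return false;
        return pending;
}
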
0509 #ifdef CONFIG_IRQ_FORCED_THREADING
0510 # ifdef CONFIG_PREEMPT_RT
0511 #  define force_irqthreads()    (true)
0512 # else
0513 DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
0514 #  define force_irqthreads()    (static_branch_unlikely(&force_irqthreads_key))
0515 # endif
0516 #else
0517 #define force_irqthreads()  (false)
0518 #endif
0519 
0520 #ifndef local_softirq_pending
0521 
0522 #ifndef local_softirq_pending_ref
0523 #define local_softirq_pending_ref irq_stat.__softirq_pending
0524 #endif
0525 
0526 #define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
0527 #define set_softirq_pending(x)  (__this_cpu_write(local_softirq_pending_ref, (x)))
0528 #define or_softirq_pending(x)   (__this_cpu_or(local_softirq_pending_ref, (x)))
0529 
0530 #endif /* local_softirq_pending */
0531 
0532 /* Some architectures might implement lazy enabling/disabling of
0533  * interrupts. In some cases, such as stop_machine, we might want
0534  * to ensure that after a local_irq_disable(), interrupts have
0535  * really been disabled in hardware. Such architectures need to
0536  * implement the following hook.
0537  */
0538 #ifndef hard_irq_disable
0539 #define hard_irq_disable()  do { } while(0)
0540 #endif
0541 
0542 /* Please avoid allocating new softirqs unless you really need very high
0543    frequency threaded job scheduling. For almost all purposes
0544    tasklets are more than enough; e.g. all serial device BHs et
0545    al. should be converted to tasklets, not to softirqs.
0546  */
0547 
0548 enum
0549 {
0550     HI_SOFTIRQ=0,
0551     TIMER_SOFTIRQ,
0552     NET_TX_SOFTIRQ,
0553     NET_RX_SOFTIRQ,
0554     BLOCK_SOFTIRQ,
0555     IRQ_POLL_SOFTIRQ,
0556     TASKLET_SOFTIRQ,
0557     SCHED_SOFTIRQ,
0558     HRTIMER_SOFTIRQ,
0559     RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */
0560 
0561     NR_SOFTIRQS
0562 };
0563 
0564 /*
0565  * The following vectors can be safely ignored after ksoftirqd is parked:
0566  *
0567  * _ RCU:
0568  *  1) rcutree_migrate_callbacks() migrates the queue.
0569  *  2) rcu_report_dead() reports the final quiescent states.
0570  *
0571  * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
0572  */
0573 #define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
0574 
0575 /* Map softirq index to softirq name. Update 'softirq_to_name' in
0576  * kernel/softirq.c when adding a new softirq.
0577  */
0578 extern const char * const softirq_to_name[NR_SOFTIRQS];
0579 
0580 /* softirq mask and active fields moved to irq_cpustat_t in
0581  * asm/hardirq.h to get better cache usage.  KAO
0582  */
0583 
0584 struct softirq_action
0585 {
0586     void    (*action)(struct softirq_action *);
0587 };
0588 
0589 asmlinkage void do_softirq(void);
0590 asmlinkage void __do_softirq(void);
0591 
0592 #ifdef CONFIG_PREEMPT_RT
0593 extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
0594 #else
0595 static inline void do_softirq_post_smp_call_flush(unsigned int unused)
0596 {
0597     do_softirq();
0598 }
0599 #endif
0600 
0601 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
0602 extern void softirq_init(void);
0603 extern void __raise_softirq_irqoff(unsigned int nr);
0604 
0605 extern void raise_softirq_irqoff(unsigned int nr);
0606 extern void raise_softirq(unsigned int nr);
0607 
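
Only core subsystems register softirqs (see the note above about preferring tasklets), but as a reference, roughly how an existing vector such as BLOCK_SOFTIRQ is wired up and raised; the foo_* functions are hypothetical stand-ins for the real block-layer code.

static void foo_softirq_action(struct softirq_action *h)
{
        foo_complete_requests();        /* softirq context: must not sleep */
}

static int __init foo_softirq_setup(void)
{
        open_softirq(BLOCK_SOFTIRQ, foo_softirq_action);
        return 0;
}

static void foo_kick(void)
{
        /* from interrupt context: mark the vector pending on this CPU */
        raise_softirq(BLOCK_SOFTIRQ);
}
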
0608 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
0609 
0610 static inline struct task_struct *this_cpu_ksoftirqd(void)
0611 {
0612     return this_cpu_read(ksoftirqd);
0613 }
0614 
0615 /* Tasklets --- multithreaded analogue of BHs.
0616 
0617    This API is deprecated. Please consider using threaded IRQs instead:
0618    https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
0619 
0620    Main feature distinguishing them from generic softirqs: a tasklet
0621    runs on only one CPU at a time.
0622 
0623    Main feature distinguishing them from BHs: different tasklets
0624    may run simultaneously on different CPUs.
0625 
0626    Properties:
0627    * If tasklet_schedule() is called, then the tasklet is guaranteed
0628      to be executed on some CPU at least once afterwards.
0629    * If the tasklet is already scheduled, but its execution has not yet
0630      started, it will be executed only once.
0631    * If the tasklet is already running on another CPU (or schedule is called
0632      from the tasklet itself), it is rescheduled for later.
0633    * A tasklet is strictly serialized with respect to itself, but not with
0634      respect to other tasklets. If the client needs inter-tasklet
0635      synchronization, it must use spinlocks.
0636  */
0637 
0638 struct tasklet_struct
0639 {
0640     struct tasklet_struct *next;
0641     unsigned long state;
0642     atomic_t count;
0643     bool use_callback;
0644     union {
0645         void (*func)(unsigned long data);
0646         void (*callback)(struct tasklet_struct *t);
0647     };
0648     unsigned long data;
0649 };
0650 
0651 #define DECLARE_TASKLET(name, _callback)        \
0652 struct tasklet_struct name = {              \
0653     .count = ATOMIC_INIT(0),            \
0654     .callback = _callback,              \
0655     .use_callback = true,               \
0656 }
0657 
0658 #define DECLARE_TASKLET_DISABLED(name, _callback)   \
0659 struct tasklet_struct name = {              \
0660     .count = ATOMIC_INIT(1),            \
0661     .callback = _callback,              \
0662     .use_callback = true,               \
0663 }
0664 
0665 #define from_tasklet(var, callback_tasklet, tasklet_fieldname)  \
0666     container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
0667 
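
A sketch of the callback-style static declaration, scheduled from a hard interrupt handler; the foo_* names are hypothetical.

static void foo_tasklet_fn(struct tasklet_struct *t)
{
        /* bottom-half work: softirq context, interrupts enabled, no sleeping */
}

static DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn);

static irqreturn_t foo_hardirq_handler(int irq, void *dev_id)
{
        /* acknowledge the hardware here, then defer the rest */
        tasklet_schedule(&foo_tasklet);
        return IRQ_HANDLED;
}
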
0668 #define DECLARE_TASKLET_OLD(name, _func)        \
0669 struct tasklet_struct name = {              \
0670     .count = ATOMIC_INIT(0),            \
0671     .func = _func,                  \
0672 }
0673 
0674 #define DECLARE_TASKLET_DISABLED_OLD(name, _func)   \
0675 struct tasklet_struct name = {              \
0676     .count = ATOMIC_INIT(1),            \
0677     .func = _func,                  \
0678 }
0679 
0680 enum
0681 {
0682     TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
0683     TASKLET_STATE_RUN   /* Tasklet is running (SMP only) */
0684 };
0685 
0686 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
0687 static inline int tasklet_trylock(struct tasklet_struct *t)
0688 {
0689     return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
0690 }
0691 
0692 void tasklet_unlock(struct tasklet_struct *t);
0693 void tasklet_unlock_wait(struct tasklet_struct *t);
0694 void tasklet_unlock_spin_wait(struct tasklet_struct *t);
0695 
0696 #else
0697 static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
0698 static inline void tasklet_unlock(struct tasklet_struct *t) { }
0699 static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
0700 static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
0701 #endif
0702 
0703 extern void __tasklet_schedule(struct tasklet_struct *t);
0704 
0705 static inline void tasklet_schedule(struct tasklet_struct *t)
0706 {
0707     if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
0708         __tasklet_schedule(t);
0709 }
0710 
0711 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
0712 
0713 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
0714 {
0715     if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
0716         __tasklet_hi_schedule(t);
0717 }
0718 
0719 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
0720 {
0721     atomic_inc(&t->count);
0722     smp_mb__after_atomic();
0723 }
0724 
0725 /*
0726  * Do not use in new code. Disabling tasklets from atomic contexts is
0727  * error prone and should be avoided.
0728  */
0729 static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
0730 {
0731     tasklet_disable_nosync(t);
0732     tasklet_unlock_spin_wait(t);
0733     smp_mb();
0734 }
0735 
0736 static inline void tasklet_disable(struct tasklet_struct *t)
0737 {
0738     tasklet_disable_nosync(t);
0739     tasklet_unlock_wait(t);
0740     smp_mb();
0741 }
0742 
0743 static inline void tasklet_enable(struct tasklet_struct *t)
0744 {
0745     smp_mb__before_atomic();
0746     atomic_dec(&t->count);
0747 }
0748 
0749 extern void tasklet_kill(struct tasklet_struct *t);
0750 extern void tasklet_init(struct tasklet_struct *t,
0751              void (*func)(unsigned long), unsigned long data);
0752 extern void tasklet_setup(struct tasklet_struct *t,
0753               void (*callback)(struct tasklet_struct *));
0754 
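
For a tasklet embedded in a per-device structure, a sketch of the dynamic form: tasklet_setup() in the init path, from_tasklet() in the callback to recover the containing structure, and tasklet_kill() before the structure is freed; struct foo_dev is hypothetical.

struct foo_dev {
        struct tasklet_struct rx_tasklet;
        /* ... hypothetical device state ... */
};

static void foo_rx_tasklet(struct tasklet_struct *t)
{
        struct foo_dev *foo = from_tasklet(foo, t, rx_tasklet);

        /* drain the receive ring for foo; softirq context, no sleeping */
}

static void foo_init(struct foo_dev *foo)
{
        tasklet_setup(&foo->rx_tasklet, foo_rx_tasklet);
}

static void foo_teardown(struct foo_dev *foo)
{
        tasklet_kill(&foo->rx_tasklet); /* waits out a scheduled/running instance */
}
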
0755 /*
0756  * Autoprobing for irqs:
0757  *
0758  * probe_irq_on() and probe_irq_off() provide robust primitives
0759  * for accurate IRQ probing during kernel initialization.  They are
0760  * reasonably simple to use, are not "fooled" by spurious interrupts,
0761  * and, unlike other attempts at IRQ probing, they do not get hung on
0762  * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
0763  *
0764  * For reasonably foolproof probing, use them as follows:
0765  *
0766  * 1. clear and/or mask the device's internal interrupt.
0767  * 2. sti();
0768  * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
0769  * 4. enable the device and cause it to trigger an interrupt.
0770  * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
0771  * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
0772  * 7. service the device to clear its pending interrupt.
0773  * 8. loop again if paranoia is required.
0774  *
0775  * probe_irq_on() returns a mask of allocated irq's.
0776  *
0777  * probe_irq_off() takes the mask as a parameter,
0778  * and returns the irq number which occurred,
0779  * or zero if none occurred, or a negative irq number
0780  * if more than one irq occurred.
0781  */
0782 
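
The numbered recipe above, expressed as a sketch (the obsolete sti() step is omitted); foo_mask_irq(), foo_trigger_irq() and foo_ack_irq() stand in for whatever device-specific pokes are actually needed.

static int foo_autoprobe(struct foo_dev *foo)
{
        unsigned long mask;
        int irq;

        foo_mask_irq(foo);              /* 1: quiesce the device */
        mask = probe_irq_on();          /* 3: take over the idle, unassigned IRQs */
        foo_trigger_irq(foo);           /* 4: make the device raise its line */
        msleep(20);                     /* 5: give it time to fire */
        irq = probe_irq_off(mask);      /* 6: 0 = none, negative = more than one */
        foo_ack_irq(foo);               /* 7: clear the pending interrupt */

        return irq > 0 ? irq : -ENODEV;
}
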
0783 #if !defined(CONFIG_GENERIC_IRQ_PROBE)
0784 static inline unsigned long probe_irq_on(void)
0785 {
0786     return 0;
0787 }
0788 static inline int probe_irq_off(unsigned long val)
0789 {
0790     return 0;
0791 }
0792 static inline unsigned int probe_irq_mask(unsigned long val)
0793 {
0794     return 0;
0795 }
0796 #else
0797 extern unsigned long probe_irq_on(void);    /* returns 0 on failure */
0798 extern int probe_irq_off(unsigned long);    /* returns 0 or negative on failure */
0799 extern unsigned int probe_irq_mask(unsigned long);  /* returns mask of ISA interrupts */
0800 #endif
0801 
0802 #ifdef CONFIG_PROC_FS
0803 /* Initialize /proc/irq/ */
0804 extern void init_irq_proc(void);
0805 #else
0806 static inline void init_irq_proc(void)
0807 {
0808 }
0809 #endif
0810 
0811 #ifdef CONFIG_IRQ_TIMINGS
0812 void irq_timings_enable(void);
0813 void irq_timings_disable(void);
0814 u64 irq_timings_next_event(u64 now);
0815 #endif
0816 
0817 struct seq_file;
0818 int show_interrupts(struct seq_file *p, void *v);
0819 int arch_show_interrupts(struct seq_file *p, int prec);
0820 
0821 extern int early_irq_init(void);
0822 extern int arch_probe_nr_irqs(void);
0823 extern int arch_early_irq_init(void);
0824 
0825 /*
0826  * We want to know which function is an entrypoint of a hardirq or a softirq.
0827  */
0828 #ifndef __irq_entry
0829 # define __irq_entry     __section(".irqentry.text")
0830 #endif
0831 
0832 #define __softirq_entry  __section(".softirqentry.text")
0833 
0834 #endif