0001
0002
0003 #ifndef _LINUX_INTERRUPT_H
0004 #define _LINUX_INTERRUPT_H
0005
0006 #include <linux/kernel.h>
0007 #include <linux/bitops.h>
0008 #include <linux/cpumask.h>
0009 #include <linux/irqreturn.h>
0010 #include <linux/irqnr.h>
0011 #include <linux/hardirq.h>
0012 #include <linux/irqflags.h>
0013 #include <linux/hrtimer.h>
0014 #include <linux/kref.h>
0015 #include <linux/workqueue.h>
0016 #include <linux/jump_label.h>
0017
0018 #include <linux/atomic.h>
0019 #include <asm/ptrace.h>
0020 #include <asm/irq.h>
0021 #include <asm/sections.h>
0022
0023
0024
0025
0026
0027
0028
0029
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying a IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
/*
 * Flags used by the request_*irq() family as part of the irq handling
 * routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * __IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state.
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices users need to implement wakeup detection in
 *                their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
 *                Users will enable it explicitly by enable_irq() or enable_nmi()
 *                later.
 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
 *		   depends on IRQF_PERCPU.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_AUTOEN		0x00080000
#define IRQF_NO_DEBUG		0x00100000

/* Convenience combination for timer interrupts: never suspended or threaded. */
#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
0087
0088
0089
0090
0091
0092
0093
0094
/*
 * Return values for request_any_context_irq(): tells the caller which
 * context the installed handler will run in.
 */
enum {
	IRQC_IS_HARDIRQ	= 0,	/* handler runs in hard interrupt context */
	IRQC_IS_NESTED,		/* handler runs nested/threaded */
};

/* Prototype of an interrupt handler: (irq number, device cookie). */
typedef irqreturn_t (*irq_handler_t)(int, void *);
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @name:	name of the device
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

/* Dummy handler: acknowledges nothing and returns IRQ_NONE. */
extern irqreturn_t no_action(int cpl, void *dev_id);
0135
0136
0137
0138
0139
0140
0141
0142
0143
/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
/**
 * request_irq - Add a handler for an interrupt line
 * @irq:	The interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 * @flags:	Handling flags (IRQF_*)
 * @name:	Name of the device generating this interrupt
 * @dev:	A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.  This is
 * simply request_threaded_irq() with no threaded handler.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

/* Like request_irq(), but may install a nested-thread handler if the
 * irq cannot be handled in hard interrupt context; returns IRQC_IS_*. */
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

/* Request an interrupt to be delivered and handled as an NMI. */
extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/* Per-CPU variant of request_irq(); the handler runs on each CPU with
 * its own @percpu_dev_id instance.  No IRQF flags can be passed. */
static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

/* free_irq() returns the device name that was passed at request time. */
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
0201
struct device;

/*
 * Device-managed (devres) variants: the interrupt is automatically
 * freed when the requesting device is unbound.
 */
extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

/* Device-managed request_irq(): no threaded handler. */
static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

/* Explicit early release of a device-managed interrupt. */
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
0224
/* Interrupt line control.  disable_irq() waits for a running handler to
 * complete; the _nosync variant does not. */
bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* NMI line control. */
extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

/* Inject a synthetic interrupt on @irq (testing/debug). */
extern int irq_inject_interrupt(unsigned int irq);

/* The following helpers are provided for power management (suspend/resume). */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

/* Maximum number of interrupt sets for automatic affinity spreading. */
#define	IRQ_AFFINITY_MAX_SETS  4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};
0303
#if defined(CONFIG_SMP)

/* Default affinity mask applied to newly set up interrupts. */
extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

/* Common backend: update the affinity hint and, if @setaffinity, also
 * apply @m as the actual affinity. */
extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
				     bool setaffinity);

/**
 * irq_update_affinity_hint - Update the affinity hint
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint, but does not change the affinity of the
 * interrupt.
 */
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, false);
}

/**
 * irq_set_affinity_and_hint - Update the affinity hint and apply the
 *			       provided cpumask to the interrupt
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint and if @m is not NULL it applies it as the
 * affinity of that interrupt.
 */
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, true);
}

/*
 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 * instead.
 */
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return irq_set_affinity_and_hint(irq, m);
}

extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

/* Spread @nvec interrupts over CPUs according to @affd; returns an
 * allocated array of affinity descriptors or NULL. */
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

0365
#else /* CONFIG_SMP */

/* UP stubs: there is only one CPU, so affinity management is a no-op.
 * Functions whose result callers may act on return -EINVAL or a
 * harmless default. */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_update_affinity_hint(unsigned int irq,
					   const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_and_hint(unsigned int irq,
					    const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

/* Without SMP every vector is usable; just report the maximum. */
static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context which is disabled,
 * and which is the only irq-context user of a lock,
 * that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
0480
0481
/* IRQ wakeup from sleep control: @on selects whether the interrupt may
 * wake the system from a sleep state. */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
0493
0494
0495
0496
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
0508
/*
 * force_irqthreads() - whether interrupt handlers are force-threaded.
 * Always true on PREEMPT_RT; otherwise controlled by a static key when
 * forced threading is configured, and false when it is not.
 */
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads()	(true)
# else
DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
#  define force_irqthreads()	(static_branch_unlikely(&force_irqthreads_key))
# endif
#else
#define force_irqthreads()	(false)
#endif
0519
#ifndef local_softirq_pending

/* Generic per-CPU softirq-pending accessors; architectures may provide
 * their own local_softirq_pending() and/or backing reference. */
#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif
0541
0542
0543
0544
0545
0546
0547
/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
   frequency threaded job scheduling. For almost all the purposes
   tasklets are more than enough. F.e. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */
enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,		/* Preferable RCU should always be the last softirq */

	NR_SOFTIRQS
};
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
/*
 * Softirq vectors that can be safely raised on a CPU whose ksoftirqd
 * has been parked during CPU hotplug (RCU and IRQ_POLL migrate their
 * queues themselves).
 */
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */
struct softirq_action
{
	void	(*action)(struct softirq_action *);
};
0588
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef CONFIG_PREEMPT_RT
extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
/* On !PREEMPT_RT, softirqs raised from SMP-call-function callbacks are
 * simply processed in place; @unused exists to match the RT prototype. */
static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
	do_softirq();
}
#endif

/* Register @action for softirq vector @nr (boot-time only). */
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

/* Mark softirq @nr pending; the _irqoff variant requires interrupts
 * already disabled by the caller. */
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

/* This CPU's softirq daemon thread. */
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
/* Tasklets --- multithreaded analogue of BHs.

   This API is deprecated. Please consider using threaded IRQs instead.

   Main feature differing them of generic softirqs: tasklet
   is running only on one CPU simultaneously.

   Properties:
   * If tasklet_schedule() is called, then tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from tasklet itself), it is rescheduled for later.
   * Tasklet is strictly serialized wrt itself, but not
     wrt another tasklets. If client needs some intertask synchronization,
     he makes it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;	/* next tasklet in the per-CPU list */
	unsigned long state;		/* TASKLET_STATE_* bits */
	atomic_t count;			/* 0 == enabled, >0 == disabled */
	bool use_callback;		/* selects @callback over legacy @func */
	union {
		void (*func)(unsigned long data);		/* legacy callback */
		void (*callback)(struct tasklet_struct *t);	/* modern callback */
	};
	unsigned long data;		/* argument for legacy @func */
};

#define DECLARE_TASKLET(name, _callback)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define DECLARE_TASKLET_DISABLED(name, _callback)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

/* Recover the containing object from the tasklet passed to a callback. */
#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

/* Legacy (unsigned long data) initializers; prefer DECLARE_TASKLET(). */
#define DECLARE_TASKLET_OLD(name, _func)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.func = _func,					\
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}
0679
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/* Try to take the RUN lock: non-zero on success.  Serializes a tasklet
 * against itself across CPUs. */
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);

#else
/* UP/non-RT: tasklets cannot run concurrently, locking is a no-op. */
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

/* Schedule the tasklet on the TASKLET softirq; a tasklet already
 * scheduled (SCHED bit set) is not queued again. */
static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

/* As tasklet_schedule(), but on the high-priority HI softirq. */
static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

/* Disable the tasklet without waiting for a running instance to finish. */
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_spin_wait(t);
	smp_mb();
}

/* Disable the tasklet and wait (sleeping) until a running instance has
 * finished. */
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

/* Re-enable a disabled tasklet (decrements the disable count). */
static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));
0754
0755
0756
0757
0758
0759
0760
0761
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
/*
 * Autoprobing for irqs.  Without CONFIG_GENERIC_IRQ_PROBE these are
 * stubs that report "no irq found".
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
0801
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ and its directory tree. */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq,
 * so that the sections land in the right part of the kernel image.
 */
#ifndef __irq_entry
# define __irq_entry	 __section(".irqentry.text")
#endif

#define __softirq_entry  __section(".softirqentry.text")

#endif
0834 #endif