Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef LINUX_HARDIRQ_H
0003 #define LINUX_HARDIRQ_H
0004 
0005 #include <linux/context_tracking_state.h>
0006 #include <linux/preempt.h>
0007 #include <linux/lockdep.h>
0008 #include <linux/ftrace_irq.h>
0009 #include <linux/sched.h>
0010 #include <linux/vtime.h>
0011 #include <asm/hardirq.h>
0012 
0013 extern void synchronize_irq(unsigned int irq);
0014 extern bool synchronize_hardirq(unsigned int irq);
0015 
#ifdef CONFIG_NO_HZ_FULL
/* Out-of-line implementation when full dynticks (NO_HZ_FULL) is configured. */
void __rcu_irq_enter_check_tick(void);
#else
/* No-op stub when CONFIG_NO_HZ_FULL is not set. */
static inline void __rcu_irq_enter_check_tick(void) { }
#endif
0021 
0022 static __always_inline void rcu_irq_enter_check_tick(void)
0023 {
0024     if (context_tracking_enabled())
0025         __rcu_irq_enter_check_tick();
0026 }
0027 
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
/*
 * Low-level hardirq entry: raise the HARDIRQ level in preempt_count,
 * notify lockdep, then start hardirq time accounting for current.
 * Pairs with __irq_exit(), which undoes these steps in reverse order.
 */
#define __irq_enter()					\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
		account_hardirq_enter(current);		\
	} while (0)
0040 
/*
 * Like __irq_enter() without time accounting for fast
 * interrupts, e.g. reschedule IPI where time accounting
 * is more expensive than the actual interrupt.
 * Pairs with __irq_exit_raw().
 */
#define __irq_enter_raw()				\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)
0051 
0052 /*
0053  * Enter irq context (on NO_HZ, update jiffies):
0054  */
0055 void irq_enter(void);
0056 /*
0057  * Like irq_enter(), but RCU is already watching.
0058  */
0059 void irq_enter_rcu(void);
0060 
/*
 * Exit irq context without processing softirqs:
 * stop hardirq time accounting, notify lockdep, then drop the HARDIRQ
 * level from preempt_count — the exact reverse of __irq_enter().
 */
#define __irq_exit()					\
	do {						\
		account_hardirq_exit(current);		\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
0070 
/*
 * Like __irq_exit() without time accounting.
 * Pairs with __irq_enter_raw().
 */
#define __irq_exit_raw()				\
	do {						\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
0079 
0080 /*
0081  * Exit irq context and process softirqs if needed:
0082  */
0083 void irq_exit(void);
0084 
0085 /*
0086  * Like irq_exit(), but return with RCU watching.
0087  */
0088 void irq_exit_rcu(void);
0089 
/*
 * Architecture hooks run at the very start/end of NMI handling
 * (see __nmi_enter()/__nmi_exit()); default to no-ops when the
 * architecture does not provide them.
 */
#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif
0094 
0095 /*
0096  * NMI vs Tracing
0097  * --------------
0098  *
0099  * We must not land in a tracer until (or after) we've changed preempt_count
0100  * such that in_nmi() becomes true. To that effect all NMI C entry points must
0101  * be marked 'notrace' and call nmi_enter() as soon as possible.
0102  */
0103 
/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 *
 * Low-level NMI entry: disable lockdep, run the arch hook, then bump
 * both the NMI and HARDIRQ counts in one __preempt_count_add() so that
 * in_nmi() (and in_hardirq()) become true atomically from the tracer's
 * point of view. BUG_ON() catches nesting beyond the NMI_MASK capacity.
 */
#define __nmi_enter()						\
	do {							\
		lockdep_off();					\
		arch_nmi_enter();				\
		BUG_ON(in_nmi() == NMI_MASK);			\
		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
	} while (0)
0114 
/*
 * Full NMI entry: once __nmi_enter() has made in_nmi() true, notify
 * lockdep, context tracking (ct_nmi_enter()) and finally ftrace. The
 * ftrace call is wrapped in instrumentation_begin()/end(), per the
 * "NMI vs Tracing" rules above — no tracer may run before in_nmi()
 * holds. Undone, in exact reverse order, by nmi_exit().
 */
#define nmi_enter()						\
	do {							\
		__nmi_enter();					\
		lockdep_hardirq_enter();			\
		ct_nmi_enter();				\
		instrumentation_begin();			\
		ftrace_nmi_enter();				\
		instrumentation_end();				\
	} while (0)
0124 
/*
 * Low-level NMI exit, exact reverse of __nmi_enter(): drop the NMI and
 * HARDIRQ counts in one __preempt_count_sub(), run the arch hook, then
 * re-enable lockdep. BUG_ON() catches unbalanced exit (not in NMI).
 */
#define __nmi_exit()						\
	do {							\
		BUG_ON(!in_nmi());				\
		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		arch_nmi_exit();				\
		lockdep_on();					\
	} while (0)
0132 
/*
 * Full NMI exit, mirroring nmi_enter() in reverse: notify ftrace
 * (still inside an instrumentation_begin()/end() pair, while in_nmi()
 * is still true), then context tracking, lockdep, and finally
 * __nmi_exit() which makes in_nmi() false again.
 */
#define nmi_exit()						\
	do {							\
		instrumentation_begin();			\
		ftrace_nmi_exit();				\
		instrumentation_end();				\
		ct_nmi_exit();					\
		lockdep_hardirq_exit();				\
		__nmi_exit();					\
	} while (0)
0142 
0143 #endif /* LINUX_HARDIRQ_H */