0001
0002 #ifndef _LINUX_CONTEXT_TRACKING_STATE_H
0003 #define _LINUX_CONTEXT_TRACKING_STATE_H
0004
0005 #include <linux/percpu.h>
0006 #include <linux/static_key.h>
0007 #include <linux/context_tracking_irq.h>
0008
0009
/*
 * Large positive offset for ->dynticks_nmi_nesting, presumably added while
 * not in an NMI-from-idle so that nesting arithmetic cannot be confused
 * with the near-zero idle values — NOTE(review): confirm against the
 * ct_nmi_enter()/ct_nmi_exit() users of this constant.
 */
#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
0011
/*
 * Context tracking states stored in the low bits of context_tracking.state
 * (see CT_STATE_MASK). CONTEXT_DISABLED is never stored: it is only the
 * ct_state() return value when context tracking is compiled out/disabled.
 * CONTEXT_MAX is the number of real states and doubles as the dynticks
 * counter increment (RCU_DYNTICKS_IDX); the bit packing relies on it
 * being a power of two.
 */
enum ctx_state {
	CONTEXT_DISABLED = -1,	/* returned by ct_state() if unknown */
	CONTEXT_KERNEL = 0,
	CONTEXT_IDLE = 1,
	CONTEXT_USER = 2,
	CONTEXT_GUEST = 3,
	CONTEXT_MAX = 4,
};
0020
0021
/*
 * context_tracking.state packs two values: the low bits (CT_STATE_MASK,
 * i.e. CONTEXT_MAX - 1 == 0x3) hold the enum ctx_state, while the
 * remaining high bits (CT_DYNTICKS_MASK) hold the RCU dynticks counter,
 * advanced in steps of RCU_DYNTICKS_IDX so the state bits are untouched.
 */
#define RCU_DYNTICKS_IDX CONTEXT_MAX

#define CT_STATE_MASK (CONTEXT_MAX - 1)
#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
0026
/* Per-CPU context tracking state (see the per-cpu declaration below). */
struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	int recursion;	/* recursion guard for the user enter/exit path — TODO confirm against callers */
#endif
#ifdef CONFIG_CONTEXT_TRACKING
	/* Packed ctx_state + RCU dynticks counter, see CT_STATE_MASK/CT_DYNTICKS_MASK. */
	atomic_t state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
	long dynticks_nesting;		/* Track process nesting level. */
	long dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
#endif
};
0046
0047 #ifdef CONFIG_CONTEXT_TRACKING
0048 DECLARE_PER_CPU(struct context_tracking, context_tracking);
0049
0050 static __always_inline int __ct_state(void)
0051 {
0052 return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
0053 }
0054 #endif
0055
0056 #ifdef CONFIG_CONTEXT_TRACKING_IDLE
0057 static __always_inline int ct_dynticks(void)
0058 {
0059 return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
0060 }
0061
0062 static __always_inline int ct_dynticks_cpu(int cpu)
0063 {
0064 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
0065
0066 return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
0067 }
0068
0069 static __always_inline int ct_dynticks_cpu_acquire(int cpu)
0070 {
0071 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
0072
0073 return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
0074 }
0075
0076 static __always_inline long ct_dynticks_nesting(void)
0077 {
0078 return __this_cpu_read(context_tracking.dynticks_nesting);
0079 }
0080
0081 static __always_inline long ct_dynticks_nesting_cpu(int cpu)
0082 {
0083 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
0084
0085 return ct->dynticks_nesting;
0086 }
0087
0088 static __always_inline long ct_dynticks_nmi_nesting(void)
0089 {
0090 return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
0091 }
0092
0093 static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
0094 {
0095 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
0096
0097 return ct->dynticks_nmi_nesting;
0098 }
0099 #endif
0100
0101 #ifdef CONFIG_CONTEXT_TRACKING_USER
0102 extern struct static_key_false context_tracking_key;
0103
/*
 * Fast global check, patched at runtime through the context_tracking_key
 * static branch, for whether user context tracking is enabled on any CPU.
 */
static __always_inline bool context_tracking_enabled(void)
{
	return static_branch_unlikely(&context_tracking_key);
}
0108
0109 static __always_inline bool context_tracking_enabled_cpu(int cpu)
0110 {
0111 return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
0112 }
0113
0114 static inline bool context_tracking_enabled_this_cpu(void)
0115 {
0116 return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
0117 }
0118
0119
0120
0121
0122
0123
0124
0125
0126 static __always_inline int ct_state(void)
0127 {
0128 int ret;
0129
0130 if (!context_tracking_enabled())
0131 return CONTEXT_DISABLED;
0132
0133 preempt_disable();
0134 ret = __ct_state();
0135 preempt_enable();
0136
0137 return ret;
0138 }
0139
0140 #else
/* !CONFIG_CONTEXT_TRACKING_USER: user context tracking is compiled out. */
static __always_inline bool context_tracking_enabled(void) { return false; }
static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
0144 #endif
0145
0146 #endif