/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00f00000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
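
/*
 * Illustrative only, not part of this header's API: a sketch of how a
 * preempt_count value decomposes with the masks and shifts above, e.g.
 * in debug/printing code:
 *
 *	unsigned long pc = preempt_count();
 *	unsigned int preempt = (pc & PREEMPT_MASK) >> PREEMPT_SHIFT;
 *	unsigned int softirq = (pc & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT;
 *	unsigned int hardirq = (pc & HARDIRQ_MASK) >> HARDIRQ_SHIFT;
 *	unsigned int nmi     = (pc & NMI_MASK)     >> NMI_SHIFT;
 *
 * A task that called preempt_disable() once and is then interrupted by a
 * hard IRQ would see preempt == 1 and hardirq == 1.
 */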

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

/**
 * interrupt_context_level - return interrupt context level
 *
 * Returns the current interrupt context level.
 *  0 - normal context
 *  1 - softirq context
 *  2 - hardirq context
 *  3 - NMI context
 */
static __always_inline unsigned char interrupt_context_level(void)
{
	unsigned long pc = preempt_count();
	unsigned char level = 0;

	level += !!(pc & (NMI_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	return level;
}
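
/*
 * Illustrative only: since nesting collapses to the outermost context,
 * the returned level can index small per-context arrays. A hypothetical
 * per-CPU recursion guard could be sketched as:
 *
 *	static DEFINE_PER_CPU(int, ctx_recursion[4]);
 *
 *	bool first = this_cpu_inc_return(ctx_recursion[interrupt_context_level()]) == 1;
 */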

#define nmi_count()	(preempt_count() & NMI_MASK)
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
#else
# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#endif
#define irq_count()	(nmi_count() | hardirq_count() | softirq_count())

/*
 * Macros to retrieve the current execution context:
 *
 * in_nmi()		- We're in NMI context
 * in_hardirq()		- We're in hard IRQ context
 * in_serving_softirq()	- We're in softirq context
 * in_task()		- We're in task context
 */
#define in_nmi()		(nmi_count())
#define in_hardirq()		(hardirq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#define in_task()		(!(in_nmi() | in_hardirq() | in_serving_softirq()))
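
/*
 * Illustrative only: callers commonly branch on the execution context,
 * e.g. only task context may use a sleeping allocation ("buf" and "size"
 * are hypothetical locals):
 *
 *	gfp_t gfp = in_task() ? GFP_KERNEL : GFP_ATOMIC;
 *	void *buf = kmalloc(size, gfp);
 */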

/*
 * The following macros are deprecated and should not be used in new code:
 * in_irq()       - Obsolete version of in_hardirq()
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#if !defined(CONFIG_PREEMPT_RT)
#define PREEMPT_LOCK_OFFSET		PREEMPT_DISABLE_OFFSET
#else
/* Locks on RT do not disable preemption */
#define PREEMPT_LOCK_OFFSET		0
#endif

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched()		do { } while (0)
#endif /* CONFIG_PREEMPTION */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */
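
/*
 * Illustrative only: the canonical use of the pair above is pinning a
 * task to its CPU while it touches per-CPU state ("my_counter" is a
 * hypothetical per-CPU variable):
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_counter);
 *	preempt_enable();
 *
 * On CONFIG_PREEMPTION kernels, preempt_enable() also calls into the
 * scheduler when the count drops to zero and a reschedule is pending.
 */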

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts:  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
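
/*
 * Illustrative only: a user (KVM is the in-tree example) embeds a
 * preempt_notifier in its own state and recovers it with container_of()
 * from the callbacks ("my_vcpu", "my_ops" and the helpers are
 * hypothetical):
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(pn, struct my_vcpu, pn);
 *		reload_guest_state(v, cpu);
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&vcpu->pn, &my_ops);
 *	preempt_notifier_inc();
 *	preempt_notifier_register(&vcpu->pn);
 */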

#endif /* CONFIG_PREEMPT_NOTIFIERS */

#ifdef CONFIG_SMP

/*
 * Migrate-Disable and why it is undesired.
 *
 * When a preempted task becomes eligible to run under the ideal model (IOW it
 * becomes one of the M highest priority tasks), it might still have to wait
 * for the preemptee's migrate_disable() section to complete. Thereby suffering
 * a reduction in bandwidth in the exact duration of the migrate_disable()
 * section.
 *
 * Per this argument, the change from preempt_disable() to migrate_disable()
 * gets us:
 *
 * - a higher priority task gains reduced wake-up latency; with
 *   preempt_disable() it would have had to wait for the lower priority task's
 *   preempt-disabled section to complete, while with migrate_disable() it can
 *   preempt right away.
 *
 * - a lower priority task, which under preempt_disable() could have instantly
 *   migrated away when another CPU became available, is now constrained by the
 *   ability to push away the higher priority task, which might itself be in a
 *   migrate_disable() section, reducing its available bandwidth.
 *
 * IOW it trades latency / moves the interference term, but the interference
 * stays in the system, and as long as it remains unbounded, the system is not
 * fully deterministic.
 *
 * The reason we have it anyway:
 *
 * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
 * number of primitives into becoming preemptible, they would also allow
 * migration. This turns out to break a bunch of per-cpu usage. To this end,
 * all these primitives employ migrate_disable() to restore this implicit
 * assumption.
 *
 * This is a 'temporary' work-around at best.
 */
extern void migrate_disable(void);
extern void migrate_enable(void);

#else

static inline void migrate_disable(void) { }
static inline void migrate_enable(void) { }

#endif /* CONFIG_SMP */
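
/*
 * Illustrative only: on PREEMPT_RT, code that must stay on one CPU but
 * may sleep uses migrate_disable() rather than preempt_disable()
 * ("my_state" with its embedded mutex is hypothetical):
 *
 *	migrate_disable();
 *	p = this_cpu_ptr(&my_state);
 *	mutex_lock(&p->lock);	(may sleep; the CPU binding is kept)
 *	...
 *	mutex_unlock(&p->lock);
 *	migrate_enable();
 */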

#endif /* __LINUX_PREEMPT_H */