/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT = 1,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve bits off of the pwq pointer.  This keeps pwqs
	 * sufficiently aligned and leaves room for the flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a
	 * worker_pool.  It only modifies how apply_workqueue_attrs()
	 * selects pools and thus doesn't participate in pool hash
	 * calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting the _key here is
 * required, otherwise it could get initialised to the copy of the
 * lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry = { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
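
/*
 * Example: static declaration and handler (illustrative sketch only;
 * "my_work" and "my_work_fn" are made-up names, not part of this API):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work executed\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 * The statically initialized item can then be submitted from almost any
 * context with schedule_work(&my_work).
 */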

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
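
/*
 * Example: run-time initialization of an embedded item (illustrative
 * sketch; "struct my_dev" and "my_dwork_fn" are hypothetical names):
 *
 *	struct my_dev {
 *		struct delayed_work dwork;
 *	};
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, dwork);
 *		...
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->dwork, my_dwork_fn);
 *	queue_delayed_work(system_wq, &dev->dwork, msecs_to_jiffies(100));
 */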

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which
	 * in turn may lead to more scheduling choices which are
	 * sub-optimal in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel
	 * parameter is specified.  Per-cpu workqueues which are identified
	 * to contribute significantly to power consumption opt in to this
	 * mechanism by setting this flag.
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
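
/*
 * Example: allocating and tearing down a dedicated workqueue
 * (illustrative sketch; "my_wq" is a made-up name):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(my_wq);
 */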

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
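
/*
 * Example: typical shutdown ordering (illustrative sketch, reusing the
 * hypothetical "dev" from the earlier examples).  Cancel pending items
 * synchronously before freeing the memory that embeds them; the _sync
 * variants also wait for a handler that is already running:
 *
 *	cancel_delayed_work_sync(&dev->dwork);
 *	cancel_work_sync(&dev->work);
 *	kfree(dev);
 */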

extern bool flush_rcu_work(struct rcu_work *rwork);
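
/*
 * Example: deferring work until after an RCU grace period
 * (illustrative sketch; "my_rwork_fn" and the "rwork" member of the
 * hypothetical "struct my_dev" are made-up names):
 *
 *	static void my_rwork_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_rcu_work(work),
 *						  struct my_dev, rwork);
 *		kfree(dev);
 *	}
 *
 *	INIT_RCU_WORK(&dev->rwork, my_rwork_fn);
 *	queue_rcu_work(system_wq, &dev->rwork);
 */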

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
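
/*
 * Example: debouncing with mod_delayed_work() (illustrative sketch,
 * reusing the hypothetical "dev->dwork").  Each call pushes the
 * execution time out, so the handler runs once, HZ/10 jiffies after
 * the last event:
 *
 *	mod_delayed_work(system_wq, &dev->dwork, HZ / 10);
 */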

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible and suggest using a dedicated WQ_MEM_RECLAIM workqueue
 * instead.  Please stop using this function; it will be removed in the
 * near future.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * It's very easy to get into trouble if you don't take great care.
 * Either of the following situations will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 *
 * Please stop calling this function! A conversion to stop flushing
 * system-wide workqueues is in progress.  This function will be removed
 * after all in-tree users have stopped calling it.
 */
#define flush_scheduled_work()						\
({									\
	if (0)								\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(system_wq);					\
})

/*
 * Although there is no longer any in-tree caller, for now just emit a
 * warning in order to give out-of-tree callers time to update.
 */
#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
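
/*
 * Example: a self-rearming poll loop (illustrative sketch; "my_poll_fn"
 * is a made-up name).  The handler requeues itself, so teardown must use
 * cancel_delayed_work_sync() to stop it reliably:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		...
 *		schedule_delayed_work(dwork, HZ);
 *	}
 */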

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif
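
/*
 * Example: running a function synchronously on a specific CPU
 * (illustrative sketch; "read_my_counter" is a hypothetical name):
 *
 *	static long read_my_counter(void *arg)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	long ret = work_on_cpu(2, read_my_counter, NULL);
 *
 * On !CONFIG_SMP builds this collapses to a direct call of the function
 * on the current CPU, as the inline stubs above show.
 */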

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else
static inline void wq_watchdog_touch(int cpu) { }
#endif

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);

#endif	/* _LINUX_WORKQUEUE_H */