/*
 * Kernel thread helper functions.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int result;
	int (*threadfn)(void *);
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	/* To store the full name if task comm is truncated. */
	char *full_name;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return k->worker_private;
}

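/*
 * Variant of to_kthread() that does not assume @p is a kthread.
 *
 * PF_KTHREAD on its own is not a reliable indicator of a persistent
 * struct kthread: a kernel thread may exec() (see umh.c), so only
 * return the pointer when worker_private is set and PF_KTHREAD is
 * still set.
 */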
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = p->worker_private;

	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	struct kthread *kthread = to_kthread(tsk);

	if (!kthread || !kthread->full_name) {
		__get_task_comm(buf, buf_size, tsk);
		return;
	}

	strscpy_pad(buf, kthread->full_name, buf_size);
}

bool set_kthread_struct(struct task_struct *p)
{
	struct kthread *kthread;

	if (WARN_ON_ONCE(to_kthread(p)))
		return false;

	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
	if (!kthread)
		return false;

	init_completion(&kthread->exited);
	init_completion(&kthread->parked);
	p->vfork_done = &kthread->exited;

	p->worker_private = kthread;
	return true;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if the kzalloc() in set_kthread_struct() failed.
	 */
	kthread = to_kthread(k);
	if (!kthread)
		return;

#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread->blkcg_css);
#endif
	k->worker_private = NULL;
	kfree(kthread->full_name);
	kfree(kthread);
}

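/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */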
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

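/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */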
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

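/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */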
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);

	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

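/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */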
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

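/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */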
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions
		 * on task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_for_completion() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

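/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
 * @result: The integer value to return to kthread_stop().
 *
 * Stores @result in the per-thread struct kthread and exits; the value is
 * later picked up by kthread_stop().  Does not return.
 */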
void __noreturn kthread_exit(long result)
{
	struct kthread *kthread = to_kthread(current);
	kthread->result = result;
	do_exit(0);
}

/**
 * kthread_complete_and_exit - Exit the current kthread.
 * @comp: Completion to complete
 * @code: The integer value to return to kthread_stop().
 *
 * If present, complete @comp and then exit with @code via kthread_exit().
 */
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	kthread_exit(code);
}
EXPORT_SYMBOL(kthread_complete_and_exit);

static int kthread(void *_create)
{
	static const struct sched_param param = { .sched_priority = 0 };
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = to_kthread(current);

	/* Release the structure when the caller was killed by a fatal signal. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		kthread_exit(-EINTR);
	}

	self->threadfn = threadfn;
	self->data = data;

	/*
	 * The new thread inherited kthreadd's priority and CPU mask. Reset
	 * back to default in case they have been changed.
	 */
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));

	/* OK, tell the creator we're spawned, wait for stop or wakeup. */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	kthread_exit(ret);
}

/* Called from kernel_clone() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* Release the structure when the caller was killed by a fatal signal. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was killed by a fatal signal before kthreadd (or the
		 * new kernel thread) calls complete(), leave the cleanup of
		 * this structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or the new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		char name[TASK_COMM_LEN];
		va_list aq;
		int len;

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		va_copy(aq, args);
		len = vsnprintf(name, sizeof(name), namefmt, aq);
		va_end(aq);
		if (len >= TASK_COMM_LEN) {
			struct kthread *kthread = to_kthread(task);

			/* Keep the full name; the comm below stays truncated. */
			kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
		}
		set_task_comm(task, name);
	}
	kfree(create);
	return task;
}

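/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy
 * and is allowed to run on the housekeeping CPUs (HK_TYPE_KTHREAD).
 *
 * If the thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its argument.
 * @threadfn() can either return directly if it is a standalone thread for
 * which no one will call kthread_stop(), or return when
 * kthread_should_stop() is true (which means kthread_stop() has been
 * called).  The return value should be zero or a negative error number;
 * it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */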
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

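/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to @cpu.
 */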
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);

	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);

	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

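/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Clears the KTHREAD_SHOULD_PARK bit for @k and wakes it from the
 * TASK_PARKED state.  If the thread is marked percpu then it is bound
 * to its CPU again before being woken.
 */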
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * A newly created kthread was parked when the CPU was offline.
	 * The per-CPU binding was lost and needs to be set up again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

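/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread is exiting,
 * or -EBUSY if a park was already requested.  If called by the kthread
 * itself just the park bit is set.
 */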
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

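/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call kthread_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */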
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = kthread->result;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

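/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the
 * queue is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or
 * interrupts when they finish. There is a defined safe point for freezing
 * when one work finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same
 * time, see also kthread_queue_work().
 */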
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only uses the address of the work.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

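/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */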
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name,
 * for example kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

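/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution.  Returns %true if @work
 * was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */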
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_FUNCTION_MISMATCH(timer->function,
				  kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness: the earliest the timer could
	 * expire is on the closest next tick anyway.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

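/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */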
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);

struct kthread_flush_work {
	struct kthread_work work;
	struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

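/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */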
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime, any
	 * queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer() first.
 *
 * The work might still be in use when this function finishes: it may
 * still be the worker's current_work.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

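/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * A special case is when the work is being canceled in parallel, either by
 * kthread_cancel_delayed_work_sync() or by another kthread_mod_delayed_work().
 * The other command wins and %true is returned here, so the number of queued
 * works stays the same; the caller is supposed to synchronize such operations.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 */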
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporarily cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

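/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */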
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * Cancel @dwork and wait for its execution to finish.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios; there are no multi-step state
 * machines needed.
 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller should queue or cancel all delayed work items before invoking it.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

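/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */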
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	task_lock(tsk);

	local_irq_disable();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after storing to tsk->mm, before accessing
	 * user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop(), or explicitly with smp_mb().
	 */
	if (active_mm != mm)
		mmdrop(active_mm);
	else
		smp_mb();
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after accessing user-space memory, before
	 * clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);


#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
#endif