0001 /* Kernel thread helper functions.
0002  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
0003  *
0004  * Creation is done via kthreadd, so that we get a clean environment
0005  * even if we're invoked from userspace (think modprobe, hotplug cpu,
0006  * etc.).
0007  */
0008 #include <linux/sched.h>
0009 #include <linux/kthread.h>
0010 #include <linux/completion.h>
0011 #include <linux/err.h>
0012 #include <linux/cpuset.h>
0013 #include <linux/unistd.h>
0014 #include <linux/file.h>
0015 #include <linux/export.h>
0016 #include <linux/mutex.h>
0017 #include <linux/slab.h>
0018 #include <linux/freezer.h>
0019 #include <linux/ptrace.h>
0020 #include <linux/uaccess.h>
0021 #include <trace/events/sched.h>
0022 
0023 static DEFINE_SPINLOCK(kthread_create_lock);
0024 static LIST_HEAD(kthread_create_list);
0025 struct task_struct *kthreadd_task;
0026 
0027 struct kthread_create_info
0028 {
0029     /* Information passed to kthread() from kthreadd. */
0030     int (*threadfn)(void *data);
0031     void *data;
0032     int node;
0033 
0034     /* Result passed back to kthread_create() from kthreadd. */
0035     struct task_struct *result;
0036     struct completion *done;
0037 
0038     struct list_head list;
0039 };
0040 
0041 struct kthread {
0042     unsigned long flags;
0043     unsigned int cpu;
0044     void *data;
0045     struct completion parked;
0046     struct completion exited;
0047 };
0048 
0049 enum KTHREAD_BITS {
0050     KTHREAD_IS_PER_CPU = 0,
0051     KTHREAD_SHOULD_STOP,
0052     KTHREAD_SHOULD_PARK,
0053     KTHREAD_IS_PARKED,
0054 };
0055 
0056 static inline void set_kthread_struct(void *kthread)
0057 {
0058     /*
0059      * We abuse ->set_child_tid to avoid the new member and because it
0060      * can't be wrongly copied by copy_process(). We also rely on fact
0061      * that the caller can't exec, so PF_KTHREAD can't be cleared.
0062      */
0063     current->set_child_tid = (__force void __user *)kthread;
0064 }
0065 
0066 static inline struct kthread *to_kthread(struct task_struct *k)
0067 {
0068     WARN_ON(!(k->flags & PF_KTHREAD));
0069     return (__force void *)k->set_child_tid;
0070 }
0071 
0072 void free_kthread_struct(struct task_struct *k)
0073 {
0074     /*
0075      * Can be NULL if this kthread was created by kernel_thread()
0076      * or if kmalloc() in kthread() failed.
0077      */
0078     kfree(to_kthread(k));
0079 }
0080 
0081 /**
0082  * kthread_should_stop - should this kthread return now?
0083  *
0084  * When someone calls kthread_stop() on your kthread, it will be woken
0085  * and this will return true.  You should then return, and your return
0086  * value will be passed through to kthread_stop().
0087  */
0088 bool kthread_should_stop(void)
0089 {
0090     return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
0091 }
0092 EXPORT_SYMBOL(kthread_should_stop);
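/*
 * Editorial example (not part of kthread.c): a minimal sketch of a thread
 * function driven by kthread_should_stop().  example_thread_fn and the work
 * placeholder are made up; only <linux/kthread.h> and <linux/sched.h> are
 * assumed.  kthread_stop() wakes the thread, so the sleep below ends early.
 */
static int example_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... do one unit of work on 'data' here ... */
		schedule_timeout_interruptible(HZ);
	}
	/* This value is handed back to the kthread_stop() caller. */
	return 0;
}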
0093 
0094 /**
0095  * kthread_should_park - should this kthread park now?
0096  *
0097  * When someone calls kthread_park() on your kthread, it will be woken
0098  * and this will return true.  You should then do the necessary
0099  * cleanup and call kthread_parkme()
0100  *
0101  * Similar to kthread_should_stop(), but this keeps the thread alive
0102  * and in a park position. kthread_unpark() "restarts" the thread and
0103  * calls the thread function again.
0104  */
0105 bool kthread_should_park(void)
0106 {
0107     return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
0108 }
0109 EXPORT_SYMBOL_GPL(kthread_should_park);
0110 
0111 /**
0112  * kthread_freezable_should_stop - should this freezable kthread return now?
0113  * @was_frozen: optional out parameter, indicates whether %current was frozen
0114  *
0115  * kthread_should_stop() for freezable kthreads, which will enter
0116  * refrigerator if necessary.  This function is safe from kthread_stop() /
0117  * freezer deadlock and freezable kthreads should use this function instead
0118  * of calling try_to_freeze() directly.
0119  */
0120 bool kthread_freezable_should_stop(bool *was_frozen)
0121 {
0122     bool frozen = false;
0123 
0124     might_sleep();
0125 
0126     if (unlikely(freezing(current)))
0127         frozen = __refrigerator(true);
0128 
0129     if (was_frozen)
0130         *was_frozen = frozen;
0131 
0132     return kthread_should_stop();
0133 }
0134 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
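/*
 * Editorial example (not part of kthread.c): a sketch of a freezable kthread
 * loop.  The thread must call set_freezable() first; kthreads inherit
 * PF_NOFREEZE from kthreadd and are otherwise ignored by the freezer.
 * example_freezable_fn is a made-up name.
 */
static int example_freezable_fn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("thawed, resuming work\n");
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}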
0135 
0136 /**
0137  * kthread_data - return data value specified on kthread creation
0138  * @task: kthread task in question
0139  *
0140  * Return the data value specified when kthread @task was created.
0141  * The caller is responsible for ensuring the validity of @task when
0142  * calling this function.
0143  */
0144 void *kthread_data(struct task_struct *task)
0145 {
0146     return to_kthread(task)->data;
0147 }
0148 
0149 /**
0150  * kthread_probe_data - speculative version of kthread_data()
0151  * @task: possible kthread task in question
0152  *
0153  * @task could be a kthread task.  Return the data value specified when it
0154  * was created if accessible.  If @task isn't a kthread task or its data is
0155  * inaccessible for any reason, %NULL is returned.  This function requires
0156  * that @task itself is safe to dereference.
0157  */
0158 void *kthread_probe_data(struct task_struct *task)
0159 {
0160     struct kthread *kthread = to_kthread(task);
0161     void *data = NULL;
0162 
0163     probe_kernel_read(&data, &kthread->data, sizeof(data));
0164     return data;
0165 }
0166 
0167 static void __kthread_parkme(struct kthread *self)
0168 {
0169     __set_current_state(TASK_PARKED);
0170     while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
0171         if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
0172             complete(&self->parked);
0173         schedule();
0174         __set_current_state(TASK_PARKED);
0175     }
0176     clear_bit(KTHREAD_IS_PARKED, &self->flags);
0177     __set_current_state(TASK_RUNNING);
0178 }
0179 
0180 void kthread_parkme(void)
0181 {
0182     __kthread_parkme(to_kthread(current));
0183 }
0184 EXPORT_SYMBOL_GPL(kthread_parkme);
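/*
 * Editorial example (not part of kthread.c): how a per-CPU style thread
 * function typically combines the stop and park checks.  When a controller
 * calls kthread_park(), the loop drops into kthread_parkme() and sleeps in
 * TASK_PARKED until kthread_unpark().  example_percpu_fn is a made-up name.
 */
static int example_percpu_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Undo any CPU-local setup before parking. */
			kthread_parkme();
			continue;
		}
		/* ... do one unit of work on this CPU ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}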
0185 
0186 static int kthread(void *_create)
0187 {
0188     /* Copy data: it's on kthread's stack */
0189     struct kthread_create_info *create = _create;
0190     int (*threadfn)(void *data) = create->threadfn;
0191     void *data = create->data;
0192     struct completion *done;
0193     struct kthread *self;
0194     int ret;
0195 
0196     self = kmalloc(sizeof(*self), GFP_KERNEL);
0197     set_kthread_struct(self);
0198 
0199     /* If user was SIGKILLed, I release the structure. */
0200     done = xchg(&create->done, NULL);
0201     if (!done) {
0202         kfree(create);
0203         do_exit(-EINTR);
0204     }
0205 
0206     if (!self) {
0207         create->result = ERR_PTR(-ENOMEM);
0208         complete(done);
0209         do_exit(-ENOMEM);
0210     }
0211 
0212     self->flags = 0;
0213     self->data = data;
0214     init_completion(&self->exited);
0215     init_completion(&self->parked);
0216     current->vfork_done = &self->exited;
0217 
0218     /* OK, tell user we're spawned, wait for stop or wakeup */
0219     __set_current_state(TASK_UNINTERRUPTIBLE);
0220     create->result = current;
0221     complete(done);
0222     schedule();
0223 
0224     ret = -EINTR;
0225     if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
0226         __kthread_parkme(self);
0227         ret = threadfn(data);
0228     }
0229     do_exit(ret);
0230 }
0231 
0232 /* Called from do_fork() to get node information for the task about to be created. */
0233 int tsk_fork_get_node(struct task_struct *tsk)
0234 {
0235 #ifdef CONFIG_NUMA
0236     if (tsk == kthreadd_task)
0237         return tsk->pref_node_fork;
0238 #endif
0239     return NUMA_NO_NODE;
0240 }
0241 
0242 static void create_kthread(struct kthread_create_info *create)
0243 {
0244     int pid;
0245 
0246 #ifdef CONFIG_NUMA
0247     current->pref_node_fork = create->node;
0248 #endif
0249     /* We want our own signal handler (we take no signals by default). */
0250     pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
0251     if (pid < 0) {
0252         /* If user was SIGKILLed, I release the structure. */
0253         struct completion *done = xchg(&create->done, NULL);
0254 
0255         if (!done) {
0256             kfree(create);
0257             return;
0258         }
0259         create->result = ERR_PTR(pid);
0260         complete(done);
0261     }
0262 }
0263 
0264 static __printf(4, 0)
0265 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
0266                             void *data, int node,
0267                             const char namefmt[],
0268                             va_list args)
0269 {
0270     DECLARE_COMPLETION_ONSTACK(done);
0271     struct task_struct *task;
0272     struct kthread_create_info *create = kmalloc(sizeof(*create),
0273                              GFP_KERNEL);
0274 
0275     if (!create)
0276         return ERR_PTR(-ENOMEM);
0277     create->threadfn = threadfn;
0278     create->data = data;
0279     create->node = node;
0280     create->done = &done;
0281 
0282     spin_lock(&kthread_create_lock);
0283     list_add_tail(&create->list, &kthread_create_list);
0284     spin_unlock(&kthread_create_lock);
0285 
0286     wake_up_process(kthreadd_task);
0287     /*
0288      * Wait for completion in killable state, for I might be chosen by
0289      * the OOM killer while kthreadd is trying to allocate memory for
0290      * new kernel thread.
0291      */
0292     if (unlikely(wait_for_completion_killable(&done))) {
0293         /*
0294          * If I was SIGKILLed before kthreadd (or new kernel thread)
0295          * calls complete(), leave the cleanup of this structure to
0296          * that thread.
0297          */
0298         if (xchg(&create->done, NULL))
0299             return ERR_PTR(-EINTR);
0300         /*
0301          * kthreadd (or new kernel thread) will call complete()
0302          * shortly.
0303          */
0304         wait_for_completion(&done);
0305     }
0306     task = create->result;
0307     if (!IS_ERR(task)) {
0308         static const struct sched_param param = { .sched_priority = 0 };
0309 
0310         vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
0311         /*
0312          * root may have changed our (kthreadd's) priority or CPU mask.
0313          * The kernel thread should not inherit these properties.
0314          */
0315         sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
0316         set_cpus_allowed_ptr(task, cpu_all_mask);
0317     }
0318     kfree(create);
0319     return task;
0320 }
0321 
0322 /**
0323  * kthread_create_on_node - create a kthread.
0324  * @threadfn: the function to run until signal_pending(current).
0325  * @data: data ptr for @threadfn.
0326  * @node: task and thread structures for the thread are allocated on this node
0327  * @namefmt: printf-style name for the thread.
0328  *
0329  * Description: This helper function creates and names a kernel
0330  * thread.  The thread will be stopped: use wake_up_process() to start
0331  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
0332  * is affine to all CPUs.
0333  *
0334  * If the thread is going to be bound to a particular cpu, give its node
0335  * in @node to get NUMA affinity for the kthread stack; otherwise give NUMA_NO_NODE.
0336  * When woken, the thread will run @threadfn() with @data as its
0337  * argument. @threadfn() can either call do_exit() directly if it is a
0338  * standalone thread for which no one will call kthread_stop(), or
0339  * return when 'kthread_should_stop()' is true (which means
0340  * kthread_stop() has been called).  The return value should be zero
0341  * or a negative error number; it will be passed to kthread_stop().
0342  *
0343  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
0344  */
0345 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
0346                        void *data, int node,
0347                        const char namefmt[],
0348                        ...)
0349 {
0350     struct task_struct *task;
0351     va_list args;
0352 
0353     va_start(args, namefmt);
0354     task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
0355     va_end(args);
0356 
0357     return task;
0358 }
0359 EXPORT_SYMBOL(kthread_create_on_node);
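/*
 * Editorial example (not part of kthread.c): the usual create/wake/stop life
 * cycle.  kthread_create() is the <linux/kthread.h> wrapper that passes
 * NUMA_NO_NODE for @node; example_thread_fn is the made-up thread function
 * sketched earlier.
 */
static int example_start_stop(void *cookie)
{
	struct task_struct *task;

	task = kthread_create(example_thread_fn, cookie, "example/%d", 0);
	if (IS_ERR(task))
		return PTR_ERR(task);

	wake_up_process(task);		/* the new thread starts running threadfn */
	/* ... later ... */
	return kthread_stop(task);	/* waits for threadfn to return */
}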
0360 
0361 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
0362 {
0363     unsigned long flags;
0364 
0365     if (!wait_task_inactive(p, state)) {
0366         WARN_ON(1);
0367         return;
0368     }
0369 
0370     /* It's safe because the task is inactive. */
0371     raw_spin_lock_irqsave(&p->pi_lock, flags);
0372     do_set_cpus_allowed(p, mask);
0373     p->flags |= PF_NO_SETAFFINITY;
0374     raw_spin_unlock_irqrestore(&p->pi_lock, flags);
0375 }
0376 
0377 static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
0378 {
0379     __kthread_bind_mask(p, cpumask_of(cpu), state);
0380 }
0381 
0382 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
0383 {
0384     __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
0385 }
0386 
0387 /**
0388  * kthread_bind - bind a just-created kthread to a cpu.
0389  * @p: thread created by kthread_create().
0390  * @cpu: cpu (might not be online, must be possible) for @k to run on.
0391  *
0392  * Description: This function is equivalent to set_cpus_allowed(),
0393  * except that @cpu doesn't need to be online, and the thread must be
0394  * stopped (i.e., just returned from kthread_create()).
0395  */
0396 void kthread_bind(struct task_struct *p, unsigned int cpu)
0397 {
0398     __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
0399 }
0400 EXPORT_SYMBOL(kthread_bind);
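/*
 * Editorial example (not part of kthread.c): binding a freshly created,
 * still stopped thread to one CPU before the first wakeup.  The thread name
 * and example_thread_fn are made up.
 */
static struct task_struct *example_bound_thread(unsigned int cpu)
{
	struct task_struct *task;

	task = kthread_create(example_thread_fn, NULL, "example_bound/%u", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);	/* must happen before waking the thread */
		wake_up_process(task);
	}
	return task;
}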
0401 
0402 /**
0403  * kthread_create_on_cpu - Create a cpu bound kthread
0404  * @threadfn: the function to run until signal_pending(current).
0405  * @data: data ptr for @threadfn.
0406  * @cpu: The cpu on which the thread should be bound.
0407  * @namefmt: printf-style name for the thread. Format is restricted
0408  *       to "name.*%u". Code fills in cpu number.
0409  *
0410  * Description: This helper function creates and names a kernel thread
0411  * bound to @cpu.  The thread is left stopped; the caller typically parks or wakes it.
0412  */
0413 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
0414                       void *data, unsigned int cpu,
0415                       const char *namefmt)
0416 {
0417     struct task_struct *p;
0418 
0419     p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
0420                    cpu);
0421     if (IS_ERR(p))
0422         return p;
0423     kthread_bind(p, cpu);
0424     /* CPU hotplug needs to bind once again when unparking the thread. */
0425     set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
0426     to_kthread(p)->cpu = cpu;
0427     return p;
0428 }
0429 
0430 /**
0431  * kthread_unpark - unpark a thread created by kthread_create().
0432  * @k:      thread created by kthread_create().
0433  *
0434  * Sets kthread_should_park() for @k to return false and wakes it so
0435  * that it leaves the parked state. If the thread is marked percpu then
0436  * it's bound to the cpu again.
0437  */
0438 void kthread_unpark(struct task_struct *k)
0439 {
0440     struct kthread *kthread = to_kthread(k);
0441 
0442     clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
0443     /*
0444      * We clear the IS_PARKED bit here as we don't wait
0445      * until the task has left the park code. So if we'd
0446      * park before that happens we'd see the IS_PARKED bit
0447      * which might be about to be cleared.
0448      */
0449     if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
0450         /*
0451          * Newly created kthread was parked when the CPU was offline.
0452          * The binding was lost and we need to set it again.
0453          */
0454         if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
0455             __kthread_bind(k, kthread->cpu, TASK_PARKED);
0456         wake_up_state(k, TASK_PARKED);
0457     }
0458 }
0459 EXPORT_SYMBOL_GPL(kthread_unpark);
0460 
0461 /**
0462  * kthread_park - park a thread created by kthread_create().
0463  * @k: thread created by kthread_create().
0464  *
0465  * Sets kthread_should_park() for @k to return true, wakes it, and
0466  * waits for it to return. This can also be called after kthread_create()
0467  * instead of calling wake_up_process(): the thread will park without
0468  * calling threadfn().
0469  *
0470  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
0471  * If called by the kthread itself, just the park bit is set.
0472  */
0473 int kthread_park(struct task_struct *k)
0474 {
0475     struct kthread *kthread = to_kthread(k);
0476 
0477     if (WARN_ON(k->flags & PF_EXITING))
0478         return -ENOSYS;
0479 
0480     if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
0481         set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
0482         if (k != current) {
0483             wake_up_process(k);
0484             wait_for_completion(&kthread->parked);
0485         }
0486     }
0487 
0488     return 0;
0489 }
0490 EXPORT_SYMBOL_GPL(kthread_park);
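/*
 * Editorial example (not part of kthread.c): park/unpark of a per-CPU thread
 * in the style of the smpboot threads.  example_percpu_fn is the made-up
 * function sketched earlier; error handling is kept minimal on purpose.
 */
static int example_percpu_lifecycle(unsigned int cpu)
{
	struct task_struct *task;

	task = kthread_create_on_cpu(example_percpu_fn, NULL, cpu, "example/%u");
	if (IS_ERR(task))
		return PTR_ERR(task);

	kthread_park(task);	/* park before threadfn() ever runs */
	/* ... the CPU comes online ... */
	kthread_unpark(task);	/* rebinds to @cpu and lets threadfn() run */
	/* ... the CPU goes down again ... */
	kthread_park(task);	/* threadfn() waits in TASK_PARKED */

	return kthread_stop(task);	/* unparks internally and waits for exit */
}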
0491 
0492 /**
0493  * kthread_stop - stop a thread created by kthread_create().
0494  * @k: thread created by kthread_create().
0495  *
0496  * Sets kthread_should_stop() for @k to return true, wakes it, and
0497  * waits for it to exit. This can also be called after kthread_create()
0498  * instead of calling wake_up_process(): the thread will exit without
0499  * calling threadfn().
0500  *
0501  * If threadfn() may call do_exit() itself, the caller must ensure
0502  * task_struct can't go away.
0503  *
0504  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
0505  * was never called.
0506  */
0507 int kthread_stop(struct task_struct *k)
0508 {
0509     struct kthread *kthread;
0510     int ret;
0511 
0512     trace_sched_kthread_stop(k);
0513 
0514     get_task_struct(k);
0515     kthread = to_kthread(k);
0516     set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
0517     kthread_unpark(k);
0518     wake_up_process(k);
0519     wait_for_completion(&kthread->exited);
0520     ret = k->exit_code;
0521     put_task_struct(k);
0522 
0523     trace_sched_kthread_stop_ret(ret);
0524     return ret;
0525 }
0526 EXPORT_SYMBOL(kthread_stop);
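/*
 * Editorial example (not part of kthread.c): if threadfn() may call do_exit()
 * on its own, pin the task_struct with get_task_struct() at creation time so
 * that a later kthread_stop() cannot race with the task being freed.  A
 * sketch under that assumption; names are made up.
 */
static struct task_struct *example_task;

static void example_spawn(void)
{
	example_task = kthread_run(example_thread_fn, NULL, "example_self");
	if (!IS_ERR(example_task))
		get_task_struct(example_task);	/* keep it safe for kthread_stop() */
}

static void example_teardown(void)
{
	if (!IS_ERR_OR_NULL(example_task)) {
		kthread_stop(example_task);
		put_task_struct(example_task);	/* drop our reference */
	}
}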
0527 
0528 int kthreadd(void *unused)
0529 {
0530     struct task_struct *tsk = current;
0531 
0532     /* Setup a clean context for our children to inherit. */
0533     set_task_comm(tsk, "kthreadd");
0534     ignore_signals(tsk);
0535     set_cpus_allowed_ptr(tsk, cpu_all_mask);
0536     set_mems_allowed(node_states[N_MEMORY]);
0537 
0538     current->flags |= PF_NOFREEZE;
0539 
0540     for (;;) {
0541         set_current_state(TASK_INTERRUPTIBLE);
0542         if (list_empty(&kthread_create_list))
0543             schedule();
0544         __set_current_state(TASK_RUNNING);
0545 
0546         spin_lock(&kthread_create_lock);
0547         while (!list_empty(&kthread_create_list)) {
0548             struct kthread_create_info *create;
0549 
0550             create = list_entry(kthread_create_list.next,
0551                         struct kthread_create_info, list);
0552             list_del_init(&create->list);
0553             spin_unlock(&kthread_create_lock);
0554 
0555             create_kthread(create);
0556 
0557             spin_lock(&kthread_create_lock);
0558         }
0559         spin_unlock(&kthread_create_lock);
0560     }
0561 
0562     return 0;
0563 }
0564 
0565 void __kthread_init_worker(struct kthread_worker *worker,
0566                 const char *name,
0567                 struct lock_class_key *key)
0568 {
0569     memset(worker, 0, sizeof(struct kthread_worker));
0570     spin_lock_init(&worker->lock);
0571     lockdep_set_class_and_name(&worker->lock, key, name);
0572     INIT_LIST_HEAD(&worker->work_list);
0573     INIT_LIST_HEAD(&worker->delayed_work_list);
0574 }
0575 EXPORT_SYMBOL_GPL(__kthread_init_worker);
0576 
0577 /**
0578  * kthread_worker_fn - kthread function to process kthread_worker
0579  * @worker_ptr: pointer to initialized kthread_worker
0580  *
0581  * This function implements the main cycle of kthread worker. It processes
0582  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
0583  * is empty.
0584  *
0585  * The works must not hold any locks or leave preemption or interrupts
0586  * disabled when they finish. A safe point for freezing is defined after
0587  * one work finishes and before the next one is started.
0588  *
0589  * Also the works must not be handled by more than one worker at the same time,
0590  * see also kthread_queue_work().
0591  */
0592 int kthread_worker_fn(void *worker_ptr)
0593 {
0594     struct kthread_worker *worker = worker_ptr;
0595     struct kthread_work *work;
0596 
0597     /*
0598      * FIXME: Update the check and remove the assignment when all kthread
0599      * worker users are created using kthread_create_worker*() functions.
0600      */
0601     WARN_ON(worker->task && worker->task != current);
0602     worker->task = current;
0603 
0604     if (worker->flags & KTW_FREEZABLE)
0605         set_freezable();
0606 
0607 repeat:
0608     set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
0609 
0610     if (kthread_should_stop()) {
0611         __set_current_state(TASK_RUNNING);
0612         spin_lock_irq(&worker->lock);
0613         worker->task = NULL;
0614         spin_unlock_irq(&worker->lock);
0615         return 0;
0616     }
0617 
0618     work = NULL;
0619     spin_lock_irq(&worker->lock);
0620     if (!list_empty(&worker->work_list)) {
0621         work = list_first_entry(&worker->work_list,
0622                     struct kthread_work, node);
0623         list_del_init(&work->node);
0624     }
0625     worker->current_work = work;
0626     spin_unlock_irq(&worker->lock);
0627 
0628     if (work) {
0629         __set_current_state(TASK_RUNNING);
0630         work->func(work);
0631     } else if (!freezing(current))
0632         schedule();
0633 
0634     try_to_freeze();
0635     goto repeat;
0636 }
0637 EXPORT_SYMBOL_GPL(kthread_worker_fn);
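/*
 * Editorial example (not part of kthread.c): the legacy way of running a
 * worker, mentioned in the FIXME above -- initialize a kthread_worker
 * yourself and hand kthread_worker_fn() to kthread_run().  Names are made up.
 */
static struct kthread_worker example_worker;
static struct task_struct *example_worker_task;

static int example_start_legacy_worker(void)
{
	kthread_init_worker(&example_worker);
	example_worker_task = kthread_run(kthread_worker_fn, &example_worker,
					  "example_worker");
	if (IS_ERR(example_worker_task))
		return PTR_ERR(example_worker_task);
	return 0;
}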
0638 
0639 static __printf(3, 0) struct kthread_worker *
0640 __kthread_create_worker(int cpu, unsigned int flags,
0641             const char namefmt[], va_list args)
0642 {
0643     struct kthread_worker *worker;
0644     struct task_struct *task;
0645     int node = -1;
0646 
0647     worker = kzalloc(sizeof(*worker), GFP_KERNEL);
0648     if (!worker)
0649         return ERR_PTR(-ENOMEM);
0650 
0651     kthread_init_worker(worker);
0652 
0653     if (cpu >= 0)
0654         node = cpu_to_node(cpu);
0655 
0656     task = __kthread_create_on_node(kthread_worker_fn, worker,
0657                         node, namefmt, args);
0658     if (IS_ERR(task))
0659         goto fail_task;
0660 
0661     if (cpu >= 0)
0662         kthread_bind(task, cpu);
0663 
0664     worker->flags = flags;
0665     worker->task = task;
0666     wake_up_process(task);
0667     return worker;
0668 
0669 fail_task:
0670     kfree(worker);
0671     return ERR_CAST(task);
0672 }
0673 
0674 /**
0675  * kthread_create_worker - create a kthread worker
0676  * @flags: flags modifying the default behavior of the worker
0677  * @namefmt: printf-style name for the kthread worker (task).
0678  *
0679  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
0680  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
0681  * when the worker was SIGKILLed.
0682  */
0683 struct kthread_worker *
0684 kthread_create_worker(unsigned int flags, const char namefmt[], ...)
0685 {
0686     struct kthread_worker *worker;
0687     va_list args;
0688 
0689     va_start(args, namefmt);
0690     worker = __kthread_create_worker(-1, flags, namefmt, args);
0691     va_end(args);
0692 
0693     return worker;
0694 }
0695 EXPORT_SYMBOL(kthread_create_worker);
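/*
 * Editorial example (not part of kthread.c): creating a dedicated worker with
 * the newer API.  KTW_FREEZABLE is the only flag defined in <linux/kthread.h>;
 * 0 selects the default behaviour.  Names are made up.
 */
static struct kthread_worker *example_new_worker(void)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker(0, "example_worker/%d", 0);
	if (IS_ERR(worker))
		pr_err("worker creation failed: %ld\n", PTR_ERR(worker));
	return worker;
}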
0696 
0697 /**
0698  * kthread_create_worker_on_cpu - create a kthread worker and bind it
0699  *  to a given CPU and the associated NUMA node.
0700  * @cpu: CPU number
0701  * @flags: flags modifying the default behavior of the worker
0702  * @namefmt: printf-style name for the kthread worker (task).
0703  *
0704  * Use a valid CPU number if you want to bind the kthread worker
0705  * to the given CPU and the associated NUMA node.
0706  *
0707  * A good practice is to also include the cpu number in the worker name.
0708  * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
0709  *
0710  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
0711  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
0712  * when the worker was SIGKILLed.
0713  */
0714 struct kthread_worker *
0715 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
0716                  const char namefmt[], ...)
0717 {
0718     struct kthread_worker *worker;
0719     va_list args;
0720 
0721     va_start(args, namefmt);
0722     worker = __kthread_create_worker(cpu, flags, namefmt, args);
0723     va_end(args);
0724 
0725     return worker;
0726 }
0727 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
0728 
0729 /*
0730  * Returns true when the work cannot be queued at the moment:
0731  * it is either already pending in a worker list
0732  * or being cancelled.
0733  */
0734 static inline bool queuing_blocked(struct kthread_worker *worker,
0735                    struct kthread_work *work)
0736 {
0737     lockdep_assert_held(&worker->lock);
0738 
0739     return !list_empty(&work->node) || work->canceling;
0740 }
0741 
0742 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
0743                          struct kthread_work *work)
0744 {
0745     lockdep_assert_held(&worker->lock);
0746     WARN_ON_ONCE(!list_empty(&work->node));
0747     /* Do not use a work with >1 worker, see kthread_queue_work() */
0748     WARN_ON_ONCE(work->worker && work->worker != worker);
0749 }
0750 
0751 /* insert @work before @pos in @worker */
0752 static void kthread_insert_work(struct kthread_worker *worker,
0753                 struct kthread_work *work,
0754                 struct list_head *pos)
0755 {
0756     kthread_insert_work_sanity_check(worker, work);
0757 
0758     list_add_tail(&work->node, pos);
0759     work->worker = worker;
0760     if (!worker->current_work && likely(worker->task))
0761         wake_up_process(worker->task);
0762 }
0763 
0764 /**
0765  * kthread_queue_work - queue a kthread_work
0766  * @worker: target kthread_worker
0767  * @work: kthread_work to queue
0768  *
0769  * Queue @work for asynchronous execution by @worker.  @worker is
0770  * typically created with kthread_create_worker().  Returns %true
0771  * if @work was successfully queued, %false if it was already pending.
0772  *
0773  * Reinitialize the work if it needs to be used by another worker.
0774  * For example, when the worker was stopped and started again.
0775  */
0776 bool kthread_queue_work(struct kthread_worker *worker,
0777             struct kthread_work *work)
0778 {
0779     bool ret = false;
0780     unsigned long flags;
0781 
0782     spin_lock_irqsave(&worker->lock, flags);
0783     if (!queuing_blocked(worker, work)) {
0784         kthread_insert_work(worker, work, &worker->work_list);
0785         ret = true;
0786     }
0787     spin_unlock_irqrestore(&worker->lock, flags);
0788     return ret;
0789 }
0790 EXPORT_SYMBOL_GPL(kthread_queue_work);
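/*
 * Editorial example (not part of kthread.c): defining a work item, queueing
 * it on a worker and waiting for it to finish.  Names are made up; the worker
 * is assumed to come from kthread_create_worker() or kthread_worker_fn().
 */
static void example_work_fn(struct kthread_work *work)
{
	pr_debug("running on the worker thread\n");
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static void example_run_once(struct kthread_worker *worker)
{
	if (kthread_queue_work(worker, &example_work))
		kthread_flush_work(&example_work);	/* wait for example_work_fn() */
}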
0791 
0792 /**
0793  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
0794  *  delayed work when the timer expires.
0795  * @__data: pointer to the data associated with the timer
0796  *
0797  * The function's signature is dictated by struct timer_list.
0798  * It must be used with an irqsafe timer, so it runs with interrupts already disabled.
0799  */
0800 void kthread_delayed_work_timer_fn(unsigned long __data)
0801 {
0802     struct kthread_delayed_work *dwork =
0803         (struct kthread_delayed_work *)__data;
0804     struct kthread_work *work = &dwork->work;
0805     struct kthread_worker *worker = work->worker;
0806 
0807     /*
0808      * This might happen when a pending work is reinitialized.
0809      * It means that the work is being used the wrong way.
0810      */
0811     if (WARN_ON_ONCE(!worker))
0812         return;
0813 
0814     spin_lock(&worker->lock);
0815     /* Work must not be used with >1 worker, see kthread_queue_work(). */
0816     WARN_ON_ONCE(work->worker != worker);
0817 
0818     /* Move the work from worker->delayed_work_list. */
0819     WARN_ON_ONCE(list_empty(&work->node));
0820     list_del_init(&work->node);
0821     kthread_insert_work(worker, work, &worker->work_list);
0822 
0823     spin_unlock(&worker->lock);
0824 }
0825 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
0826 
0827 void __kthread_queue_delayed_work(struct kthread_worker *worker,
0828                   struct kthread_delayed_work *dwork,
0829                   unsigned long delay)
0830 {
0831     struct timer_list *timer = &dwork->timer;
0832     struct kthread_work *work = &dwork->work;
0833 
0834     WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
0835              timer->data != (unsigned long)dwork);
0836 
0837     /*
0838      * If @delay is 0, queue @dwork->work immediately.  This is for
0839      * both optimization and correctness.  The earliest @timer can
0840      * expire is on the closest next tick and delayed_work users depend
0841      * on that there's no such delay when @delay is 0.
0842      */
0843     if (!delay) {
0844         kthread_insert_work(worker, work, &worker->work_list);
0845         return;
0846     }
0847 
0848     /* Be paranoid and try to detect possible races already now. */
0849     kthread_insert_work_sanity_check(worker, work);
0850 
0851     list_add(&work->node, &worker->delayed_work_list);
0852     work->worker = worker;
0853     timer_stats_timer_set_start_info(&dwork->timer);
0854     timer->expires = jiffies + delay;
0855     add_timer(timer);
0856 }
0857 
0858 /**
0859  * kthread_queue_delayed_work - queue the associated kthread work
0860  *  after a delay.
0861  * @worker: target kthread_worker
0862  * @dwork: kthread_delayed_work to queue
0863  * @delay: number of jiffies to wait before queuing
0864  *
0865  * If the work has not been pending it starts a timer that will queue
0866  * the work after the given @delay. If @delay is zero, it queues the
0867  * work immediately.
0868  *
0869  * Return: %false if @work was already pending, meaning that either the
0870  * timer was running or the work was already queued. Returns %true
0871  * otherwise.
0872  */
0873 bool kthread_queue_delayed_work(struct kthread_worker *worker,
0874                 struct kthread_delayed_work *dwork,
0875                 unsigned long delay)
0876 {
0877     struct kthread_work *work = &dwork->work;
0878     unsigned long flags;
0879     bool ret = false;
0880 
0881     spin_lock_irqsave(&worker->lock, flags);
0882 
0883     if (!queuing_blocked(worker, work)) {
0884         __kthread_queue_delayed_work(worker, dwork, delay);
0885         ret = true;
0886     }
0887 
0888     spin_unlock_irqrestore(&worker->lock, flags);
0889     return ret;
0890 }
0891 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
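/*
 * Editorial example (not part of kthread.c): arming a delayed work so that
 * it runs on the worker roughly one second from now.  Names are made up;
 * the initialization must happen before the work is first queued.
 */
static void example_dwork_fn(struct kthread_work *work)
{
	pr_debug("delayed work ran\n");
}

static struct kthread_delayed_work example_dwork;

static void example_setup_and_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_dwork_fn);
	kthread_queue_delayed_work(worker, &example_dwork, HZ);
}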
0892 
0893 struct kthread_flush_work {
0894     struct kthread_work work;
0895     struct completion   done;
0896 };
0897 
0898 static void kthread_flush_work_fn(struct kthread_work *work)
0899 {
0900     struct kthread_flush_work *fwork =
0901         container_of(work, struct kthread_flush_work, work);
0902     complete(&fwork->done);
0903 }
0904 
0905 /**
0906  * kthread_flush_work - flush a kthread_work
0907  * @work: work to flush
0908  *
0909  * If @work is queued or executing, wait for it to finish execution.
0910  */
0911 void kthread_flush_work(struct kthread_work *work)
0912 {
0913     struct kthread_flush_work fwork = {
0914         KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
0915         COMPLETION_INITIALIZER_ONSTACK(fwork.done),
0916     };
0917     struct kthread_worker *worker;
0918     bool noop = false;
0919 
0920     worker = work->worker;
0921     if (!worker)
0922         return;
0923 
0924     spin_lock_irq(&worker->lock);
0925     /* Work must not be used with >1 worker, see kthread_queue_work(). */
0926     WARN_ON_ONCE(work->worker != worker);
0927 
0928     if (!list_empty(&work->node))
0929         kthread_insert_work(worker, &fwork.work, work->node.next);
0930     else if (worker->current_work == work)
0931         kthread_insert_work(worker, &fwork.work,
0932                     worker->work_list.next);
0933     else
0934         noop = true;
0935 
0936     spin_unlock_irq(&worker->lock);
0937 
0938     if (!noop)
0939         wait_for_completion(&fwork.done);
0940 }
0941 EXPORT_SYMBOL_GPL(kthread_flush_work);
0942 
0943 /*
0944  * This function removes the work from the worker queue. Also it makes sure
0945  * that it won't get queued later via the delayed work's timer.
0946  *
0947  * The work might still be in use when this function finishes. See the
0948  * current_work processed by the worker.
0949  *
0950  * Return: %true if @work was pending and successfully canceled,
0951  *  %false if @work was not pending
0952  */
0953 static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
0954                   unsigned long *flags)
0955 {
0956     /* Try to cancel the timer if exists. */
0957     if (is_dwork) {
0958         struct kthread_delayed_work *dwork =
0959             container_of(work, struct kthread_delayed_work, work);
0960         struct kthread_worker *worker = work->worker;
0961 
0962         /*
0963          * del_timer_sync() must be called to make sure that the timer
0964          * callback is not running. The lock must be temporarily released
0965          * to avoid a deadlock with the callback. In the meantime,
0966          * any queuing is blocked by setting the canceling counter.
0967          */
0968         work->canceling++;
0969         spin_unlock_irqrestore(&worker->lock, *flags);
0970         del_timer_sync(&dwork->timer);
0971         spin_lock_irqsave(&worker->lock, *flags);
0972         work->canceling--;
0973     }
0974 
0975     /*
0976      * Try to remove the work from a worker list. It might either
0977      * be from worker->work_list or from worker->delayed_work_list.
0978      */
0979     if (!list_empty(&work->node)) {
0980         list_del_init(&work->node);
0981         return true;
0982     }
0983 
0984     return false;
0985 }
0986 
0987 /**
0988  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
0989  * @worker: kthread worker to use
0990  * @dwork: kthread delayed work to queue
0991  * @delay: number of jiffies to wait before queuing
0992  *
0993  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
0994  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
0995  * @work is guaranteed to be queued immediately.
0996  *
0997  * Return: %true if @dwork was pending and its timer was modified,
0998  * %false otherwise.
0999  *
1000  * A special case is when the work is being canceled in parallel.
1001  * It might be caused either by the real kthread_cancel_delayed_work_sync()
1002  * or yet another kthread_mod_delayed_work() call. We let the other command
1003  * win and return %false here. The caller is supposed to synchronize these
1004  * operations a reasonable way.
1005  *
1006  * This function is safe to call from any context including IRQ handler.
1007  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1008  * for details.
1009  */
1010 bool kthread_mod_delayed_work(struct kthread_worker *worker,
1011                   struct kthread_delayed_work *dwork,
1012                   unsigned long delay)
1013 {
1014     struct kthread_work *work = &dwork->work;
1015     unsigned long flags;
1016     int ret = false;
1017 
1018     spin_lock_irqsave(&worker->lock, flags);
1019 
1020     /* Do not bother with canceling when never queued. */
1021     if (!work->worker)
1022         goto fast_queue;
1023 
1024     /* Work must not be used with >1 worker, see kthread_queue_work() */
1025     WARN_ON_ONCE(work->worker != worker);
1026 
1027     /* Do not fight with another command that is canceling this work. */
1028     if (work->canceling)
1029         goto out;
1030 
1031     ret = __kthread_cancel_work(work, true, &flags);
1032 fast_queue:
1033     __kthread_queue_delayed_work(worker, dwork, delay);
1034 out:
1035     spin_unlock_irqrestore(&worker->lock, flags);
1036     return ret;
1037 }
1038 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
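/*
 * Editorial example (not part of kthread.c): a watchdog-style pattern where
 * every "ping" pushes the expiry of the delayed work further into the future.
 * example_dwork is the made-up delayed work sketched earlier.
 */
static void example_ping(struct kthread_worker *worker)
{
	/* example_dwork_fn() runs only if no ping arrives for five seconds. */
	kthread_mod_delayed_work(worker, &example_dwork, 5 * HZ);
}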
1039 
1040 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1041 {
1042     struct kthread_worker *worker = work->worker;
1043     unsigned long flags;
1044     int ret = false;
1045 
1046     if (!worker)
1047         goto out;
1048 
1049     spin_lock_irqsave(&worker->lock, flags);
1050     /* Work must not be used with >1 worker, see kthread_queue_work(). */
1051     WARN_ON_ONCE(work->worker != worker);
1052 
1053     ret = __kthread_cancel_work(work, is_dwork, &flags);
1054 
1055     if (worker->current_work != work)
1056         goto out_fast;
1057 
1058     /*
1059      * The work is in progress and we need to wait with the lock released.
1060      * In the meantime, block any queuing by setting the canceling counter.
1061      */
1062     work->canceling++;
1063     spin_unlock_irqrestore(&worker->lock, flags);
1064     kthread_flush_work(work);
1065     spin_lock_irqsave(&worker->lock, flags);
1066     work->canceling--;
1067 
1068 out_fast:
1069     spin_unlock_irqrestore(&worker->lock, flags);
1070 out:
1071     return ret;
1072 }
1073 
1074 /**
1075  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1076  * @work: the kthread work to cancel
1077  *
1078  * Cancel @work and wait for its execution to finish.  This function
1079  * can be used even if the work re-queues itself. On return from this
1080  * function, @work is guaranteed to be not pending or executing on any CPU.
1081  *
1082  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1083  * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1084  *
1085  * The caller must ensure that the worker on which @work was last
1086  * queued can't be destroyed before this function returns.
1087  *
1088  * Return: %true if @work was pending, %false otherwise.
1089  */
1090 bool kthread_cancel_work_sync(struct kthread_work *work)
1091 {
1092     return __kthread_cancel_work_sync(work, false);
1093 }
1094 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1095 
1096 /**
1097  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1098  *  wait for it to finish.
1099  * @dwork: the kthread delayed work to cancel
1100  *
1101  * This is kthread_cancel_work_sync() for delayed works.
1102  *
1103  * Return: %true if @dwork was pending, %false otherwise.
1104  */
1105 bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1106 {
1107     return __kthread_cancel_work_sync(&dwork->work, true);
1108 }
1109 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1110 
1111 /**
1112  * kthread_flush_worker - flush all current works on a kthread_worker
1113  * @worker: worker to flush
1114  *
1115  * Wait until all currently executing or pending works on @worker are
1116  * finished.
1117  */
1118 void kthread_flush_worker(struct kthread_worker *worker)
1119 {
1120     struct kthread_flush_work fwork = {
1121         KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1122         COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1123     };
1124 
1125     kthread_queue_work(worker, &fwork.work);
1126     wait_for_completion(&fwork.done);
1127 }
1128 EXPORT_SYMBOL_GPL(kthread_flush_worker);
1129 
1130 /**
1131  * kthread_destroy_worker - destroy a kthread worker
1132  * @worker: worker to be destroyed
1133  *
1134  * Flush and destroy @worker.  The simple flush is enough because the kthread
1135  * worker API is used only in trivial scenarios.  There are no multi-step state
1136  * machines needed.
1137  */
1138 void kthread_destroy_worker(struct kthread_worker *worker)
1139 {
1140     struct task_struct *task;
1141 
1142     task = worker->task;
1143     if (WARN_ON(!task))
1144         return;
1145 
1146     kthread_flush_worker(worker);
1147     kthread_stop(task);
1148     WARN_ON(!list_empty(&worker->work_list));
1149     kfree(worker);
1150 }
1151 EXPORT_SYMBOL(kthread_destroy_worker);
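/*
 * Editorial example (not part of kthread.c): tearing everything down.  Cancel
 * outstanding works first, then flush and destroy the worker.  Names refer to
 * the made-up sketches above.
 */
static void example_shutdown(struct kthread_worker *worker)
{
	kthread_cancel_delayed_work_sync(&example_dwork);
	kthread_cancel_work_sync(&example_work);
	kthread_destroy_worker(worker);	/* flushes, stops the task, frees worker */
}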