![]() |
|
|||
// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

/*
 * Sentinel installed as the list head by task_work_run() once the exiting
 * task has drained its list; task_work_add() checks for this address to
 * detect "too late" and fail with -ESRCH.
 */
static struct callback_head work_exited;	/* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 *
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task goes through one of
 * the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work item
 * is not queued. It's up to the caller to arrange for an alternative mechanism
 * in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 if succeeds, or -ESRCH if @task is exiting/exited.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack(work);

	/*
	 * Lockless LIFO push. Retry if another add/run/cancel changed the
	 * head under us; fail permanently once task_work_run() has installed
	 * the work_exited sentinel for the dying task.
	 */
	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	/* Kick the task according to the caller's urgency. */
	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}

/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: opaque pointer passed to @match for each queued entry
 *
 * Remove the first (most recently queued) entry for which @match returns
 * true. ->pi_lock serializes against the lock/unlock handshake in
 * task_work_run(), so a work that is already being run cannot be unlinked
 * here.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	/* Cheap lockless bail-out when nothing is queued. */
	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (!match(work, data))
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

/* Matcher for task_work_cancel(): compare the entry's callback to @data. */
static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to the user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can not remove
		 * the first entry == work, cmpxchg(task_works) must fail.
		 * But it can remove another entry from the ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		/*
		 * Run the detached batch in queue order of this snapshot
		 * (the list itself is LIFO relative to task_work_add()).
		 */
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
[ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
This page was automatically generated by the 2.1.0 LXR engine. The LXR team |
![]() ![]() |