// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * Theory of operation:
 *
 * Each scheduled call is assigned a monotonically increasing "cookie".
 * Entries are queued, in cookie order, on a per-domain pending list and
 * (for registered domains) on a global pending list, and are executed on
 * the system_unbound_wq workqueue. Synchronization is checkpoint-based:
 * a waiter sleeps until the lowest cookie still pending is at least as
 * large as the cookie it is waiting for, at which point every call
 * scheduled before that checkpoint is known to have completed.
 */
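/*
 * Illustrative usage (a sketch, not from the original file; my_init_fn
 * and my_dev are hypothetical):
 *
 *	static void my_init_fn(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *		...slow, independent initialization...
 *	}
 *
 *	cookie = async_schedule_node(my_init_fn, dev, NUMA_NO_NODE);
 *	...other work runs concurrently...
 *	async_synchronize_full();	(or wait on a specific cookie)
 */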
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

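/* Next cookie to hand out; read and advanced under async_lock. */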
static async_cookie_t next_cookie = 1;

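/* If more than this many entries are pending, new calls run synchronously. */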
#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

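/*
 * One scheduled call. An entry sits on its domain's pending list and,
 * when the domain is registered, on the global pending list as well,
 * until async_run_entry_fn() has run its function.
 */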
struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

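/* Completion wakeups; entry_count tracks in-flight entries against MAX_WORK. */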
static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();
	return ktime_to_ns(ktime_sub(now, start)) >> 10;	/* approx /1000 */
}

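/*
 * Return the cookie of the oldest pending entry in @domain, or across all
 * registered domains when @domain is NULL. Returns ASYNC_COOKIE_MAX when
 * nothing is pending.
 */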
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

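	/* Entries are queued in cookie order, so the first one is the lowest. */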
	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async cookie to wait for
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async cookie to wait for
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);