/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time, by
running various independent hardware delays and discovery operations
concurrently instead of strictly serializing them.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously and out of order, while the externally visible parts of
these operations still happen sequentially and in order
(not unlike how out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before performing a globally visible operation, such as registering
device numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie.  The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled prior to the operation corresponding to
the cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function.  This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

An illustrative usage sketch follows this comment.

*/
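
/*
 * Illustrative sketch only (not taken from the kernel documentation): how a
 * hypothetical driver might combine the calls described above.  The names
 * struct my_device, my_dev, my_dev_scan_hw(), my_dev_register() and
 * my_driver_init() are invented for this example; async_schedule(),
 * async_synchronize_cookie() and async_synchronize_full() are the real
 * interfaces from <linux/async.h>.
 *
 *    static struct my_device my_dev;
 *
 *    static void my_dev_probe(void *data, async_cookie_t cookie)
 *    {
 *        struct my_device *dev = data;
 *
 *        my_dev_scan_hw(dev);        // slow, order-independent work
 *
 *        // Wait until everything scheduled before us has finished,
 *        // so the externally visible registration happens in order.
 *        async_synchronize_cookie(cookie);
 *        my_dev_register(dev);
 *    }
 *
 *    static int __init my_driver_init(void)
 *    {
 *        async_schedule(my_dev_probe, &my_dev);
 *
 *        // This init path shares global resources with drivers that
 *        // are not async-aware, so flush all async work first.
 *        async_synchronize_full();
 *        return 0;
 *    }
 */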

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK        32768
#define ASYNC_COOKIE_MAX    ULLONG_MAX  /* infinity cookie */

static LIST_HEAD(async_global_pending); /* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
    struct list_head    domain_list;
    struct list_head    global_list;
    struct work_struct  work;
    async_cookie_t      cookie;
    async_func_t        func;
    void            *data;
    struct async_domain *domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
    struct list_head *pending;
    async_cookie_t ret = ASYNC_COOKIE_MAX;
    unsigned long flags;

    spin_lock_irqsave(&async_lock, flags);

    if (domain)
        pending = &domain->pending;
    else
        pending = &async_global_pending;

    if (!list_empty(pending))
        ret = list_first_entry(pending, struct async_entry,
                       domain_list)->cookie;

    spin_unlock_irqrestore(&async_lock, flags);
    return ret;
}

/*
 * run one scheduled entry: call its function, then remove it from the
 * pending lists, free it and wake up any synchronization waiters
 */
static void async_run_entry_fn(struct work_struct *work)
{
    struct async_entry *entry =
        container_of(work, struct async_entry, work);
    unsigned long flags;
    ktime_t uninitialized_var(calltime), delta, rettime;

    /* 1) run (and print duration) */
    if (initcall_debug && system_state == SYSTEM_BOOTING) {
        pr_debug("calling  %lli_%pF @ %i\n",
            (long long)entry->cookie,
            entry->func, task_pid_nr(current));
        calltime = ktime_get();
    }
    entry->func(entry->data, entry->cookie);
    if (initcall_debug && system_state == SYSTEM_BOOTING) {
        rettime = ktime_get();
        delta = ktime_sub(rettime, calltime);
        pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n",
            (long long)entry->cookie,
            entry->func,
            (long long)ktime_to_ns(delta) >> 10);
    }

    /* 2) remove self from the pending queues */
    spin_lock_irqsave(&async_lock, flags);
    list_del_init(&entry->domain_list);
    list_del_init(&entry->global_list);

    /* 3) free the entry */
    kfree(entry);
    atomic_dec(&entry_count);

    spin_unlock_irqrestore(&async_lock, flags);

    /* 4) wake up any waiters */
    wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain)
{
    struct async_entry *entry;
    unsigned long flags;
    async_cookie_t newcookie;

    /* allow irq-off callers */
    entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

    /*
     * If we're out of memory or if there's too much work
     * pending already, we execute synchronously.
     */
    if (!entry || atomic_read(&entry_count) > MAX_WORK) {
        kfree(entry);
        spin_lock_irqsave(&async_lock, flags);
        newcookie = next_cookie++;
        spin_unlock_irqrestore(&async_lock, flags);

        /* low on memory.. run synchronously */
        func(data, newcookie);
        return newcookie;
    }
    INIT_LIST_HEAD(&entry->domain_list);
    INIT_LIST_HEAD(&entry->global_list);
    INIT_WORK(&entry->work, async_run_entry_fn);
    entry->func = func;
    entry->data = data;
    entry->domain = domain;

    spin_lock_irqsave(&async_lock, flags);

    /* allocate cookie and queue */
    newcookie = entry->cookie = next_cookie++;

    list_add_tail(&entry->domain_list, &domain->pending);
    if (domain->registered)
        list_add_tail(&entry->global_list, &async_global_pending);

    atomic_inc(&entry_count);
    spin_unlock_irqrestore(&async_lock, flags);

    /* mark that this task has queued an async job, used by module init */
    current->flags |= PF_USED_ASYNC;

    /* schedule for execution */
    queue_work(system_unbound_wq, &entry->work);

    return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_t func, void *data)
{
    return __async_schedule(func, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the synchronization domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be passed to the async_synchronize_*_domain() functions to
 * wait within that synchronization domain rather than globally.  Note:
 * This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_t func, void *data,
                     struct async_domain *domain)
{
    return __async_schedule(func, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
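
/*
 * Illustrative sketch only (names invented for this example): scheduling
 * work into a private synchronization domain declared with
 * ASYNC_DOMAIN_EXCLUSIVE(), so that this subsystem can wait for its own
 * async work without waiting for (or being waited on by) anyone else.
 *
 *    static ASYNC_DOMAIN_EXCLUSIVE(my_domain);
 *
 *    static void my_async_probe(void *data, async_cookie_t cookie)
 *    {
 *        // ... probe work ...
 *    }
 *
 *    static int __init my_subsys_init(void)
 *    {
 *        async_schedule_domain(my_async_probe, NULL, &my_domain);
 *
 *        // Wait only for work scheduled in my_domain, not for
 *        // unrelated async work elsewhere in the kernel.
 *        async_synchronize_full_domain(&my_domain);
 *        return 0;
 *    }
 */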

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
    async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
    spin_lock_irq(&async_lock);
    WARN_ON(!domain->registered || !list_empty(&domain->pending));
    domain->registered = 0;
    spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
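
/*
 * Illustrative sketch only (names invented for this example): a domain
 * declared with ASYNC_DOMAIN() is registered, i.e. visible to anonymous
 * async_synchronize_full() waiters, so it should be flushed and then
 * unregistered before its owner goes away.  As noted above, declaring the
 * domain with ASYNC_DOMAIN_EXCLUSIVE() instead is usually preferable.
 *
 *    static ASYNC_DOMAIN(my_registered_domain);
 *
 *    static void my_subsys_teardown(void)
 *    {
 *        // Drain the domain first; async_unregister_domain() warns
 *        // if entries are still pending.
 *        async_synchronize_full_domain(&my_registered_domain);
 *        async_unregister_domain(&my_registered_domain);
 *    }
 */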

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
    async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
    ktime_t uninitialized_var(starttime), delta, endtime;

    if (initcall_debug && system_state == SYSTEM_BOOTING) {
        pr_debug("async_waiting @ %i\n", task_pid_nr(current));
        starttime = ktime_get();
    }

    wait_event(async_done, lowest_in_progress(domain) >= cookie);

    if (initcall_debug && system_state == SYSTEM_BOOTING) {
        endtime = ktime_get();
        delta = ktime_sub(endtime, starttime);

        pr_debug("async_continuing @ %i after %lli usec\n",
            task_pid_nr(current),
            (long long)ktime_to_ns(delta) >> 10);
    }
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
    async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
    struct worker *worker = current_wq_worker();

    return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
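
/*
 * Illustrative sketch only (hypothetical helper names): code that can run
 * either directly or from within an async-scheduled function can use
 * current_is_async() to avoid synchronizing against the async machinery
 * from inside an async worker, where waiting on all outstanding work
 * would include waiting on itself.
 *
 *    static void my_finish_setup(struct my_device *dev)
 *    {
 *        // Only flush the async queues when we are not ourselves
 *        // running as async work.
 *        if (!current_is_async())
 *            async_synchronize_full();
 *
 *        my_dev_register(dev);
 *    }
 */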