// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running the various independent hardware delay and discovery operations
decoupled from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order
(not unlike how out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should, before doing a globally visible
operation such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding to the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/

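/*
 * Rough illustration of the model described above (a hedged sketch, not
 * taken from an in-tree driver; the probe function and device names are
 * hypothetical). An asynchronously scheduled probe delays only its globally
 * visible step until everything scheduled before it has finished:
 *
 *	static void my_probe_async(void *data, async_cookie_t cookie)
 *	{
 *		struct my_device *dev = data;
 *
 *		my_slow_hardware_discovery(dev);	// may run out of order
 *
 *		// wait for all work scheduled before us, so the externally
 *		// visible registration below still happens in order
 *		async_synchronize_cookie(cookie);
 *		my_register_device(dev);
 *	}
 *
 *	// in the (synchronous) init path:
 *	//	async_schedule(my_probe_async, dev);
 *	//	...
 *	//	async_synchronize_full();	// before returning from init
 */
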
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

/*
 * Return the cookie of the oldest pending entry in @domain, or across all
 * registered domains when @domain is NULL. Returns ASYNC_COOKIE_MAX if
 * nothing is pending.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;

	/* 1) run (and print duration) */
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("calling  %lli_%pS @ %i\n",
			 (long long)entry->cookie,
			 entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_debug("initcall %lli_%pS returned 0 after %lld usecs\n",
			 (long long)entry->cookie,
			 entry->func,
			 (long long)ktime_to_ns(delta) >> 10);
	}

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);

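/*
 * Usage sketch (illustrative only; the domain, callback, and device names
 * below are hypothetical, not part of this file): a driver can keep its
 * async probes in a private domain and wait on just that domain, while
 * still asking for the work to run near a device's NUMA node.
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(my_probe_domain);
 *
 *	static void my_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		// ... per-device probing ...
 *	}
 *
 *	// for each device:
 *	//	async_schedule_node_domain(my_probe_one, dev,
 *	//				   dev_to_node(&pdev->dev),
 *	//				   &my_probe_domain);
 *	// later, once all probes must have finished:
 *	//	async_synchronize_full_domain(&my_probe_domain);
 */
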
/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

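/*
 * Minimal sketch of the rule from the header comment (hypothetical module
 * and callback names, not code from this file): an init path that scheduled
 * async probes but shares global resources with non-async code drains
 * everything before returning, keeping strict ordering with the synchronous
 * parts of the kernel.
 *
 *	static int __init my_driver_init(void)
 *	{
 *		async_schedule(my_probe_async, &my_device);
 *		async_synchronize_full();
 *		return 0;
 *	}
 */
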
/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain objects, and can
 * trust it to not change if they register flushing objects.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->pending));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		pr_debug("async_continuing @ %i after %lli usec\n",
			 task_pid_nr(current),
			 (long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);

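/*
 * Illustrative use (hypothetical caller, not part of this file): code that
 * can be reached both from an async probe and from ordinary process context
 * may check current_is_async() so it only waits on other async work when it
 * is not itself running as an async worker, e.g.:
 *
 *	if (!current_is_async())
 *		async_synchronize_full();	// safe: not an async worker
 */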