// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.
The asynchronously called function should, before doing a globally visible
operation such as registering device numbers, call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.
Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
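
/*
 * Illustrative sketch (not part of this file): an asynchronously called
 * probe function does its slow, order-independent work first, then passes
 * its own cookie to async_synchronize_cookie() so that the externally
 * visible step still happens in scheduling order. scan_hardware(),
 * register_device_node() and my_data are hypothetical:
 *
 *	static void my_probe(void *data, async_cookie_t cookie)
 *	{
 *		scan_hardware(data);
 *		async_synchronize_cookie(cookie);
 *		register_device_node(data);
 *	}
 *
 *	async_schedule(my_probe, my_data);
 */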
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/ktime.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"
static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);
static struct workqueue_struct *async_wq;
struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;
static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();

	/* cheap ns -> us conversion: shifting by 10 divides by 1024, not 1000 */
	return ktime_to_ns(ktime_sub(now, start)) >> 10;
}
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}
/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling  %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}
static async_cookie_t __async_schedule_node_domain(async_func_t func,
						   void *data, int node,
						   struct async_domain *domain,
						   struct async_entry *entry)
{
	async_cookie_t newcookie;
	unsigned long flags;

	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, async_wq, &entry->work);

	return newcookie;
}
/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}

	return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
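
/*
 * Illustrative sketch (not part of this file): a subsystem can keep its
 * callbacks in a private domain and later wait only for that domain
 * rather than for all async work. my_init_one() is a hypothetical
 * async_func_t and dev0/dev1 are hypothetical struct device pointers:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_node_domain(my_init_one, dev0, dev_to_node(dev0),
 *				   &my_domain);
 *	async_schedule_node_domain(my_init_one, dev1, dev_to_node(dev1),
 *				   &my_domain);
 *	async_synchronize_full_domain(&my_domain);
 */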
/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);
/**
 * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
 * @func: function to execute asynchronously
 * @dev: device argument to be passed to function
 *
 * @dev is used as both the argument for the function and to provide NUMA
 * context for where to run the function.
 *
 * If the asynchronous execution of @func is scheduled successfully, return
 * true. Otherwise, do nothing and return false, unlike async_schedule_dev(),
 * which runs the function synchronously in that case.
 */
bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
{
	struct async_entry *entry;

	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);

	/* Give up if there is no memory or too much work. */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		return false;
	}

	__async_schedule_node_domain(func, dev, dev_to_node(dev),
				     &async_dfl_domain, entry);
	return true;
}
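
/*
 * Illustrative sketch (not part of this file): because this variant never
 * runs @func itself, a caller that must guarantee execution falls back to
 * a direct synchronous call when scheduling fails. my_resume_fn is a
 * hypothetical async_func_t, invoked here with a dummy cookie of 0:
 *
 *	if (!async_schedule_dev_nocall(my_resume_fn, dev))
 *		my_resume_fn(dev, 0);
 */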
/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
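
/*
 * Illustrative sketch (not part of this file): per the theory-of-operation
 * notes above, an init function that scheduled async work but shares global
 * state with non-async code drains everything before returning.
 * probe_all() is a hypothetical async_func_t:
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule(probe_all, NULL);
 *		async_synchronize_full();
 *		return 0;
 *	}
 */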
/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
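
/*
 * Illustrative sketch (not part of this file): code that can run both
 * synchronously and from within an async worker can use current_is_async()
 * to avoid waiting on itself (an async worker calling
 * async_synchronize_full() would wait for its own entry to complete):
 *
 *	if (!current_is_async())
 *		async_synchronize_full();
 */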
void __init async_init(void)
{
	/*
	 * Async can schedule a number of interdependent work items. However,
	 * unbound workqueues can handle only up to min_active interdependent
	 * work items. The default min_active of 8 isn't sufficient for async
	 * and can lead to stalls. Let's use a dedicated workqueue with raised
	 * min_active.
	 */
	async_wq = alloc_workqueue("async", WQ_UNBOUND, 0);
	BUG_ON(!async_wq);
	workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE);
}