// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);
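
/*
 * Callers normally do not invoke __init_waitqueue_head() directly: the
 * init_waitqueue_head() wrapper in <linux/wait.h> supplies a static
 * lock_class_key per call site for lockdep. A minimal usage sketch
 * (illustrative only; "my_wq" and "dyn_wq" are placeholder names):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	// static initialization
 *
 *	struct wait_queue_head dyn_wq;		// dynamic initialization
 *	init_waitqueue_head(&dyn_wq);
 */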

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
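
/*
 * Note that exclusive entries are queued at the tail while non-exclusive
 * entries go to the head, so a wake-one wakeup wakes all non-exclusive
 * waiters before it stops at the first exclusive one (see
 * __wake_up_common() below).
 */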

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
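
/*
 * Most callers reach __wake_up() through the wake_up*() macros in
 * <linux/wait.h>. A minimal waiter/waker pairing sketch (illustrative
 * only; "my_wq" and "condition" are placeholders):
 *
 *	// waiter				// waker
 *	wait_event(my_wq, condition);		condition = true;
 *						wake_up(&my_wq);
 *
 * where wake_up(x) expands to __wake_up(x, TASK_NORMAL, 1, NULL).
 */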

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
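
/*
 * A typical user is a waker that is itself about to sleep, e.g. a pipe
 * writer waking the reader before blocking on a full pipe. Sketch
 * (illustrative only; WF_SYNC is a scheduling hint, not a guarantee):
 *
 *	wake_up_interruptible_sync(&other_side_wq);
 *	// ... and schedule away shortly afterwards
 */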

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
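
/*
 * A minimal open-coded wait loop built on the above (illustrative sketch;
 * "wq_head" and "condition" are caller-provided):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */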

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);
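
/*
 * This is how the ___wait_event() machinery in <linux/wait.h> sets up its
 * on-stack wait entry, passing WQ_FLAG_EXCLUSIVE for the _exclusive macro
 * variants and 0 otherwise.
 */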

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
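
/*
 * prepare_to_wait_event() is the helper behind the wait_event*() macro
 * family. Roughly (a simplified sketch of the ___wait_event() loop;
 * "__int" and "__ret" are macro-local names):
 *
 *	for (;;) {
 *		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);
 *		if (condition)
 *			break;
 *		if (__int) {			// signal pending: -ERESTARTSYS
 *			__ret = __int;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &__wq_entry);
 */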

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
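
/*
 * These are intended as the wait callback of the *_locked() wait macros,
 * which run with wq.lock already held by the caller. Sketch (illustrative
 * only):
 *
 *	spin_lock(&wq.lock);
 *	err = wait_event_interruptible_locked(wq, condition);
 *	spin_unlock(&wq.lock);
 */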

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    our list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
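
/*
 * Note: the entry is auto-removed only when default_wake_function()
 * actually woke the task; if the target was already running, the entry
 * stays queued so a later wakeup can still find it.
 */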

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				   condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
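
/*
 * Usage sketch for the woken-wait API, mirroring the diagram above
 * (illustrative only):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!condition)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq_head, &wait);
 */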

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);