/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>
void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);
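/*
 * Callers normally reach this through the init_waitqueue_head() wrapper,
 * which supplies a lockdep class key, or through a static declaration.
 * A minimal usage sketch ('my_wq' and 'my_dev' are hypothetical):
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(my_wq);  // static initialization
 *
 *      init_waitqueue_head(&my_dev->wq);       // runtime initialization
 */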
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue_entry_tail(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_head->lock, flags);
        __remove_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
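/*
 * Note the asymmetry above: add_wait_queue() inserts at the head of the
 * list while add_wait_queue_exclusive() inserts at the tail. A wakeup
 * walks from the head, so every non-exclusive waiter is woken before the
 * exclusive ones, and only nr_exclusive of the latter.
 *
 * A sketch of an exclusive wait loop built on these primitives, e.g. for
 * many server threads blocking on one event ('my_wq' and 'my_cond' are
 * hypothetical; see prepare_to_wait_exclusive() below):
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait_exclusive(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (my_cond)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&my_wq, &wait);
 */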
/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT        64
/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key,
                        wait_queue_entry_t *bookmark)
{
        wait_queue_entry_t *curr, *next;
        int cnt = 0;

        if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
                curr = list_next_entry(bookmark, entry);

                list_del(&bookmark->entry);
                bookmark->flags = 0;
        } else
                curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

        if (&curr->entry == &wq_head->head)
                return nr_exclusive;

        list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
                unsigned flags = curr->flags;
                int ret;

                if (flags & WQ_FLAG_BOOKMARK)
                        continue;

                ret = curr->func(curr, mode, wake_flags, key);
                if (ret < 0)
                        break;
                if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;

                if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
                                (&next->entry != &wq_head->head)) {
                        bookmark->flags = WQ_FLAG_BOOKMARK;
                        list_add_tail(&bookmark->entry, &next->entry);
                        break;
                }
        }

        return nr_exclusive;
}
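/*
 * Illustration of the nr_exclusive semantics (a sketch; 'my_wq' is
 * hypothetical): with several exclusive waiters queued,
 *
 *      __wake_up(&my_wq, TASK_NORMAL, 1, NULL);        // wakes one of them
 *      __wake_up(&my_wq, TASK_NORMAL, 0, NULL);        // wakes all of them
 *
 * Non-exclusive waiters on the same queue are woken in either case.
 */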
static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        unsigned long flags;
        wait_queue_entry_t bookmark;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        spin_lock_irqsave(&wq_head->lock, flags);
        nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
        spin_unlock_irqrestore(&wq_head->lock, flags);

        while (bookmark.flags & WQ_FLAG_BOOKMARK) {
                spin_lock_irqsave(&wq_head->lock, flags);
                nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
                                                wake_flags, key, &bookmark);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}
/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
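/*
 * Typical pairing (a sketch with hypothetical names): the waiter side uses
 * the wait_event*() macros and the waker side wake_up(), which expands to
 * __wake_up(x, TASK_NORMAL, 1, NULL):
 *
 *      // waiter
 *      wait_event_interruptible(my_wq, my_cond);
 *
 *      // waker
 *      my_cond = true;
 *      wake_up(&my_wq);
 */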
/*
 * Same as __wake_up() but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
        __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
        __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
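/*
 * Usage sketch for the bookmark variant (a hypothetical caller, modeled
 * on how mm/filemap.c drains very long page-bit wait queues): the caller
 * owns the lock and keeps re-walking until the bookmark is off the list:
 *
 *      bookmark.flags = 0;
 *      bookmark.private = NULL;
 *      bookmark.func = NULL;
 *      INIT_LIST_HEAD(&bookmark.entry);
 *
 *      spin_lock_irqsave(&wq_head->lock, flags);
 *      __wake_up_locked_key_bookmark(wq_head, TASK_NORMAL, &key, &bookmark);
 *      spin_unlock_irqrestore(&wq_head->lock, flags);
 *
 *      while (bookmark.flags & WQ_FLAG_BOOKMARK) {
 *              spin_lock_irqsave(&wq_head->lock, flags);
 *              __wake_up_locked_key_bookmark(wq_head, TASK_NORMAL,
 *                                            &key, &bookmark);
 *              spin_unlock_irqrestore(&wq_head->lock, flags);
 *      }
 */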
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - i.e. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        int wake_flags = 1; /* XXX WF_SYNC */

        if (unlikely(!wq_head))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        __wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
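/*
 * Usage sketch (hypothetical): a writer that is about to block itself,
 * e.g. a pipe-style producer, can use the sync variants to hint that the
 * woken reader need not be migrated to another CPU:
 *
 *      fill_my_buffer(...);                    // hypothetical
 *      wake_up_interruptible_sync(&my_wq);     // -> __wake_up_sync()
 *      wait_for_space(...);                    // waker schedules away soon
 */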
/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
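/*
 * The canonical open-coded wait loop built on prepare_to_wait() (a
 * sketch; 'my_wq' and 'my_cond' are hypothetical). Because the task
 * state is set after the queue add, a wakeup that runs between the
 * condition check and schedule() is not lost:
 *
 *      DEFINE_WAIT(wait);
 *      int err = 0;
 *
 *      for (;;) {
 *              prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *              if (my_cond)
 *                      break;
 *              if (signal_pending(current)) {
 *                      err = -ERESTARTSYS;
 *                      break;
 *              }
 *              schedule();
 *      }
 *      finish_wait(&my_wq, &wait);
 */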
void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue_entry_tail(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
        wq_entry->flags = flags;
        wq_entry->private = current;
        wq_entry->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&wq_head->lock, flags);
        if (unlikely(signal_pending_state(state, current))) {
                /*
                 * Exclusive waiter must not fail if it was selected by wakeup,
                 * it should "consume" the condition we were waiting for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up, we can not miss the event because
                 * wakeup locks/unlocks the same wq_head->lock.
                 *
                 * But we need to ensure that set-condition + wakeup after that
                 * can't see us, it should wake up another exclusive waiter if
                 * we fail.
                 */
                list_del_init(&wq_entry->entry);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wq_entry->entry)) {
                        if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_entry_tail(wq_head, wq_entry);
                        else
                                __add_wait_queue(wq_head, wq_entry);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&wq_head->lock, flags);

        return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
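/*
 * prepare_to_wait_event() is the workhorse behind the wait_event*()
 * macros; their expansion is roughly the following (a simplified sketch
 * of ___wait_event() in <linux/wait.h>):
 *
 *      init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *      for (;;) {
 *              long ret = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *              if (condition)
 *                      break;
 *              if (ret)        // -ERESTARTSYS: a signal was pending
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&wq_head, &wq_entry);
 */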
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock(&wq->lock);
        schedule();
        spin_lock(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr);
int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock_irq(&wq->lock);
        schedule();
        spin_lock_irq(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
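/*
 * These two helpers back the wait_event_interruptible_locked*() macros.
 * A simplified sketch of the caller's loop, which holds wq.lock
 * throughout (note that the wait entry is removed under the lock rather
 * than via finish_wait(), which would take the lock again):
 *
 *      DEFINE_WAIT(wait);
 *
 *      spin_lock(&wq.lock);
 *      while (!condition) {
 *              err = do_wait_intr(&wq, &wait); // drops/retakes wq.lock
 *              if (err)                        // -ERESTARTSYS on signal
 *                      break;
 *      }
 *      __remove_wait_queue(&wq, &wait);
 *      __set_current_state(TASK_RUNNING);
 *      spin_unlock(&wq.lock);
 */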
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPU's that we haven't seen yet (and that might
         *    still change the stack area).
         * and
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    our list).
         */
        if (!list_empty_careful(&wq_entry->entry)) {
                spin_lock_irqsave(&wq_head->lock, flags);
                list_del_init(&wq_entry->entry);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wq_entry, mode, sync, key);

        if (ret)
                list_del_init(&wq_entry->entry);

        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}
/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;                         condition = true;
 *     smp_mb(); // A                           smp_wmb(); // C
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))  wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()                           try_to_wake_up();
 *     p->state = TASK_RUNNING;                 ~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;       condition = true;
 *     smp_mb(); // B                           smp_wmb(); // C
 *                                              wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq_head, &wait);
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
        set_current_state(mode); /* A */
        /*
         * The above implies an smp_mb(), which matches with the smp_wmb() from
         * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
         * also observe all state before the wakeup.
         */
        if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below implies an smp_mb(), it too pairs with the smp_wmb() from
         * woken_wake_function() such that we must either observe the wait
         * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
         * an event.
         */
        smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);
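/*
 * Usage sketch for the wait_woken() scheme above (hypothetical names;
 * this mirrors the pattern the networking code uses for socket waits):
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *      add_wait_queue(&my_wq, &wait);
 *      while (!my_cond && timeout)
 *              timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *      remove_wait_queue(&my_wq, &wait);
 */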
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        /*
         * Although this function is called under the waitqueue lock, LOCK
         * doesn't imply a write barrier, and the users expect write
         * barrier semantics on wakeup functions. The following
         * smp_wmb() is equivalent to the smp_wmb() in try_to_wake_up()
         * and is paired with the smp_store_mb() in wait_woken().
         */
        smp_wmb(); /* C */
        wq_entry->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);