/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_BIT_H
#define _LINUX_WAIT_BIT_H

/*
 * Linux wait-bit related types and methods:
 */
#include <linux/wait.h>

struct wait_bit_key {
	unsigned long		*flags;
	int			bit_nr;
	unsigned long		timeout;
};

struct wait_bit_queue_entry {
	struct wait_bit_key	key;
	struct wait_queue_entry	wq_entry;
};

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);

void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
void wake_up_bit(unsigned long *word, int bit);
int out_of_line_wait_on_bit(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_bit_timeout(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
int out_of_line_wait_on_bit_lock(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode);
struct wait_queue_head *bit_waitqueue(unsigned long *word, int bit);
extern void __init wait_bit_init(void);

int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue_entry name = {				\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wq_entry = {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.entry		=				\
				LIST_HEAD_INIT((name).wq_entry.entry),	\
		},							\
	}

extern int bit_wait(struct wait_bit_key *key, int mode);
extern int bit_wait_io(struct wait_bit_key *key, int mode);
extern int bit_wait_timeout(struct wait_bit_key *key, int mode);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the address containing the bit being waited on
 * @bit: the bit at that address being waited on
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
 * to be cleared. The clearing of the bit must be signalled with
 * wake_up_bit(), often as clear_and_wake_up_bit().
 *
 * The process will wait on a waitqueue selected by hash from a shared
 * pool. It will only be woken on a wake_up for the target bit, even
 * if other processes on the same queue are waiting for other bits.
 *
 * Returned value will be zero if the bit was cleared in which case the
 * call has ACQUIRE semantics, or %-EINTR if the process received a
 * signal and the mode permitted wake up on that signal.
 */
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit_acquire(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait,
				       mode);
}
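
/*
 * Example (an illustrative sketch, not part of this header; "dev", its
 * "flags" word and the MY_BIT_BUSY bit are hypothetical):
 *
 *	A waiter sleeps until the bit is cleared:
 *
 *		int err = wait_on_bit(&dev->flags, MY_BIT_BUSY,
 *				      TASK_INTERRUPTIBLE);
 *		if (err)
 *			return err;	(-EINTR: a signal arrived first)
 *
 *	The waker clears the bit and wakes all waiters in one call:
 *
 *		clear_and_wake_up_bit(MY_BIT_BUSY, &dev->flags);
 */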

/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the address containing the bit being waited on
 * @bit: the bit at that address being waited on
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
 * to be cleared. The clearing of the bit must be signalled with
 * wake_up_bit(), often as clear_and_wake_up_bit().
 *
 * This is similar to wait_on_bit(), but calls io_schedule() instead of
 * schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared in which case the
 * call has ACQUIRE semantics, or %-EINTR if the process received a
 * signal and the mode permitted wake up on that signal.
 */
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit_acquire(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait_io,
				       mode);
}

/**
 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout to elapse
 * @word: the address containing the bit being waited on
 * @bit: the bit at that address being waited on
 * @mode: the task state to sleep in
 * @timeout: timeout, in jiffies
 *
 * Wait for the given bit in an unsigned long or bitmap (see
 * DECLARE_BITMAP()) to be cleared, or for a timeout to expire. The
 * clearing of the bit must be signalled with wake_up_bit(), often as
 * clear_and_wake_up_bit().
 *
 * This is similar to wait_on_bit(), except it also takes a timeout
 * parameter.
 *
 * Returned value will be zero if the bit was cleared in which case the
 * call has ACQUIRE semantics, or %-EINTR if the process received a
 * signal and the mode permitted wake up on that signal, or %-EAGAIN if the
 * timeout elapsed.
 */
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
		    unsigned long timeout)
{
	might_sleep();
	if (!test_bit_acquire(bit, word))
		return 0;
	return out_of_line_wait_on_bit_timeout(word, bit,
					       bit_wait_timeout,
					       mode, timeout);
}
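
/*
 * Example (an illustrative sketch; "dev" and MY_BIT_RESET are
 * hypothetical): wait up to one second for a reset bit to clear.
 *
 *		int err = wait_on_bit_timeout(&dev->flags, MY_BIT_RESET,
 *					      TASK_UNINTERRUPTIBLE, HZ);
 *		if (err == -EAGAIN)
 *			(the second elapsed with the bit still set)
 */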

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the address containing the bit being waited on
 * @bit: the bit at that address being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
 * to be cleared. The clearing of the bit must be signalled with
 * wake_up_bit(), often as clear_and_wake_up_bit().
 *
 * This is similar to wait_on_bit(), but calls @action() instead of
 * schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared in which case the
 * call has ACQUIRE semantics, or the error code returned by @action if
 * that call returned non-zero.
 */
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
		   unsigned mode)
{
	might_sleep();
	if (!test_bit_acquire(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
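
/*
 * Example (an illustrative sketch; my_bit_wait() and my_hw_poke() are
 * hypothetical): an action receives the wait_bit_key and the sleep mode,
 * performs the actual sleep, and returns 0 to keep waiting or a -Exxx
 * error to abort the wait, mirroring what bit_wait() does:
 *
 *		static int my_bit_wait(struct wait_bit_key *key, int mode)
 *		{
 *			my_hw_poke();	(extra work before sleeping)
 *			schedule();
 *			if (signal_pending_state(mode, current))
 *				return -EINTR;
 *			return 0;
 *		}
 *
 *		int err = wait_on_bit_action(&dev->flags, MY_BIT_BUSY,
 *					     my_bit_wait, TASK_INTERRUPTIBLE);
 */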

/**
 * wait_on_bit_lock - wait for a bit to be cleared, then set it
 * @word: the address containing the bit being waited on
 * @bit: the bit of the word being waited on and set
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see
 * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be
 * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As
 * soon as it is clear, atomically set it and return.
 *
 * This is similar to wait_on_bit(), but sets the bit before returning.
 *
 * Returned value will be zero if the bit was successfully set in which
 * case the call has the same memory sequencing semantics as
 * test_and_set_bit(), or %-EINTR if the process received a signal and
 * the mode permitted wake up on that signal.
 */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}
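
/*
 * Example (an illustrative sketch; "dev" and MY_BIT_LOCK are
 * hypothetical): using a bit as a simple sleeping lock.
 *
 *		if (wait_on_bit_lock(&dev->flags, MY_BIT_LOCK, TASK_KILLABLE))
 *			return -EINTR;	(a fatal signal arrived first)
 *		(... the bit is now set and this task "owns" it ...)
 *		clear_and_wake_up_bit(MY_BIT_LOCK, &dev->flags);
 */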

/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, then set it
 * @word: the address containing the bit being waited on
 * @bit: the bit of the word being waited on and set
 * @mode: the task state to sleep in
 *
 * Wait for the given bit in an unsigned long or bitmap (see
 * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be
 * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As
 * soon as it is clear, atomically set it and return.
 *
 * This is similar to wait_on_bit_lock(), but calls io_schedule() instead
 * of schedule().
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, then set it
 * @word: the address containing the bit being waited on
 * @bit: the bit of the word being waited on and set
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * This is similar to wait_on_bit_lock(), but calls @action() instead of
 * schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was successfully set in which
 * case the call has the same memory sequencing semantics as
 * test_and_set_bit(), or the error code returned by @action if that
 * call returned non-zero.
 */
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
			unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);

#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	struct wait_queue_head *__wq_head = __var_waitqueue(var);	\
	struct wait_bit_queue_entry __wbq_entry;			\
	long __ret = ret; /* explicit shadow */				\
									\
	init_wait_var_entry(&__wbq_entry, var,				\
			    exclusive ? WQ_FLAG_EXCLUSIVE : 0);		\
	for (;;) {							\
		long __int = prepare_to_wait_event(__wq_head,		\
						   &__wbq_entry.wq_entry, \
						   state);		\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(__wq_head, &__wbq_entry.wq_entry);			\
__out:	__ret;								\
})

#define __wait_var_event(var, condition)				\
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			  schedule())
#define __wait_var_event_io(var, condition)				\
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			  io_schedule())

/**
 * wait_var_event - wait for a variable to be updated and notified
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 *
 * Wait for a @condition to be true, only re-checking when a wake up is
 * received for the given @var (an arbitrary kernel address which need
 * not be directly related to the given condition, but usually is).
 *
 * The process will wait on a waitqueue selected by hash from a shared
 * pool. It will only be woken on a wake_up for the given address.
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event(var, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_var_event(var, condition);				\
} while (0)
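
/*
 * Example (an illustrative sketch; "work" and its "done" field are
 * hypothetical): the waiter re-checks the condition only when a wake up
 * arrives for &work->done, and the waker publishes the new value with
 * store_release_wake_up(), which supplies the barriers this relies on:
 *
 *	waiter:
 *		wait_var_event(&work->done, smp_load_acquire(&work->done));
 *
 *	waker:
 *		store_release_wake_up(&work->done, 1);
 */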

/**
 * wait_var_event_io - wait for a variable to be updated and notified
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 *
 * Wait for an IO related @condition to be true, only re-checking when a
 * wake up is received for the given @var (an arbitrary kernel address
 * which need not be directly related to the given condition, but
 * usually is).
 *
 * The process will wait on a waitqueue selected by hash from a shared
 * pool. It will only be woken on a wake_up for the given address.
 *
 * This is similar to wait_var_event(), but calls io_schedule() instead
 * of schedule().
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event_io(var, condition)				\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_var_event_io(var, condition);				\
} while (0)

#define __wait_var_event_killable(var, condition)			\
	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,		\
			  schedule())

/**
 * wait_var_event_killable - wait for a variable to be updated and notified
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 *
 * Wait for a @condition to be true or a fatal signal to be received,
 * only re-checking the condition when a wake up is received for the given
 * @var (an arbitrary kernel address which need not be directly related
 * to the given condition, but usually is).
 *
 * This is similar to wait_var_event() but returns a value which is
 * 0 if the condition became true, or %-ERESTARTSYS if a fatal signal
 * was received.
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event_killable(var, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_var_event_killable(var, condition);	\
	__ret;								\
})

#define __wait_var_event_timeout(var, condition, timeout)		\
	___wait_var_event(var, ___wait_cond_timeout(condition),		\
			  TASK_UNINTERRUPTIBLE, 0, timeout,		\
			  __ret = schedule_timeout(__ret))

/**
 * wait_var_event_timeout - wait for a variable to be updated or a timeout to expire
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 * @timeout: maximum time to wait in jiffies
 *
 * Wait for a @condition to be true or a timeout to expire, only
 * re-checking the condition when a wake up is received for the given
 * @var (an arbitrary kernel address which need not be directly related
 * to the given condition, but usually is).
 *
 * This is similar to wait_var_event() but returns a value which is 0 if
 * the timeout expired and the condition was still false, or the
 * remaining time left in the timeout (but at least 1) if the condition
 * was found to be true.
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event_timeout(var, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_var_event_timeout(var, condition, timeout); \
	__ret;								\
})
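
/*
 * Example (an illustrative sketch; "work" is hypothetical): give up
 * after five seconds if the variable is never updated.
 *
 *		long left = wait_var_event_timeout(&work->done,
 *				smp_load_acquire(&work->done), 5 * HZ);
 *		if (!left)
 *			(timed out and the condition was still false)
 */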

#define __wait_var_event_interruptible(var, condition)			\
	___wait_var_event(var, condition, TASK_INTERRUPTIBLE, 0, 0,	\
			  schedule())

/**
 * wait_var_event_interruptible - wait for a variable to be updated and notified
 * @var: the address of variable being waited on
 * @condition: the condition to wait for
 *
 * Wait for a @condition to be true or a signal to be received, only
 * re-checking the condition when a wake up is received for the given
 * @var (an arbitrary kernel address which need not be directly related
 * to the given condition, but usually is).
 *
 * This is similar to wait_var_event() but returns a value which is 0 if
 * the condition became true, or %-ERESTARTSYS if a signal was received.
 *
 * The condition should normally use smp_load_acquire() or a similarly
 * ordered access to ensure that any changes to memory made before the
 * condition became true will be visible after the wait completes.
 */
#define wait_var_event_interruptible(var, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_var_event_interruptible(var, condition);	\
	__ret;								\
})

/**
 * wait_var_event_any_lock - wait for a variable to be updated under a lock
 * @var: the address of the variable being waited on
 * @condition: condition to wait for
 * @lock: the object that is locked to protect updates to the variable
 * @type: prefix on lock and unlock operations
 * @state: waiting state, %TASK_UNINTERRUPTIBLE etc.
 *
 * Wait for a condition which can only be reliably tested while holding
 * a lock. The variables assessed in the condition will normally be updated
 * under the same lock, and the wake up should be signalled with
 * wake_up_var_locked() under the same lock.
 *
 * This is similar to wait_var_event(), but assumes a lock is held
 * while calling this function and while updating the variable.
 *
 * This must be called while the given lock is held and the lock will be
 * dropped when schedule() is called to wait for a wake up, and will be
 * reclaimed before testing the condition again. The functions used to
 * unlock and lock the object are constructed by appending _unlock and _lock
 * to @type.
 *
 * Return %-ERESTARTSYS if a signal arrives which is allowed to interrupt
 * the wait according to @state.
 */
#define wait_var_event_any_lock(var, condition, lock, type, state)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = ___wait_var_event(var, condition, state, 0, 0,	\
					  type ## _unlock(lock);	\
					  schedule();			\
					  type ## _lock(lock));		\
	__ret;								\
})

/**
 * wait_var_event_spinlock - wait for a variable to be updated under a spinlock
 * @var: the address of the variable being waited on
 * @condition: condition to wait for
 * @lock: the spinlock which protects updates to the variable
 *
 * Wait for a condition which can only be reliably tested while holding
 * a spinlock. The variables assessed in the condition will normally be
 * updated under the same spinlock, and the wake up should be signalled
 * with wake_up_var_locked() under the same spinlock.
 *
 * This is similar to wait_var_event(), but assumes a spinlock is held
 * while calling this function and while updating the variable.
 *
 * This must be called while the given lock is held and the lock will be
 * dropped when schedule() is called to wait for a wake up, and will be
 * reclaimed before testing the condition again.
 */
#define wait_var_event_spinlock(var, condition, lock)			\
	wait_var_event_any_lock(var, condition, lock, spin, TASK_UNINTERRUPTIBLE)
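
/*
 * Example (an illustrative sketch; "obj", its "lock" spinlock and "refs"
 * count are hypothetical): both sides hold the same spinlock, so no
 * extra barriers are needed.
 *
 *	waiter:
 *		spin_lock(&obj->lock);
 *		wait_var_event_spinlock(&obj->refs, obj->refs == 0,
 *					&obj->lock);
 *		spin_unlock(&obj->lock);
 *
 *	waker (with &obj->lock held):
 *		if (--obj->refs == 0)
 *			wake_up_var_locked(&obj->refs, &obj->lock);
 */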

/**
 * wait_var_event_mutex - wait for a variable to be updated under a mutex
 * @var: the address of the variable being waited on
 * @condition: condition to wait for
 * @lock: the mutex which protects updates to the variable
 *
 * Wait for a condition which can only be reliably tested while holding
 * a mutex. The variables assessed in the condition will normally be
 * updated under the same mutex, and the wake up should be signalled
 * with wake_up_var_locked() under the same mutex.
 *
 * This is similar to wait_var_event(), but assumes a mutex is held
 * while calling this function and while updating the variable.
 *
 * This must be called while the given mutex is held and the mutex will be
 * dropped when schedule() is called to wait for a wake up, and will be
 * reclaimed before testing the condition again.
 */
#define wait_var_event_mutex(var, condition, lock)			\
	wait_var_event_any_lock(var, condition, lock, mutex, TASK_UNINTERRUPTIBLE)

/**
 * wake_up_var_protected - wake up waiters for a variable asserting that it is safe
 * @var: the address of the variable being waited on
 * @cond: the condition which affirms this is safe
 *
 * When waking waiters which use wait_var_event_any_lock() the waker must be
 * holding the relevant lock to avoid races. This version of wake_up_var()
 * asserts that the relevant lock is held and so no barrier is needed.
 * The @cond is only tested when CONFIG_LOCKDEP is enabled.
 */
#define wake_up_var_protected(var, cond)				\
do {									\
	lockdep_assert(cond);						\
	wake_up_var(var);						\
} while (0)

/**
 * wake_up_var_locked - wake up waiters for a variable while holding a spinlock or mutex
 * @var: the address of the variable being waited on
 * @lock: the spinlock or mutex which protects updates to the variable
 *
 * Send a wake up for the given variable which should be waited for with
 * wait_var_event_spinlock() or wait_var_event_mutex(). Unlike wake_up_var(),
 * no extra barriers are needed as the locking provides sufficient sequencing.
 */
#define wake_up_var_locked(var, lock)					\
	wake_up_var_protected(var, lockdep_is_held(lock))

/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 * @bit: the bit of the word being waited on
 * @word: the address containing the bit being waited on
 *
 * The designated bit is cleared and any tasks waiting in wait_on_bit()
 * or similar will be woken. This call has RELEASE semantics so that
 * any changes to memory made before this call are guaranteed to be visible
 * after the corresponding wait_on_bit() completes.
 */
static inline void clear_and_wake_up_bit(int bit, unsigned long *word)
{
	clear_bit_unlock(bit, word);
	/* See wake_up_bit() for which memory barrier you need to use. */
	smp_mb__after_atomic();
	wake_up_bit(word, bit);
}

/**
 * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit
 * @bit: the bit of the word being waited on
 * @word: the address of memory containing that bit
 *
 * If the bit is set and can be atomically cleared, any tasks waiting in
 * wait_on_bit() or similar will be woken. This call has the same
 * complete ordering semantics as test_and_clear_bit(). Any changes to
 * memory made before this call are guaranteed to be visible after the
 * corresponding wait_on_bit() completes.
 *
 * Returns %true if the bit was successfully cleared and the wake up was sent.
 */
static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word)
{
	if (!test_and_clear_bit(bit, word))
		return false;
	/* no extra barrier required */
	wake_up_bit(word, bit);
	return true;
}
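
/*
 * Example (an illustrative sketch; "dev" and MY_BIT_PENDING are
 * hypothetical): consume a pending flag at most once, sending the wake
 * up only when this caller actually cleared the bit.
 *
 *		if (test_and_clear_wake_up_bit(MY_BIT_PENDING, &dev->flags))
 *			(this caller owned the flag; waiters were woken)
 */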

/**
 * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters
 * @var: the variable to dec and test
 *
 * Decrements the atomic variable and if it reaches zero, send a wake_up to any
 * processes waiting on the variable.
 *
 * This function has the same complete ordering semantics as atomic_dec_and_test().
 *
 * Returns %true if the variable reaches zero and the wake up was sent.
 */
static inline bool atomic_dec_and_wake_up(atomic_t *var)
{
	if (!atomic_dec_and_test(var))
		return false;
	/* No extra barrier required */
	wake_up_var(var);
	return true;
}
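
/*
 * Example (an illustrative sketch; "obj" with an atomic_t "refs" is
 * hypothetical): the waiter drains outstanding references while the
 * task dropping the last reference sends the wake up.
 *
 *	dropping a reference:
 *		if (atomic_dec_and_wake_up(&obj->refs))
 *			(the count hit zero and the waiter was woken)
 *
 *	waiter:
 *		wait_var_event(&obj->refs, !atomic_read(&obj->refs));
 */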

/**
 * store_release_wake_up - update a variable and send a wake_up
 * @var: the address of the variable to be updated and woken
 * @val: the value to store in the variable.
 *
 * Store the given value in the variable and send a wake up to any tasks
 * waiting on the variable. All necessary barriers are included to ensure
 * the task calling wait_var_event() sees the new value and all values
 * written to memory before this call.
 */
#define store_release_wake_up(var, val)					\
do {									\
	smp_store_release(var, val);					\
	smp_mb();							\
	wake_up_var(var);						\
} while (0)

#endif /* _LINUX_WAIT_BIT_H */