#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status. */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */
/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2
#include <linux/config.h>
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	struct task_struct *task;
	wait_queue_func_t func;
	struct list_head task_list;
};
struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};
struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
/*
 * Macros for declaration and initialisation of the datatypes
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.task		= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }
#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= SPIN_LOCK_UNLOCKED,				\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }
static inline void init_waitqueue_head(wait_queue_head_t *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->task_list);
}
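/*
 * Example (not part of wait.h): a minimal sketch of the two ways to set up a
 * wait queue head.  DECLARE_WAIT_QUEUE_HEAD() defines and initialises a
 * file-scope head at compile time; init_waitqueue_head() initialises a head
 * embedded in another object at run time.  The names my_event_wq, struct
 * my_dev and my_dev_setup() are hypothetical; the later sketches below reuse
 * struct my_dev.
 */
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(my_event_wq);	/* statically initialised head */

struct my_dev {
	wait_queue_head_t read_wq;	/* embedded head, initialised below */
	int data_ready;			/* the condition the waiters test */
};

static void my_dev_setup(struct my_dev *dev)
{
	init_waitqueue_head(&dev->read_wq);
	dev->data_ready = 0;
}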
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->task = p;
	q->func = default_wake_function;
}
static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->task = NULL;
	q->func = func;
}
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
/*
 * Used to distinguish between sync and async io wait context:
 *  sync i/o typically specifies a NULL wait queue entry or a wait
 *  queue entry bound to a task (current task) to wake up.
 *  aio specifies a wait queue entry with an async notification
 *  callback routine, not associated with any task.
 */
#define is_sync_wait(wait)	(!(wait) || ((wait)->task))
extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait));
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}
/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
						wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}
static inline void __remove_wait_queue(wait_queue_head_t *head,
						wait_queue_t *old)
{
	list_del(&old->task_list);
}
extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
void FASTCALL(wake_up_bit(void *, int));
int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
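/*
 * Example (not part of wait.h): a minimal sketch of the waker side.  The
 * producer updates the data that the sleepers' condition tests and only then
 * calls wake_up_interruptible(), so every woken task re-evaluates a condition
 * that is already true.  Uses the hypothetical struct my_dev from the sketch
 * above; my_dev_data_arrived() is likewise hypothetical.
 */
static void my_dev_data_arrived(struct my_dev *dev)
{
	dev->data_ready = 1;			/* change the condition first ... */
	wake_up_interruptible(&dev->read_wq);	/* ... then wake the sleepers   */
}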
#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
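/*
 * Example (not part of wait.h): a minimal sketch of the sleeping side using
 * wait_event().  The caller blocks in TASK_UNINTERRUPTIBLE until data_ready
 * becomes non-zero; the condition is re-checked on every wake_up() of
 * read_wq.  Uses the hypothetical struct my_dev from the sketch above.
 */
static void my_dev_wait_for_data(struct my_dev *dev)
{
	/* Sleeps only if the condition is initially false. */
	wait_event(dev->read_wq, dev->data_ready != 0);
}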
#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})
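/*
 * Example (not part of wait.h): a minimal sketch of a bounded wait.  The
 * return value distinguishes "condition became true" (remaining jiffies,
 * non-zero) from "timeout elapsed" (0).  Uses the hypothetical struct my_dev
 * from the sketch above; HZ jiffies is roughly one second.
 */
static int my_dev_wait_for_data_timeout(struct my_dev *dev)
{
	long left = wait_event_timeout(dev->read_wq, dev->data_ready != 0, HZ);

	if (left == 0)
		return -ETIMEDOUT;	/* a second passed without data */
	return 0;			/* data arrived with 'left' jiffies to spare */
}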
#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})
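/*
 * Example (not part of wait.h): a minimal sketch of an interruptible wait,
 * as a read()-style path would use it.  A pending signal aborts the sleep
 * and the -ERESTARTSYS return is propagated so the syscall can be restarted.
 * Uses the hypothetical struct my_dev from the sketch above.
 */
static int my_dev_wait_for_data_interruptible(struct my_dev *dev)
{
	int ret = wait_event_interruptible(dev->read_wq, dev->data_ready != 0);

	if (ret)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	return 0;			/* condition became true */
}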
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
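/*
 * Example (not part of wait.h): a minimal sketch showing how a caller can
 * tell the three outcomes of wait_event_interruptible_timeout() apart:
 * a signal (-ERESTARTSYS), a timeout (0), or success (remaining jiffies).
 * Uses the hypothetical struct my_dev from the sketch above.
 */
static int my_dev_wait_interruptible_timeout(struct my_dev *dev)
{
	long ret = wait_event_interruptible_timeout(dev->read_wq,
						    dev->data_ready != 0, HZ);

	if (ret < 0)
		return ret;		/* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* a second elapsed, still no data */
	return 0;			/* data arrived before the timeout */
}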
#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})
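/*
 * Example (not part of wait.h): a minimal sketch of an exclusive wait.
 * Exclusive waiters are queued at the tail with WQ_FLAG_EXCLUSIVE set, so a
 * plain wake_up() wakes only one of them - useful when many workers wait for
 * the same resource and waking all of them would only cause a thundering
 * herd.  my_resource_wq, my_resource_free and my_claim_resource() are
 * hypothetical; a real driver would protect the flag with a lock.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_resource_wq);
static int my_resource_free = 1;

static int my_claim_resource(void)
{
	int ret = wait_event_interruptible_exclusive(my_resource_wq,
						     my_resource_free != 0);
	if (ret)
		return ret;		/* interrupted by a signal */
	my_resource_free = 0;		/* we are the single woken waiter */
	return 0;
}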
/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}
/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t *wait)
{
	__remove_wait_queue(q, wait);
}
/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces during 2.7.
 */
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout));
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
						    signed long timeout));
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void FASTCALL(prepare_to_wait(wait_queue_head_t *q,
			      wait_queue_t *wait, int state));
void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
					wait_queue_t *wait, int state));
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
#define DEFINE_WAIT(name)						\
	wait_queue_t name = {						\
		.task		= current,				\
		.func		= autoremove_wake_function,		\
		.task_list	= {	.next = &(name).task_list,	\
					.prev = &(name).task_list,	\
				},					\
	}
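/*
 * Example (not part of wait.h): a minimal sketch of the open-coded wait loop
 * that the wait_event* macros expand to, using DEFINE_WAIT(),
 * prepare_to_wait() and finish_wait() directly.  This form is useful when a
 * driver has to drop a lock or do other work between the condition check and
 * the call to schedule().  Uses the hypothetical struct my_dev from the
 * sketch above.
 */
static int my_dev_wait_open_coded(struct my_dev *dev)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(&dev->read_wq, &wait, TASK_INTERRUPTIBLE);
		if (dev->data_ready)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&dev->read_wq, &wait);
	return ret;
}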
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.task	= current,				\
			.func	= wake_bit_function,			\
			.task_list =					\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
#define init_wait(wait)							\
	do {								\
		(wait)->task = current;					\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
	} while (0)
/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use.  This
 * is the part of the hashtable's accessor API that waits on a bit.
 * Use wait_on_bit() in threads that are waiting for the bit to clear
 * but have no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
			      int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
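/*
 * Example (not part of wait.h): a minimal sketch of waiting for a bit to
 * clear.  The @action callback decides how to sleep once the bit is found
 * set; the simple my_bit_wait() below just schedules.  MY_BUSY_BIT, my_flags,
 * my_bit_wait() and my_wait_until_idle() are hypothetical.
 */
#define MY_BUSY_BIT	0
static unsigned long my_flags;

static int my_bit_wait(void *word)
{
	schedule();			/* sleep in the state chosen by @mode */
	return 0;			/* 0 = keep waiting; non-zero aborts the wait */
}

static void my_wait_until_idle(void)
{
	/* Blocks (uninterruptibly) until MY_BUSY_BIT in my_flags is clear. */
	wait_on_bit(&my_flags, MY_BUSY_BIT, my_bit_wait, TASK_UNINTERRUPTIBLE);
}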
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use.  This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance when using the bit as a lock.
 * Use wait_on_bit_lock() in threads that wait for the bit to clear
 * with the intention of setting it, and of clearing it again when done.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				   int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
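/*
 * Example (not part of wait.h): a minimal sketch of using a bit as a lock.
 * wait_on_bit_lock() returns with the bit set and owned by the caller; the
 * unlock side clears it, issues a memory barrier and wakes any waiters with
 * wake_up_bit().  MY_LOCK_BIT, my_bit_lock() and my_bit_unlock() are
 * hypothetical and reuse my_flags and my_bit_wait() from the sketch above.
 */
#define MY_LOCK_BIT	1

static void my_bit_lock(void)
{
	wait_on_bit_lock(&my_flags, MY_LOCK_BIT, my_bit_wait,
			 TASK_UNINTERRUPTIBLE);
}

static void my_bit_unlock(void)
{
	clear_bit(MY_LOCK_BIT, &my_flags);
	smp_mb__after_clear_bit();	/* order the clear before waking waiters */
	wake_up_bit(&my_flags, MY_LOCK_BIT);
}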
#endif /* __KERNEL__ */