/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLOSURE_H
#define _LINUX_CLOSURE_H

#include <linux/llist.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/workqueue.h>

/*
 * Closure is perhaps the most overused and abused term in computer science, but
 * since I've been unable to come up with anything better you're stuck with it
 * again.
 *
 * What are closures?
 *
 * They embed a refcount. The basic idea is they count "things that are in
 * progress" - in flight bios, some other thread that's doing something else -
 * anything you might want to wait on.
 *
 * The refcount may be manipulated with closure_get() and closure_put().
 * closure_put() is where many of the interesting things happen, when it causes
 * the refcount to go to 0.
 *
 * Closures can be used to wait on things both synchronously and asynchronously,
 * and synchronous and asynchronous use can be mixed without restriction. To
 * wait synchronously, use closure_sync() - you will sleep until your closure's
 * refcount hits 1.
 *
 * To wait asynchronously, use
 *	continue_at(cl, next_function, workqueue);
 *
 * passing it, as you might expect, the function to run when nothing is pending
 * and the workqueue to run that function out of.
 *
 * continue_at() also, critically, requires a 'return' immediately following the
 * location where this macro is referenced, to return to the calling function.
 * There's good reason for this.
 *
 * To use closures asynchronously safely, they must always have a refcount while
 * they are running, owned by the thread that is running them. Otherwise,
 * suppose you submit some bios and wish to have a function run when they all
 * complete:
 *
 * foo_endio(struct bio *bio)
 * {
 *	closure_put(cl);
 * }
 *
 * bio1->bi_endio = foo_endio;
 * bio_submit(bio1);
 *
 * bio2->bi_endio = foo_endio;
 * bio_submit(bio2);
 *
 * continue_at(cl, complete_some_read, system_wq);
 *
 * If the closure's refcount started at 0, complete_some_read() could run before
 * the second bio was submitted - which is almost always not what you want! More
 * importantly, it wouldn't be possible to say whether the original thread or
 * complete_some_read()'s thread owned the closure - and whatever state it was
 * associated with!
 *
 * So, closure_init() initializes a closure's refcount to 1 - and when a
 * closure_fn is run, the refcount will be reset to 1 first.
 *
 * Then, the rule is - if you got the refcount with closure_get(), release it
 * with closure_put() (e.g., in a bio->bi_endio function). If you have a
 * refcount on a closure because you called closure_init() or you were run out
 * of a closure - _always_ use continue_at(). Doing so consistently will help
 * eliminate an entire class of particularly pernicious races.
 *
 * Lastly, you might have a wait list dedicated to a specific event, and have no
 * need for specifying the condition - you just want to wait until someone runs
 * closure_wake_up() on the appropriate wait list. In that case, just use
 * closure_wait(). It will return either true or false, depending on whether the
 * closure was already on a wait list or not - a closure can only be on one wait
 * list at a time.
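 *
 * For example (a sketch only; the waitlist and the struct holding it are
 * hypothetical, not part of this API):
 *
 * Waiting thread:
 *	closure_wait(&t->wait, cl);
 *	closure_sync(cl);
 *
 * Waking thread, once the event has occurred:
 *	closure_wake_up(&t->wait);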
 *
 * Parents:
 *
 * closure_init() takes two arguments - it takes the closure to initialize, and
 * a (possibly null) parent.
 *
 * If parent is non null, the new closure will hold a refcount on its parent for
 * its lifetime; a closure is considered to be "finished" when its refcount hits
 * 0 and the function to run is null. Hence
 *
 * continue_at(cl, NULL, NULL);
 *
 * returns up the (spaghetti) stack of closures, precisely like normal return
 * returns up the C stack. continue_at() with non null fn is better thought of
 * as doing a tail call.
 *
 * All this implies that a closure should typically be embedded in a particular
 * struct (which its refcount will normally control the lifetime of), and that
 * struct can very much be thought of as a stack frame.
 */
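
/*
 * Putting the pieces together, a sketch of typical use (struct foo_read,
 * foo_read_endio() and foo_read_done() are hypothetical, not part of this API):
 *
 * struct foo_read {
 *	struct bio	*bio;
 *	struct closure	cl;
 * };
 *
 * static void foo_read_endio(struct bio *bio)
 * {
 *	struct foo_read *r = bio->bi_private;
 *
 *	closure_put(&r->cl);
 * }
 *
 * static void foo_read_done(struct work_struct *ws)
 * {
 *	struct closure *cl = container_of(ws, struct closure, work);
 *	struct foo_read *r = container_of(cl, struct foo_read, cl);
 *
 *	closure_return(cl);
 * }
 *
 * static void foo_read(struct foo_read *r, struct closure *parent)
 * {
 *	closure_init(&r->cl, parent);
 *
 *	r->bio->bi_private = r;
 *	r->bio->bi_end_io = foo_read_endio;
 *	closure_get(&r->cl);
 *	submit_bio(r->bio);
 *
 *	continue_at(&r->cl, foo_read_done, system_wq);
 * }
 *
 * foo_read_done() runs once the bio's endio has dropped its ref; it then
 * returns to the parent closure, if one was given.
 */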

struct closure;
struct closure_syncer;
typedef void (closure_fn)(struct work_struct *);
extern struct dentry *bcache_debug;

struct closure_waitlist {
	struct llist_head	list;
};

enum closure_state {
	/*
	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
	 * the thread that owns the closure, and cleared by the thread that's
	 * waking up the closure.
	 *
	 * The rest are for debugging and don't affect behaviour:
	 *
	 * CLOSURE_RUNNING: Set when a closure is running (i.e. by
	 * closure_init() and when closure_put() runs the next function), and
	 * must be cleared before remaining hits 0. Primarily to help guard
	 * against incorrect usage and accidentally transferring references.
	 * continue_at() and closure_return() clear it for you; if you're doing
	 * something unusual you can use closure_set_dead() which also helps
	 * annotate where references are being transferred.
	 */
	CLOSURE_BITS_START	= (1U << 26),
	CLOSURE_DESTRUCTOR	= (1U << 26),
	CLOSURE_WAITING		= (1U << 28),
	CLOSURE_RUNNING		= (1U << 30),
};

#define CLOSURE_GUARD_MASK					\
	((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)

#define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING)

struct closure {
	union {
		struct {
			struct workqueue_struct *wq;
			struct closure_syncer	*s;
			struct llist_node	list;
			closure_fn		*fn;
		};
		struct work_struct	work;
	};

	struct closure		*parent;

	atomic_t		remaining;
	bool			closure_get_happened;

#ifdef CONFIG_DEBUG_CLOSURES
#define CLOSURE_MAGIC_DEAD	0xc054dead
#define CLOSURE_MAGIC_ALIVE	0xc054a11e
#define CLOSURE_MAGIC_STACK	0xc05451cc

	unsigned int		magic;
	struct list_head	all;
	unsigned long		ip;
	unsigned long		waiting_on;
#endif
};

void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void __closure_sync(struct closure *cl);

static inline unsigned closure_nr_remaining(struct closure *cl)
{
	return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
}

/**
 * closure_sync - sleep until a closure has nothing left to wait on
 *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
 */
static inline void closure_sync(struct closure *cl)
{
#ifdef CONFIG_DEBUG_CLOSURES
	BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
#endif

	if (cl->closure_get_happened)
		__closure_sync(cl);
}
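
/*
 * Synchronous use, sketched (the bio and its endio handler, which must call
 * closure_put(), are hypothetical):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *
 *	bio->bi_private = &cl;
 *	bio->bi_end_io	= foo_endio;
 *	closure_get(&cl);
 *	submit_bio(bio);
 *
 *	closure_sync(&cl);
 *
 * closure_sync() returns once foo_endio() has dropped the ref taken by
 * closure_get() - i.e. once the bio has completed.
 */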

int __closure_sync_timeout(struct closure *cl, unsigned long timeout);

static inline int closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
#ifdef CONFIG_DEBUG_CLOSURES
	BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
#endif
	return cl->closure_get_happened
		? __closure_sync_timeout(cl, timeout)
		: 0;
}

#ifdef CONFIG_DEBUG_CLOSURES

void closure_debug_create(struct closure *cl);
void closure_debug_destroy(struct closure *cl);

#else

static inline void closure_debug_create(struct closure *cl) {}
static inline void closure_debug_destroy(struct closure *cl) {}

#endif

static inline void closure_set_ip(struct closure *cl)
{
#ifdef CONFIG_DEBUG_CLOSURES
	cl->ip = _THIS_IP_;
#endif
}

static inline void closure_set_ret_ip(struct closure *cl)
{
#ifdef CONFIG_DEBUG_CLOSURES
	cl->ip = _RET_IP_;
#endif
}

static inline void closure_set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_DEBUG_CLOSURES
	cl->waiting_on = f;
#endif
}

static inline void closure_set_stopped(struct closure *cl)
{
	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}

static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
				  struct workqueue_struct *wq)
{
	closure_set_ip(cl);
	cl->fn = fn;
	cl->wq = wq;
}

static inline void closure_queue(struct closure *cl)
{
	struct workqueue_struct *wq = cl->wq;

	/*
	 * Changes made to closure, work_struct, or a couple of other structs
	 * may cause work.func to not point at the right location.
	 */
	BUILD_BUG_ON(offsetof(struct closure, fn)
		     != offsetof(struct work_struct, func));

	if (wq) {
		INIT_WORK(&cl->work, cl->work.func);
		BUG_ON(!queue_work(wq, &cl->work));
	} else
		cl->fn(&cl->work);
}

/**
 * closure_get - increment a closure's refcount
 */
static inline void closure_get(struct closure *cl)
{
	cl->closure_get_happened = true;

#ifdef CONFIG_DEBUG_CLOSURES
	BUG_ON((atomic_inc_return(&cl->remaining) &
		CLOSURE_REMAINING_MASK) <= 1);
#else
	atomic_inc(&cl->remaining);
#endif
}

/**
 * closure_get_not_zero - increment a closure's refcount if it's nonzero
 *
 * Returns true on success, false if the refcount was already zero.
 */
static inline bool closure_get_not_zero(struct closure *cl)
{
	unsigned old = atomic_read(&cl->remaining);
	do {
		if (!(old & CLOSURE_REMAINING_MASK))
			return false;

	} while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));

	return true;
}

/**
 * closure_init - Initialize a closure, setting the refcount to 1
 * @cl:		closure to initialize
 * @parent:	parent of the new closure. cl will take a refcount on it for its
 *		lifetime; may be NULL.
 */
static inline void closure_init(struct closure *cl, struct closure *parent)
{
	cl->fn = NULL;
	cl->parent = parent;
	if (parent)
		closure_get(parent);

	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
	cl->closure_get_happened = false;

	closure_debug_create(cl);
	closure_set_ip(cl);
}

static inline void closure_init_stack(struct closure *cl)
{
	memset(cl, 0, sizeof(struct closure));
	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
#ifdef CONFIG_DEBUG_CLOSURES
	cl->magic = CLOSURE_MAGIC_STACK;
#endif
}

static inline void closure_init_stack_release(struct closure *cl)
{
	memset(cl, 0, sizeof(struct closure));
	atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
#ifdef CONFIG_DEBUG_CLOSURES
	cl->magic = CLOSURE_MAGIC_STACK;
#endif
}

/**
 * closure_wake_up - wake up all closures on a wait list,
 *		     with memory barrier
 */
static inline void closure_wake_up(struct closure_waitlist *list)
{
	/* Memory barrier for the wait list */
	smp_mb();
	__closure_wake_up(list);
}

#define CLOSURE_CALLBACK(name)	void name(struct work_struct *ws)
#define closure_type(name, type, member)				\
	struct closure *cl = container_of(ws, struct closure, work);	\
	type *name = container_of(cl, type, member)
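
/*
 * These remove the container_of() boilerplate from closure functions; e.g. the
 * hypothetical foo_read_done() sketched above could be written as:
 *
 * CLOSURE_CALLBACK(foo_read_done)
 * {
 *	closure_type(r, struct foo_read, cl);
 *
 *	closure_return(&r->cl);
 * }
 */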

/**
 * continue_at - jump to another function with barrier
 *
 * After @cl is no longer waiting on anything (i.e. all outstanding refs have
 * been dropped with closure_put()), it will resume execution at @fn running out
 * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
 *
 * This is because after calling continue_at() you no longer have a ref on @cl,
 * and whatever @cl owns may be freed out from under you - a running closure fn
 * has a ref on its own closure which continue_at() drops.
 *
 * Note you are expected to immediately return after using this macro.
 */
#define continue_at(_cl, _fn, _wq)					\
do {									\
	set_closure_fn(_cl, _fn, _wq);					\
	closure_sub(_cl, CLOSURE_RUNNING + 1);				\
} while (0)
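
/*
 * The expected shape, sketched (struct foo_op and both functions are
 * hypothetical; op->bio's endio handler drops its ref with closure_put()):
 *
 * CLOSURE_CALLBACK(foo_write)
 * {
 *	closure_type(op, struct foo_op, cl);
 *
 *	closure_get(&op->cl);
 *	submit_bio(op->bio);
 *	continue_at(&op->cl, foo_write_done, system_wq);
 * }
 *
 * The function ends immediately after continue_at(); op must not be touched
 * again, since this thread's ref on the closure has been dropped.
 */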

/**
 * closure_return - finish execution of a closure
 *
 * This is used to indicate that @cl is finished: when all outstanding refs on
 * @cl have been dropped @cl's ref on its parent closure (as passed to
 * closure_init()) will be dropped, if one was specified - thus this can be
 * thought of as returning to the parent closure.
 */
#define closure_return(_cl)	continue_at((_cl), NULL, NULL)

void closure_return_sync(struct closure *cl);

/**
 * continue_at_nobarrier - jump to another function without barrier
 *
 * Causes @fn to be executed out of @cl, in @wq context (or called directly if
 * @wq is NULL).
 *
 * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
 * thus it's not safe to touch anything protected by @cl after a
 * continue_at_nobarrier().
 */
#define continue_at_nobarrier(_cl, _fn, _wq)				\
do {									\
	set_closure_fn(_cl, _fn, _wq);					\
	closure_queue(_cl);						\
} while (0)

/**
 * closure_return_with_destructor - finish execution of a closure,
 *				    with destructor
 *
 * Works like closure_return(), except @destructor will be called when all
 * outstanding refs on @cl have been dropped; @destructor may be used to safely
 * free the memory occupied by @cl, and it is called with the ref on the parent
 * closure still held - so @destructor could safely return an item to a
 * freelist protected by @cl's parent.
 */
#define closure_return_with_destructor(_cl, _destructor)		\
do {									\
	set_closure_fn(_cl, _destructor, NULL);				\
	closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);	\
} while (0)
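
/*
 * Sketch of a destructor (hypothetical, reusing struct foo_op from above;
 * assumes op was allocated with kmalloc()):
 *
 * CLOSURE_CALLBACK(foo_op_free)
 * {
 *	closure_type(op, struct foo_op, cl);
 *
 *	closure_debug_destroy(&op->cl);
 *	kfree(op);
 * }
 *
 * ...and at the end of the closure's final stage:
 *
 *	closure_return_with_destructor(&op->cl, foo_op_free);
 */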

/**
 * closure_call - execute @fn out of a new, uninitialized closure
 *
 * Typically used when running out of one closure, and we want to run @fn
 * asynchronously out of a new closure - @parent will then wait for @cl to
 * finish.
 */
static inline void closure_call(struct closure *cl, closure_fn fn,
				struct workqueue_struct *wq,
				struct closure *parent)
{
	closure_init(cl, parent);
	continue_at_nobarrier(cl, fn, wq);
}
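
/*
 * E.g., from within a running closure (sketch; op->child and the functions are
 * hypothetical):
 *
 *	closure_call(&op->child, foo_child_fn, system_wq, &op->cl);
 *	continue_at(&op->cl, foo_after_child, system_wq);
 *
 * foo_after_child() will not run until the child finishes, because the child
 * holds a ref on &op->cl for its lifetime.
 */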

#define __closure_wait_event(waitlist, _cond)				\
do {									\
	struct closure cl;						\
									\
	closure_init_stack(&cl);					\
									\
	while (1) {							\
		closure_wait(waitlist, &cl);				\
		if (_cond)						\
			break;						\
		closure_sync(&cl);					\
	}								\
	closure_wake_up(waitlist);					\
	closure_sync(&cl);						\
} while (0)

#define closure_wait_event(waitlist, _cond)				\
do {									\
	if (!(_cond))							\
		__closure_wait_event(waitlist, _cond);			\
} while (0)
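
/*
 * Usage mirrors wait_event() (sketch; the waitlist and condition are
 * hypothetical):
 *
 *	closure_wait_event(&dev->wait, !atomic_read(&dev->in_flight));
 *
 * Anything that changes the result of the condition must then call
 * closure_wake_up(&dev->wait), or waiters may sleep forever.
 */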

#define __closure_wait_event_timeout(waitlist, _cond, _until)		\
({									\
	struct closure cl;						\
	long _t;							\
									\
	closure_init_stack(&cl);					\
									\
	while (1) {							\
		closure_wait(waitlist, &cl);				\
		if (_cond) {						\
			_t = max_t(long, 1L, _until - jiffies);		\
			break;						\
		}							\
		_t = max_t(long, 0L, _until - jiffies);			\
		if (!_t)						\
			break;						\
		closure_sync_timeout(&cl, _t);				\
	}								\
	closure_wake_up(waitlist);					\
	closure_sync(&cl);						\
	_t;								\
})

/*
 * Returns 0 if the timeout expired, or the remaining time in jiffies (at
 * least 1) if the condition became true.
 */
#define closure_wait_event_timeout(waitlist, _cond, _timeout)		\
({									\
	unsigned long _until = jiffies + _timeout;			\
	(_cond)								\
		? max_t(long, 1L, _until - jiffies)			\
		: __closure_wait_event_timeout(waitlist, _cond, _until);\
})

#endif /* _LINUX_CLOSURE_H */