/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef LWQ_H
#define LWQ_H
/*
 * Light-weight singly-linked queue built from llist
 *
 * Entries can be enqueued from any context with no locking.
 * Entries can be dequeued from process context with integrated locking.
 *
 * This is particularly suitable when work items are queued in
 * BH or IRQ context, and where work items are handled one at a time
 * by dedicated threads.
 */
#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/llist.h>
struct lwq_node {
	struct llist_node node;
};

struct lwq {
	spinlock_t		lock;
	struct llist_node	*ready;		/* entries to be dequeued */
	struct llist_head	new;		/* entries being enqueued */
};
/**
 * lwq_init - initialise a lwq
 * @q: the lwq object
 */
static inline void lwq_init(struct lwq *q)
{
	spin_lock_init(&q->lock);
	q->ready = NULL;
	init_llist_head(&q->new);
}
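
/*
 * A minimal usage sketch.  The work-item type, field names and queue
 * instance below (my_work, my_queue, my_setup) are hypothetical, not
 * part of this API:
 *
 *	struct my_work {
 *		struct lwq_node	lnode;	// embedded queue linkage
 *		int		arg;
 *	};
 *
 *	static struct lwq my_queue;
 *
 *	static void my_setup(void)
 *	{
 *		lwq_init(&my_queue);	// queue starts empty
 *	}
 */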
/**
 * lwq_empty - test if lwq contains any entry
 * @q: the lwq object
 *
 * This empty test contains an acquire barrier so that if a wakeup
 * is sent when lwq_dequeue returns true, it is safe to go to sleep after
 * a test on lwq_empty().
 */
static inline bool lwq_empty(struct lwq *q)
{
	/* acquire ensures ordering wrt lwq_enqueue() */
	return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
}
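
/*
 * A sketch of the intended sleep pattern, assuming a hypothetical
 * worker built on wait_event() from <linux/wait.h>; my_wait and
 * my_worker are illustrative names:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wait);
 *
 *	static void my_worker(void)
 *	{
 *		// The acquire barrier in lwq_empty() pairs with the
 *		// wakeup sent after a successful lwq_enqueue(), so
 *		// sleeping on this condition does not miss entries.
 *		wait_event(my_wait, !lwq_empty(&my_queue));
 *	}
 */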
struct llist_node *__lwq_dequeue(struct lwq *q);
/**
 * lwq_dequeue - dequeue first (oldest) entry from lwq
 * @q: the queue to dequeue from
 * @type: the type of object to return
 * @member: the member in the returned object which is an lwq_node.
 *
 * Remove a single object from the lwq and return it. This will take
 * a spinlock and so must always be called in the same context, typically
 * process context.
 */
#define lwq_dequeue(q, type, member)					\
	({ struct llist_node *_n = __lwq_dequeue(q);			\
	   _n ? container_of(_n, type, member.node) : NULL; })
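
/*
 * A dequeue sketch using the hypothetical my_work type from the
 * sketch above; process() is an illustrative handler:
 *
 *	static void my_handle_one(void)
 *	{
 *		struct my_work *w;
 *
 *		w = lwq_dequeue(&my_queue, struct my_work, lnode);
 *		if (w)
 *			process(w->arg);
 *	}
 */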
struct llist_node *lwq_dequeue_all(struct lwq *q);

/**
 * lwq_for_each_safe - iterate over detached queue allowing deletion
 * @_n:		iterator variable
 * @_t1:	temporary struct llist_node **
 * @_t2:	temporary struct llist_node *
 * @_l:		address of llist_node pointer from lwq_dequeue_all()
 * @_member:	member in _n where lwq_node is found.
 *
 * Iterate over members in a dequeued list.  If the iterator variable
 * is set to NULL, the iterator removes that entry from the queue.
 */
#define lwq_for_each_safe(_n, _t1, _t2, _l, _member)			\
	for (_t1 = (_l);						\
	     *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
		       _t2 = ((*_t1)->next),				\
		       true)						\
	     : false;							\
	     (_n) ? (_t1 = &(_n)->_member.node.next, 0)			\
	     : ((*(_t1) = (_t2)), 0))
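
/*
 * A drain-and-filter sketch, again using the hypothetical my_work
 * type; wanted() and discard() are illustrative.  Entries set to
 * NULL by the body are unlinked from the detached list:
 *
 *	static void my_drain(void)
 *	{
 *		struct llist_node *list = lwq_dequeue_all(&my_queue);
 *		struct llist_node **t1, *t2;
 *		struct my_work *w;
 *
 *		lwq_for_each_safe(w, t1, t2, &list, lnode) {
 *			if (!wanted(w)) {
 *				discard(w);
 *				w = NULL;	// remove from the list
 *			}
 *		}
 *	}
 */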
/**
 * lwq_enqueue - add a new item to the end of the queue
 * @n: the lwq_node embedded in the item to be added
 * @q: the lwq to append to.
 *
 * No locking is needed to append to the queue so this can
 * be called from any context.
 * Return %true if the list may have previously been empty.
 */
static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
{
	/* acquire ensures ordering wrt lwq_dequeue() */
	return llist_add(&n->node, &q->new) &&
		smp_load_acquire(&q->ready) == NULL;
}
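
/*
 * An enqueue sketch, safe from BH or IRQ context, matching the
 * hypothetical worker above: only wake when the queue may have
 * been empty, as signalled by the return value:
 *
 *	static void my_submit(struct my_work *w)
 *	{
 *		if (lwq_enqueue(&w->lnode, &my_queue))
 *			wake_up(&my_wait);
 *	}
 */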
/**
 * lwq_enqueue_batch - add a list of new items to the end of the queue
 * @n: the lwq_node embedded in the first item to be added
 * @q: the lwq to append to.
 *
 * No locking is needed to append to the queue so this can
 * be called from any context.
 * Return %true if the list may have previously been empty.
 */
static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
{
	struct llist_node *e = n;

	/* acquire ensures ordering wrt lwq_dequeue() */
	return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
		smp_load_acquire(&q->ready) == NULL;
}
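
/*
 * A batch sketch with hypothetical names: collect items on a private
 * llist (which llist_add() builds newest-first), then splice them in
 * with a single operation.  lwq_enqueue_batch() reverses the chain
 * internally, so the items still dequeue in submission order:
 *
 *	static void my_submit_many(struct llist_head *pending)
 *	{
 *		struct llist_node *first = llist_del_all(pending);
 *
 *		if (first && lwq_enqueue_batch(first, &my_queue))
 *			wake_up(&my_wait);
 *	}
 */
#endif /* LWQ_H */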