/* lib/lwq.c — light-weight single-linked queue (Linux kernel library code) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Light-weight single-linked queue.
 *
 * Entries are enqueued to the head of an llist, with no blocking.
 * This can happen in any context.
 *
 * Entries are dequeued using a spinlock to protect against multiple
 * access. The llist is staged in reverse order, and refreshed
 * from the llist when it exhausts.
 *
 * This is particularly suitable when work items are queued in BH or
 * IRQ context, and where work items are handled one at a time by
 * dedicated threads.
 */
16 #include <linux/rcupdate.h>
17 #include <linux/lwq.h>
/**
 * __lwq_dequeue - take the next entry off @q
 * @q: the queue to dequeue from
 *
 * Return: the llist_node of the least recently enqueued entry, or NULL
 * if the queue is empty.  Dequeuers are serialised by q->lock; enqueues
 * go to the llist q->new without blocking and may race freely.
 */
struct llist_node *__lwq_dequeue(struct lwq *q)
{
	struct llist_node *this;

	/* Cheap lockless check first: avoid taking the lock when idle. */
	if (lwq_empty(q))
		return NULL;
	spin_lock(&q->lock);
	this = q->ready;
	if (!this && !llist_empty(&q->new)) {
		/* ensure queue doesn't appear transiently lwq_empty */
		smp_store_release(&q->ready, (void *)1);
		/*
		 * q->new collects entries newest-first (llist pushes to the
		 * head); reverse it so q->ready hands them out oldest-first.
		 */
		this = llist_reverse_order(llist_del_all(&q->new));
		if (!this)
			/* nothing staged after all: clear the (void *)1 marker */
			q->ready = NULL;
	}
	if (this)
		/* Remainder of the staged list becomes the new ready list. */
		q->ready = llist_next(this);
	spin_unlock(&q->lock);
	return this;
}
EXPORT_SYMBOL_GPL(__lwq_dequeue);
41 /**
42 * lwq_dequeue_all - dequeue all currently enqueued objects
43 * @q: the queue to dequeue from
45 * Remove and return a linked list of llist_nodes of all the objects that were
46 * in the queue. The first on the list will be the object that was least
47 * recently enqueued.
49 struct llist_node *lwq_dequeue_all(struct lwq *q)
51 struct llist_node *r, *t, **ep;
53 if (lwq_empty(q))
54 return NULL;
56 spin_lock(&q->lock);
57 r = q->ready;
58 q->ready = NULL;
59 t = llist_del_all(&q->new);
60 spin_unlock(&q->lock);
61 ep = &r;
62 while (*ep)
63 ep = &(*ep)->next;
64 *ep = llist_reverse_order(t);
65 return r;
67 EXPORT_SYMBOL_GPL(lwq_dequeue_all);
69 #if IS_ENABLED(CONFIG_LWQ_TEST)
71 #include <linux/module.h>
72 #include <linux/slab.h>
73 #include <linux/wait_bit.h>
74 #include <linux/kthread.h>
75 #include <linux/delay.h>
/* Test payload: an lwq entry plus bookkeeping counters. */
struct tnode {
	struct lwq_node n;	/* queue linkage */
	int i;			/* identity, assigned once at allocation */
	int c;			/* number of times this node has been dequeued */
};
82 static int lwq_exercise(void *qv)
84 struct lwq *q = qv;
85 int cnt;
86 struct tnode *t;
88 for (cnt = 0; cnt < 10000; cnt++) {
89 wait_var_event(q, (t = lwq_dequeue(q, struct tnode, n)) != NULL);
90 t->c++;
91 if (lwq_enqueue(&t->n, q))
92 wake_up_var(q);
94 while (!kthread_should_stop())
95 schedule_timeout_idle(1);
96 return 0;
/*
 * Self-test: churn the queue from several kthreads, then verify dequeue
 * ordering, lwq_dequeue_all(), lwq_for_each_safe() and
 * lwq_enqueue_batch() from this (module-init) context.  Results are
 * reported via printk for manual inspection.
 */
static int lwq_test(void)
{
	int i;
	struct lwq q;
	struct llist_node *l, **t1, *t2;
	struct tnode *t;
	struct task_struct *threads[8];

	printk(KERN_INFO "testing lwq....\n");
	lwq_init(&q);
	printk(KERN_INFO " lwq: run some threads\n");
	for (i = 0; i < ARRAY_SIZE(threads); i++)
		threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i);
	/* Feed 100 numbered nodes into the queue for the threads to churn. */
	for (i = 0; i < 100; i++) {
		t = kmalloc(sizeof(*t), GFP_KERNEL);
		if (!t)
			break;
		t->i = i;
		t->c = 0;
		if (lwq_enqueue(&t->n, &q))
			/* queue was waitable-empty: wake any thread in wait_var_event() */
			wake_up_var(&q);
	}
	/* wait for threads to exit */
	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (!IS_ERR_OR_NULL(threads[i]))
			kthread_stop(threads[i]);
	printk(KERN_INFO " lwq: dequeue first 50:");
	for (i = 0; i < 50 ; i++) {
		if (i && (i % 10) == 0) {
			/* break output into rows of 10 */
			printk(KERN_CONT "\n");
			printk(KERN_INFO " lwq: ... ");
		}
		t = lwq_dequeue(&q, struct tnode, n);
		if (t)
			printk(KERN_CONT " %d(%d)", t->i, t->c);
		kfree(t);	/* kfree(NULL) is a no-op, so no guard needed */
	}
	printk(KERN_CONT "\n");
	l = lwq_dequeue_all(&q);
	printk(KERN_INFO " lwq: delete the multiples of 3 (test lwq_for_each_safe())\n");
	lwq_for_each_safe(t, t1, t2, &l, n) {
		if ((t->i % 3) == 0) {
			t->i = -1;	/* mark before free (debug aid) */
			kfree(t);
			/* NOTE(review): NULL presumably tells lwq_for_each_safe()
			 * to unlink this entry — confirm against lwq.h */
			t = NULL;
		}
	}
	/* Re-queue the survivors in one batch, preserving their order. */
	if (l)
		lwq_enqueue_batch(l, &q);
	printk(KERN_INFO " lwq: dequeue remaining:");
	while ((t = lwq_dequeue(&q, struct tnode, n)) != NULL) {
		printk(KERN_CONT " %d", t->i);
		kfree(t);
	}
	printk(KERN_CONT "\n");
	return 0;
}

module_init(lwq_test);
#endif /* CONFIG_LWQ_TEST*/