/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists
 *
 * This seemingly RCU-private file must be available to SRCU users
 * because the size of the TREE SRCU srcu_struct structure depends
 * on these definitions.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.net.ibm.com>
 */

#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
#define __INCLUDE_LINUX_RCU_SEGCBLIST_H

#include <linux/types.h>
#include <linux/atomic.h>

/* Simple unsegmented callback lists. */
struct rcu_cblist {
	struct rcu_head *head;
	struct rcu_head **tail;
	long len;
};

#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
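
/*
 * Illustrative sketch only, not part of the kernel API: why the indirect
 * ->tail pointer above makes enqueuing O(1) whether or not the list is
 * empty.  The helper name rcu_cblist_demo_enqueue() is invented for this
 * example; the real unsegmented-list helpers live in
 * kernel/rcu/rcu_segcblist.c.
 */
static inline void rcu_cblist_demo_enqueue(struct rcu_cblist *rclp,
					   struct rcu_head *rhp)
{
	rhp->next = NULL;		/* New callback becomes the last element. */
	*rclp->tail = rhp;		/* Works for empty and non-empty lists alike. */
	rclp->tail = &rhp->next;	/* ->tail always points at the last ->next (or at ->head). */
	rclp->len++;
}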

/* Complicated segmented callback lists.  ;-) */

/*
 * Index values for segments in rcu_segcblist structure.
 *
 * The segments are as follows:
 *
 * [head, *tails[RCU_DONE_TAIL]):
 *	Callbacks whose grace period has elapsed, and thus can be invoked.
 * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]):
 *	Callbacks waiting for the current GP from the current CPU's viewpoint.
 * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]):
 *	Callbacks that arrived before the next GP started, again from
 *	the current CPU's viewpoint.  These can be handled by the next GP.
 * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]):
 *	Callbacks that might have arrived after the next GP started.
 *	There is some uncertainty as to when a given GP starts and
 *	ends, but a CPU knows the exact times if it is the one starting
 *	or ending the GP.  Other CPUs know that the previous GP ends
 *	before the next one starts.
 *
 * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also
 * empty.
 *
 * The ->gp_seq[] array contains the grace-period number at which the
 * corresponding segment of callbacks will be ready to invoke.  A given
 * element of this array is meaningful only when the corresponding segment
 * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks
 * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have
 * not yet been assigned a grace-period number).
 */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_CBLIST_NSEGS	4
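
/*
 * Illustrative sketch only, not part of the kernel API: ->gp_seq[] entries
 * are compared against the current grace-period sequence number using
 * wraparound-safe arithmetic, roughly as below (the kernel expresses this
 * kind of test with its ULONG_CMP_GE() macro).  The helper name
 * rcu_seg_demo_gp_completed() is invented for this example.
 */
static inline bool rcu_seg_demo_gp_completed(unsigned long cur_gp_seq,
					     unsigned long seg_gp_seq)
{
	/* True if cur_gp_seq has reached or passed seg_gp_seq, modulo wrap. */
	return ~0UL / 2 >= cur_gp_seq - seg_gp_seq;
}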

/*
 *                     ==NOCB Offloading state machine==
 *
 *
 *  ----------------------------------------------------------------------------
 *  |                            SEGCBLIST_RCU_CORE                            |
 *  |                                                                          |
 *  |  Callbacks processed by rcu_core() from softirqs or local                |
 *  |  rcuc kthread, without holding nocb_lock.                                |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |       SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED       |
 *  |                                                                          |
 *  |  Callbacks processed by rcu_core() from softirqs or local                |
 *  |  rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads.    |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |       SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED       |
 *  |                          + unparked CB kthread                           |
 *  |                                                                          |
 *  |  CB kthread got unparked and processes callbacks concurrently with       |
 *  |  rcu_core(), holding nocb_lock.                                          |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |                           SEGCBLIST_RCU_CORE |                           |
 *  |                           SEGCBLIST_LOCKING |                            |
 *  |                           SEGCBLIST_OFFLOADED |                          |
 *  |                           SEGCBLIST_KTHREAD_GP                           |
 *  |                          + unparked CB kthread                           |
 *  |                                                                          |
 *  |  GP kthread woke up and acknowledged nocb_lock.                          |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |                           SEGCBLIST_LOCKING |                            |
 *  |                           SEGCBLIST_OFFLOADED |                          |
 *  |                           SEGCBLIST_KTHREAD_GP |                         |
 *  |                          + unparked CB kthread                           |
 *  |                                                                          |
 *  |  Kthreads handle callbacks holding nocb_lock, local rcu_core() stops     |
 *  |  handling callbacks. Enable bypass queueing.                             |
 *  ----------------------------------------------------------------------------
 *
 *
 *                   ==NOCB De-Offloading state machine==
 *
 *
 *  ----------------------------------------------------------------------------
 *  |                           SEGCBLIST_LOCKING |                            |
 *  |                           SEGCBLIST_OFFLOADED |                          |
 *  |                           SEGCBLIST_KTHREAD_GP                           |
 *  |                          + unparked CB kthread                           |
 *  |                                                                          |
 *  |  CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core()     |
 *  |  ignores callbacks. Bypass enqueue is enabled.                           |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |                           SEGCBLIST_RCU_CORE |                           |
 *  |                           SEGCBLIST_LOCKING |                            |
 *  |                           SEGCBLIST_OFFLOADED |                          |
 *  |                           SEGCBLIST_KTHREAD_GP                           |
 *  |                          + unparked CB kthread                           |
 *  |                                                                          |
 *  |  CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core()     |
 *  |  handles callbacks concurrently. Bypass enqueue is disabled.             |
 *  |  Invoke RCU core so we make sure not to preempt it in the middle and     |
 *  |  leave some urgent work unattended within a jiffy.                       |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |                           SEGCBLIST_RCU_CORE |                           |
 *  |                           SEGCBLIST_LOCKING |                            |
 *  |                           SEGCBLIST_KTHREAD_GP                           |
 *  |                          + unparked CB kthread                           |
 *  |                                                                          |
 *  |  CB/GP kthreads and local rcu_core() handle callbacks concurrently       |
 *  |  holding nocb_lock. Wake up GP kthread if necessary.                     |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |                           SEGCBLIST_RCU_CORE |                           |
 *  |                           SEGCBLIST_LOCKING |                            |
 *  |                          + unparked CB kthread                           |
 *  |                                                                          |
 *  |  GP kthread woke up and acknowledged the fact that SEGCBLIST_OFFLOADED   |
 *  |  got cleared. The callbacks from the target CPU will be ignored by the   |
 *  |  GP kthread loop.                                                        |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |                  SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING                  |
 *  |                          + parked CB kthread                             |
 *  |                                                                          |
 *  |  CB kthread is parked. Callbacks processed by rcu_core() from softirqs or|
 *  |  local rcuc kthread, while holding nocb_lock.                            |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |                            SEGCBLIST_RCU_CORE                            |
 *  |                                                                          |
 *  |  Callbacks processed by rcu_core() from softirqs or local                |
 *  |  rcuc kthread, without holding nocb_lock.                                |
 *  ----------------------------------------------------------------------------
 */
#define SEGCBLIST_ENABLED	BIT(0)
#define SEGCBLIST_OFFLOADED	BIT(1)
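
/*
 * Illustrative sketch only, not part of the kernel API: the offloading
 * states above are tracked as bits in the ->flags field of the structure
 * below, tested by masking with the BIT() values just defined.  The helper
 * name seg_demo_flags_offloaded() is invented for this example; the
 * kernel's own flag accessors live under kernel/rcu/.
 */
static inline bool seg_demo_flags_offloaded(u8 flags)
{
	/* True if callback handling has been offloaded to the NOCB kthreads. */
	return flags & SEGCBLIST_OFFLOADED;
}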

struct rcu_segcblist {
	struct rcu_head *head;
	struct rcu_head **tails[RCU_CBLIST_NSEGS];
	unsigned long gp_seq[RCU_CBLIST_NSEGS];
#ifdef CONFIG_RCU_NOCB_CPU
	atomic_long_t len;
#else
	long len;
#endif
	long seglen[RCU_CBLIST_NSEGS];
	u8 flags;
};

#define RCU_SEGCBLIST_INITIALIZER(n) \
{ \
	.head = NULL, \
	.tails[RCU_DONE_TAIL] = &n.head, \
	.tails[RCU_WAIT_TAIL] = &n.head, \
	.tails[RCU_NEXT_READY_TAIL] = &n.head, \
	.tails[RCU_NEXT_TAIL] = &n.head, \
}
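
/*
 * Illustrative sketch only, not part of the kernel API: how the ->tails[]
 * array partitions the single ->head list into the four segments described
 * above.  A segment is empty when its tail pointer equals the tail pointer
 * of the preceding segment (or &->head for RCU_DONE_TAIL), which is also
 * why RCU_SEGCBLIST_INITIALIZER() points every ->tails[] element at &->head
 * (all segments empty).  The helper name below is invented for this
 * example; a helper of the same shape lives in kernel/rcu/rcu_segcblist.c.
 */
static inline bool rcu_segcblist_demo_segempty(struct rcu_segcblist *rsclp,
					       int seg)
{
	if (seg == RCU_DONE_TAIL)
		return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
	return rsclp->tails[seg - 1] == rsclp->tails[seg];
}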

#endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */