/*
 * RCU segmented callback lists, function definitions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>

#include "rcu_segcblist.h"

/* Initialize simple callback list. */
void rcu_cblist_init(struct rcu_cblist *rclp)
{
	rclp->head = NULL;
	rclp->tail = &rclp->head;
	rclp->len = 0;
	rclp->len_lazy = 0;
}

/*
 * Dequeue the oldest rcu_head structure from the specified callback
 * list.  This function assumes that the callback is non-lazy, but
 * the caller can later invoke rcu_cblist_dequeued_lazy() if it
 * finds otherwise (and if it cares about laziness).  This allows
 * different users to have different ways of determining laziness.
 */
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
{
	struct rcu_head *rhp;

	rhp = rclp->head;
	if (!rhp)
		return NULL;
	rclp->len--;
	rclp->head = rhp->next;
	if (!rclp->head)
		rclp->tail = &rclp->head;
	return rhp;
}

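/*
 * Usage sketch for rcu_cblist_dequeue(): drain a simple callback list,
 * invoking each callback in turn.  This example_invoke_cbs() helper is
 * hypothetical, standing in for the callback-invocation paths of the
 * various RCU flavors.
 */
static void __maybe_unused example_invoke_cbs(struct rcu_cblist *rclp)
{
	struct rcu_head *rhp;

	while ((rhp = rcu_cblist_dequeue(rclp)) != NULL)
		rhp->func(rhp); /* Invoke the callback on its rcu_head. */
}
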
/*
 * Initialize an rcu_segcblist structure.
 */
void rcu_segcblist_init(struct rcu_segcblist *rsclp)
{
	int i;

	BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq));
	BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq));
	rsclp->head = NULL;
	for (i = 0; i < RCU_CBLIST_NSEGS; i++)
		rsclp->tails[i] = &rsclp->head;
	rsclp->len = 0;
	rsclp->len_lazy = 0;
}

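/*
 * Illustrative sketch (example_check_init_invariant() is hypothetical):
 * immediately after rcu_segcblist_init(), every ->tails[] element
 * references ->head, so all segments are empty and the list is enabled.
 */
static void __maybe_unused example_check_init_invariant(struct rcu_segcblist *rsclp)
{
	int i;

	for (i = 0; i < RCU_CBLIST_NSEGS; i++)
		WARN_ON_ONCE(rsclp->tails[i] != &rsclp->head);
	WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
}
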
/*
 * Disable the specified rcu_segcblist structure, so that callbacks can
 * no longer be posted to it.  This structure must be empty.
 */
void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
{
	WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
	WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
	WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp));
	rsclp->tails[RCU_NEXT_TAIL] = NULL;
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are ready to be invoked?
 */
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_is_enabled(rsclp) &&
	       &rsclp->head != rsclp->tails[RCU_DONE_TAIL];
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are still pending, that is, not yet ready to be invoked?
 */
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_is_enabled(rsclp) &&
	       !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
}

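/*
 * Usage sketch combining the two predicates above (example_cbs_state()
 * is hypothetical): ready callbacks can be invoked immediately, while
 * pending callbacks must first wait out a grace period.
 */
static void __maybe_unused example_cbs_state(struct rcu_segcblist *rsclp)
{
	if (rcu_segcblist_ready_cbs(rsclp))
		pr_info("Callbacks are ready to invoke.\n");
	else if (rcu_segcblist_pend_cbs(rsclp))
		pr_info("Callbacks are still waiting for a grace period.\n");
}
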
/*
 * Return a pointer to the first callback in the specified rcu_segcblist
 * structure.  This is useful for diagnostics.
 */
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp)
{
	if (rcu_segcblist_is_enabled(rsclp))
		return rsclp->head;
	return NULL;
}

/*
 * Return a pointer to the first pending callback in the specified
 * rcu_segcblist structure.  This is useful just after posting a given
 * callback -- if that callback is the first pending callback, then
 * you cannot rely on someone else having already started up the required
 * grace period.
 */
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp)
{
	if (rcu_segcblist_is_enabled(rsclp))
		return *rsclp->tails[RCU_DONE_TAIL];
	return NULL;
}

/*
 * Enqueue the specified callback onto the specified rcu_segcblist
 * structure, updating accounting as needed.  Note that the ->len
 * field may be accessed locklessly, hence the WRITE_ONCE().
 * The ->len field is used by rcu_barrier() and friends to determine
 * if it must post a callback on this structure, and it is OK
 * for rcu_barrier() to sometimes post callbacks needlessly, but
 * absolutely not OK for it to ever miss posting a callback.
 */
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy)
{
	WRITE_ONCE(rsclp->len, rsclp->len + 1); /* ->len sampled locklessly. */
	if (lazy)
		rsclp->len_lazy++;
	smp_mb(); /* Ensure counts are updated before callback is enqueued. */
	rhp->next = NULL;
	*rsclp->tails[RCU_NEXT_TAIL] = rhp;
	rsclp->tails[RCU_NEXT_TAIL] = &rhp->next;
}

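/*
 * Usage sketch for rcu_segcblist_enqueue(): post a non-lazy callback in
 * the way that a call_rcu()-like primitive would, assuming the caller
 * provides mutual exclusion (for example, by disabling interrupts on
 * the CPU owning this list).  The example_call() name is hypothetical.
 */
static void __maybe_unused example_call(struct rcu_segcblist *rsclp,
					struct rcu_head *rhp,
					rcu_callback_t func)
{
	rhp->func = func;
	rcu_segcblist_enqueue(rsclp, rhp, false);
}
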
/*
 * Entrain the specified callback onto the specified rcu_segcblist at
 * the end of the last non-empty segment.  If the entire rcu_segcblist
 * is empty, make no change, but return false.
 *
 * This is intended for use by rcu_barrier()-like primitives, -not-
 * for normal grace-period use.  IMPORTANT:  The callback you enqueue
 * will wait for all prior callbacks, NOT necessarily for a grace
 * period.  You have been warned.
 */
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy)
{
	int i;

	if (rcu_segcblist_n_cbs(rsclp) == 0)
		return false;
	WRITE_ONCE(rsclp->len, rsclp->len + 1);
	if (lazy)
		rsclp->len_lazy++;
	smp_mb(); /* Ensure counts are updated before callback is entrained. */
	rhp->next = NULL;
	for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] != rsclp->tails[i - 1])
			break;
	*rsclp->tails[i] = rhp;
	for (; i <= RCU_NEXT_TAIL; i++)
		rsclp->tails[i] = &rhp->next;
	return true;
}

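/*
 * Usage sketch for rcu_segcblist_entrain(): an rcu_barrier()-like
 * primitive posts its completion callback behind all callbacks already
 * on the list, invoking it directly if the list is empty.  The
 * example_entrain_barrier_cb() name is hypothetical.
 */
static void __maybe_unused example_entrain_barrier_cb(struct rcu_segcblist *rsclp,
						      struct rcu_head *rhp,
						      rcu_callback_t func)
{
	rhp->func = func;
	if (!rcu_segcblist_entrain(rsclp, rhp, false))
		func(rhp); /* Empty list, so no callbacks to wait for. */
}
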
/*
 * Extract only the counts from the specified rcu_segcblist structure,
 * and place them in the specified rcu_cblist structure.  This function
 * supports both callback orphaning and invocation, hence the separation
 * of counts and callbacks.  (Callbacks ready for invocation must be
 * orphaned and adopted separately from pending callbacks, but counts
 * apply to all callbacks.  Locking must be used to make sure that
 * both orphaned-callbacks lists are consistent.)
 */
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp)
{
	rclp->len_lazy += rsclp->len_lazy;
	rclp->len += rsclp->len;
	rsclp->len_lazy = 0;
	WRITE_ONCE(rsclp->len, 0); /* ->len sampled locklessly. */
}

/*
 * Extract only those callbacks ready to be invoked from the specified
 * rcu_segcblist structure and place them in the specified rcu_cblist
 * structure.
 */
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp)
{
	int i;

	if (!rcu_segcblist_ready_cbs(rsclp))
		return; /* Nothing to do. */
	*rclp->tail = rsclp->head;
	rsclp->head = *rsclp->tails[RCU_DONE_TAIL];
	*rsclp->tails[RCU_DONE_TAIL] = NULL;
	rclp->tail = rsclp->tails[RCU_DONE_TAIL];
	for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
			rsclp->tails[i] = &rsclp->head;
}

/*
 * Extract only those callbacks still pending (not yet ready to be
 * invoked) from the specified rcu_segcblist structure and place them in
 * the specified rcu_cblist structure.  Note that this loses information
 * about any callbacks that might have been partway done waiting for
 * their grace period.  Too bad!  They will have to start over.
 */
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp)
{
	int i;

	if (!rcu_segcblist_pend_cbs(rsclp))
		return; /* Nothing to do. */
	*rclp->tail = *rsclp->tails[RCU_DONE_TAIL];
	rclp->tail = rsclp->tails[RCU_NEXT_TAIL];
	*rsclp->tails[RCU_DONE_TAIL] = NULL;
	for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++)
		rsclp->tails[i] = rsclp->tails[RCU_DONE_TAIL];
}

/*
 * Insert counts from the specified rcu_cblist structure in the
 * specified rcu_segcblist structure.
 */
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp)
{
	rsclp->len_lazy += rclp->len_lazy;
	/* ->len sampled locklessly. */
	WRITE_ONCE(rsclp->len, rsclp->len + rclp->len);
	rclp->len_lazy = 0;
	rclp->len = 0;
}

/*
 * Move callbacks from the specified rcu_cblist to the beginning of the
 * done-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp)
{
	int i;

	if (!rclp->head)
		return; /* No callbacks to move. */
	*rclp->tail = rsclp->head;
	rsclp->head = rclp->head;
	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
		if (&rsclp->head == rsclp->tails[i])
			rsclp->tails[i] = rclp->tail;
		else
			break;
	rclp->head = NULL;
	rclp->tail = &rclp->head;
}

/*
 * Move callbacks from the specified rcu_cblist to the end of the
 * new-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp)
{
	if (!rclp->head)
		return; /* Nothing to do. */
	*rsclp->tails[RCU_NEXT_TAIL] = rclp->head;
	rsclp->tails[RCU_NEXT_TAIL] = rclp->tail;
	rclp->head = NULL;
	rclp->tail = &rclp->head;
}

/*
 * Advance the callbacks in the specified rcu_segcblist structure based
 * on the current value passed in for the grace-period counter.
 */
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
{
	int i, j;

	WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
		return;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL segment.
	 */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
		if (ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
			break;
		rsclp->tails[RCU_DONE_TAIL] = rsclp->tails[i];
	}

	/* If no callbacks moved, nothing more need be done. */
	if (i == RCU_WAIT_TAIL)
		return;

	/* Clean up tail pointers that might have been misordered above. */
	for (j = RCU_WAIT_TAIL; j < i; j++)
		rsclp->tails[j] = rsclp->tails[RCU_DONE_TAIL];

	/*
	 * Callbacks moved, so clean up the misordered ->tails[] pointers
	 * that now point into the middle of the list of ready-to-invoke
	 * callbacks.  The overall effect is to copy down the later pointers
	 * into the gap that was created by the now-ready segments.
	 */
	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
		if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
			break; /* No more callbacks. */
		rsclp->tails[j] = rsclp->tails[i];
		rsclp->gp_seq[j] = rsclp->gp_seq[i];
	}
}

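/*
 * Worked example for rcu_segcblist_advance(), with hypothetical
 * ->gp_seq[] values: suppose RCU_WAIT_TAIL callbacks await grace
 * period 8 and RCU_NEXT_READY_TAIL callbacks await grace period 12.
 * A call with seq == 8 moves the RCU_WAIT_TAIL callbacks into
 * RCU_DONE_TAIL, then copies the RCU_NEXT_READY_TAIL tail pointer and
 * sequence number down into the vacated RCU_WAIT_TAIL slot, so the
 * remaining pending segments slide toward the head of the list.
 */
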
341 * "Accelerate" callbacks based on more-accurate grace-period information.
342 * The reason for this is that RCU does not synchronize the beginnings and
343 * ends of grace periods, and that callbacks are posted locally. This in
344 * turn means that the callbacks must be labelled conservatively early
345 * on, as getting exact information would degrade both performance and
346 * scalability. When more accurate grace-period information becomes
347 * available, previously posted callbacks can be "accelerated", marking
348 * them to complete at the end of the earlier grace period.
350 * This function operates on an rcu_segcblist structure, and also the
351 * grace-period sequence number seq at which new callbacks would become
352 * ready to invoke. Returns true if there are callbacks that won't be
353 * ready to invoke until seq, false otherwise.
355 bool rcu_segcblist_accelerate(struct rcu_segcblist
*rsclp
, unsigned long seq
)
359 WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp
));
360 if (rcu_segcblist_restempty(rsclp
, RCU_DONE_TAIL
))
364 * Find the segment preceding the oldest segment of callbacks
365 * whose ->gp_seq[] completion is at or after that passed in via
366 * "seq", skipping any empty segments. This oldest segment, along
367 * with any later segments, can be merged in with any newly arrived
368 * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq"
369 * as their ->gp_seq[] grace-period completion sequence number.
371 for (i
= RCU_NEXT_READY_TAIL
; i
> RCU_DONE_TAIL
; i
--)
372 if (rsclp
->tails
[i
] != rsclp
->tails
[i
- 1] &&
373 ULONG_CMP_LT(rsclp
->gp_seq
[i
], seq
))
377 * If all the segments contain callbacks that correspond to
378 * earlier grace-period sequence numbers than "seq", leave.
379 * Assuming that the rcu_segcblist structure has enough
380 * segments in its arrays, this can only happen if some of
381 * the non-done segments contain callbacks that really are
382 * ready to invoke. This situation will get straightened
383 * out by the next call to rcu_segcblist_advance().
385 * Also advance to the oldest segment of callbacks whose
386 * ->gp_seq[] completion is at or after that passed in via "seq",
387 * skipping any empty segments.
389 if (++i
>= RCU_NEXT_TAIL
)
393 * Merge all later callbacks, including newly arrived callbacks,
394 * into the segment located by the for-loop above. Assign "seq"
395 * as the ->gp_seq[] value in order to correctly handle the case
396 * where there were no pending callbacks in the rcu_segcblist
397 * structure other than in the RCU_NEXT_TAIL segment.
399 for (; i
< RCU_NEXT_TAIL
; i
++) {
400 rsclp
->tails
[i
] = rsclp
->tails
[RCU_NEXT_TAIL
];
401 rsclp
->gp_seq
[i
] = seq
;
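/*
 * Worked example for rcu_segcblist_accelerate(), again with
 * hypothetical numbers: if RCU_WAIT_TAIL awaits grace period 8,
 * RCU_NEXT_READY_TAIL awaits grace period 12, and seq == 12, then the
 * callbacks in RCU_NEXT_TAIL are merged into RCU_NEXT_READY_TAIL and
 * marked to be invoked at the end of grace period 12, rather than
 * conservatively waiting for a later grace period.  The function
 * returns true because callbacks now wait on seq.
 */
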
/*
 * Scan the specified rcu_segcblist structure for callbacks that need
 * a grace period later than the one specified by "seq".  We don't look
 * at the RCU_DONE_TAIL or RCU_NEXT_TAIL segments because they don't
 * have a grace-period sequence number.
 */
bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp,
				    unsigned long seq)
{
	int i;

	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
		if (rsclp->tails[i - 1] != rsclp->tails[i] &&
		    ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
			return true;
	return false;
}

/*
 * Merge the source rcu_segcblist structure into the destination
 * rcu_segcblist structure, then initialize the source.  Any pending
 * callbacks from the source get to start over.  It is best to
 * advance and accelerate both the destination and the source
 * before merging.
 */
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp)
{
	struct rcu_cblist donecbs;
	struct rcu_cblist pendcbs;

	rcu_cblist_init(&donecbs);
	rcu_cblist_init(&pendcbs);
	rcu_segcblist_extract_count(src_rsclp, &donecbs);
	rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs);
	rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs);
	rcu_segcblist_insert_count(dst_rsclp, &donecbs);
	rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs);
	rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs);
	rcu_segcblist_init(src_rsclp);
}

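/*
 * Usage sketch for rcu_segcblist_merge(): adopting an outgoing CPU's
 * callbacks during hotplug offlining, with the required locking and
 * the surrounding hotplug machinery omitted.  The example_adopt_cbs()
 * name and its parameters are hypothetical.
 */
static void __maybe_unused example_adopt_cbs(struct rcu_segcblist *my_rsclp,
					     struct rcu_segcblist *outgoing_rsclp)
{
	rcu_segcblist_merge(my_rsclp, outgoing_rsclp);
	/* The source is reinitialized, hence empty afterwards. */
	WARN_ON_ONCE(!rcu_segcblist_empty(outgoing_rsclp));
}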