/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */

#define pr_fmt(fmt) "rcu: " fmt
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"
/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);
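/*
 * Both parameters above are 0444, so they are read-only at runtime and
 * are normally set at boot time, for example via "srcutree.exp_holdoff=0"
 * on the kernel command line (assuming the usual module-parameter prefix
 * derived from this file's name).
 */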
/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);
/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)					\
do {								\
	spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)				\
do {								\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));		\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irq_rcu_node(p)				\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)			\
do {								\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)		\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}
/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
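/*
 * A minimal initialization sketch ("my_srcu" and "sp" are hypothetical
 * names): statically allocated srcu_struct structures are set up with
 * DEFINE_SRCU(), while dynamically allocated ones must be passed to
 * init_srcu_struct() before any other use.
 *
 *	DEFINE_SRCU(my_srcu);			// static allocation
 *
 *	sp = kzalloc(sizeof(*sp), GFP_KERNEL);	// dynamic allocation
 *	if (!sp || init_srcu_struct(sp))
 *		return -ENOMEM;
 */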
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use sp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(sp, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(sp, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}
/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}
/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}
#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}
/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Just leak it! */
	if (quiesced) {
		if (WARN_ON(delayed_work_pending(&sp->work)))
			return; /* Just leak it! */
	} else {
		flush_delayed_work(&sp->work);
	}
	for_each_possible_cpu(cpu)
		if (quiesced) {
			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
				return; /* Just leak it! */
		} else {
			flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
		}
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
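/*
 * The cleanup_srcu_struct() and cleanup_srcu_struct_quiesced() wrappers
 * invoke the above helper with quiesced=false and quiesced=true,
 * respectively.  Either way, callers must first stop posting new
 * call_srcu() callbacks and wait for pre-existing callbacks to be
 * invoked, otherwise the structure is (deliberately) leaked.
 */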
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
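/*
 * A minimal reader sketch using the srcu_read_lock() and
 * srcu_read_unlock() wrappers, which call the two functions above
 * ("my_srcu" and "gp" are hypothetical names):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	... use p; unlike RCU, an SRCU reader may block ...
 *	srcu_read_unlock(&my_srcu, idx);
 */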
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5
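/* The above delay is in microseconds, consumed via udelay() in try_check_zero(). */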
/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}
/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}
/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}
/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(sp);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq_rcu_node(sp);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(sp);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		spin_unlock_irq_rcu_node(sp);
		srcu_reschedule(sp, 0);
	} else {
		spin_unlock_irq_rcu_node(sp);
	}
}
/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &sp->work,
					   srcu_get_delay(sp));
		else if (list_empty(&sp->work.work.entry))
			list_add(&sp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}
/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}
/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}
/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}
/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}
/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
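/*
 * A minimal usage sketch, with hypothetical "struct foo", "foo_srcu",
 * and free_foo_cb() names: embed the rcu_head in the protected
 * structure, then reclaim that structure from the callback.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void free_foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_srcu(&foo_srcu, &p->rh, free_foo_cb);
 */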
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both indexes to drain to zero.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flips the srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
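/*
 * A minimal updater sketch (hypothetical "gp" pointer, "my_lock", and
 * "my_srcu" names): unpublish the old structure, wait for pre-existing
 * readers, then reclaim.
 *
 *	spin_lock(&my_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);
 *	kfree(p);
 */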
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}
/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
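/*
 * Note that srcu_barrier() waits only for callbacks queued by prior
 * call_srcu() invocations; it does not wait for readers or for a grace
 * period as such.  A typical teardown sequence (sketch) is therefore:
 * stop calling call_srcu(), then srcu_barrier(), then clean up the
 * srcu_struct.
 */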
/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(sp);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(sp);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		spin_unlock_irq_rcu_node(sp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
	}
}
/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}
/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(sp);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	spin_unlock_irq_rcu_node(sp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &sp->work, delay);
}
/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}
void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = sp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(sp->sda, cpu);
		u0 = sdp->srcu_unlock_count[!idx];
		u1 = sdp->srcu_unlock_count[idx];

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = sdp->srcu_lock_count[!idx];
		l1 = sdp->srcu_lock_count[idx];

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld %1p)",
			cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);
void __init srcu_init(void)
{
	struct srcu_struct *sp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		sp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				      work.work.entry);
		check_init_srcu_struct(sp);
		list_del_init(&sp->work.work.entry);
		queue_work(rcu_gp_wq, &sp->work.work);
	}
}