// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */

#define pr_fmt(fmt) "rcu: " fmt
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>
#include "rcu.h"
#include "rcu_segcblist.h"
#ifndef data_race
#define data_race(expr)							\
	({								\
		expr;							\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif
/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);
/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);
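
/*
 * Illustrative note (not part of the original file): both of the above are
 * read-only (0444) module parameters of a built-in object, so they are
 * normally set on the kernel command line, for example
 * "srcutree.exp_holdoff=0" to disable the auto-expedite holdoff entirely.
 * The "srcutree." prefix is assumed here to follow KBUILD_MODNAME.
 */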
/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;
static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);
/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}
/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(ssp, is_static);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return ssp->sda ? 0 : -ENOMEM;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
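
/*
 * Illustrative usage sketch (not part of the original file; the names are
 * made up): a dynamically allocated SRCU domain is set up with
 * init_srcu_struct() before first use:
 *
 *	struct my_driver_data {
 *		struct srcu_struct srcu;
 *		// ...
 *	};
 *
 *	static int my_driver_data_init(struct my_driver_data *d)
 *	{
 *		return init_srcu_struct(&d->srcu);
 *	}
 *
 * Statically allocated domains are instead declared with DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU() and rely on the first-use initialization performed
 * by check_init_srcu_struct() below (or, for modules, on the module
 * notifier at the end of this file).
 */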
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}
/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}
/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}
#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}
/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);
/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5
/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}
static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}
/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}
/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}
/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}
/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}
/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}
/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}
/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}
/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	idx = srcu_read_lock(ssp);
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
}
/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes. To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flip the srcu_idx and wait for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}
/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		spin_lock_irq_rcu_node(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
		spin_unlock_irq_rcu_node(ssp);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
	}
}
/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work);
	ssp = sdp->ssp;

	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}
/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}
/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	srcu_reschedule(ssp, srcu_get_delay(ssp));
}
void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = ssp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(ssp->sda, cpu);
		u0 = data_race(sdp->srcu_unlock_count[!idx]);
		u1 = data_race(sdp->srcu_unlock_count[idx]);

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = data_race(sdp->srcu_lock_count[!idx]);
		l1 = data_race(sdp->srcu_lock_count[idx]);

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld %c)",
			cpu, c0, c1,
			"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		check_init_srcu_struct(ssp);
		list_del_init(&ssp->work.work.entry);
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}
#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
	int ret;

	for (i = 0; i < mod->num_srcu_structs; i++) {
		ret = init_srcu_struct(*(sspp++));
		if (WARN_ON_ONCE(ret))
			return ret;
	}
	return 0;
}
/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;

	for (i = 0; i < mod->num_srcu_structs; i++)
		cleanup_srcu_struct(*(sspp++));
}
/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = srcu_module_coming(mod);
		break;
	case MODULE_STATE_GOING:
		srcu_module_going(mod);
		break;
	default:
		break;
	}
	return ret;
}
= {
1379 .notifier_call
= srcu_module_notify
,
static __init int init_srcu_module_notifier(void)
{
	int ret;

	ret = register_module_notifier(&srcu_module_nb);
	if (ret)
		pr_warn("Failed to register srcu module notifier\n");
	return ret;
}
late_initcall(init_srcu_module_notifier);
#endif /* #ifdef CONFIG_MODULES */