// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func)	.held = func,
#else
#define __INIT_HELD(func)
#endif

static const struct {
	void (*sync)(void);
	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
	void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
	int  (*held)(void);
#endif
} gp_ops[] = {
	[RCU_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_held)
	},
	[RCU_SCHED_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_sched_held)
	},
	[RCU_BH_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_bh_held)
	},
};

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
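
/*
 * gp_state tracks whether readers may use their fastpaths: GP_IDLE
 * (they may), GP_PENDING (an updater is waiting for a grace period),
 * or GP_PASSED (updaters hold the guard and readers must take the
 * slowpath). cb_state tracks the call_rcu() callback that eventually
 * returns gp_state to GP_IDLE: none queued (CB_IDLE), queued
 * (CB_PENDING), or queued and in need of requeueing because another
 * rcu_sync_exit() arrived in the meantime (CB_REPLAY).
 */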

#define	rss_lock	gp_wait.lock

#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
			 "suspicious rcu_sync_is_idle() usage");
}
EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 * @type: Flavor of RCU with which to synchronize rcu_sync structure
 */
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
	rsp->gp_type = type;
}
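
/*
 * Usage sketch (illustrative, not from the original file): a guard
 * with an RCU-sched flavored reader fastpath, loosely modeled on how
 * percpu-rwsem uses this machinery. "example_guard" and the functions
 * below are made-up names.
 */
struct example_guard {
	struct rcu_sync rss;
};

static void example_guard_init(struct example_guard *eg)
{
	rcu_sync_init(&eg->rss, RCU_SCHED_SYNC);
}

static void example_reader(struct example_guard *eg)
{
	rcu_read_lock_sched();			/* matches RCU_SCHED_SYNC */
	if (rcu_sync_is_idle(&eg->rss)) {
		/* Fastpath: no updater is active or pending. */
	} else {
		/* Slowpath: coordinate with the updater, e.g. take a lock. */
	}
	rcu_read_unlock_sched();
}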

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
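
/*
 * Sketch (illustrative): a guard that must start out "write-held" at
 * boot, before any reader can run, so early updates skip the grace
 * period in rcu_sync_enter(). A later unpaired rcu_sync_exit() lets
 * readers onto their fastpaths.
 */
static void example_guard_init_busy(struct example_guard *eg)
{
	rcu_sync_init(&eg->rss, RCU_SCHED_SYNC);
	rcu_sync_enter_start(&eg->rss);	/* as if rcu_sync_enter() had run */
}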

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period, but closely spaced calls to rcu_sync_enter() can optimize
 * away the grace-period wait via a state machine implemented by
 * rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	bool need_wait, need_sync;

	spin_lock_irq(&rsp->rss_lock);
	need_wait = rsp->gp_count++;
	need_sync = rsp->gp_state == GP_IDLE;
	if (need_sync)
		rsp->gp_state = GP_PENDING;
	spin_unlock_irq(&rsp->rss_lock);

	WARN_ON_ONCE(need_wait && need_sync);
	if (need_sync) {
		gp_ops[rsp->gp_type].sync();
		rsp->gp_state = GP_PASSED;
		wake_up_all(&rsp->gp_wait);
	} else if (need_wait) {
		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
	} else {
		/*
		 * Possible when there's a pending CB from a rcu_sync_exit().
		 * Nobody has yet been allowed the 'fast' path and thus we can
		 * avoid doing any sync(). The callback will get 'dropped'.
		 */
		WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	}
}
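
/*
 * Updater-side sketch (illustrative; example_guard is the made-up type
 * from the sketch above): force readers onto their slowpaths, update,
 * then let them back. Only the first of closely spaced updaters pays
 * for a grace period here; rcu_sync_exit() never blocks.
 */
static void example_writer(struct example_guard *eg)
{
	rcu_sync_enter(&eg->rss);	/* readers now see !rcu_sync_is_idle() */
	/* ... write-side critical section ... */
	rcu_sync_exit(&eg->rss);	/* fastpaths resume one GP later */
}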

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to one of the call_rcu() functions by
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_exit().  It takes action based on events
 * that have taken place in the meantime, so that closely spaced
 * rcu_sync_enter() and rcu_sync_exit() pairs need not wait for a grace
 * period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	WARN_ON_ONCE(rsp->cb_state == CB_IDLE);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * A new rcu_sync_enter() has happened; drop the callback.
		 */
		rsp->cb_state = CB_IDLE;
	} else if (rsp->cb_state == CB_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback
		 * to catch a later GP.
		 */
		rsp->cb_state = CB_PENDING;
		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
	} else {
		/*
		 * We're at least a GP after rcu_sync_exit(); everybody will
		 * now have observed the write-side critical section. Let 'em
		 * rip!
		 */
		rsp->cb_state = CB_IDLE;
		rsp->gp_state = GP_IDLE;
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
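
/*
 * Illustrative timeline for the CB_REPLAY branch above (not from the
 * original file): a full rcu_sync_enter()/rcu_sync_exit() pair races
 * with the grace period of an earlier rcu_sync_exit():
 *
 *	rcu_sync_exit()		cb_state = CB_PENDING, call_rcu()
 *	rcu_sync_enter()	gp_count = 1, no GP wait needed
 *	rcu_sync_exit()		gp_count = 0, cb_state = CB_REPLAY
 *	rcu_sync_func()		CB_REPLAY: requeue via call_rcu()
 *	rcu_sync_func()		CB_PENDING: go idle, fastpaths resume
 */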

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->cb_state == CB_IDLE) {
			rsp->cb_state = CB_PENDING;
			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
		} else if (rsp->cb_state == CB_PENDING) {
			rsp->cb_state = CB_REPLAY;
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
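
/*
 * Sketch (illustrative): overlapping updaters. gp_count acts as a
 * refcount, so readers stay off their fastpaths until the last
 * rcu_sync_exit(), and only the first rcu_sync_enter() waits for a
 * grace period.
 */
static void example_nested_writers(struct example_guard *eg)
{
	rcu_sync_enter(&eg->rss);	/* waits for a GP; gp_count == 1 */
	rcu_sync_enter(&eg->rss);	/* returns at once; gp_count == 2 */
	rcu_sync_exit(&eg->rss);	/* readers still on the slowpath */
	rcu_sync_exit(&eg->rss);	/* queues rcu_sync_func() */
}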

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int cb_state;

	WARN_ON_ONCE(rsp->gp_count);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->cb_state == CB_REPLAY)
		rsp->cb_state = CB_PENDING;
	cb_state = rsp->cb_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (cb_state != CB_IDLE) {
		gp_ops[rsp->gp_type].wait();
		WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
	}
}
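
/*
 * Teardown sketch (illustrative): after the final rcu_sync_exit() the
 * callback may still be in flight, so flush it before freeing.
 */
static void example_guard_destroy(struct example_guard *eg)
{
	rcu_sync_dtor(&eg->rss);	/* waits out any queued rcu_sync_func() */
	kfree(eg);			/* assumes eg was kmalloc()ed */
}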