// SPDX-License-Identifier: GPL-2.0-only
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/errno.h>
#include <trace/events/lock.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;
	rcu_sync_init(&sem->rss);
	rcuwait_init(&sem->writer);
	init_waitqueue_head(&sem->waiters);
	atomic_set(&sem->block, 0);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
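
/*
 * Illustrative sketch only (the example_* names are hypothetical, not part
 * of this file): a typical caller initializes the semaphore through the
 * percpu_init_rwsem() wrapper from <linux/percpu-rwsem.h>, which supplies
 * the lock_class_key for __percpu_init_rwsem() above, and must check for
 * -ENOMEM from the per-CPU allocation.
 */
static struct percpu_rw_semaphore example_sem;	/* hypothetical */

static int __maybe_unused example_init(void)
{
	int ret;

	ret = percpu_init_rwsem(&example_sem);
	if (ret)
		return ret;	/* alloc_percpu() of read_count failed */

	return 0;
}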

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	this_cpu_inc(*sem->read_count);

	/*
	 * Due to having preemption disabled the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of sem->block, then the
	 * writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the sem->block value, which
	 * in turn means that they are guaranteed to immediately decrement
	 * their sem->read_count, so that it doesn't matter that the writer
	 * missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !sem->block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!atomic_read_acquire(&sem->block)))
		return true;

	this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);

	return false;
}

static inline bool __percpu_down_write_trylock(struct percpu_rw_semaphore *sem)
{
	if (atomic_read(&sem->block))
		return false;

	return atomic_xchg(&sem->block, 1) == 0;
}

static bool __percpu_rwsem_trylock(struct percpu_rw_semaphore *sem, bool reader)
{
	if (reader) {
		bool ret;

		preempt_disable();
		ret = __percpu_down_read_trylock(sem);
		preempt_enable();

		return ret;
	}
	return __percpu_down_write_trylock(sem);
}

/*
 * The return value of wait_queue_entry::func means:
 *
 *  <0 - error, wakeup is terminated and the error is returned
 *   0 - no wakeup, a next waiter is tried
 *  >0 - woken, if EXCLUSIVE, counted towards @nr_exclusive.
 *
 * We use EXCLUSIVE for both readers and writers to preserve FIFO order,
 * and play games with the return value to allow waking multiple readers.
 *
 * Specifically, we wake readers until we've woken a single writer, or until a
 * trylock fails.
 */
static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry,
				      unsigned int mode, int wake_flags,
				      void *key)
{
	bool reader = wq_entry->flags & WQ_FLAG_CUSTOM;
	struct percpu_rw_semaphore *sem = key;
	struct task_struct *p;

	/* concurrent against percpu_down_write(), can get stolen */
	if (!__percpu_rwsem_trylock(sem, reader))
		return 1;

	p = get_task_struct(wq_entry->private);
	list_del_init(&wq_entry->entry);
	smp_store_release(&wq_entry->private, NULL);

	wake_up_process(p);
	put_task_struct(p);

	return !reader; /* wake (readers until) 1 writer */
}

static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
{
	DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function);
	bool wait;

	spin_lock_irq(&sem->waiters.lock);
	/*
	 * Serialize against the wakeup in percpu_up_write(), if we fail
	 * the trylock, the wakeup must see us on the list.
	 */
	wait = !__percpu_rwsem_trylock(sem, reader);
	if (wait) {
		wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
		__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
	}
	spin_unlock_irq(&sem->waiters.lock);

	while (wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!smp_load_acquire(&wq_entry.private))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
	if (__percpu_down_read_trylock(sem))
		return true;

	if (try)
		return false;

	trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_READ);
	preempt_enable();
	percpu_rwsem_wait(sem, /* .reader = */ true);
	preempt_disable();
	trace_contention_end(sem, 0);

	return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);

#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})
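
/*
 * Illustrative sketch only (example_hits is a hypothetical per-CPU counter,
 * not part of this file): per_cpu_sum() walks every possible CPU, so the
 * result is merely a snapshot; it is only stable when concurrent increments
 * are guaranteed to be undone, as sem->read_count is once sem->block is set.
 */
static DEFINE_PER_CPU(int, example_hits);

static int __maybe_unused example_hits_total(void)
{
	return per_cpu_sum(example_hits);
}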

bool percpu_is_read_locked(struct percpu_rw_semaphore *sem)
{
	return per_cpu_sum(*sem->read_count) != 0 && !atomic_read(&sem->block);
}
EXPORT_SYMBOL_GPL(percpu_is_read_locked);
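
/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * percpu_is_read_locked() can only report that *some* reader holds the
 * semaphore, not that the current task does, so it is best suited to
 * debug-style assertions such as this one.
 */
static void __maybe_unused example_assert_read_locked(struct percpu_rw_semaphore *sem)
{
	WARN_ON_ONCE(!percpu_is_read_locked(sem));
}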

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero. If this sum is zero, then it is stable due to the fact that if any
 * newly arriving readers increment a given counter, they will immediately
 * decrement that same counter.
 *
 * Assumes sem->block is set.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}

void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
{
	bool contended = false;

	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	/*
	 * Try to set sem->block; this provides writer-writer exclusion.
	 * Having sem->block set makes new readers block.
	 */
	if (!__percpu_down_write_trylock(sem)) {
		trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE);
		percpu_rwsem_wait(sem, /* .reader = */ false);
		contended = true;
	}

	/* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */

	/*
	 * If they don't see our store of sem->block, then we are guaranteed to
	 * see their sem->read_count increment, and therefore will wait for
	 * them.
	 */

	/* Wait for all active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);

	if (contended)
		trace_contention_end(sem, 0);
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	/*
	 * Signal the writer is done, no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	atomic_set_release(&sem->block, 0);

	/*
	 * Prod any pending reader/writer to make progress.
	 */
	__wake_up(&sem->waiters, TASK_NORMAL, 1, sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it is counting.
	 */
	rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
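
/*
 * Illustrative sketch only (the example_* names are hypothetical, not part
 * of this file): the intended usage pattern. Readers normally take the
 * per-CPU fast path; percpu_down_write() forces them into the slow path
 * via rcu_sync_enter() and then waits for readers_active_check().
 */
static DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);

static void __maybe_unused example_reader(void)
{
	percpu_down_read(&example_rwsem);	/* usually just a per-CPU increment */
	/* read-side critical section */
	percpu_up_read(&example_rwsem);
}

static void __maybe_unused example_writer(void)
{
	percpu_down_write(&example_rwsem);	/* excludes writers and all readers */
	/* exclusive critical section */
	percpu_up_write(&example_rwsem);
}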