#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	atomic_set(&brw->write_ctr, 0);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}
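
/*
 * Usage sketch (illustrative, not part of the original file): the expected
 * lifecycle pairs percpu_init_rwsem(), the <linux/percpu-rwsem.h> wrapper
 * that supplies the name and lockdep key for __percpu_init_rwsem(), with
 * percpu_free_rwsem(). "foo_sem", foo_init() and foo_exit() are
 * hypothetical names. Init can fail with -ENOMEM if alloc_percpu() fails.
 *
 *	static struct percpu_rw_semaphore foo_sem;
 *
 *	static int foo_init(void)
 *	{
 *		return percpu_init_rwsem(&foo_sem);
 *	}
 *
 *	static void foo_exit(void)
 *	{
 *		percpu_free_rwsem(&foo_sem);
 *	}
 */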

/*
 * This is the fast-path for down_read/up_read, it only needs to ensure
 * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
 * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
 * serialize with the preempt-disabled section below.
 *
 * The nontrivial part is that we should guarantee acquire/release semantics
 * in case when
 *
 *	R_W: down_write() comes after up_read(), the writer should see all
 *	     changes done by the reader
 * or
 *	W_R: down_read() comes after up_write(), the reader should see all
 *	     changes done by the writer
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 *
 * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or
 * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
 * reader inside the critical section. See the comments in down_write and
 * up_write below.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success = false;

	preempt_disable();
	if (likely(!atomic_read(&brw->write_ctr))) {
		__this_cpu_add(*brw->fast_read_ctr, val);
		success = true;
	}
	preempt_enable();

	return success;
}

/*
 * Like the normal down_read() this is not recursive, the writer can
 * come after the first percpu_down_read() and create the deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	if (likely(update_fast_ctr(brw, +1))) {
		rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
		return;
	}

	down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	/* avoid up_read()->rwsem_release() */
	__up_read(&brw->rw_sem);
}

int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
	if (unlikely(!update_fast_ctr(brw, +1))) {
		if (!__down_read_trylock(&brw->rw_sem))
			return 0;
		atomic_inc(&brw->slow_read_ctr);
		__up_read(&brw->rw_sem);
	}

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
	return 1;
}
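
/*
 * Usage sketch (illustrative): percpu_down_read_trylock() returns 1 on
 * success and 0 when the fast-path fails and ->rw_sem cannot be taken,
 * so the caller must handle failure. "foo_sem" is the hypothetical
 * semaphore from the sketch above.
 *
 *	if (!percpu_down_read_trylock(&foo_sem))
 *		return -EBUSY;
 *	... read-side critical section ...
 *	percpu_up_read(&foo_sem);
 */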

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
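
/*
 * Usage sketch (illustrative): every percpu_down_read() must be balanced
 * by percpu_up_read() on the same semaphore; in the common case both are
 * just a preempt-disabled per-cpu add in update_fast_ctr(). "foo_sem" is
 * hypothetical.
 *
 *	percpu_down_read(&foo_sem);
 *	... readers can run concurrently, but never with a writer ...
 *	percpu_up_read(&foo_sem);
 */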

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

/*
 * A writer increments ->write_ctr to force the readers to switch to the
 * slow mode, note the atomic_read() check in update_fast_ctr().
 *
 * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
 * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
 * counter it represents the number of active readers.
 *
 * Finally the writer takes ->rw_sem for writing and blocks the new readers,
 * then waits until the slow counter becomes zero.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/* tell update_fast_ctr() there is a pending writer */
	atomic_inc(&brw->write_ctr);
	/*
	 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
	 *    so that update_fast_ctr() can't succeed.
	 *
	 * 2. Ensures we see the result of every previous this_cpu_add() in
	 *    update_fast_ctr().
	 *
	 * 3. Ensures that if any reader has exited its critical section via
	 *    fast-path, it executes a full memory barrier before we return.
	 *    See R_W case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Insert the barrier before the next fast-path in down_read,
	 * see W_R case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();
	/* the last writer unblocks update_fast_ctr() */
	atomic_dec(&brw->write_ctr);
}
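
/*
 * Usage sketch (illustrative): the write side is heavyweight, it pays two
 * synchronize_sched_expedited() calls plus ->rw_sem, so it suits rare
 * updates that must exclude frequent readers. "foo_sem" is hypothetical.
 *
 *	percpu_down_write(&foo_sem);
 *	... exclusive section, all readers are blocked ...
 *	percpu_up_write(&foo_sem);
 */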