/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The least significant 2 bits of the owner value have the following
 * meanings when set:
 *  - RWSEM_READER_OWNED (bit 0): The rwsem is owned by readers.
 *  - RWSEM_ANONYMOUSLY_OWNED (bit 1): The rwsem is anonymously owned,
 *    i.e. the owner(s) cannot be readily determined. It can be reader
 *    owned or the owning writer is indeterminate.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it also puts its task_struct
 * pointer into the owner field, with both the RWSEM_READER_OWNED and
 * RWSEM_ANONYMOUSLY_OWNED bits set. On unlock, the owner field is
 * largely left untouched. So for a free or reader-owned rwsem,
 * the owner value may contain information about the last reader that
 * acquired the rwsem. The anonymous bit is set because that particular
 * reader may or may not still own the lock.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 */
#include "lock_events.h"

#define RWSEM_READER_OWNED		(1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED		(1UL << 1)
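/*
 * Editor's sketch (not part of the original file): how the two low
 * bits would be stripped to recover the task pointer. The helper name
 * rwsem_owner_task() is hypothetical and exists only to illustrate the
 * encoding described above; the real code open-codes these tests.
 */
static inline struct task_struct *rwsem_owner_task(struct task_struct *owner)
{
	/* Mask off the two flag bits to recover the task_struct pointer. */
	return (struct task_struct *)((unsigned long)owner &
			~(RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED));
}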
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(long)((sem)->owner), (long)current,		\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif
/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
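/*
 * Editor's worked example (not part of the original file): with the
 * 64-bit definitions above, RWSEM_ACTIVE_MASK is 0xffffffff, so
 * RWSEM_WAITING_BIAS is -0x100000000 and RWSEM_ACTIVE_WRITE_BIAS is
 * -0xffffffff. Typical ->count values are then:
 *
 *	0x0000000000000000	unlocked
 *	0x000000000000000N	N readers active, no waiters
 *	0xffffffff00000001	one writer active, no waiters
 *	0xffffffff00000000	waiter(s) queued, lock currently free
 *	0xffffffff0000000N	N readers active, waiter(s) queued
 */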
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without taking the lock. Reads from
 * owner, however, may not need READ_ONCE() as long as the pointer
 * value is only used for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, NULL);
}
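/*
 * Editor's illustration (hypothetical helper, not part of the original
 * file): why a plain load of ->owner is tolerable when the value is
 * only compared. A stale or torn read can at worst cause a spurious
 * extra spin or an early bail-out; the pointer is never dereferenced.
 */
static inline bool rwsem_owner_still_same(struct rw_semaphore *sem,
					  struct task_struct *owner)
{
	/* Comparison only: no dereference of the possibly-stale pointer. */
	return sem->owner == owner;
}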
/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates that the task has owned the
 * rwsem previously; it may not be the real owner or one of the real
 * owners anymore when that field is examined, so take it with a grain
 * of salt.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED
						 | RWSEM_ANONYMOUSLY_OWNED;

	WRITE_ONCE(sem->owner, (struct task_struct *)val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}
/*
 * Return true if a rwsem waiter can spin on the rwsem's owner
 * and steal the lock, i.e. the lock is not anonymously owned.
 * N.B. !owner is considered spinnable.
 */
static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
{
	return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
}
/*
 * Return true if the rwsem is owned by an anonymous writer or readers.
 */
static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
{
	return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
}
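/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a simplified version of the check an optimistic spinner would make.
 * The function name is hypothetical; the real spinning policy lives in
 * the rwsem slow path, not in this header.
 */
static inline bool rwsem_should_spin(struct rw_semaphore *sem)
{
	/* A plain load would do for a comparison-only use. */
	struct task_struct *owner = READ_ONCE(sem->owner);

	/*
	 * NULL or a writer's task pointer is spinnable; an anonymous
	 * (typically reader-held) owner is not worth spinning on.
	 */
	return is_rwsem_owner_spinnable(owner);
}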
#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, this makes sure that if there
 * is a task pointer in the owner field of a reader-owned rwsem, it is
 * the real owner or one of the real owners. The only exception is when
 * the unlock is done by up_read_non_owner().
 */
#define rwsem_clear_reader_owned rwsem_clear_reader_owned
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = (unsigned long)current | RWSEM_READER_OWNED
						   | RWSEM_ANONYMOUSLY_OWNED;
	if (READ_ONCE(sem->owner) == (struct task_struct *)val)
		cmpxchg_relaxed((unsigned long *)&sem->owner, val,
				RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
}
#endif
#else
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
}

static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
}
#endif
#ifndef rwsem_clear_reader_owned
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
		rwsem_down_read_failed(sem);
		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
					RWSEM_READER_OWNED), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
}
static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
					RWSEM_READER_OWNED), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
	return 0;
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	/*
	 * Optimize for the case when the rwsem is not locked at all.
	 */
	long tmp = RWSEM_UNLOCKED_VALUE;

	lockevent_inc(rwsem_rtrylock);
	do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					tmp + RWSEM_ACTIVE_READ_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	} while (tmp >= 0);
	return 0;
}
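/*
 * Editor's note (illustrative, not part of the original file): on
 * failure, atomic_long_try_cmpxchg_acquire() updates tmp with the
 * count it actually observed, so the loop above retries with a fresh
 * value until either the read bias is added or a writer bias makes the
 * count negative. The hypothetical function below open-codes roughly
 * the same retry loop with a plain cmpxchg, for comparison.
 */
static inline int example_down_read_trylock_cmpxchg(struct rw_semaphore *sem)
{
	long tmp = atomic_long_read(&sem->count);

	while (tmp >= 0) {	/* no writer bias present */
		long old = atomic_long_cmpxchg_acquire(&sem->count, tmp,
					tmp + RWSEM_ACTIVE_READ_BIAS);
		if (old == tmp) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
		tmp = old;	/* retry with the freshly observed count */
	}
	return 0;
}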
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
	rwsem_set_owner(sem);
}
static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	rwsem_set_owner(sem);
	return 0;
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	lockevent_inc(rwsem_wtrylock);
	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
					  RWSEM_ACTIVE_WRITE_BIAS);
	if (tmp == RWSEM_UNLOCKED_VALUE) {
		rwsem_set_owner(sem);
		return true;
	}
	return false;
}
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
				sem);
	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_dec_return_release(&sem->count);
	/* Wake waiters once the last active reader leaves a waiter-biased count. */
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
	rwsem_clear_owner(sem);
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    &sem->count) < 0))
		rwsem_wake(sem);
}
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
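/*
 * Editor's usage sketch (hypothetical caller, not part of this file):
 * the classic downgrade pattern as seen through the kernel-facing API
 * in include/linux/rwsem.h, which wraps the __-prefixed primitives
 * defined above.
 */
static inline void example_publish_then_read(struct rw_semaphore *sem)
{
	down_write(sem);	/* exclusive: update the protected data */
	/* ... modify shared state ... */
	downgrade_write(sem);	/* atomically become a reader: no window
				 * in which another writer can slip in */
	/* ... keep reading the state just written ... */
	up_read(sem);
}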