/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H
#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure, as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline, causing
 * a cacheline bouncing problem.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the cpu.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};

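/*
 * Layout sketch (illustrative only, not part of this API): following the
 * comment above, a structure embedding an rwsem can keep frequently
 * written fields off the rwsem's cacheline. "my_object", "cold_cfg" and
 * "hot_counter" are hypothetical names.
 *
 *	struct my_object {
 *		struct rw_semaphore sem;	// count/owner stay together
 *		unsigned long cold_cfg;		// rarely touched, fine here
 *		atomic_long_t hot_counter ____cacheline_aligned;
 *						// hot field pushed onto its
 *						// own cacheline
 *	};
 */
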
#define RWSEM_UNLOCKED_VALUE		0UL
#define RWSEM_WRITER_LOCKED		(1UL << 0)
#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}
static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}
static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}
/* Common initializer macros and functions */
#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif
#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_COUNT_INIT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  __RWSEM_OPT_INIT(name)				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  __RWSEM_DEBUG_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);
#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

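/*
 * Usage sketch (illustrative only): a statically declared rwsem needs no
 * further setup, while an rwsem embedded in runtime-allocated memory must
 * be initialized with init_rwsem() before use. "my_static_sem" and
 * "my_obj" are hypothetical names.
 *
 *	static DECLARE_RWSEM(my_static_sem);
 *
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj)
 *		init_rwsem(&obj->sem);
 */
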
/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody from an incompatible type is wanting access to the
 * lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}

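/*
 * Usage sketch (illustrative only): a long-running lock holder can poll
 * rwsem_is_contended() and briefly drop the lock to let waiters in.
 * "my_sem" and "process_one_item" are hypothetical names.
 *
 *	down_read(&my_sem);
 *	while (process_one_item()) {
 *		if (rwsem_is_contended(&my_sem)) {
 *			up_read(&my_sem);
 *			cond_resched();
 *			down_read(&my_sem);
 *		}
 *	}
 *	up_read(&my_sem);
 */
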
#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>
struct rw_semaphore {
	struct rwbase_rt rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};
#define __RWSEM_INITIALIZER(name)				\
	{							\
		.rwbase = __RWBASE_INITIALIZER(name),		\
		__RWSEM_DEP_MAP_INIT(name)			\
	}
#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
			 struct lock_class_key *key);
#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
{
	return rw_base_is_locked(&sem->rwbase);
}
static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rwsem_is_locked(sem));
}
static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}
static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return rw_base_is_contended(&sem->rwbase);
}
#endif /* CONFIG_PREEMPT_RT */
/*
 * The functions below are the same for all rwsem implementations including
 * the RT specific variant.
 */
static inline void rwsem_assert_held(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held(sem);
	else
		rwsem_assert_held_nolockdep(sem);
}
static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held_write(sem);
	else
		rwsem_assert_held_write_nolockdep(sem);
}

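/*
 * Usage sketch (illustrative only): a helper that relies on its caller
 * already holding the lock can document and enforce that requirement.
 * "my_sem" and "my_update_locked" are hypothetical names.
 *
 *	static void my_update_locked(void)
 *	{
 *		rwsem_assert_held_write(&my_sem);
 *		// ... modify data protected by my_sem ...
 *	}
 */
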
/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

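/*
 * Usage sketch (illustrative only): since the trylock returns 1 on
 * success, the caller needs a fallback for the contended case.
 * "my_sem" is a hypothetical name.
 *
 *	if (down_read_trylock(&my_sem)) {
 *		// ... read shared state ...
 *		up_read(&my_sem);
 *	} else {
 *		// contended: defer the work or take a sleeping path
 *	}
 */
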
/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);
/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);
/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);
DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))

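/*
 * Usage sketch (illustrative only): the guards above pair with the
 * cleanup.h scope helpers so the rwsem is released automatically when the
 * scope is left. "my_sem" is a hypothetical name.
 *
 *	{
 *		guard(rwsem_read)(&my_sem);
 *		// ... read shared state; up_read() runs at scope exit ...
 *	}
 *
 *	scoped_guard(rwsem_write_try, &my_sem) {
 *		// entered only if down_write_trylock() succeeded
 *	}
 */
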
/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);

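/*
 * Usage sketch (illustrative only): publish an update under the write
 * lock, then downgrade so concurrent readers can proceed while this task
 * keeps reading; the final release must then be up_read(), not up_write().
 * "my_sem" is a hypothetical name.
 *
 *	down_write(&my_sem);
 *	// ... modify shared state ...
 *	downgrade_write(&my_sem);
 *	// ... keep reading; other readers are now admitted ...
 *	up_read(&my_sem);
 */
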
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)

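/*
 * Usage sketch (illustrative only): when two locks of the same class are
 * always taken in a fixed parent-then-child order, the inner acquisition
 * is annotated with a distinct subclass so lockdep does not report it as
 * a self-deadlock. "parent" and "child" are hypothetical names.
 *
 *	down_write(&parent->sem);
 *	down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *	// ... operate on both objects ...
 *	up_write(&child->sem);
 *	up_write(&parent->sem);
 */
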
/*
 * Take/release a lock when the task that releases it will not be the
 * one that acquired it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)		down_write(sem)
# define down_write_nested(sem, subclass)		down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)			down_read(sem)
# define up_read_non_owner(sem)				up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */