// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non RT semantics:
 *
 * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wake up is
 *   missed.
 *
 * - Non RT spin/rwlocks disable preemption and eventually interrupts.
 *   Disabling preemption has the side effect of disabling migration and
 *   preventing RCU grace periods.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock held section.
 */

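/*
 * Usage sketch (illustrative only, hypothetical names): on PREEMPT_RT the
 * generic spinlock API maps onto the rt_spin_lock()/rt_spin_unlock()
 * functions below, so unmodified caller code keeps its non RT semantics
 * while the lock itself becomes a sleeping, priority-inheriting rtmutex:
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *	static unsigned long demo_count;
 *
 *	static void demo_inc(void)
 *	{
 *		spin_lock(&demo_lock);		// rt_spin_lock() on RT
 *		demo_count++;			// migration disabled, RCU held
 *		spin_unlock(&demo_lock);	// rt_spin_unlock() on RT
 *	}
 */
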
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"

/*
 * __might_resched() skips the state check as rtlocks are state
 * preserving. Take RCU nesting into account as spin/read/write_lock() can
 * legitimately nest into an RCU read side critical section.
 */
#define RTLOCK_RESCHED_OFFSETS						\
	(rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)

#define rtlock_might_resched()						\
	__might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)

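/*
 * The nesting this offset accounts for (illustrative sketch, hypothetical
 * lock name): taking a spin/rw lock inside an RCU read side critical
 * section is legitimate on RT even though the lock may block, so the
 * current rcu_preempt_depth() is handed to __might_resched() as an
 * allowed offset instead of triggering a splat:
 *
 *	rcu_read_lock();
 *	spin_lock(&demo_lock);		// nesting is fine; no warning
 *	...
 *	spin_unlock(&demo_lock);
 *	rcu_read_unlock();
 */
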
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	lockdep_assert(!current->pi_blocked_on);

	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}

static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
	rtlock_might_resched();
	rtlock_lock(&lock->lock);
	rcu_read_lock();
	migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock)	__acquires(RCU)
{
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
				    struct lockdep_map *nest_lock)
{
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

void __sched rt_spin_unlock(spinlock_t *lock)	__releases(RCU)
{
	spin_release(&lock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();

	if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
		rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);

static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
	int ret = 1;

	if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
		ret = rt_mutex_slowtrylock(&lock->lock);

	if (ret) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
	return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);

int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
	int ret;

	local_bh_disable();
	ret = __rt_spin_trylock(lock);
	if (!ret)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
			 struct lock_class_key *key, bool percpu)
{
	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
			      LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif

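/*
 * How this init path is typically reached (a sketch assuming a lockdep
 * enabled RT kernel; names are hypothetical): spin_lock_init() on RT
 * initializes the embedded rtmutex and then registers the lockdep class
 * via __rt_spin_lock_init():
 *
 *	static spinlock_t demo_lock;
 *
 *	static int demo_setup(void)
 *	{
 *		spin_lock_init(&demo_lock);	// ends up here on RT
 *		return 0;
 *	}
 */
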
/*
 * RT-specific reader/writer locks
 */
#define rwbase_set_and_save_current_state(state)	\
	current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state()			\
	current_restore_rtlock_saved_state()

static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
	return 0;
}

static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state,
			       struct wake_q_head *wake_q)
{
	rtlock_slowlock_locked(rtm, wake_q);
	return 0;
}

static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
		return;

	rt_mutex_slowunlock(rtm);
}

static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(rtm);
}

#define rwbase_signal_pending_state(state, current)	(0)

#define rwbase_pre_schedule()

#define rwbase_schedule()						\
	schedule_rtlock()

#define rwbase_post_schedule()

#include "rwbase_rt.c"

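/*
 * The rwbase_* macros above parameterize the reader/writer core in
 * rwbase_rt.c, which is shared with the RT rwsem implementation. Roughly
 * (a heavily simplified, illustrative sketch of that shared slow path):
 *
 *	rwbase_set_and_save_current_state(state);	// TASK_RTLOCK_WAIT here
 *	for (;;) {
 *		if (rwbase_signal_pending_state(state, current))
 *			break;				// never true for rwlocks
 *		if (<lock acquired>)
 *			break;
 *		rwbase_schedule();
 *	}
 *	rwbase_restore_current_state();
 */
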
/*
 * The common functions which get wrapped into the rwlock API.
 */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_read_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

int __sched rt_write_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_write_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

void __sched rt_read_lock(rwlock_t *rwlock) __acquires(RCU)
{
	rtlock_might_resched();
	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);

void __sched rt_write_lock(rwlock_t *rwlock) __acquires(RCU)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);

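/*
 * Reader/writer usage sketch (illustrative only, hypothetical names): as
 * with spinlocks, the generic rwlock API maps onto the rt_read_lock() and
 * rt_write_lock() family on RT, so readers can share the lock while a
 * contended writer blocks on the underlying rtmutex instead of spinning:
 *
 *	static DEFINE_RWLOCK(demo_rwlock);
 *
 *	read_lock(&demo_rwlock);	// rt_read_lock() on RT
 *	...				// multiple readers may hold this
 *	read_unlock(&demo_rwlock);
 *
 *	write_lock(&demo_rwlock);	// rt_write_lock() on RT
 *	...				// exclusive; may sleep under contention
 *	write_unlock(&demo_rwlock);
 */
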
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(RCU)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif

void __sched rt_read_unlock(rwlock_t *rwlock) __releases(RCU)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();
	rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);

void __sched rt_write_unlock(rwlock_t *rwlock) __releases(RCU)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	rcu_read_unlock();
	migrate_enable();
	rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
		      struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif