// SPDX-License-Identifier: GPL-2.0-only
/*
 * RT-specific reader/writer semaphores and reader/writer locks
 *
 * down_write/write_lock()
 *  1) Lock rtmutex
 *  2) Remove the reader BIAS to force readers into the slow path
 *  3) Wait until all readers have left the critical section
 *  4) Mark it write locked
 *
 * up_write/write_unlock()
 *  1) Remove the write locked marker
 *  2) Set the reader BIAS, so readers can use the fast path again
 *  3) Unlock rtmutex, to release blocked readers
 *
 * down_read/read_lock()
 *  1) Try fast path acquisition (reader BIAS is set)
 *  2) Take rtmutex::wait_lock, which protects the writelocked flag
 *  3) If !writelocked, acquire it for read
 *  4) If writelocked, block on rtmutex
 *  5) unlock rtmutex, goto 1)
 *
 * up_read/read_unlock()
 *  1) Try fast path release (reader count != 1)
 *  2) Wake the writer waiting in down_write()/write_lock() #3
 *
 * down_read/read_lock() #3 has the consequence that rw semaphores and rw
 * locks on RT are not writer fair. Writers, which should be avoided in
 * RT tasks (think mmap_sem), are nevertheless subject to the rtmutex
 * priority/DL inheritance mechanism.
 *
 * It's possible to make the rw primitives writer fair by keeping a list of
 * active readers. A blocked writer would force all newly incoming readers
 * to block on the rtmutex, but the rtmutex would have to be proxy locked
 * for one reader after the other. We can't use multi-reader inheritance
 * because there is no way to support that with SCHED_DEADLINE.
 * Implementing the one by one reader boosting/handover mechanism is a
 * major surgery for a very dubious value.
 *
 * The risk of writer starvation is there, but the pathological use cases
 * which trigger it are not necessarily the typical RT workloads.
 *
 * Fast-path orderings:
 * The lock/unlock of readers can run in fast paths: lock and unlock are only
 * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
 * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
 * and _release() (or stronger).
 */
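/*
 * Illustration only, not part of the build: a minimal user-space model of
 * the reader-BIAS fast path and its acquire ordering described above,
 * using C11 atomics in place of the kernel atomic_t API. All names here
 * (MODEL_*, model_*) are made up for this sketch.
 *
 *	#include <limits.h>
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	// Stand-in for READER_BIAS: a large negative value when the
 *	// counter is read as a signed int.
 *	#define MODEL_READER_BIAS	INT_MIN
 *
 *	static atomic_int model_readers = MODEL_READER_BIAS;
 *
 *	static bool model_read_trylock(void)
 *	{
 *		int r = atomic_load_explicit(&model_readers, memory_order_relaxed);
 *
 *		while (r < 0) {		// bias present: no writer pending
 *			if (atomic_compare_exchange_weak_explicit(&model_readers,
 *					&r, r + 1, memory_order_acquire,
 *					memory_order_relaxed))
 *				return true;	// reader accounted, ACQUIRE
 *		}
 *		return false;		// writer involved: slow path needed
 *	}
 *
 * A writer would subtract MODEL_READER_BIAS to make the counter
 * non-negative, wait for it to drain to 0 and restore the bias on unlock,
 * mirroring steps 2)-4) of down_write() and up_write() above.
 */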
/* Common code shared between RT rw_semaphore and rwlock */
static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
	int r;

	/*
	 * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
	 * set.
	 */
	for (r = atomic_read(&rwb->readers); r < 0;) {
		if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
			return 1;
	}
	return 0;
}
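/*
 * For orientation, the rwb->readers encoding this trylock relies on, read
 * as a signed int (a summary of the arithmetic used throughout this file,
 * not an additional invariant):
 *
 *	READER_BIAS + n	(< 0)	n readers hold the lock, no writer pending
 *	n		(>= 0)	a writer removed the bias and waits for the
 *				remaining n readers to drain
 *	WRITER_BIAS	(> 0)	write locked
 *
 * The cmpxchg loop above can only succeed in the first, negative case,
 * which is the "reader BIAS is set" fast path from the file comment.
 */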
static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
				      unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	DEFINE_WAKE_Q(wake_q);
	int ret;

	rwbase_pre_schedule();
	raw_spin_lock_irq(&rtm->wait_lock);

	/*
	 * Call into the slow lock path with the rtmutex->wait_lock
	 * held, so this can't result in the following race:
	 *
	 * Reader1		Reader2		Writer
	 *			down_read()
	 *					down_write()
	 *					rtmutex_lock(m)
	 *					wait()
	 * down_read()
	 * unlock(m->wait_lock)
	 *			up_read()
	 *			wake(Writer)
	 *					lock(m->wait_lock)
	 *					sem->writelocked=true
	 *					unlock(m->wait_lock)
	 *
	 *					up_write()
	 *					sem->writelocked=false
	 *					rtmutex_unlock(m)
	 *			down_read()
	 *					down_write()
	 *					rtmutex_lock(m)
	 *					wait()
	 * rtmutex_lock(m)
	 *
	 * That would put Reader1 behind the writer waiting on
	 * Reader2 to call up_read(), which might be unbound.
	 */
	trace_contention_begin(rwb, LCB_F_RT | LCB_F_READ);

	/*
	 * For rwlocks this returns 0 unconditionally, so the below
	 * !ret conditionals are optimized out.
	 */
	ret = rwbase_rtmutex_slowlock_locked(rtm, state, &wake_q);

	/*
	 * On success the rtmutex is held, so there can't be a writer
	 * active. Increment the reader count and immediately drop the
	 * rtmutex again.
	 *
	 * rtmutex->wait_lock has to be unlocked in any case of course.
	 */
	if (!ret)
		atomic_inc(&rwb->readers);

	preempt_disable();
	raw_spin_unlock_irq(&rtm->wait_lock);
	wake_up_q(&wake_q);
	preempt_enable();

	if (!ret)
		rwbase_rtmutex_unlock(rtm);

	trace_contention_end(rwb, ret);
	rwbase_post_schedule();
	return ret;
}
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
					    unsigned int state)
{
	lockdep_assert(!current->pi_blocked_on);

	if (rwbase_read_trylock(rwb))
		return 0;

	return __rwbase_read_lock(rwb, state);
}
static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
					 unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	struct task_struct *owner;
	DEFINE_RT_WAKE_Q(wqh);

	raw_spin_lock_irq(&rtm->wait_lock);
	/*
	 * Wake the writer, i.e. the rtmutex owner. It might release the
	 * rtmutex concurrently in the fast path (due to a signal), but to
	 * clean up rwb->readers it needs to acquire rtm->wait_lock. The
	 * worst case which can happen is a spurious wakeup.
	 */
	owner = rt_mutex_owner(rtm);
	if (owner)
		rt_mutex_wake_q_add_task(&wqh, owner, state);

	/* Pairs with the preempt_enable in rt_mutex_wake_up_q() */
	preempt_disable();
	raw_spin_unlock_irq(&rtm->wait_lock);
	rt_mutex_wake_up_q(&wqh);
}
static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
					       unsigned int state)
{
	/*
	 * rwb->readers can only hit 0 when a writer is waiting for the
	 * active readers to leave the critical section.
	 *
	 * dec_and_test() is fully ordered, provides RELEASE.
	 */
	if (unlikely(atomic_dec_and_test(&rwb->readers)))
		__rwbase_read_unlock(rwb, state);
}
static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
					 unsigned long flags)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;

	/*
	 * _release() is needed in case that reader is in fast path, pairing
	 * with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
	 */
	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_rtmutex_unlock(rtm);
}
static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	__rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}
static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/* Release it and account current as reader */
	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}
static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
	/* Can do without CAS because we're serialized by wait_lock. */
	lockdep_assert_held(&rwb->rtmutex.wait_lock);

	/*
	 * _acquire is needed in case the reader is in the fast path, pairing
	 * with rwbase_read_unlock(), provides ACQUIRE.
	 */
	if (!atomic_read_acquire(&rwb->readers)) {
		atomic_set(&rwb->readers, WRITER_BIAS);
		return 1;
	}

	return 0;
}
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
				     unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	/* Take the rtmutex as a first step */
	if (rwbase_rtmutex_lock_state(rtm, state))
		return -EINTR;

	/* Force readers into slow path */
	atomic_sub(READER_BIAS, &rwb->readers);

	rwbase_pre_schedule();

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb))
		goto out_unlock;

	rwbase_set_and_save_current_state(state);
	trace_contention_begin(rwb, LCB_F_RT | LCB_F_WRITE);
	for (;;) {
		/* Optimized out for rwlocks */
		if (rwbase_signal_pending_state(state, current)) {
			rwbase_restore_current_state();
			__rwbase_write_unlock(rwb, 0, flags);
			rwbase_post_schedule();
			trace_contention_end(rwb, -EINTR);
			return -EINTR;
		}

		if (__rwbase_write_trylock(rwb))
			break;

		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		rwbase_schedule();
		raw_spin_lock_irqsave(&rtm->wait_lock, flags);

		set_current_state(state);
	}
	rwbase_restore_current_state();
	trace_contention_end(rwb, 0);

out_unlock:
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_post_schedule();
	return 0;
}
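/*
 * Note on the "optimized out for rwlocks" remarks above: this file is a
 * template, included by kernel/locking/rwsem.c and
 * kernel/locking/spinlock_rt.c, which supply the rwbase_*() hooks. As a
 * rough sketch of the intent (not the verbatim macros), the sleeping
 * rw_semaphore side maps
 *
 *	rwbase_signal_pending_state(state, tsk)	-> signal_pending_state(state, tsk)
 *	rwbase_schedule()			-> schedule()
 *
 * while the rwlock side blocks in TASK_RTLOCK_WAIT and defines the signal
 * check as a constant 0, so the -EINTR handling above compiles away.
 */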
static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	if (!rwbase_rtmutex_trylock(rtm))
		return 0;

	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb)) {
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		return 1;
	}
	__rwbase_write_unlock(rwb, 0, flags);
	return 0;
}