// SPDX-License-Identifier: GPL-2.0
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X  (1) X readers active or attempting lock, no writer waiting
 *                 X = #active_readers + #readers attempting to lock
 *                 (X*ACTIVE_BIAS)
 *
 * 0x00000000  rwsem is unlocked, and no one is waiting for the lock or
 *             attempting to read lock or write lock.
 *
 * 0xffff000X  (1) X readers active or attempting lock, with waiters for lock
 *                 X = #active readers + #readers attempting lock
 *                 (X*ACTIVE_BIAS + WAITING_BIAS)
 *             (2) 1 writer attempting lock, no waiters for lock
 *                 X-1 = #active readers + #readers attempting lock
 *                 ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *             (3) 1 writer active, no waiters for lock
 *                 X-1 = #active readers + #readers attempting lock
 *                 ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001  (1) 1 reader active or attempting lock, waiters for lock
 *                 (WAITING_BIAS + ACTIVE_BIAS)
 *             (2) 1 writer active or attempting lock, no waiters for lock
 *                 (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000  (1) There are writers or readers queued but none active
 *                 or in the process of attempting lock.
 *                 (WAITING_BIAS)
 *             Note: a writer can attempt to steal the lock for this count by
 *             adding ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001  (1) 1 writer active, or attempting lock. Waiters on queue.
 *                 (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 *       checking that the count becomes greater than 0 for successful lock
 *       acquisition, i.e. the case where there are only readers or nobody
 *       has the lock (1st and 2nd case above).
 *
 *       Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *       checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 *       acquisition (i.e. nobody else has the lock or attempts the lock). If
 *       unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *       are only waiters but none active (5th case above), and attempt to
 *       steal the lock.
 */
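
/*
 * For reference when reading the table above: these are the bias constants
 * as typically defined for the 32-bit layout (the authoritative definitions
 * live in rwsem.h and the arch headers, not here):
 *
 *   RWSEM_UNLOCKED_VALUE      0x00000000
 *   RWSEM_ACTIVE_BIAS         0x00000001
 *   RWSEM_ACTIVE_MASK         0x0000ffff
 *   RWSEM_WAITING_BIAS        0xffff0000  (i.e. -0x00010000)
 *   RWSEM_ACTIVE_READ_BIAS    RWSEM_ACTIVE_BIAS
 *   RWSEM_ACTIVE_WRITE_BIAS   (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * For example, one active reader plus queued waiters gives
 * RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS == 0xffff0001, the fourth entry in
 * the table above.
 */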

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
        osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
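
/*
 * Note that callers do not normally use the slow paths in this file directly;
 * they use the down_read()/up_read() and down_write()/up_write() wrappers
 * declared in <linux/rwsem.h>, which only fall back to these functions on
 * contention. A rough usage sketch (illustrative only; "my_sem" is just an
 * example name):
 *
 *      static DECLARE_RWSEM(my_sem);
 *
 *      down_read(&my_sem);
 *      ... shared (reader) critical section ...
 *      up_read(&my_sem);
 *
 *      down_write(&my_sem);
 *      ... exclusive (writer) critical section ...
 *      up_write(&my_sem);
 */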

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
                              enum rwsem_wake_type wake_type,
                              struct wake_q_head *wake_q)
{
        struct rwsem_waiter *waiter, *tmp;
        long oldcount, woken = 0, adjustment = 0;
        struct list_head wlist;

        /*
         * Take a peek at the queue head waiter such that we can determine
         * the wakeup(s) to perform.
         */
        waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY) {
                        /*
                         * Mark the writer at the front of the queue for
                         * wakeup. Until the task is actually awoken later by
                         * the caller, other writers are able to steal it.
                         * Readers, on the other hand, will block as they
                         * will notice the queued writer.
                         */
                        wake_q_add(wake_q, waiter->task);
                        lockevent_inc(rwsem_wake_writer);
                }

                return;
        }

        /*
         * Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
                oldcount = atomic_long_fetch_add(adjustment, &sem->count);
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /*
                         * If the count is still less than RWSEM_WAITING_BIAS
                         * after removing the adjustment, it is assumed that
                         * a writer has stolen the lock. We have to undo our
                         * reader grant.
                         */
                        if (atomic_long_add_return(-adjustment, &sem->count) <
                            RWSEM_WAITING_BIAS)
                                return;

                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
                /*
                 * Set it to reader-owned to give spinners an early
                 * indication that readers now have the lock.
                 */
                __rwsem_set_reader_owned(sem, waiter->task);
        }

        /*
         * Grant an infinite number of read locks to the readers at the front
         * of the queue. We know that woken will be at least 1 as we accounted
         * for above. Note we increment the 'active part' of the count by the
         * number of readers before waking any processes up.
         *
         * We have to do wakeup in 2 passes to prevent the possibility that
         * the reader count may be decremented before it is incremented. It
         * is because the to-be-woken waiter may not have slept yet. So it
         * may see waiter->task got cleared, finish its critical section and
         * do an unlock before the reader count increment.
         *
         * 1) Collect the read-waiters in a separate list, count them and
         *    fully increment the reader count in rwsem.
         * 2) For each waiter in the new list, clear waiter->task and
         *    put them into wake_q to be woken up later.
         */
        list_for_each_entry(waiter, &sem->wait_list, list) {
                if (waiter->type == RWSEM_WAITING_FOR_WRITE)
                        break;

                woken++;
        }
        list_cut_before(&wlist, &sem->wait_list, &waiter->list);

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
        lockevent_cond_inc(rwsem_wake_reader, woken);
        if (list_empty(&sem->wait_list)) {
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;
        }

        if (adjustment)
                atomic_long_add(adjustment, &sem->count);

        /* 2nd pass */
        list_for_each_entry_safe(waiter, tmp, &wlist, list) {
                struct task_struct *tsk;

                tsk = waiter->task;
                get_task_struct(tsk);

                /*
                 * Ensure calling get_task_struct() before setting the reader
                 * waiter to nil such that rwsem_down_read_failed() cannot
                 * race with do_exit() by always holding a reference count
                 * to the task to wake up.
                 */
                smp_store_release(&waiter->task, NULL);
                /*
                 * Ensure issuing the wakeup (either by us or someone else)
                 * after setting the reader waiter to nil.
                 */
                wake_q_add_safe(wake_q, tsk);
        }
}

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
        /*
         * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
         */
        if (count != RWSEM_WAITING_BIAS)
                return false;

        /*
         * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
         * are other tasks on the wait list, we need to add on WAITING_BIAS.
         */
        count = list_is_singular(&sem->wait_list) ?
                        RWSEM_ACTIVE_WRITE_BIAS :
                        RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

        if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
                                                        == RWSEM_WAITING_BIAS) {
                rwsem_set_owner(sem);
                return true;
        }

        return false;
}
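
/*
 * In 32-bit terms (see the count guide above), the cmpxchg in
 * rwsem_try_write_lock() moves the count from 0xffff0000 (queued waiters,
 * none active) to either 0xffff0001 (we were the only waiter) or 0xfffe0001
 * (other waiters remain queued behind us).
 */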

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
        long count = atomic_long_read(&sem->count);

        while (!count || count == RWSEM_WAITING_BIAS) {
                if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
                                        count + RWSEM_ACTIVE_WRITE_BIAS)) {
                        rwsem_set_owner(sem);
                        lockevent_inc(rwsem_opt_wlock);
                        return true;
                }
        }
        return false;
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
        /*
         * Due to the lock holder preemption issue, we skip spinning if the
         * task is not on a CPU or its CPU is preempted.
         */
        return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner;
        bool ret = true;

        BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));

        if (need_resched())
                return false;

        rcu_read_lock();
        owner = READ_ONCE(sem->owner);
        if (owner) {
                ret = is_rwsem_owner_spinnable(owner) &&
                      owner_on_cpu(owner);
        }
        rcu_read_unlock();
        return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner = READ_ONCE(sem->owner);

        if (!is_rwsem_owner_spinnable(owner))
                return false;

        rcu_read_lock();
        while (owner && (READ_ONCE(sem->owner) == owner)) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking that sem->owner still matches owner. If that
                 * fails, owner might point to free()d memory; if it still
                 * matches, the rcu_read_lock() ensures the memory stays
                 * valid.
                 */
                barrier();

                /*
                 * Abort spinning when need_resched() is set, the owner is
                 * not running, or the owner's CPU is preempted.
                 */
                if (need_resched() || !owner_on_cpu(owner)) {
                        rcu_read_unlock();
                        return false;
                }

                cpu_relax();
        }
        rcu_read_unlock();

        /*
         * If there is a new owner or the owner is not set, we continue
         * spinning.
         */
        return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        bool taken = false;

        preempt_disable();

        /* sem->wait_lock should not be held when doing optimistic spinning */
        if (!rwsem_can_spin_on_owner(sem))
                goto done;

        if (!osq_lock(&sem->osq))
                goto done;

        /*
         * Optimistically spin on the owner field and attempt to acquire the
         * lock whenever the owner changes. Spinning will be stopped when:
         *  1) the owning writer isn't running; or
         *  2) readers own the lock as we can't determine if they are
         *     actively running or not.
         */
        while (rwsem_spin_on_owner(sem)) {
                /*
                 * Try to acquire the lock
                 */
                if (rwsem_try_write_lock_unqueued(sem)) {
                        taken = true;
                        break;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!sem->owner && (need_resched() || rt_task(current)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax();
        }
        osq_unlock(&sem->osq);
done:
        preempt_enable();
        lockevent_cond_inc(rwsem_opt_fail, !taken);
        return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return false;
}
#endif

/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        DEFINE_WAKE_Q(wake_q);

        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list)) {
                /*
                 * In case the wait queue is empty and the lock isn't owned
                 * by a writer, this reader can exit the slowpath and return
                 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
                 * been set in the count.
                 */
                if (atomic_long_read(&sem->count) >= 0) {
                        raw_spin_unlock_irq(&sem->wait_lock);
                        rwsem_set_reader_owned(sem);
                        lockevent_inc(rwsem_rlock_fast);
                        return sem;
                }
                adjustment += RWSEM_WAITING_BIAS;
        }
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = atomic_long_add_return(adjustment, &sem->count);

        /*
         * If there are no active locks, wake the front queued process(es).
         *
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers !
         */
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);

        /* wait to be given the lock */
        while (true) {
                set_current_state(state);
                if (!waiter.task)
                        break;
                if (signal_pending_state(state, current)) {
                        raw_spin_lock_irq(&sem->wait_lock);
                        if (waiter.task)
                                goto out_nolock;
                        raw_spin_unlock_irq(&sem->wait_lock);
                        break;
                }
                schedule();
                lockevent_inc(rwsem_sleep_reader);
        }

        __set_current_state(TASK_RUNNING);
        lockevent_inc(rwsem_rlock);
        return sem;

out_nolock:
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
        raw_spin_unlock_irq(&sem->wait_lock);
        __set_current_state(TASK_RUNNING);
        lockevent_inc(rwsem_rlock_fail);
        return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
        return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
        return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
        long count;
        bool waiting = true; /* any queued threads before us */
        struct rwsem_waiter waiter;
        struct rw_semaphore *ret = sem;
        DEFINE_WAKE_Q(wake_q);

        /* undo write bias from down_write operation, stop active locking */
        count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

        /* do optimistic spinning and steal lock if possible */
        if (rwsem_optimistic_spin(sem))
                return sem;

        /*
         * Optimistic spinning failed, proceed to the slowpath
         * and block until we can acquire the sem.
         */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);

        /* account for this before adding a new element to the list */
        if (list_empty(&sem->wait_list))
                waiting = false;

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        if (waiting) {
                count = atomic_long_read(&sem->count);

                /*
                 * If there were already threads queued before us and there
                 * are no active writers, the lock must be read owned; so we
                 * try to wake any readers that were queued ahead of us.
                 */
                if (count > RWSEM_WAITING_BIAS) {
                        __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
                        /*
                         * The wakeup is normally called _after_ the wait_lock
                         * is released, but given that we are proactively waking
                         * readers we can deal with the wake_q overhead as it is
                         * similar to releasing and taking the wait_lock again
                         * for attempting rwsem_try_write_lock().
                         */
                        wake_up_q(&wake_q);

                        /*
                         * Reinitialize wake_q after use.
                         */
                        wake_q_init(&wake_q);
                }

        } else
                count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

        /* wait until we successfully acquire the lock */
        set_current_state(state);
        while (true) {
                if (rwsem_try_write_lock(count, sem))
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        if (signal_pending_state(state, current))
                                goto out_nolock;

                        schedule();
                        lockevent_inc(rwsem_sleep_writer);
                        set_current_state(state);
                } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }
        __set_current_state(TASK_RUNNING);
        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);
        lockevent_inc(rwsem_wlock);

        return ret;

out_nolock:
        __set_current_state(TASK_RUNNING);
        raw_spin_lock_irq(&sem->wait_lock);
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
        else
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);
        lockevent_inc(rwsem_wlock_fail);

        return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        /*
         * __rwsem_down_write_failed_common(sem)
         *   rwsem_optimistic_spin(sem)
         *     osq_unlock(sem->osq)
         *   ...
         *   atomic_long_add_return(&sem->count)
         *
         *      - VS -
         *
         *              __up_write()
         *                if (atomic_long_sub_return_release(&sem->count) < 0)
         *                  rwsem_wake(sem)
         *                    osq_is_locked(&sem->osq)
         *
         * And __up_write() must observe !osq_is_locked() when it observes the
         * atomic_long_add_return() in order to not miss a wakeup.
         *
         * This boils down to:
         *
         * [S.rel] X = 1                [RmW] r0 = (Y += 0)
         *         MB                         RMB
         * [RmW]   Y += 1               [L]   r1 = X
         *
         * exists (r0=1 /\ r1=0)
         */
        smp_rmb();

        /*
         * If a spinner is present, it is not necessary to do the wakeup.
         * Try to do wakeup only if the trylock succeeds to minimize
         * spinlock contention which may introduce too much delay in the
         * unlock operation.
         *
         *    spinning writer           up_write/up_read caller
         *    ---------------           -----------------------
         * [S]   osq_unlock()           [L]   osq
         *       MB                           RMB
         * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
         *
         * Here, it is important to make sure that there won't be a missed
         * wakeup while the rwsem is free and the only spinning writer goes
         * to sleep without taking the rwsem. Even when the spinning writer
         * is just going to break out of the waiting loop, it will still do
         * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
         * rwsem_has_spinner() is true, it will guarantee at least one
         * trylock attempt on the rwsem later on.
         */
        if (rwsem_has_spinner(sem)) {
                /*
                 * The smp_rmb() here is to make sure that the spinner
                 * state is consulted before reading the wait_lock.
                 */
                smp_rmb();
                if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
                        return sem;
                goto locked;
        }
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);