/* Linux 4.9.151 - kernel/futex.c */
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/bootmem.h>
#include <linux/fault-inject.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"
/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, the
 * waker can avoid taking the hb spinlock and simply return. In order for
 * this optimization to work, ordering guarantees must exist so that the
 * waiter being added to the list is acknowledged when the list is
 * concurrently being checked by the waker, avoiding scenarios like the
 * following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *     lock(hash_bucket(futex));
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                     wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when the wait call may
 * later return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock, and decrement them again after releasing it - the code
 * that actually moves the futex(es) between hash buckets (requeue_futex)
 * will do the additional required waiter count housekeeping. This is done
 * by double_lock_hb() and double_unlock_hb(), respectively.
 */
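/*
 * For illustration only (not part of the kernel build), a minimal user-space
 * sketch of the wait/wake protocol described above, using the raw futex
 * syscall interface; the names wait_on()/wake_one() are hypothetical:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdatomic.h>
 *
 *	static atomic_int futex_word;
 *
 *	static void wait_on(int expected)
 *	{
 *		// The kernel re-reads *uaddr under the hash bucket lock and
 *		// only sleeps if it still equals 'expected', closing the
 *		// race window shown in the diagrams above.
 *		syscall(SYS_futex, &futex_word, FUTEX_WAIT, expected,
 *			NULL, NULL, 0);
 *	}
 *
 *	static void wake_one(int newval)
 *	{
 *		// Modify the user space value first, then wake, mirroring
 *		// the "waker side" sequence described above.
 *		atomic_store(&futex_word, newval);
 *		syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	}
 */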
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};
/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues	(__futex_data.queues)
#define futex_hashsize	(__futex_data.hashsize)
/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-private", mode, dir,
				 &fail_futex.ignore_private)) {
		debugfs_remove_recursive(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */
static inline void futex_get_mm(union futex_key *key)
{
	atomic_inc(&key->private.mm->mm_count);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}
/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}
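/*
 * Note: the mask above is valid because futex_hashsize is rounded up to a
 * power of two at boot (futex_init() sizes the table via
 * alloc_large_system_hash()), so "hash & (futex_hashsize - 1)" always
 * yields an in-range bucket index.
 */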
/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}
/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU less systems futexes are always "private" as there is no per
	 * process address space. We need the smp wmb nevertheless - yes,
	 * arch/blackfin has MMU less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}
/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *		VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we dont even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock unnecessarily forces
	 * serialization. From this point on, mapping will be re-verified if
	 * necessary and the page lock will be acquired only if it is
	 * unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel which is almost certainly an
		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but lets be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}
static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 *
 * Must be called with the hb lock held.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}
/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *	thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 */

/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	WARN_ON(!atomic_read(&pi_state->refcount));

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				return -EINVAL;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_state;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_state;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			return -EINVAL;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		return -EINVAL;
out_state:
	atomic_inc(&pi_state->refcount);
	*ps = pi_state;
	return 0;
}
/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out whether the
	 * task is exiting. To protect against the do_exit change of the task
	 * flags, we do this protected by p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *match = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uval, key, ps);
}
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}
/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for. This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  0 - ready to wait;
 *  1 - acquired the lock;
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *match;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	match = futex_top_waiter(hb, key);
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set or called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourself to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uval, key, ps);
}
/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	/*
	 * Queue the task for later wakeup for after we've released
	 * the hb->lock. wake_q_add() grabs reference to p.
	 */
	wake_q_add(wake_q, p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
			 struct futex_hash_bucket *hb)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;
	WAKE_Q(wake_q);
	bool deboost;
	int ret = 0;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. The WAITERS bit is always
	 * kept enabled while there is PI state around. We cleanup the
	 * owner died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
		ret = -EFAULT;
	} else if (curval != uval) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}
	if (ret) {
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		return ret;
	}

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

	/*
	 * First unlock HB so the waiter does not spin on it once he got woken
	 * up. Second wake up the waiter before the priority is adjusted. If we
	 * deboost first (and lose our higher priority), then the task might get
	 * scheduled away before the wake up can take place.
	 */
	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
	if (deboost)
		rt_mutex_adjust_prio(current);

	return 0;
}
/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}
/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op = (encoded_op & 0x70000000) >> 28;
	unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
		if (oparg < 0 || oparg > 31) {
			char comm[sizeof(current->comm)];
			/*
			 * kill this print and return -EINVAL when userspace
			 * is sane again
			 */
			pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
					    get_task_comm(comm, current), oparg);
			oparg &= 31;
		}
		oparg = 1 << oparg;
	}

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	case FUTEX_OP_CMP_GE:
		return oldval >= cmparg;
	case FUTEX_OP_CMP_LE:
		return oldval <= cmparg;
	case FUTEX_OP_CMP_GT:
		return oldval > cmparg;
	default:
		return -ENOSYS;
	}
}
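/*
 * For reference, a user-space sketch (illustrative only, not part of this
 * file) of how an encoded_op decoded above is built with the FUTEX_OP()
 * macro from the uapi <linux/futex.h> header:
 *
 *	#include <linux/futex.h>
 *
 *	// "Atomically add 1 to *uaddr2, then also wake uaddr2 waiters if
 *	// the old value was > 0" - decoded above as op = FUTEX_OP_ADD,
 *	// oparg = 1, cmp = FUTEX_OP_CMP_GT, cmparg = 0.
 *	unsigned int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1,
 *					   FUTEX_OP_CMP_GT, 0);
 */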
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex(&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex(&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}
/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal. Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later. Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}
/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  0 - failed to acquire the lock atomically;
 * >0 - acquired the lock, return value is vpid of the top_waiter
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				      struct futex_hash_bucket *hb1,
				      struct futex_hash_bucket *hb2,
				      union futex_key *key1, union futex_key *key2,
				      struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault. If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1. The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}
/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 * >=0 - on success, the number of tasks requeued or woken;
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner. However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state. Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			put_futex_key(&key2);
			put_futex_key(&key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit. We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it. If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
		 * If the lock was not taken, we have pi_state and an initial
		 * refcount on it. In case of an error we have nothing.
		 */
		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			/*
			 * If we acquired the lock, then the user space value
			 * of uaddr2 should be vpid. It cannot be changed by
			 * the top waiter as it is blocked on hb2 lock if it
			 * tries to do so. If something fiddled with it behind
			 * our back the pi state lookup might unearth it. So
			 * we rather use the known value than rereading and
			 * handing potential crap to lookup_pi_state.
			 *
			 * If that call succeeds then we have pi_state and an
			 * initial refcount on it.
			 */
			ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
		}

		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

			/* If the above failed, then pi_state is NULL */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters. For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter. If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			mark_wake_futex(&wake_q, this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/*
			 * Prepare the waiter to take the rt_mutex. Take a
			 * refcount on the pi_state and store the pointer in
			 * the futex_q object of the waiter.
			 */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);
			if (ret == 1) {
				/*
				 * We got the lock. We do neither drop the
				 * refcount on pi_state nor clear
				 * this->pi_state because the waiter needs the
				 * pi_state for cleaning up the user space
				 * value. It will drop the refcount after
				 * doing so.
				 */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/*
				 * rt_mutex_start_proxy_lock() detected a
				 * potential deadlock when we tried to queue
				 * that waiter. Drop the pi_state reference
				 * which we took above and remove the pointer
				 * to the state from the waiters futex_q
				 * object.
				 */
				this->pi_state = NULL;
				put_pi_state(pi_state);
				/*
				 * We stop queueing more waiters and let user
				 * space deal with the mess.
				 */
				break;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

	/*
	 * We took an extra initial reference to the pi_state either
	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
	 * need to drop it here again.
	 */
	put_pi_state(pi_state);

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
	hb_waiters_dec(hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer. We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret ? ret : task_count;
}
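/*
 * Illustrative user-space sketch (not part of the kernel build) of the
 * classic condvar-broadcast use of requeueing: wake one waiter and move the
 * rest onto the mutex futex so they do not all stampede. Assumes the raw
 * syscall interface; "cond", "mutex" and "condval" are hypothetical names:
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// nr_wake = 1; nr_requeue = INT_MAX is passed in the timeout
 *	// argument slot; val3 = condval is the expected value of *cond
 *	// (the cmpval checked above under the hash bucket locks).
 *	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE, 1,
 *		(void *)(long)INT_MAX, &mutex, condval);
 */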
2029 /* The key must already be stored in q->key. */
2030 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
2031 __acquires(&hb->lock)
2033 struct futex_hash_bucket *hb;
2035 hb = hash_futex(&q->key);
2038 	 * Increment the counter before taking the lock so that
2039 	 * a potential waker won't miss a task that is about to sleep
2040 	 * waiting for the spinlock. This is safe as all queue_lock()
2041 	 * users end up calling queue_me(). Similarly, for housekeeping,
2042 	 * decrement the counter at queue_unlock() when an error has
2043 	 * occurred and we don't end up adding the task to the list.
2045 hb_waiters_inc(hb);
2047 q->lock_ptr = &hb->lock;
2049 spin_lock(&hb->lock); /* implies smp_mb(); (A) */
2050 return hb;
2053 static inline void
2054 queue_unlock(struct futex_hash_bucket *hb)
2055 __releases(&hb->lock)
2057 spin_unlock(&hb->lock);
2058 hb_waiters_dec(hb);
2062 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2063 * @q: The futex_q to enqueue
2064 * @hb: The destination hash bucket
2066 * The hb->lock must be held by the caller, and is released here. A call to
2067 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2068 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2069 * or nothing if the unqueue is done as part of the wake process and the unqueue
2070  * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
2071 * an example).
2073 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2074 __releases(&hb->lock)
2076 int prio;
2079 * The priority used to register this element is
2080 * - either the real thread-priority for the real-time threads
2081 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2082 * - or MAX_RT_PRIO for non-RT threads.
2083 * Thus, all RT-threads are woken first in priority order, and
2084 * the others are woken last, in FIFO order.
2086 prio = min(current->normal_prio, MAX_RT_PRIO);
2088 plist_node_init(&q->list, prio);
2089 plist_add(&q->list, &hb->chain);
2090 q->task = current;
2091 spin_unlock(&hb->lock);
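/*
 * Illustrative sketch (not part of this file): the wakeup-order rule
 * encoded by the prio computation above, written out as a standalone
 * helper. RT tasks have normal_prio below MAX_RT_PRIO (lower value
 * means higher priority) and sort ahead of everyone else; all non-RT
 * tasks collapse onto the single priority MAX_RT_PRIO and thus wake
 * in FIFO order. The constant mirrors the kernel's MAX_RT_PRIO of 100.
 */
#define EXAMPLE_MAX_RT_PRIO	100

static int example_wait_prio(int normal_prio)
{
	/* e.g. an RT task at prio 10 yields 10; any normal task yields 100 */
	return normal_prio < EXAMPLE_MAX_RT_PRIO ?
	       normal_prio : EXAMPLE_MAX_RT_PRIO;
}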
2095 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2096 * @q: The futex_q to unqueue
2098 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2099 * be paired with exactly one earlier call to queue_me().
2101 * Return:
2102  * 1 - if the futex_q was still queued (and we removed it);
2103 * 0 - if the futex_q was already removed by the waking thread
2105 static int unqueue_me(struct futex_q *q)
2107 spinlock_t *lock_ptr;
2108 int ret = 0;
2110 /* In the common case we don't take the spinlock, which is nice. */
2111 retry:
2113 * q->lock_ptr can change between this read and the following spin_lock.
2114 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2115 * optimizing lock_ptr out of the logic below.
2117 lock_ptr = READ_ONCE(q->lock_ptr);
2118 if (lock_ptr != NULL) {
2119 spin_lock(lock_ptr);
2121 * q->lock_ptr can change between reading it and
2122 * spin_lock(), causing us to take the wrong lock. This
2123 * corrects the race condition.
2125 * Reasoning goes like this: if we have the wrong lock,
2126 * q->lock_ptr must have changed (maybe several times)
2127 * between reading it and the spin_lock(). It can
2128 * change again after the spin_lock() but only if it was
2129 * already changed before the spin_lock(). It cannot,
2130 * however, change back to the original value. Therefore
2131 * we can detect whether we acquired the correct lock.
2133 if (unlikely(lock_ptr != q->lock_ptr)) {
2134 spin_unlock(lock_ptr);
2135 goto retry;
2137 __unqueue_futex(q);
2139 BUG_ON(q->pi_state);
2141 spin_unlock(lock_ptr);
2142 ret = 1;
2145 drop_futex_key_refs(&q->key);
2146 return ret;
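/*
 * Illustrative userspace sketch (not part of this file): the
 * read-lock-recheck pattern that unqueue_me() relies on, expressed
 * with pthreads and C11 atomics. The struct and names are
 * hypothetical; the point is revalidating the lock pointer after
 * acquisition, exactly as reasoned about in the comment above.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct qnode {
	pthread_mutex_t *_Atomic lock_ptr;	/* switched by a requeuer */
};

static pthread_mutex_t *lock_current_bucket(struct qnode *q)
{
	for (;;) {
		pthread_mutex_t *lk = atomic_load(&q->lock_ptr);

		if (!lk)
			return NULL;		/* already dequeued */
		pthread_mutex_lock(lk);
		if (lk == atomic_load(&q->lock_ptr))
			return lk;		/* we hold the right lock */
		pthread_mutex_unlock(lk);	/* lost a race; retry */
	}
}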
2150  * PI futexes cannot be requeued and must remove themselves from the
2151 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2152 * and dropped here.
2154 static void unqueue_me_pi(struct futex_q *q)
2155 __releases(q->lock_ptr)
2157 __unqueue_futex(q);
2159 BUG_ON(!q->pi_state);
2160 put_pi_state(q->pi_state);
2161 q->pi_state = NULL;
2163 spin_unlock(q->lock_ptr);
2167 * Fixup the pi_state owner with the new owner.
2169  * Must be called with the hash bucket lock held, and mm->mmap_sem held
2170  * for non-private futexes.
2172 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2173 struct task_struct *newowner)
2175 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2176 struct futex_pi_state *pi_state = q->pi_state;
2177 struct task_struct *oldowner = pi_state->owner;
2178 u32 uval, uninitialized_var(curval), newval;
2179 int ret;
2181 /* Owner died? */
2182 if (!pi_state->owner)
2183 newtid |= FUTEX_OWNER_DIED;
2186 * We are here either because we stole the rtmutex from the
2187 * previous highest priority waiter or we are the highest priority
2188 * waiter but failed to get the rtmutex the first time.
2189 * We have to replace the newowner TID in the user space variable.
2190 * This must be atomic as we have to preserve the owner died bit here.
2192 * Note: We write the user space value _before_ changing the pi_state
2193 	 * because we can fault here. Imagine swapped-out pages or a fork
2194 	 * that marked all the anonymous memory read-only for COW.
2196 * Modifying pi_state _before_ the user space value would
2197 * leave the pi_state in an inconsistent state when we fault
2198 * here, because we need to drop the hash bucket lock to
2199 * handle the fault. This might be observed in the PID check
2200 * in lookup_pi_state.
2202 retry:
2203 if (get_futex_value_locked(&uval, uaddr))
2204 goto handle_fault;
2206 while (1) {
2207 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2209 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
2210 goto handle_fault;
2211 if (curval == uval)
2212 break;
2213 uval = curval;
2217 * We fixed up user space. Now we need to fix the pi_state
2218 * itself.
2220 if (pi_state->owner != NULL) {
2221 raw_spin_lock_irq(&pi_state->owner->pi_lock);
2222 WARN_ON(list_empty(&pi_state->list));
2223 list_del_init(&pi_state->list);
2224 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
2227 pi_state->owner = newowner;
2229 raw_spin_lock_irq(&newowner->pi_lock);
2230 WARN_ON(!list_empty(&pi_state->list));
2231 list_add(&pi_state->list, &newowner->pi_state_list);
2232 raw_spin_unlock_irq(&newowner->pi_lock);
2233 return 0;
2236 * To handle the page fault we need to drop the hash bucket
2237 * lock here. That gives the other task (either the highest priority
2238 * waiter itself or the task which stole the rtmutex) the
2239 * chance to try the fixup of the pi_state. So once we are
2240 * back from handling the fault we need to check the pi_state
2241 * after reacquiring the hash bucket lock and before trying to
2242 * do another fixup. When the fixup has been done already we
2243 * simply return.
2245 handle_fault:
2246 spin_unlock(q->lock_ptr);
2248 ret = fault_in_user_writeable(uaddr);
2250 spin_lock(q->lock_ptr);
2253 * Check if someone else fixed it for us:
2255 if (pi_state->owner != oldowner)
2256 return 0;
2258 if (ret)
2259 return ret;
2261 goto retry;
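/*
 * Illustrative userspace sketch (not part of this file): the same
 * "install a new TID but preserve the owner-died bit" cmpxchg loop
 * as above, using C11 atomics. FUTEX_OWNER_DIED and FUTEX_WAITERS
 * come from <linux/futex.h>; the helper name is hypothetical.
 */
#include <linux/futex.h>
#include <stdatomic.h>
#include <stdint.h>

static void example_fixup_owner_tid(_Atomic uint32_t *uaddr, uint32_t newtid)
{
	uint32_t uval = atomic_load(uaddr);
	uint32_t newval;

	do {
		/* keep OWNER_DIED, set WAITERS, install the new owner */
		newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | newtid;
		/* on failure, uval is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(uaddr, &uval, newval));
}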
2264 static long futex_wait_restart(struct restart_block *restart);
2267 * fixup_owner() - Post lock pi_state and corner case management
2268 * @uaddr: user address of the futex
2269 * @q: futex_q (contains pi_state and access to the rt_mutex)
2270 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2272 * After attempting to lock an rt_mutex, this function is called to cleanup
2273 * the pi_state owner as well as handle race conditions that may allow us to
2274 * acquire the lock. Must be called with the hb lock held.
2276 * Return:
2277 * 1 - success, lock taken;
2278 * 0 - success, lock not taken;
2279 * <0 - on error (-EFAULT)
2281 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2283 struct task_struct *owner;
2284 int ret = 0;
2286 if (locked) {
2288 * Got the lock. We might not be the anticipated owner if we
2289 * did a lock-steal - fix up the PI-state in that case:
2291 if (q->pi_state->owner != current)
2292 ret = fixup_pi_state_owner(uaddr, q, current);
2293 goto out;
2297 * Catch the rare case, where the lock was released when we were on the
2298 * way back before we locked the hash bucket.
2300 if (q->pi_state->owner == current) {
2302 * Try to get the rt_mutex now. This might fail as some other
2303 * task acquired the rt_mutex after we removed ourself from the
2304 * rt_mutex waiters list.
2306 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2307 locked = 1;
2308 goto out;
2312 * pi_state is incorrect, some other task did a lock steal and
2313 * we returned due to timeout or signal without taking the
2314 * rt_mutex. Too late.
2316 raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
2317 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
2318 if (!owner)
2319 owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
2320 raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
2321 ret = fixup_pi_state_owner(uaddr, q, owner);
2322 goto out;
2326 * Paranoia check. If we did not take the lock, then we should not be
2327 * the owner of the rt_mutex.
2329 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2330 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2331 "pi-state %p\n", ret,
2332 q->pi_state->pi_mutex.owner,
2333 q->pi_state->owner);
2335 out:
2336 return ret ? ret : locked;
2340 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2341 * @hb: the futex hash bucket, must be locked by the caller
2342 * @q: the futex_q to queue up on
2343  * @timeout:	the prepared hrtimer_sleeper, or NULL for no timeout
2345 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2346 struct hrtimer_sleeper *timeout)
2349 * The task state is guaranteed to be set before another task can
2350 * wake it. set_current_state() is implemented using smp_store_mb() and
2351 * queue_me() calls spin_unlock() upon completion, both serializing
2352 * access to the hash list and forcing another memory barrier.
2354 set_current_state(TASK_INTERRUPTIBLE);
2355 queue_me(q, hb);
2357 /* Arm the timer */
2358 if (timeout)
2359 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2362 * If we have been removed from the hash list, then another task
2363 * has tried to wake us, and we can skip the call to schedule().
2365 if (likely(!plist_node_empty(&q->list))) {
2367 * If the timer has already expired, current will already be
2368 * flagged for rescheduling. Only call schedule if there
2369 * is no timeout, or if it has yet to expire.
2371 if (!timeout || timeout->task)
2372 freezable_schedule();
2374 __set_current_state(TASK_RUNNING);
2378 * futex_wait_setup() - Prepare to wait on a futex
2379 * @uaddr: the futex userspace address
2380 * @val: the expected value
2381 * @flags: futex flags (FLAGS_SHARED, etc.)
2382 * @q: the associated futex_q
2383 * @hb: storage for hash_bucket pointer to be returned to caller
2385 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2386 * compare it with the expected value. Handle atomic faults internally.
2387 * Return with the hb lock held and a q.key reference on success, and unlocked
2388 * with no q.key reference on failure.
2390 * Return:
2391 * 0 - uaddr contains val and hb has been locked;
2392  * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2394 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2395 struct futex_q *q, struct futex_hash_bucket **hb)
2397 u32 uval;
2398 int ret;
2401 * Access the page AFTER the hash-bucket is locked.
2402 * Order is important:
2404 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2405 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2407 * The basic logical guarantee of a futex is that it blocks ONLY
2408 * if cond(var) is known to be true at the time of blocking, for
2409 * any cond. If we locked the hash-bucket after testing *uaddr, that
2410 * would open a race condition where we could block indefinitely with
2411 * cond(var) false, which would violate the guarantee.
2413 * On the other hand, we insert q and release the hash-bucket only
2414 * after testing *uaddr. This guarantees that futex_wait() will NOT
2415 	 * absorb a wakeup if *uaddr does not match the desired value
2416 * while the syscall executes.
2418 retry:
2419 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2420 if (unlikely(ret != 0))
2421 return ret;
2423 retry_private:
2424 *hb = queue_lock(q);
2426 ret = get_futex_value_locked(&uval, uaddr);
2428 if (ret) {
2429 queue_unlock(*hb);
2431 ret = get_user(uval, uaddr);
2432 if (ret)
2433 goto out;
2435 if (!(flags & FLAGS_SHARED))
2436 goto retry_private;
2438 put_futex_key(&q->key);
2439 goto retry;
2442 if (uval != val) {
2443 queue_unlock(*hb);
2444 ret = -EWOULDBLOCK;
2447 out:
2448 if (ret)
2449 put_futex_key(&q->key);
2450 return ret;
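/*
 * Illustrative userspace sketch (not part of this file): the
 * waiter/waker protocol from the ordering comment above. FUTEX_WAIT
 * blocks only while *uaddr still equals the expected value, so the
 * waiter re-tests its condition in a loop; names are hypothetical.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

static void wait_for_flag(uint32_t *flag)
{
	while (*(volatile uint32_t *)flag == 0) {
		/* Sleeps only if *flag is still 0; on EWOULDBLOCK or
		 * a spurious wakeup we just re-test the condition. */
		syscall(SYS_futex, flag, FUTEX_WAIT, 0, NULL, NULL, 0);
	}
}

static void set_flag_and_wake(uint32_t *flag)
{
	*(volatile uint32_t *)flag = 1;
	syscall(SYS_futex, flag, FUTEX_WAKE, 1, NULL, NULL, 0);
}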
2453 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2454 ktime_t *abs_time, u32 bitset)
2456 struct hrtimer_sleeper timeout, *to = NULL;
2457 struct restart_block *restart;
2458 struct futex_hash_bucket *hb;
2459 struct futex_q q = futex_q_init;
2460 int ret;
2462 if (!bitset)
2463 return -EINVAL;
2464 q.bitset = bitset;
2466 if (abs_time) {
2467 to = &timeout;
2469 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2470 CLOCK_REALTIME : CLOCK_MONOTONIC,
2471 HRTIMER_MODE_ABS);
2472 hrtimer_init_sleeper(to, current);
2473 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2474 current->timer_slack_ns);
2477 retry:
2479 * Prepare to wait on uaddr. On success, holds hb lock and increments
2480 * q.key refs.
2482 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2483 if (ret)
2484 goto out;
2486 /* queue_me and wait for wakeup, timeout, or a signal. */
2487 futex_wait_queue_me(hb, &q, to);
2489 /* If we were woken (and unqueued), we succeeded, whatever. */
2490 ret = 0;
2491 /* unqueue_me() drops q.key ref */
2492 if (!unqueue_me(&q))
2493 goto out;
2494 ret = -ETIMEDOUT;
2495 if (to && !to->task)
2496 goto out;
2499 * We expect signal_pending(current), but we might be the
2500 * victim of a spurious wakeup as well.
2502 if (!signal_pending(current))
2503 goto retry;
2505 ret = -ERESTARTSYS;
2506 if (!abs_time)
2507 goto out;
2509 restart = &current->restart_block;
2510 restart->fn = futex_wait_restart;
2511 restart->futex.uaddr = uaddr;
2512 restart->futex.val = val;
2513 restart->futex.time = abs_time->tv64;
2514 restart->futex.bitset = bitset;
2515 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2517 ret = -ERESTART_RESTARTBLOCK;
2519 out:
2520 if (to) {
2521 hrtimer_cancel(&to->timer);
2522 destroy_hrtimer_on_stack(&to->timer);
2524 return ret;
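/*
 * Illustrative userspace sketch (not part of this file): how the
 * return values of futex_wait() surface to a caller. -ERESTARTSYS and
 * the restart block are invisible from user space (the syscall is
 * simply re-executed), so only EWOULDBLOCK, ETIMEDOUT and EINTR need
 * handling. The wrapper name is hypothetical; note that re-waiting
 * after EINTR restarts the *relative* timeout, which the kernel's
 * restart block avoids by carrying an absolute expiry.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <errno.h>
#include <time.h>

/* Returns 0 when woken or when *uaddr != val, -ETIMEDOUT on timeout. */
static int example_wait_timed(uint32_t *uaddr, uint32_t val,
			      const struct timespec *rel)
{
	for (;;) {
		if (!syscall(SYS_futex, uaddr, FUTEX_WAIT, val, rel,
			     NULL, 0))
			return 0;
		if (errno == EWOULDBLOCK)
			return 0;	/* value already changed */
		if (errno == ETIMEDOUT)
			return -ETIMEDOUT;
		/* EINTR: a signal or spurious wakeup; wait again */
	}
}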
2528 static long futex_wait_restart(struct restart_block *restart)
2530 u32 __user *uaddr = restart->futex.uaddr;
2531 ktime_t t, *tp = NULL;
2533 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2534 t.tv64 = restart->futex.time;
2535 tp = &t;
2537 restart->fn = do_no_restart_syscall;
2539 return (long)futex_wait(uaddr, restart->futex.flags,
2540 restart->futex.val, tp, restart->futex.bitset);
2545  * Userspace tried a 0 -> TID atomic transition of the futex value
2546  * and failed. The kernel side here does the whole locking operation:
2547  * if there are waiters then it will block on the underlying rt-mutex,
2548  * it does the priority inheritance, etc. (Due to races the kernel
2549  * might see a 0 value of the futex too.)
2551  * Also serves as the futex trylock_pi() operation, with matching semantics.
2553 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2554 ktime_t *time, int trylock)
2556 struct hrtimer_sleeper timeout, *to = NULL;
2557 struct futex_hash_bucket *hb;
2558 struct futex_q q = futex_q_init;
2559 int res, ret;
2561 if (refill_pi_state_cache())
2562 return -ENOMEM;
2564 if (time) {
2565 to = &timeout;
2566 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2567 HRTIMER_MODE_ABS);
2568 hrtimer_init_sleeper(to, current);
2569 hrtimer_set_expires(&to->timer, *time);
2572 retry:
2573 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2574 if (unlikely(ret != 0))
2575 goto out;
2577 retry_private:
2578 hb = queue_lock(&q);
2580 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2581 if (unlikely(ret)) {
2583 * Atomic work succeeded and we got the lock,
2584 * or failed. Either way, we do _not_ block.
2586 switch (ret) {
2587 case 1:
2588 /* We got the lock. */
2589 ret = 0;
2590 goto out_unlock_put_key;
2591 case -EFAULT:
2592 goto uaddr_faulted;
2593 case -EAGAIN:
2595 * Two reasons for this:
2596 * - Task is exiting and we just wait for the
2597 * exit to complete.
2598 * - The user space value changed.
2600 queue_unlock(hb);
2601 put_futex_key(&q.key);
2602 cond_resched();
2603 goto retry;
2604 default:
2605 goto out_unlock_put_key;
2610 * Only actually queue now that the atomic ops are done:
2612 queue_me(&q, hb);
2614 WARN_ON(!q.pi_state);
2616 * Block on the PI mutex:
2618 if (!trylock) {
2619 ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
2620 } else {
2621 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2622 /* Fixup the trylock return value: */
2623 ret = ret ? 0 : -EWOULDBLOCK;
2626 spin_lock(q.lock_ptr);
2628 * Fixup the pi_state owner and possibly acquire the lock if we
2629 * haven't already.
2631 res = fixup_owner(uaddr, &q, !ret);
2633 	 * If fixup_owner() returned an error, propagate that. If it acquired
2634 * the lock, clear our -ETIMEDOUT or -EINTR.
2636 if (res)
2637 ret = (res < 0) ? res : 0;
2640 * If fixup_owner() faulted and was unable to handle the fault, unlock
2641 * it and return the fault to userspace.
2643 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2644 rt_mutex_unlock(&q.pi_state->pi_mutex);
2646 /* Unqueue and drop the lock */
2647 unqueue_me_pi(&q);
2649 goto out_put_key;
2651 out_unlock_put_key:
2652 queue_unlock(hb);
2654 out_put_key:
2655 put_futex_key(&q.key);
2656 out:
2657 if (to)
2658 destroy_hrtimer_on_stack(&to->timer);
2659 return ret != -EINTR ? ret : -ERESTARTNOINTR;
2661 uaddr_faulted:
2662 queue_unlock(hb);
2664 ret = fault_in_user_writeable(uaddr);
2665 if (ret)
2666 goto out_put_key;
2668 if (!(flags & FLAGS_SHARED))
2669 goto retry_private;
2671 put_futex_key(&q.key);
2672 goto retry;
2676 * Userspace attempted a TID -> 0 atomic transition, and failed.
2677 * This is the in-kernel slowpath: we look up the PI state (if any),
2678 * and do the rt-mutex unlock.
2680 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2682 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2683 union futex_key key = FUTEX_KEY_INIT;
2684 struct futex_hash_bucket *hb;
2685 struct futex_q *match;
2686 int ret;
2688 retry:
2689 if (get_user(uval, uaddr))
2690 return -EFAULT;
2692 * We release only a lock we actually own:
2694 if ((uval & FUTEX_TID_MASK) != vpid)
2695 return -EPERM;
2697 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2698 if (ret)
2699 return ret;
2701 hb = hash_futex(&key);
2702 spin_lock(&hb->lock);
2705 * Check waiters first. We do not trust user space values at
2706 * all and we at least want to know if user space fiddled
2707 * with the futex value instead of blindly unlocking.
2709 match = futex_top_waiter(hb, &key);
2710 if (match) {
2711 ret = wake_futex_pi(uaddr, uval, match, hb);
2713 * In case of success wake_futex_pi dropped the hash
2714 * bucket lock.
2716 if (!ret)
2717 goto out_putkey;
2719 * The atomic access to the futex value generated a
2720 * pagefault, so retry the user-access and the wakeup:
2722 if (ret == -EFAULT)
2723 goto pi_faulted;
2725 		 * An unconditional UNLOCK_PI op raced against a waiter
2726 * setting the FUTEX_WAITERS bit. Try again.
2728 if (ret == -EAGAIN) {
2729 spin_unlock(&hb->lock);
2730 put_futex_key(&key);
2731 goto retry;
2734 * wake_futex_pi has detected invalid state. Tell user
2735 * space.
2737 goto out_unlock;
2741 * We have no kernel internal state, i.e. no waiters in the
2742 * kernel. Waiters which are about to queue themselves are stuck
2743 	 * on hb->lock. So we can safely ignore them. We preserve
2744 	 * neither the WAITERS bit nor the OWNER_DIED one. We are the
2745 	 * owner.
2747 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
2748 goto pi_faulted;
2751 * If uval has changed, let user space handle it.
2753 ret = (curval == uval) ? 0 : -EAGAIN;
2755 out_unlock:
2756 spin_unlock(&hb->lock);
2757 out_putkey:
2758 put_futex_key(&key);
2759 return ret;
2761 pi_faulted:
2762 spin_unlock(&hb->lock);
2763 put_futex_key(&key);
2765 ret = fault_in_user_writeable(uaddr);
2766 if (!ret)
2767 goto retry;
2769 return ret;
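/*
 * Illustrative userspace sketch (not part of this file): the PI-futex
 * protocol whose slow paths futex_lock_pi() and futex_unlock_pi()
 * implement. The fast path is a 0 -> TID cmpxchg on lock and a
 * TID -> 0 cmpxchg on unlock, done entirely in user space; the kernel
 * is entered only when a cmpxchg fails (waiters, owner died, races).
 * The wrapper names are hypothetical.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>
#include <stdint.h>

static void example_pi_lock(_Atomic uint32_t *uaddr)
{
	uint32_t zero = 0;
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	if (atomic_compare_exchange_strong(uaddr, &zero, tid))
		return;		/* uncontended: we are the owner */
	/* Contended: the kernel queues us on the rt_mutex and does PI. */
	syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void example_pi_unlock(_Atomic uint32_t *uaddr)
{
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	if (atomic_compare_exchange_strong(uaddr, &tid, 0))
		return;		/* no waiters: released in user space */
	/* FUTEX_WAITERS is set: let the kernel hand the lock over. */
	syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}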
2773 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2774  * @hb:		the hash_bucket the futex_q was originally enqueued on
2775 * @q: the futex_q woken while waiting to be requeued
2776 * @key2: the futex_key of the requeue target futex
2777 * @timeout: the timeout associated with the wait (NULL if none)
2779 * Detect if the task was woken on the initial futex as opposed to the requeue
2780 * target futex. If so, determine if it was a timeout or a signal that caused
2781 * the wakeup and return the appropriate error code to the caller. Must be
2782 * called with the hb lock held.
2784 * Return:
2785 * 0 = no early wakeup detected;
2786 * <0 = -ETIMEDOUT or -ERESTARTNOINTR
2788 static inline
2789 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2790 struct futex_q *q, union futex_key *key2,
2791 struct hrtimer_sleeper *timeout)
2793 int ret = 0;
2796 * With the hb lock held, we avoid races while we process the wakeup.
2797 * We only need to hold hb (and not hb2) to ensure atomicity as the
2798 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2799 * It can't be requeued from uaddr2 to something else since we don't
2800 * support a PI aware source futex for requeue.
2802 if (!match_futex(&q->key, key2)) {
2803 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2805 * We were woken prior to requeue by a timeout or a signal.
2806 * Unqueue the futex_q and determine which it was.
2808 plist_del(&q->list, &hb->chain);
2809 hb_waiters_dec(hb);
2811 /* Handle spurious wakeups gracefully */
2812 ret = -EWOULDBLOCK;
2813 if (timeout && !timeout->task)
2814 ret = -ETIMEDOUT;
2815 else if (signal_pending(current))
2816 ret = -ERESTARTNOINTR;
2818 return ret;
2822 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2823 * @uaddr: the futex we initially wait on (non-pi)
2824 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2825 * the same type, no requeueing from private to shared, etc.
2826 * @val: the expected value of uaddr
2827 * @abs_time: absolute timeout
2828 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2829 * @uaddr2: the pi futex we will take prior to returning to user-space
2831 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2832  * uaddr2 which must be PI aware and distinct from uaddr. Normal wakeup will wake
2833 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2834 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
2835 * without one, the pi logic would not know which task to boost/deboost, if
2836 * there was a need to.
2838 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2839  * via the following:
2840 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2841 * 2) wakeup on uaddr2 after a requeue
2842 * 3) signal
2843 * 4) timeout
2845 * If 3, cleanup and return -ERESTARTNOINTR.
2847 * If 2, we may then block on trying to take the rt_mutex and return via:
2848 * 5) successful lock
2849 * 6) signal
2850 * 7) timeout
2851 * 8) other lock acquisition failure
2853 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2855  * If 4 or 7, we clean up and return -ETIMEDOUT.
2857 * Return:
2858 * 0 - On success;
2859 * <0 - On error
2861 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2862 u32 val, ktime_t *abs_time, u32 bitset,
2863 u32 __user *uaddr2)
2865 struct hrtimer_sleeper timeout, *to = NULL;
2866 struct rt_mutex_waiter rt_waiter;
2867 struct futex_hash_bucket *hb;
2868 union futex_key key2 = FUTEX_KEY_INIT;
2869 struct futex_q q = futex_q_init;
2870 int res, ret;
2872 if (uaddr == uaddr2)
2873 return -EINVAL;
2875 if (!bitset)
2876 return -EINVAL;
2878 if (abs_time) {
2879 to = &timeout;
2880 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2881 CLOCK_REALTIME : CLOCK_MONOTONIC,
2882 HRTIMER_MODE_ABS);
2883 hrtimer_init_sleeper(to, current);
2884 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2885 current->timer_slack_ns);
2889 * The waiter is allocated on our stack, manipulated by the requeue
2890 * code while we sleep on uaddr.
2892 debug_rt_mutex_init_waiter(&rt_waiter);
2893 RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2894 RB_CLEAR_NODE(&rt_waiter.tree_entry);
2895 rt_waiter.task = NULL;
2897 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2898 if (unlikely(ret != 0))
2899 goto out;
2901 q.bitset = bitset;
2902 q.rt_waiter = &rt_waiter;
2903 q.requeue_pi_key = &key2;
2906 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2907 * count.
2909 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2910 if (ret)
2911 goto out_key2;
2914 * The check above which compares uaddrs is not sufficient for
2915 * shared futexes. We need to compare the keys:
2917 if (match_futex(&q.key, &key2)) {
2918 queue_unlock(hb);
2919 ret = -EINVAL;
2920 goto out_put_keys;
2923 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2924 futex_wait_queue_me(hb, &q, to);
2926 spin_lock(&hb->lock);
2927 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2928 spin_unlock(&hb->lock);
2929 if (ret)
2930 goto out_put_keys;
2933 * In order for us to be here, we know our q.key == key2, and since
2934 * we took the hb->lock above, we also know that futex_requeue() has
2935 * completed and we no longer have to concern ourselves with a wakeup
2936 * race with the atomic proxy lock acquisition by the requeue code. The
2937 * futex_requeue dropped our key1 reference and incremented our key2
2938 * reference count.
2941 /* Check if the requeue code acquired the second futex for us. */
2942 if (!q.rt_waiter) {
2944 * Got the lock. We might not be the anticipated owner if we
2945 * did a lock-steal - fix up the PI-state in that case.
2947 if (q.pi_state && (q.pi_state->owner != current)) {
2948 spin_lock(q.lock_ptr);
2949 ret = fixup_pi_state_owner(uaddr2, &q, current);
2950 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
2951 rt_mutex_unlock(&q.pi_state->pi_mutex);
2953 * Drop the reference to the pi state which
2954 * the requeue_pi() code acquired for us.
2956 put_pi_state(q.pi_state);
2957 spin_unlock(q.lock_ptr);
2959 } else {
2960 struct rt_mutex *pi_mutex;
2963 * We have been woken up by futex_unlock_pi(), a timeout, or a
2964 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2965 * the pi_state.
2967 WARN_ON(!q.pi_state);
2968 pi_mutex = &q.pi_state->pi_mutex;
2969 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
2970 debug_rt_mutex_free_waiter(&rt_waiter);
2972 spin_lock(q.lock_ptr);
2974 * Fixup the pi_state owner and possibly acquire the lock if we
2975 * haven't already.
2977 res = fixup_owner(uaddr2, &q, !ret);
2979 		 * If fixup_owner() returned an error, propagate that. If it
2980 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2982 if (res)
2983 ret = (res < 0) ? res : 0;
2986 * If fixup_pi_state_owner() faulted and was unable to handle
2987 * the fault, unlock the rt_mutex and return the fault to
2988 * userspace.
2990 if (ret && rt_mutex_owner(pi_mutex) == current)
2991 rt_mutex_unlock(pi_mutex);
2993 /* Unqueue and drop the lock. */
2994 unqueue_me_pi(&q);
2997 if (ret == -EINTR) {
2999 * We've already been requeued, but cannot restart by calling
3000 * futex_lock_pi() directly. We could restart this syscall, but
3001 * it would detect that the user space "val" changed and return
3002 * -EWOULDBLOCK. Save the overhead of the restart and return
3003 * -EWOULDBLOCK directly.
3005 ret = -EWOULDBLOCK;
3008 out_put_keys:
3009 put_futex_key(&q.key);
3010 out_key2:
3011 put_futex_key(&key2);
3013 out:
3014 if (to) {
3015 hrtimer_cancel(&to->timer);
3016 destroy_hrtimer_on_stack(&to->timer);
3018 return ret;
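/*
 * Illustrative userspace sketch (not part of this file): the only
 * valid pairing of the two requeue-PI operations, as used by a
 * PI-aware condition variable. The waiter blocks on @cond and is
 * handed @mutex (a PI futex) by the kernel; it never "wakes" on the
 * bare condvar. Wrapper names are hypothetical.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

static long example_pi_cond_wait(uint32_t *cond, uint32_t val,
				 uint32_t *mutex)
{
	/* On return the caller owns *mutex (rt_mutex fixed up for us). */
	return syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI, val,
		       NULL, mutex, 0);
}

static long example_pi_cond_signal(uint32_t *cond, uint32_t *mutex)
{
	uint32_t seen = *(volatile uint32_t *)cond;

	/* nr_wake must be 1 for requeue-PI; requeue none beyond that. */
	return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI, 1,
		       (unsigned long)0, mutex, seen);
}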
3022 * Support for robust futexes: the kernel cleans up held futexes at
3023 * thread exit time.
3025 * Implementation: user-space maintains a per-thread list of locks it
3026 * is holding. Upon do_exit(), the kernel carefully walks this list,
3027 * and marks all locks that are owned by this thread with the
3028 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
3029 * always manipulated with the lock held, so the list is private and
3030 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
3031 * field, to allow the kernel to clean up if the thread dies after
3032 * acquiring the lock, but just before it could have added itself to
3033 * the list. There can only be one such pending lock.
3037 * sys_set_robust_list() - Set the robust-futex list head of a task
3038 * @head: pointer to the list-head
3039 * @len: length of the list-head, as userspace expects
3041 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
3042 size_t, len)
3044 if (!futex_cmpxchg_enabled)
3045 return -ENOSYS;
3047 * The kernel knows only one size for now:
3049 if (unlikely(len != sizeof(*head)))
3050 return -EINVAL;
3052 current->robust_list = head;
3054 return 0;
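/*
 * Illustrative userspace sketch (not part of this file): registering
 * the per-thread robust list, as a threading library would at thread
 * start. The kernel stores the pointer above and only walks the list
 * from exit_robust_list() when the thread dies.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>

static __thread struct robust_list_head example_robust_head;

static long example_register_robust_list(void)
{
	/* empty circular list; futex word sits at offset 0 in each entry */
	example_robust_head.list.next = &example_robust_head.list;
	example_robust_head.futex_offset = 0;
	example_robust_head.list_op_pending = NULL;
	return syscall(SYS_set_robust_list, &example_robust_head,
		       sizeof(example_robust_head));
}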
3058 * sys_get_robust_list() - Get the robust-futex list head of a task
3059 * @pid: pid of the process [zero for current task]
3060 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3061 * @len_ptr: pointer to a length field, the kernel fills in the header size
3063 SYSCALL_DEFINE3(get_robust_list, int, pid,
3064 struct robust_list_head __user * __user *, head_ptr,
3065 size_t __user *, len_ptr)
3067 struct robust_list_head __user *head;
3068 unsigned long ret;
3069 struct task_struct *p;
3071 if (!futex_cmpxchg_enabled)
3072 return -ENOSYS;
3074 rcu_read_lock();
3076 ret = -ESRCH;
3077 if (!pid)
3078 p = current;
3079 else {
3080 p = find_task_by_vpid(pid);
3081 if (!p)
3082 goto err_unlock;
3085 ret = -EPERM;
3086 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3087 goto err_unlock;
3089 head = p->robust_list;
3090 rcu_read_unlock();
3092 if (put_user(sizeof(*head), len_ptr))
3093 return -EFAULT;
3094 return put_user(head, head_ptr);
3096 err_unlock:
3097 rcu_read_unlock();
3099 return ret;
3103 * Process a futex-list entry, check whether it's owned by the
3104 * dying task, and do notification if so:
3106 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3108 u32 uval, uninitialized_var(nval), mval;
3110 retry:
3111 if (get_user(uval, uaddr))
3112 return -1;
3114 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
3116 * Ok, this dying thread is truly holding a futex
3117 * of interest. Set the OWNER_DIED bit atomically
3118 * via cmpxchg, and if the value had FUTEX_WAITERS
3119 * set, wake up a waiter (if any). (We have to do a
3120 * futex_wake() even if OWNER_DIED is already set -
3121 * to handle the rare but possible case of recursive
3122 * thread-death.) The rest of the cleanup is done in
3123 * userspace.
3125 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3127 * We are not holding a lock here, but we want to have
3128 * the pagefault_disable/enable() protection because
3129 * we want to handle the fault gracefully. If the
3130 * access fails we try to fault in the futex with R/W
3131 * verification via get_user_pages. get_user() above
3132 * does not guarantee R/W access. If that fails we
3133 * give up and leave the futex locked.
3135 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
3136 if (fault_in_user_writeable(uaddr))
3137 return -1;
3138 goto retry;
3140 if (nval != uval)
3141 goto retry;
3144 * Wake robust non-PI futexes here. The wakeup of
3145 * PI futexes happens in exit_pi_state():
3147 if (!pi && (uval & FUTEX_WAITERS))
3148 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3150 return 0;
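/*
 * Illustrative userspace sketch (not part of this file): what the
 * OWNER_DIED marking above looks like to the next locker. A robust
 * trylock that succeeds with FUTEX_OWNER_DIED still set knows the
 * previous owner died inside the critical section and must recover
 * the protected state first (glibc reports this as EOWNERDEAD). The
 * helper is hypothetical and preserves the WAITERS bit as well.
 */
#include <linux/futex.h>
#include <stdatomic.h>
#include <stdint.h>

/* Returns 1 on lock-needs-recovery, 0 on clean lock, -1 if held. */
static int example_robust_trylock(_Atomic uint32_t *uaddr, uint32_t tid)
{
	uint32_t old = atomic_load(uaddr);

	for (;;) {
		if (old & FUTEX_TID_MASK)
			return -1;	/* a live owner holds it */
		if (atomic_compare_exchange_weak(uaddr, &old,
				tid | (old & (FUTEX_OWNER_DIED |
					      FUTEX_WAITERS))))
			return (old & FUTEX_OWNER_DIED) ? 1 : 0;
	}
}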
3154 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3156 static inline int fetch_robust_entry(struct robust_list __user **entry,
3157 struct robust_list __user * __user *head,
3158 unsigned int *pi)
3160 unsigned long uentry;
3162 if (get_user(uentry, (unsigned long __user *)head))
3163 return -EFAULT;
3165 *entry = (void __user *)(uentry & ~1UL);
3166 *pi = uentry & 1;
3168 return 0;
3172 * Walk curr->robust_list (very carefully, it's a userspace list!)
3173 * and mark any locks found there dead, and notify any waiters.
3175  * We silently return on any sign of a list-walking problem.
3177 void exit_robust_list(struct task_struct *curr)
3179 struct robust_list_head __user *head = curr->robust_list;
3180 struct robust_list __user *entry, *next_entry, *pending;
3181 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3182 unsigned int uninitialized_var(next_pi);
3183 unsigned long futex_offset;
3184 int rc;
3186 if (!futex_cmpxchg_enabled)
3187 return;
3190 * Fetch the list head (which was registered earlier, via
3191 * sys_set_robust_list()):
3193 if (fetch_robust_entry(&entry, &head->list.next, &pi))
3194 return;
3196 * Fetch the relative futex offset:
3198 if (get_user(futex_offset, &head->futex_offset))
3199 return;
3201 * Fetch any possibly pending lock-add first, and handle it
3202 * if it exists:
3204 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3205 return;
3207 next_entry = NULL; /* avoid warning with gcc */
3208 while (entry != &head->list) {
3210 * Fetch the next entry in the list before calling
3211 * handle_futex_death:
3213 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3215 * A pending lock might already be on the list, so
3216 * don't process it twice:
3218 if (entry != pending)
3219 if (handle_futex_death((void __user *)entry + futex_offset,
3220 curr, pi))
3221 return;
3222 if (rc)
3223 return;
3224 entry = next_entry;
3225 pi = next_pi;
3227 * Avoid excessively long or circular lists:
3229 if (!--limit)
3230 break;
3232 cond_resched();
3235 if (pending)
3236 handle_futex_death((void __user *)pending + futex_offset,
3237 curr, pip);
3240 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3241 u32 __user *uaddr2, u32 val2, u32 val3)
3243 int cmd = op & FUTEX_CMD_MASK;
3244 unsigned int flags = 0;
3246 if (!(op & FUTEX_PRIVATE_FLAG))
3247 flags |= FLAGS_SHARED;
3249 if (op & FUTEX_CLOCK_REALTIME) {
3250 flags |= FLAGS_CLOCKRT;
3251 		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
3252 cmd != FUTEX_WAIT_REQUEUE_PI)
3253 return -ENOSYS;
3256 switch (cmd) {
3257 case FUTEX_LOCK_PI:
3258 case FUTEX_UNLOCK_PI:
3259 case FUTEX_TRYLOCK_PI:
3260 case FUTEX_WAIT_REQUEUE_PI:
3261 case FUTEX_CMP_REQUEUE_PI:
3262 if (!futex_cmpxchg_enabled)
3263 return -ENOSYS;
3266 switch (cmd) {
3267 case FUTEX_WAIT:
3268 		val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
3269 case FUTEX_WAIT_BITSET:
3270 return futex_wait(uaddr, flags, val, timeout, val3);
3271 case FUTEX_WAKE:
3272 		val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
3273 case FUTEX_WAKE_BITSET:
3274 return futex_wake(uaddr, flags, val, val3);
3275 case FUTEX_REQUEUE:
3276 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3277 case FUTEX_CMP_REQUEUE:
3278 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3279 case FUTEX_WAKE_OP:
3280 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3281 case FUTEX_LOCK_PI:
3282 return futex_lock_pi(uaddr, flags, timeout, 0);
3283 case FUTEX_UNLOCK_PI:
3284 return futex_unlock_pi(uaddr, flags);
3285 case FUTEX_TRYLOCK_PI:
3286 return futex_lock_pi(uaddr, flags, NULL, 1);
3287 case FUTEX_WAIT_REQUEUE_PI:
3288 val3 = FUTEX_BITSET_MATCH_ANY;
3289 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3290 uaddr2);
3291 case FUTEX_CMP_REQUEUE_PI:
3292 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3294 return -ENOSYS;
3298 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3299 struct timespec __user *, utime, u32 __user *, uaddr2,
3300 u32, val3)
3302 struct timespec ts;
3303 ktime_t t, *tp = NULL;
3304 u32 val2 = 0;
3305 int cmd = op & FUTEX_CMD_MASK;
3307 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3308 cmd == FUTEX_WAIT_BITSET ||
3309 cmd == FUTEX_WAIT_REQUEUE_PI)) {
3310 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3311 return -EFAULT;
3312 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
3313 return -EFAULT;
3314 if (!timespec_valid(&ts))
3315 return -EINVAL;
3317 t = timespec_to_ktime(ts);
3318 if (cmd == FUTEX_WAIT)
3319 t = ktime_add_safe(ktime_get(), t);
3320 tp = &t;
3323 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3324 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3326 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3327 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3328 val2 = (u32) (unsigned long) utime;
3330 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
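/*
 * Illustrative userspace sketch (not part of this file): a raw
 * wrapper mirroring the argument juggling above. The fourth syscall
 * slot carries either a timespec pointer (the wait flavours) or the
 * plain integer val2 (the requeue flavours and FUTEX_WAKE_OP); the
 * wrapper name is hypothetical.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <time.h>

static long example_futex(uint32_t *uaddr, int op, uint32_t val,
			  const struct timespec *timeout, uint32_t val2,
			  uint32_t *uaddr2, uint32_t val3)
{
	/* Only one of @timeout / @val2 is meaningful for a given op. */
	return syscall(SYS_futex, uaddr, op, val,
		       timeout ? (unsigned long)timeout : (unsigned long)val2,
		       uaddr2, val3);
}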
3333 static void __init futex_detect_cmpxchg(void)
3335 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3336 u32 curval;
3339 * This will fail and we want it. Some arch implementations do
3340 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3341 * functionality. We want to know that before we call in any
3342 * of the complex code paths. Also we want to prevent
3343 * registration of robust lists in that case. NULL is
3344 	 * guaranteed to fault and we get -EFAULT on a functional
3345 	 * implementation, while the non-functional ones return
3346 	 * -ENOSYS.
3348 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3349 futex_cmpxchg_enabled = 1;
3350 #endif
3353 static int __init futex_init(void)
3355 unsigned int futex_shift;
3356 unsigned long i;
3358 #if CONFIG_BASE_SMALL
3359 futex_hashsize = 16;
3360 #else
3361 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3362 #endif
3364 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3365 futex_hashsize, 0,
3366 futex_hashsize < 256 ? HASH_SMALL : 0,
3367 &futex_shift, NULL,
3368 futex_hashsize, futex_hashsize);
3369 futex_hashsize = 1UL << futex_shift;
3371 futex_detect_cmpxchg();
3373 for (i = 0; i < futex_hashsize; i++) {
3374 atomic_set(&futex_queues[i].waiters, 0);
3375 plist_head_init(&futex_queues[i].chain);
3376 spin_lock_init(&futex_queues[i].lock);
3379 return 0;
3381 core_initcall(futex_init);
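/*
 * Illustrative sketch (not part of this file): the hash-table sizing
 * rule in futex_init() written out by hand. On a !CONFIG_BASE_SMALL
 * build with, say, 6 possible CPUs, 256 * 6 = 1536 rounds up to 2048
 * hash buckets; alloc_large_system_hash() then reports the final
 * power-of-two size back through futex_shift.
 */
static unsigned long example_futex_hashsize(unsigned int ncpus)
{
	unsigned long want = 256UL * ncpus;
	unsigned long size = 1;

	while (size < want)	/* roundup_pow_of_two(), spelled out */
		size <<= 1;
	return size;
}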