[linux-2.6] kernel/futex.c - futex_requeue_pi optimization
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <asm/futex.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
        /*
         * list of 'owned' pi_state instances - these have to be
         * cleaned up in do_exit() if the task exits prematurely:
         */
        struct list_head list;

        /*
         * The PI object:
         */
        struct rt_mutex pi_mutex;

        struct task_struct *owner;
        atomic_t refcount;

        union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiters, then make the second condition true.
 */
struct futex_q {
        struct plist_node list;
        wait_queue_head_t waiters;

        /* Which hash list lock to use: */
        spinlock_t *lock_ptr;

        /* Key which the futex is hashed on: */
        union futex_key key;

        /* For fd, sigio sent using these: */
        int fd;
        struct file *filp;

        /* Optional priority inheritance state: */
        struct futex_pi_state *pi_state;
        struct task_struct *task;

        /*
         * This waiter is used in case of requeue from a
         * normal futex to a PI-futex
         */
        struct rt_mutex_waiter waiter;
};
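
/*
 * The woken-state protocol described above has two readers in this
 * file: unqueue_me() checks q->lock_ptr locklessly, and futex_wait()
 * tests !plist_node_empty(&q->list) before sleeping.
 */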

/*
 * Split the global futex_lock into every hash list lock.
 */
struct futex_hash_bucket {
        spinlock_t lock;
        struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/* Futex-fs vfsmount entry: */
static struct vfsmount *futex_mnt;

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
        u32 hash = jhash2((u32*)&key->both.word,
                          (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
                          key->both.offset);
        return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
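
/*
 * The hash covers key->both.word and key->both.ptr, seeded with
 * key->both.offset - exactly the fields that match_futex() below
 * compares - so keys that compare equal always hash to the same
 * bucket.
 */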

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
        return (key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
}

/*
 * Get parameters which are the keys for a futex.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * Returns: 0, or negative error code.
 * The key words are stored in *key on success.
 *
 * Should be called with &current->mm->mmap_sem but NOT any spinlocks.
 */
int get_futex_key(u32 __user *uaddr, union futex_key *key)
{
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct page *page;
        int err;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = address % PAGE_SIZE;
        if (unlikely((key->both.offset % sizeof(u32)) != 0))
                return -EINVAL;
        address -= key->both.offset;

        /*
         * The futex is hashed differently depending on whether
         * it's in a shared or private mapping.  So check vma first.
         */
        vma = find_extend_vma(mm, address);
        if (unlikely(!vma))
                return -EFAULT;

        /*
         * Permissions.
         */
        if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
                return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;

        /* Save the user address in the key */
        key->uaddr = uaddr;

        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.  Therefore we use
         * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
         * mappings of _writable_ handles.
         */
        if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
                key->private.mm = mm;
                key->private.address = address;
                return 0;
        }

        /*
         * Linear file mappings are also simple.
         */
        key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
        key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
        if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
                key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
                                     + vma->vm_pgoff);
                return 0;
        }

        /*
         * We could walk the page table to read the non-linear
         * pte, and get the page index without fetching the page
         * from swap.  But that's a lot of code to duplicate here
         * for a rare case, so we simply fetch the page.
         */
        err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
        if (err >= 0) {
                key->shared.pgoff =
                        page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
                put_page(page);
                return 0;
        }
        return err;
}
EXPORT_SYMBOL_GPL(get_futex_key);
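
/*
 * A sketch of the typical calling pattern (cf. futex_wake() below):
 *
 *        down_read(&current->mm->mmap_sem);
 *        ret = get_futex_key(uaddr, &key);
 *        if (!ret) {
 *                hb = hash_futex(&key);
 *                spin_lock(&hb->lock);
 *                ... wake or queue waiters ...
 *                spin_unlock(&hb->lock);
 *        }
 *        up_read(&current->mm->mmap_sem);
 *
 * mmap_sem must stay held across get_futex_key() and any later
 * get_futex_key_refs(), as the comment below explains.
 */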

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 * NOTE: mmap_sem MUST be held between get_futex_key() and calling this
 * function, if it is called at all.  mmap_sem keeps key->shared.inode valid.
 */
inline void get_futex_key_refs(union futex_key *key)
{
        if (key->both.ptr != 0) {
                if (key->both.offset & 1)
                        atomic_inc(&key->shared.inode->i_count);
                else
                        atomic_inc(&key->private.mm->mm_count);
        }
}
EXPORT_SYMBOL_GPL(get_futex_key_refs);

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
void drop_futex_key_refs(union futex_key *key)
{
        if (key->both.ptr != 0) {
                if (key->both.offset & 1)
                        iput(key->shared.inode);
                else
                        mmdrop(key->private.mm);
        }
}
EXPORT_SYMBOL_GPL(drop_futex_key_refs);

static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
{
        int ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}

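/*
 * get_futex_value_locked() is for callers that already hold a hash
 * bucket spinlock: taking a page fault there is not allowed, so the
 * read runs with pagefaults disabled and simply reports -EFAULT.
 * Callers then drop their locks, fault the page in via get_user()
 * and retry (see futex_wait() and futex_requeue()).
 */
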
/*
 * Fault handling. Called with current->mm->mmap_sem held.
 */
static int futex_handle_fault(unsigned long address, int attempt)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;

        if (attempt > 2 || !(vma = find_vma(mm, address)) ||
            vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
                return -EFAULT;

        switch (handle_mm_fault(mm, vma, address, 1)) {
        case VM_FAULT_MINOR:
                current->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                current->maj_flt++;
                break;
        default:
                return -EFAULT;
        }
        return 0;
}
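
/*
 * Callers pass an incrementing 'attempt' count; once it exceeds 2 we
 * give up with -EFAULT instead of retrying the fault forever.
 */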

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
        struct futex_pi_state *pi_state;

        if (likely(current->pi_state_cache))
                return 0;

        pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

        if (!pi_state)
                return -ENOMEM;

        INIT_LIST_HEAD(&pi_state->list);
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);

        current->pi_state_cache = pi_state;

        return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
        struct futex_pi_state *pi_state = current->pi_state_cache;

        WARN_ON(!pi_state);
        current->pi_state_cache = NULL;

        return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
        if (!atomic_dec_and_test(&pi_state->refcount))
                return;

        /*
         * If pi_state->owner is NULL, the owner is most probably dying
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);

                rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
        }

        if (current->pi_state_cache)
                kfree(pi_state);
        else {
                /*
                 * pi_state->list is already empty.
                 * clear pi_state->owner.
                 * refcount is at 0 - put it back to 1.
                 */
                pi_state->owner = NULL;
                atomic_set(&pi_state->refcount, 1);
                current->pi_state_cache = pi_state;
        }
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
        struct task_struct *p;

        rcu_read_lock();
        p = find_task_by_pid(pid);
        if (!p)
                goto out_unlock;
        if ((current->euid != p->euid) && (current->euid != p->uid)) {
                p = NULL;
                goto out_unlock;
        }
        if (p->exit_state != 0) {
                p = NULL;
                goto out_unlock;
        }
        get_task_struct(p);
out_unlock:
        rcu_read_unlock();

        return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
        union futex_key key;

        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
        spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {
                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = hash_futex(&key);
                spin_unlock_irq(&curr->pi_lock);

                spin_lock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
                        spin_unlock(&hb->lock);
                        continue;
                }

                WARN_ON(pi_state->owner != curr);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
                spin_unlock_irq(&curr->pi_lock);

                rt_mutex_unlock(&pi_state->pi_mutex);

                spin_unlock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
        }
        spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                union futex_key *key, struct futex_pi_state **ps)
{
        struct futex_pi_state *pi_state = NULL;
        struct futex_q *this, *next;
        struct plist_head *head;
        struct task_struct *p;
        pid_t pid;

        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, key)) {
                        /*
                         * Another waiter already exists - bump up
                         * the refcount and return its pi_state:
                         */
                        pi_state = this->pi_state;
                        /*
                         * Userspace might have messed up non-PI and PI futexes
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;

                        WARN_ON(!atomic_read(&pi_state->refcount));

                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;

                        return 0;
                }
        }

        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when the owner died bit is set
         * and TID = 0:
         */
        pid = uval & FUTEX_TID_MASK;
        if (!pid && (uval & FUTEX_OWNER_DIED))
                return -ESRCH;
        p = futex_find_get_task(pid);
        if (!p)
                return -ESRCH;

        pi_state = alloc_pi_state();

        /*
         * Initialize the pi_mutex in locked state and make 'p'
         * the owner of it:
         */
        rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

        /* Store the key for possible exit cleanups: */
        pi_state->key = *key;

        spin_lock_irq(&p->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
        spin_unlock_irq(&p->pi_lock);

        put_task_struct(p);

        *ps = pi_state;

        return 0;
}
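
/*
 * In short: lookup_pi_state() either attaches to the pi_state of an
 * already queued waiter (bumping its refcount), or - as the first
 * waiter - allocates a fresh pi_state whose rt_mutex is proxy-locked
 * on behalf of the task whose TID is stored in the futex value.
 */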

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
        plist_del(&q->list, &q->list.plist);
        if (q->filp)
                send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
        /*
         * The lock in wake_up_all() is a crucial memory barrier after the
         * plist_del() and also before assigning to q->lock_ptr.
         */
        wake_up_all(&q->waiters);
        /*
         * The waiting task can free the futex_q as soon as this is written,
         * without taking any locks.  This must come last.
         *
         * A memory barrier is required here to prevent the following store
         * to lock_ptr from getting ahead of the wakeup. Clearing the lock
         * at the end of wake_up_all() does not prevent this store from
         * moving.
         */
        smp_wmb();
        q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
        u32 curval, newval;

        if (!pi_state)
                return -EINVAL;

        spin_lock(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

        /*
         * This happens when we have stolen the lock and the original
         * pending owner did not enqueue itself back on the rt_mutex.
         * That's not a tragedy - it just tells us that a lock waiter
         * is in flight. We make the futex_q waiter the pending owner.
         */
        if (!new_owner)
                new_owner = this->task;

        /*
         * We pass it to the next owner. (The WAITERS bit is always
         * kept enabled while there is PI state around. We must also
         * preserve the owner died bit.)
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
                newval = FUTEX_WAITERS | new_owner->pid;
                /* Keep the FUTEX_WAITER_REQUEUED flag if it was set */
                newval |= (uval & FUTEX_WAITER_REQUEUED);

                pagefault_disable();
                curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
                pagefault_enable();
                if (curval == -EFAULT)
                        return -EFAULT;
                if (curval != uval)
                        return -EINVAL;
        }

        spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        spin_unlock_irq(&pi_state->owner->pi_lock);

        spin_lock_irq(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
        spin_unlock_irq(&new_owner->pi_lock);

        spin_unlock(&pi_state->pi_mutex.wait_lock);
        rt_mutex_unlock(&pi_state->pi_mutex);

        return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
        u32 oldval;

        /*
         * There is no waiter, so we unlock the futex. The owner died
         * bit need not be preserved here. We are the owner:
         */
        pagefault_disable();
        oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
        pagefault_enable();

        if (oldval == -EFAULT)
                return oldval;
        if (oldval != uval)
                return -EAGAIN;

        return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        if (hb1 <= hb2) {
                spin_lock(&hb1->lock);
                if (hb1 < hb2)
                        spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
        } else { /* hb1 > hb2 */
                spin_lock(&hb2->lock);
                spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
        }
}
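
/*
 * Taking the two bucket locks in ascending address order means two
 * tasks locking the pair (hb1, hb2) and (hb2, hb1) cannot deadlock,
 * and when both keys hash to the same bucket the lock is taken only
 * once.
 */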

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int futex_wake(u32 __user *uaddr, int nr_wake)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        struct plist_head *head;
        union futex_key key;
        int ret;

        down_read(&current->mm->mmap_sem);

        ret = get_futex_key(uaddr, &key);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);
        spin_lock(&hb->lock);
        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key)) {
                        if (this->pi_state) {
                                ret = -EINVAL;
                                break;
                        }
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        spin_unlock(&hb->lock);
out:
        up_read(&current->mm->mmap_sem);
        return ret;
}

/*
 * Called from futex_requeue_pi.
 * Set the FUTEX_WAITERS and FUTEX_WAITER_REQUEUED flags on the
 * PI-futex value; look up its associated pi_state if an owner exists,
 * or create a new one without an owner.
 */
static inline int
lookup_pi_state_for_requeue(u32 __user *uaddr, struct futex_hash_bucket *hb,
                            union futex_key *key,
                            struct futex_pi_state **pi_state)
{
        u32 curval, uval, newval;

retry:
        /*
         * We can't handle a fault cleanly because we can't
         * release the locks here. Simply return the fault.
         */
        if (get_futex_value_locked(&curval, uaddr))
                return -EFAULT;

        /* set the flags FUTEX_WAITERS and FUTEX_WAITER_REQUEUED */
        if ((curval & (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED))
            != (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED)) {
                /*
                 * No waiters yet, we prepare the futex to have some waiters.
                 */

                uval = curval;
                newval = uval | FUTEX_WAITERS | FUTEX_WAITER_REQUEUED;

                pagefault_disable();
                curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
                pagefault_enable();

                if (unlikely(curval == -EFAULT))
                        return -EFAULT;
                if (unlikely(curval != uval))
                        goto retry;
        }

        if (!(curval & FUTEX_TID_MASK)
            || lookup_pi_state(curval, hb, key, pi_state)) {
                /* the futex has no owner (yet) or the lookup failed:
                   allocate one pi_state without owner */

                *pi_state = alloc_pi_state();

                /* Already stores the key: */
                (*pi_state)->key = *key;

                /* init the mutex without owner */
                __rt_mutex_init(&(*pi_state)->pi_mutex, NULL);
        }

        return 0;
}

/*
 * Wake up the first nr_wake waiters on futex1 (uaddr1), and requeue
 * the next nr_requeue waiters, hashed on one physical page, onto
 * another physical page (the PI-futex uaddr2).
 */
static int futex_requeue_pi(u32 __user *uaddr1, u32 __user *uaddr2,
                            int nr_wake, int nr_requeue, u32 *cmpval)
{
        union futex_key key1, key2;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
        struct futex_pi_state *pi_state2 = NULL;
        struct rt_mutex_waiter *waiter, *top_waiter = NULL;
        struct rt_mutex *lock2 = NULL;
        int ret, drop_count = 0;

        if (refill_pi_state_cache())
                return -ENOMEM;

retry:
        /*
         * First take all the futex related locks:
         */
        down_read(&current->mm->mmap_sem);

        ret = get_futex_key(uaddr1, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, &key2);
        if (unlikely(ret != 0))
                goto out;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

        double_lock_hb(hb1, hb2);

        if (likely(cmpval != NULL)) {
                u32 curval;

                ret = get_futex_value_locked(&curval, uaddr1);

                if (unlikely(ret)) {
                        spin_unlock(&hb1->lock);
                        if (hb1 != hb2)
                                spin_unlock(&hb2->lock);

                        /*
                         * If we would have faulted, release mmap_sem, fault
                         * it in and start all over again.
                         */
                        up_read(&current->mm->mmap_sem);

                        ret = get_user(curval, uaddr1);

                        if (!ret)
                                goto retry;

                        return ret;
                }
                if (curval != *cmpval) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
        }

        head1 = &hb1->chain;
        plist_for_each_entry_safe(this, next, head1, list) {
                if (!match_futex(&this->key, &key1))
                        continue;
                if (++ret <= nr_wake) {
                        wake_futex(this);
                } else {
                        /*
                         * FIRST: get and set the pi_state
                         */
                        if (!pi_state2) {
                                int s;
                                /* do this only the first time we requeue someone */
                                s = lookup_pi_state_for_requeue(uaddr2, hb2,
                                                                &key2, &pi_state2);
                                if (s) {
                                        ret = s;
                                        goto out_unlock;
                                }

                                lock2 = &pi_state2->pi_mutex;
                                spin_lock(&lock2->wait_lock);

                                /* Save the top waiter of the wait_list */
                                if (rt_mutex_has_waiters(lock2))
                                        top_waiter = rt_mutex_top_waiter(lock2);
                        } else
                                atomic_inc(&pi_state2->refcount);

                        this->pi_state = pi_state2;

                        /*
                         * SECOND: requeue futex_q to the correct hashbucket
                         */

                        /*
                         * If key1 and key2 hash to the same bucket, no need to
                         * requeue.
                         */
                        if (likely(head1 != &hb2->chain)) {
                                plist_del(&this->list, &hb1->chain);
                                plist_add(&this->list, &hb2->chain);
                                this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
                                this->list.plist.lock = &hb2->lock;
#endif
                        }
                        this->key = key2;
                        get_futex_key_refs(&key2);
                        drop_count++;

                        /*
                         * THIRD: queue it to lock2
                         */
                        spin_lock_irq(&this->task->pi_lock);
                        waiter = &this->waiter;
                        waiter->task = this->task;
                        waiter->lock = lock2;
                        plist_node_init(&waiter->list_entry, this->task->prio);
                        plist_node_init(&waiter->pi_list_entry, this->task->prio);
                        plist_add(&waiter->list_entry, &lock2->wait_list);
                        this->task->pi_blocked_on = waiter;
                        spin_unlock_irq(&this->task->pi_lock);

                        if (ret - nr_wake >= nr_requeue)
                                break;
                }
        }

        /*
         * If we've requeued some tasks and the top_waiter of the
         * rt_mutex has changed, we must adjust the priority of the
         * owner, if any.
         */
        if (drop_count) {
                struct task_struct *owner = rt_mutex_owner(lock2);
                if (owner &&
                    (top_waiter != (waiter = rt_mutex_top_waiter(lock2)))) {
                        int chain_walk = 0;

                        spin_lock_irq(&owner->pi_lock);
                        if (top_waiter)
                                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
                        else
                                /*
                                 * There were no waiters before the requeue;
                                 * the flag must be updated.
                                 */
                                mark_rt_mutex_waiters(lock2);

                        plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
                        __rt_mutex_adjust_prio(owner);
                        if (owner->pi_blocked_on) {
                                chain_walk = 1;
                                get_task_struct(owner);
                        }

                        spin_unlock_irq(&owner->pi_lock);
                        spin_unlock(&lock2->wait_lock);

                        if (chain_walk)
                                rt_mutex_adjust_prio_chain(owner, 0, lock2, NULL,
                                                           current);
                } else {
                        /* No owner or the top_waiter does not change */
                        mark_rt_mutex_waiters(lock2);
                        spin_unlock(&lock2->wait_lock);
                }
        }

out_unlock:
        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);

        /* drop_futex_key_refs() must be called outside the spinlocks. */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);

out:
        up_read(&current->mm->mmap_sem);
        return ret;
}
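
/*
 * A waiter moved by futex_requeue_pi() completes the lock acquisition
 * in futex_wait(): see the q.pi_state branch there, which takes the
 * rt_mutex and fixes up the pi_state owner if the lock was stolen.
 */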

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
{
        union futex_key key1, key2;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head;
        struct futex_q *this, *next;
        int ret, op_ret, attempt = 0;

retryfull:
        down_read(&current->mm->mmap_sem);

        ret = get_futex_key(uaddr1, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, &key2);
        if (unlikely(ret != 0))
                goto out;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry:
        double_lock_hb(hb1, hb2);

        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {
                u32 dummy;

                spin_unlock(&hb1->lock);
                if (hb1 != hb2)
                        spin_unlock(&hb2->lock);

#ifndef CONFIG_MMU
                /*
                 * we don't get EFAULT from MMU faults if we don't have an MMU,
                 * but we might get them from range checking
                 */
                ret = op_ret;
                goto out;
#endif

                if (unlikely(op_ret != -EFAULT)) {
                        ret = op_ret;
                        goto out;
                }

                /*
                 * futex_atomic_op_inuser needs to both read and write
                 * *(int __user *)uaddr2, but we can't modify it
                 * non-atomically.  Therefore, if get_user below is not
                 * enough, we need to handle the fault ourselves, while
                 * still holding the mmap_sem.
                 */
                if (attempt++) {
                        if (futex_handle_fault((unsigned long)uaddr2,
                                               attempt)) {
                                ret = -EFAULT;
                                goto out;
                        }
                        goto retry;
                }

                /*
                 * If we would have faulted, release mmap_sem,
                 * fault it in and start all over again.
                 */
                up_read(&current->mm->mmap_sem);

                ret = get_user(dummy, uaddr2);
                if (ret)
                        return ret;

                goto retryfull;
        }

        head = &hb1->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key1)) {
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        if (op_ret > 0) {
                head = &hb2->chain;

                op_ret = 0;
                plist_for_each_entry_safe(this, next, head, list) {
                        if (match_futex(&this->key, &key2)) {
                                wake_futex(this);
                                if (++op_ret >= nr_wake2)
                                        break;
                        }
                }
                ret += op_ret;
        }

        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);
out:
        up_read(&current->mm->mmap_sem);
        return ret;
}

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
                         int nr_wake, int nr_requeue, u32 *cmpval)
{
        union futex_key key1, key2;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
        int ret, drop_count = 0;

retry:
        down_read(&current->mm->mmap_sem);

        ret = get_futex_key(uaddr1, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, &key2);
        if (unlikely(ret != 0))
                goto out;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

        double_lock_hb(hb1, hb2);

        if (likely(cmpval != NULL)) {
                u32 curval;

                ret = get_futex_value_locked(&curval, uaddr1);

                if (unlikely(ret)) {
                        spin_unlock(&hb1->lock);
                        if (hb1 != hb2)
                                spin_unlock(&hb2->lock);

                        /*
                         * If we would have faulted, release mmap_sem, fault
                         * it in and start all over again.
                         */
                        up_read(&current->mm->mmap_sem);

                        ret = get_user(curval, uaddr1);

                        if (!ret)
                                goto retry;

                        return ret;
                }
                if (curval != *cmpval) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
        }

        head1 = &hb1->chain;
        plist_for_each_entry_safe(this, next, head1, list) {
                if (!match_futex(&this->key, &key1))
                        continue;
                if (++ret <= nr_wake) {
                        wake_futex(this);
                } else {
                        /*
                         * If key1 and key2 hash to the same bucket, no need to
                         * requeue.
                         */
                        if (likely(head1 != &hb2->chain)) {
                                plist_del(&this->list, &hb1->chain);
                                plist_add(&this->list, &hb2->chain);
                                this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
                                this->list.plist.lock = &hb2->lock;
#endif
                        }
                        this->key = key2;
                        get_futex_key_refs(&key2);
                        drop_count++;

                        if (ret - nr_wake >= nr_requeue)
                                break;
                }
        }

out_unlock:
        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);

        /* drop_futex_key_refs() must be called outside the spinlocks. */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);

out:
        up_read(&current->mm->mmap_sem);
        return ret;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *
queue_lock(struct futex_q *q, int fd, struct file *filp)
{
        struct futex_hash_bucket *hb;

        q->fd = fd;
        q->filp = filp;

        init_waitqueue_head(&q->waiters);

        get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;

        spin_lock(&hb->lock);
        return hb;
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
        int prio;

        /*
         * The priority used to register this element is
         * - either the real thread-priority for the real-time threads
         * (i.e. threads with a priority lower than MAX_RT_PRIO)
         * - or MAX_RT_PRIO for non-RT threads.
         * Thus, all RT-threads are woken first in priority order, and
         * the others are woken last, in FIFO order.
         */
        prio = min(current->normal_prio, MAX_RT_PRIO);

        plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
        q->list.plist.lock = &hb->lock;
#endif
        plist_add(&q->list, &hb->chain);
        q->task = current;
        spin_unlock(&hb->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
        spin_unlock(&hb->lock);
        drop_futex_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  They are called with the hashed spinlock held.
 */

/* The key must be already stored in q->key. */
static void queue_me(struct futex_q *q, int fd, struct file *filp)
{
        struct futex_hash_bucket *hb;

        hb = queue_lock(q, fd, filp);
        __queue_me(q, hb);
}

/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
        spinlock_t *lock_ptr;
        int ret = 0;

        /* In the common case we don't take the spinlock, which is nice. */
retry:
        lock_ptr = q->lock_ptr;
        barrier();
        if (lock_ptr != 0) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
                 * spin_lock(), causing us to take the wrong lock.  This
                 * corrects the race condition.
                 *
                 * Reasoning goes like this: if we have the wrong lock,
                 * q->lock_ptr must have changed (maybe several times)
                 * between reading it and the spin_lock().  It can
                 * change again after the spin_lock() but only if it was
                 * already changed before the spin_lock().  It cannot,
                 * however, change back to the original value.  Therefore
                 * we can detect whether we acquired the correct lock.
                 */
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        spin_unlock(lock_ptr);
                        goto retry;
                }
                WARN_ON(plist_node_empty(&q->list));
                plist_del(&q->list, &q->list.plist);

                BUG_ON(q->pi_state);

                spin_unlock(lock_ptr);
                ret = 1;
        }

        drop_futex_key_refs(&q->key);
        return ret;
}
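
/*
 * The lockless q->lock_ptr check above pairs with the smp_wmb() in
 * wake_futex(): once a waiter observes lock_ptr == NULL, it may free
 * its (stack-allocated) futex_q without taking any lock.
 */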

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
        WARN_ON(plist_node_empty(&q->list));
        plist_del(&q->list, &q->list.plist);

        BUG_ON(!q->pi_state);
        free_pi_state(q->pi_state);
        q->pi_state = NULL;

        spin_unlock(q->lock_ptr);

        drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with current.
 *
 * The curr->mm semaphore must be held; it is released before this
 * function returns.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
                                struct futex_hash_bucket *hb,
                                struct task_struct *curr)
{
        u32 newtid = curr->pid | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
        u32 uval, curval, newval;
        int ret;

        /* Owner died? */
        if (pi_state->owner != NULL) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);
        } else
                newtid |= FUTEX_OWNER_DIED;

        pi_state->owner = curr;

        spin_lock_irq(&curr->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &curr->pi_state_list);
        spin_unlock_irq(&curr->pi_lock);

        /* Unqueue and drop the lock */
        unqueue_me_pi(q);
        up_read(&curr->mm->mmap_sem);
        /*
         * We own it, so we have to replace the pending owner
         * TID. This must be atomic as we have to preserve the
         * owner died bit here.
         */
        ret = get_user(uval, uaddr);
        while (!ret) {
                newval = (uval & FUTEX_OWNER_DIED) | newtid;
                newval |= (uval & FUTEX_WAITER_REQUEUED);
                curval = futex_atomic_cmpxchg_inatomic(uaddr,
                                                       uval, newval);
                if (curval == -EFAULT)
                        ret = -EFAULT;
                if (curval == uval)
                        break;
                uval = curval;
        }
        return ret;
}

static long futex_wait_restart(struct restart_block *restart);

static int futex_wait(u32 __user *uaddr, u32 val, ktime_t *abs_time)
{
        struct task_struct *curr = current;
        DECLARE_WAITQUEUE(wait, curr);
        struct futex_hash_bucket *hb;
        struct futex_q q;
        u32 uval;
        int ret;
        struct hrtimer_sleeper t, *to = NULL;
        int rem = 0;

        q.pi_state = NULL;
retry:
        down_read(&curr->mm->mmap_sem);

        ret = get_futex_key(uaddr, &q.key);
        if (unlikely(ret != 0))
                goto out_release_sem;

        hb = queue_lock(&q, -1, NULL);

        /*
         * Access the page AFTER the futex is queued.
         * Order is important:
         *
         *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
         *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
         *
         * The basic logical guarantee of a futex is that it blocks ONLY
         * if cond(var) is known to be true at the time of blocking, for
         * any cond.  If we queued after testing *uaddr, that would open
         * a race condition where we could block indefinitely with
         * cond(var) false, which would violate the guarantee.
         *
         * A consequence is that futex_wait() can return zero and absorb
         * a wakeup when *uaddr != val on entry to the syscall.  This is
         * rare, but normal.
         *
         * We hold the mmap semaphore, so the mapping cannot have changed
         * since we looked it up in get_futex_key.
         */
        ret = get_futex_value_locked(&uval, uaddr);

        if (unlikely(ret)) {
                queue_unlock(&q, hb);

                /*
                 * If we would have faulted, release mmap_sem, fault it in and
                 * start all over again.
                 */
                up_read(&curr->mm->mmap_sem);

                ret = get_user(uval, uaddr);

                if (!ret)
                        goto retry;
                return ret;
        }
        ret = -EWOULDBLOCK;
        if (uval != val)
                goto out_unlock_release_sem;

        /*
         * This rt_mutex_waiter structure is prepared here and will
         * be used only if this task is requeued from a normal futex to
         * a PI-futex with futex_requeue_pi.
         */
        debug_rt_mutex_init_waiter(&q.waiter);
        q.waiter.task = NULL;

        /* Only actually queue if *uaddr contained val.  */
        __queue_me(&q, hb);

        /*
         * Now the futex is queued and we have checked the data, we
         * don't want to hold mmap_sem while we sleep.
         */
        up_read(&curr->mm->mmap_sem);

        /*
         * There might have been scheduling since the queue_me(), as we
         * cannot hold a spinlock across the get_user() in case it
         * faults, and we cannot just set TASK_INTERRUPTIBLE state when
         * queueing ourselves into the futex hash.  This code thus has to
         * rely on the futex_wake() code removing us from hash when it
         * wakes us up.
         */

        /* add_wait_queue is the barrier after __set_current_state. */
        __set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&q.waiters, &wait);
        /*
         * !plist_node_empty() is safe here without any lock.
         * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
         */
        if (likely(!plist_node_empty(&q.list))) {
                if (!abs_time)
                        schedule();
                else {
                        to = &t;
                        hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
                        hrtimer_init_sleeper(&t, current);
                        t.timer.expires = *abs_time;

                        hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);

                        /*
                         * the timer could have already expired, in which
                         * case current would be flagged for rescheduling.
                         * Don't bother calling schedule.
                         */
                        if (likely(t.task))
                                schedule();

                        hrtimer_cancel(&t.timer);

                        /* Flag if a timeout occurred */
                        rem = (t.task == NULL);
                }
        }
        __set_current_state(TASK_RUNNING);

        /*
         * NOTE: we don't remove ourselves from the waitqueue because
         * we are the only user of it.
         */

        if (q.pi_state) {
                /*
                 * We were woken but have been requeued on a PI-futex.
                 * We have to complete the lock acquisition by taking
                 * the rtmutex.
                 */

                struct rt_mutex *lock = &q.pi_state->pi_mutex;

                spin_lock(&lock->wait_lock);
                if (unlikely(q.waiter.task)) {
                        remove_waiter(lock, &q.waiter);
                }
                spin_unlock(&lock->wait_lock);

                if (rem)
                        ret = -ETIMEDOUT;
                else
                        ret = rt_mutex_timed_lock(lock, to, 1);

                down_read(&curr->mm->mmap_sem);
                spin_lock(q.lock_ptr);

                /*
                 * Got the lock. We might not be the anticipated owner if we
                 * did a lock-steal - fix up the PI-state in that case.
                 */
                if (!ret && q.pi_state->owner != curr) {
                        /*
                         * We MUST play with the futex we were requeued on,
                         * NOT the current futex.
                         * We can retrieve it from the key of the pi_state
                         */
                        uaddr = q.pi_state->key.uaddr;

                        /*
                         * mmap_sem and the hash bucket lock are released
                         * by this function before it returns:
                         */
                        ret = fixup_pi_state_owner(uaddr, &q, hb, curr);
                } else {
                        /*
                         * Catch the rare case, where the lock was released
                         * when we were on the way back before we locked
                         * the hash bucket.
                         */
                        if (ret && q.pi_state->owner == curr) {
                                if (rt_mutex_trylock(&q.pi_state->pi_mutex))
                                        ret = 0;
                        }
                        /* Unqueue and drop the lock */
                        unqueue_me_pi(&q);
                        up_read(&curr->mm->mmap_sem);
                }

                debug_rt_mutex_free_waiter(&q.waiter);

                return ret;
        }

        debug_rt_mutex_free_waiter(&q.waiter);

        /* If we were woken (and unqueued), we succeeded, whatever. */
        if (!unqueue_me(&q))
                return 0;
        if (rem)
                return -ETIMEDOUT;

        /*
         * We expect signal_pending(current), but another thread may
         * have handled it for us already.
         */
        if (!abs_time)
                return -ERESTARTSYS;
        else {
                struct restart_block *restart;
                restart = &current_thread_info()->restart_block;
                restart->fn = futex_wait_restart;
                restart->arg0 = (unsigned long)uaddr;
                restart->arg1 = (unsigned long)val;
                restart->arg2 = (unsigned long)abs_time;
                return -ERESTART_RESTARTBLOCK;
        }

out_unlock_release_sem:
        queue_unlock(&q, hb);

out_release_sem:
        up_read(&curr->mm->mmap_sem);
        return ret;
}

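/*
 * If futex_wait() is interrupted by a signal while a timeout is
 * pending, it stashes its arguments in the restart_block and returns
 * -ERESTART_RESTARTBLOCK; the signal return path then re-enters the
 * wait through this helper.
 */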
static long futex_wait_restart(struct restart_block *restart)
{
        u32 __user *uaddr = (u32 __user *)restart->arg0;
        u32 val = (u32)restart->arg1;
        ktime_t *abs_time = (ktime_t *)restart->arg2;

        restart->fn = do_no_restart_syscall;
        return (long)futex_wait(uaddr, val, abs_time);
}
1552
1553
1554 static void set_pi_futex_owner(struct futex_hash_bucket *hb,
1555                                union futex_key *key, struct task_struct *p)
1556 {
1557         struct plist_head *head;
1558         struct futex_q *this, *next;
1559         struct futex_pi_state *pi_state = NULL;
1560         struct rt_mutex *lock;
1561
1562                 /* Search for a waiter that should already exist */
1563
1564         head = &hb->chain;
1565
1566         plist_for_each_entry_safe(this, next, head, list) {
1567                 if (match_futex(&this->key, key)) {
1568                         pi_state = this->pi_state;
1569                         break;
1570                 }
1571         }
1572
1573         BUG_ON(!pi_state);
1574
1575         /* set p as pi_state's owner */
1576         lock = &pi_state->pi_mutex;
1577
1578         spin_lock(&lock->wait_lock);
1579         spin_lock_irq(&p->pi_lock);
1580
1581         list_add(&pi_state->list, &p->pi_state_list);
1582         pi_state->owner = p;
1583
1584
1585         /* set p as pi_mutex's owner */
1586         debug_rt_mutex_proxy_lock(lock, p);
1587         WARN_ON(rt_mutex_owner(lock));
1588         rt_mutex_set_owner(lock, p, 0);
1589         rt_mutex_deadlock_account_lock(lock, p);
1590
1591         plist_add(&rt_mutex_top_waiter(lock)->pi_list_entry,
1592                   &p->pi_waiters);
1593         __rt_mutex_adjust_prio(p);
1594
1595         spin_unlock_irq(&p->pi_lock);
1596         spin_unlock(&lock->wait_lock);
1597 }
1598
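/*
 * Illustration (hedged sketch, not kernel code): the user-space fast
 * path that precedes this slowpath. lock_pi() is a hypothetical helper;
 * a gettid() syscall wrapper plus <stdint.h>, <unistd.h>,
 * <sys/syscall.h> and <linux/futex.h> are assumed:
 *
 *	static void lock_pi(uint32_t *futex)
 *	{
 *		// uncontended: 0 -> TID in user space, no syscall needed
 *		if (__sync_bool_compare_and_swap(futex, 0, gettid()))
 *			return;
 *		// contended: the kernel queues us and applies
 *		// priority inheritance
 *		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 */
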
1599 /*
1600  * Userspace tried a 0 -> TID atomic transition of the futex value
1601  * and failed. The kernel side here does the whole locking operation:
1602  * if there are waiters then it will block, it does PI, etc. (Due to
1603  * races the kernel might see a 0 value of the futex too.)
1604  */
1605 static int futex_lock_pi(u32 __user *uaddr, int detect, ktime_t *time,
1606                          int trylock)
1607 {
1608         struct hrtimer_sleeper timeout, *to = NULL;
1609         struct task_struct *curr = current;
1610         struct futex_hash_bucket *hb;
1611         u32 uval, newval, curval;
1612         struct futex_q q;
1613         int ret, lock_held, attempt = 0;
1614
1615         if (refill_pi_state_cache())
1616                 return -ENOMEM;
1617
1618         if (time) {
1619                 to = &timeout;
1620                 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
1621                 hrtimer_init_sleeper(to, current);
1622                 to->timer.expires = *time;
1623         }
1624
1625         q.pi_state = NULL;
1626  retry:
1627         down_read(&curr->mm->mmap_sem);
1628
1629         ret = get_futex_key(uaddr, &q.key);
1630         if (unlikely(ret != 0))
1631                 goto out_release_sem;
1632
1633         hb = queue_lock(&q, -1, NULL);
1634
1635  retry_locked:
1636         lock_held = 0;
1637
1638         /*
1639          * To avoid races, we attempt to take the lock here again
1640          * (by doing a 0 -> TID atomic cmpxchg), while holding all
1641          * the locks. It will most likely not succeed.
1642          */
1643         newval = current->pid;
1644
1645         pagefault_disable();
1646         curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
1647         pagefault_enable();
1648
1649         if (unlikely(curval == -EFAULT))
1650                 goto uaddr_faulted;
1651
1652         /* We own the lock already */
1653         if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
1654                 if (!detect && 0)
1655                         force_sig(SIGKILL, current);
1656                 /*
1657                  * Normally this check is done in user space. After a
1658                  * requeue the old owner may try to lock this futex even
1659                  * though ownership was already passed on by the waker:
1660                  * a genuine deadlock in the ordinary case, but not
1661                  * under REQUEUE_PI. (The force_sig() above is compiled
1662                  * out via '&& 0' - a disabled debugging aid.)
1663                  */
1664                 if (!(curval & FUTEX_WAITER_REQUEUED))
1665                         ret = -EDEADLK;
1666                 goto out_unlock_release_sem;
1667         }
1668
1669         /*
1670          * Surprise - we got the lock. Just return
1671          * to userspace:
1672          */
1673         if (unlikely(!curval))
1674                 goto out_unlock_release_sem;
1675
1676         uval = curval;
1677         /*
1678          * In case of a requeue, check whether there already is an
1679          * owner. If not, just take the futex.
1680          */
1681         if ((curval & FUTEX_WAITER_REQUEUED) && !(curval & FUTEX_TID_MASK)) {
1682                 /* set current as futex owner */
1683                 newval = curval | current->pid;
1684                 lock_held = 1;
1685         } else
1686                 /* Set the WAITERS flag, so the owner will know it has someone
1687                    to wake at the next unlock */
1688                 newval = curval | FUTEX_WAITERS;
1689
1690         pagefault_disable();
1691         curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
1692         pagefault_enable();
1693
1694         if (unlikely(curval == -EFAULT))
1695                 goto uaddr_faulted;
1696         if (unlikely(curval != uval))
1697                 goto retry_locked;
1698
1699         if (lock_held) {
1700                 set_pi_futex_owner(hb, &q.key, curr);
1701                 goto out_unlock_release_sem;
1702         }
1703
1704         /*
1705          * We don't have the lock. Look up the PI state (or create it if
1706          * we are the first waiter):
1707          */
1708         ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);
1709
1710         if (unlikely(ret)) {
1711                 /*
1712                  * There were no waiters and the owner task lookup
1713                  * failed. When the OWNER_DIED bit is set, then we
1714                  * know that this is a robust futex and we actually
1715                  * take the lock. This is safe as we are protected by
1716                  * the hash bucket lock. We also set the waiters bit
1717                  * unconditionally here, to simplify glibc handling of
1718                  * multiple tasks racing to acquire the lock and
1719                  * cleanup the problems which were left by the dead
1720                  * owner.
1721                  */
1722                 if (curval & FUTEX_OWNER_DIED) {
1723                         uval = newval;
1724                         newval = current->pid |
1725                                 FUTEX_OWNER_DIED | FUTEX_WAITERS;
1726
1727                         pagefault_disable();
1728                         curval = futex_atomic_cmpxchg_inatomic(uaddr,
1729                                                                uval, newval);
1730                         pagefault_enable();
1731
1732                         if (unlikely(curval == -EFAULT))
1733                                 goto uaddr_faulted;
1734                         if (unlikely(curval != uval))
1735                                 goto retry_locked;
1736                         ret = 0;
1737                 }
1738                 goto out_unlock_release_sem;
1739         }
1740
1741         /*
1742          * Only actually queue now that the atomic ops are done:
1743          */
1744         __queue_me(&q, hb);
1745
1746         /*
1747          * Now the futex is queued and we have checked the data, we
1748          * don't want to hold mmap_sem while we sleep.
1749          */
1750         up_read(&curr->mm->mmap_sem);
1751
1752         WARN_ON(!q.pi_state);
1753         /*
1754          * Block on the PI mutex:
1755          */
1756         if (!trylock)
1757                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1758         else {
1759                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1760                 /* Fixup the trylock return value: */
1761                 ret = ret ? 0 : -EWOULDBLOCK;
1762         }
1763
1764         down_read(&curr->mm->mmap_sem);
1765         spin_lock(q.lock_ptr);
1766
1767         /*
1768          * Got the lock. We might not be the anticipated owner if we
1769          * did a lock-steal - fix up the PI-state in that case.
1770          */
1771         if (!ret && q.pi_state->owner != curr)
1772                 /* mmap_sem is unlocked at return of this function */
1773                 ret = fixup_pi_state_owner(uaddr, &q, hb, curr);
1774         else {
1775                 /*
1776                  * Catch the rare case, where the lock was released
1777                  * when we were on the way back before we locked
1778                  * the hash bucket.
1779                  */
1780                 if (ret && q.pi_state->owner == curr) {
1781                         if (rt_mutex_trylock(&q.pi_state->pi_mutex))
1782                                 ret = 0;
1783                 }
1784                 /* Unqueue and drop the lock */
1785                 unqueue_me_pi(&q);
1786                 up_read(&curr->mm->mmap_sem);
1787         }
1788
1789         if (!detect && ret == -EDEADLK && 0)
1790                 force_sig(SIGKILL, current);
1791
1792         return ret != -EINTR ? ret : -ERESTARTNOINTR;
1793
1794  out_unlock_release_sem:
1795         queue_unlock(&q, hb);
1796
1797  out_release_sem:
1798         up_read(&curr->mm->mmap_sem);
1799         return ret;
1800
1801  uaddr_faulted:
1802         /*
1803          * We have to r/w *(int __user *)uaddr, but we can't modify it
1804          * non-atomically.  Therefore, if the get_user() below is not
1805          * enough, we need to handle the fault ourselves, while
1806          * still holding mmap_sem.
1807          */
1808         if (attempt++) {
1809                 if (futex_handle_fault((unsigned long)uaddr, attempt)) {
1810                         ret = -EFAULT;
1811                         goto out_unlock_release_sem;
1812                 }
1813                 goto retry_locked;
1814         }
1815
1816         queue_unlock(&q, hb);
1817         up_read(&curr->mm->mmap_sem);
1818
1819         ret = get_user(uval, uaddr);
1820         if (!ret)
1821                 goto retry;
1822
1823         return ret;
1824 }
1825
1826 /*
1827  * Userspace attempted a TID -> 0 atomic transition, and failed.
1828  * This is the in-kernel slowpath: we look up the PI state (if any),
1829  * and do the rt-mutex unlock.
1830  */
1831 static int futex_unlock_pi(u32 __user *uaddr)
1832 {
1833         struct futex_hash_bucket *hb;
1834         struct futex_q *this, *next;
1835         u32 uval;
1836         struct plist_head *head;
1837         union futex_key key;
1838         int ret, attempt = 0;
1839
1840 retry:
1841         if (get_user(uval, uaddr))
1842                 return -EFAULT;
1843         /*
1844          * We release only a lock we actually own:
1845          */
1846         if ((uval & FUTEX_TID_MASK) != current->pid)
1847                 return -EPERM;
1848         /*
1849          * First take all the futex related locks:
1850          */
1851         down_read(&current->mm->mmap_sem);
1852
1853         ret = get_futex_key(uaddr, &key);
1854         if (unlikely(ret != 0))
1855                 goto out;
1856
1857         hb = hash_futex(&key);
1858         spin_lock(&hb->lock);
1859
1860 retry_locked:
1861         /*
1862          * To avoid races, try to do the TID -> 0 atomic transition
1863          * again. If it succeeds then we can return without waking
1864          * anyone else up:
1865          */
1866         if (!(uval & FUTEX_OWNER_DIED)) {
1867                 pagefault_disable();
1868                 uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
1869                 pagefault_enable();
1870         }
1871
1872         if (unlikely(uval == -EFAULT))
1873                 goto pi_faulted;
1874         /*
1875          * Rare case: we managed to release the lock atomically,
1876          * no need to wake anyone else up:
1877          */
1878         if (unlikely(uval == current->pid))
1879                 goto out_unlock;
1880
1881         /*
1882          * Ok, other tasks may need to be woken up - check waiters
1883          * and do the wakeup if necessary:
1884          */
1885         head = &hb->chain;
1886
1887         plist_for_each_entry_safe(this, next, head, list) {
1888                 if (!match_futex(&this->key, &key))
1889                         continue;
1890                 ret = wake_futex_pi(uaddr, uval, this);
1891                 /*
1892                  * The atomic access to the futex value
1893                  * generated a pagefault, so retry the
1894                  * user-access and the wakeup:
1895                  */
1896                 if (ret == -EFAULT)
1897                         goto pi_faulted;
1898                 goto out_unlock;
1899         }
1900         /*
1901          * No waiters - kernel unlocks the futex:
1902          */
1903         if (!(uval & FUTEX_OWNER_DIED)) {
1904                 ret = unlock_futex_pi(uaddr, uval);
1905                 if (ret == -EFAULT)
1906                         goto pi_faulted;
1907         }
1908
1909 out_unlock:
1910         spin_unlock(&hb->lock);
1911 out:
1912         up_read(&current->mm->mmap_sem);
1913
1914         return ret;
1915
1916 pi_faulted:
1917         /*
1918          * We have to r/w *(int __user *)uaddr, but we can't modify it
1919          * non-atomically.  Therefore, if the get_user() below is not
1920          * enough, we need to handle the fault ourselves, while
1921          * still holding mmap_sem.
1922          */
1923         if (attempt++) {
1924                 if (futex_handle_fault((unsigned long)uaddr, attempt)) {
1925                         ret = -EFAULT;
1926                         goto out_unlock;
1927                 }
1928                 goto retry_locked;
1929         }
1930
1931         spin_unlock(&hb->lock);
1932         up_read(&current->mm->mmap_sem);
1933
1934         ret = get_user(uval, uaddr);
1935         if (!ret)
1936                 goto retry;
1937
1938         return ret;
1939 }
1940
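/*
 * ->release() for FUTEX_FD files: unhash the waiter (if it is still
 * queued) and free it.
 */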
1941 static int futex_close(struct inode *inode, struct file *filp)
1942 {
1943         struct futex_q *q = filp->private_data;
1944
1945         unqueue_me(q);
1946         kfree(q);
1947
1948         return 0;
1949 }
1950
1951 /* This is one-shot: once it's gone off you need a new fd */
1952 static unsigned int futex_poll(struct file *filp,
1953                                struct poll_table_struct *wait)
1954 {
1955         struct futex_q *q = filp->private_data;
1956         int ret = 0;
1957
1958         poll_wait(filp, &q->waiters, wait);
1959
1960         /*
1961          * plist_node_empty() is safe here without any lock.
1962          * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
1963          */
1964         if (plist_node_empty(&q->list))
1965                 ret = POLLIN | POLLRDNORM;
1966
1967         return ret;
1968 }
1969
1970 static const struct file_operations futex_fops = {
1971         .release        = futex_close,
1972         .poll           = futex_poll,
1973 };
1974
1975 /*
1976  * Passing the signal here lets the caller avoid the race that would
1977  * occur if SIGIO delivery were only set up after the fd goes live.
1978  */
1979 static int futex_fd(u32 __user *uaddr, int signal)
1980 {
1981         struct futex_q *q;
1982         struct file *filp;
1983         int ret, err;
1984         static unsigned long printk_interval;
1985
1986         if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
1987                 printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
1988                         "will be removed from the kernel in June 2007\n",
1989                         current->comm);
1990         }
1991
1992         ret = -EINVAL;
1993         if (!valid_signal(signal))
1994                 goto out;
1995
1996         ret = get_unused_fd();
1997         if (ret < 0)
1998                 goto out;
1999         filp = get_empty_filp();
2000         if (!filp) {
2001                 put_unused_fd(ret);
2002                 ret = -ENFILE;
2003                 goto out;
2004         }
2005         filp->f_op = &futex_fops;
2006         filp->f_path.mnt = mntget(futex_mnt);
2007         filp->f_path.dentry = dget(futex_mnt->mnt_root);
2008         filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
2009
2010         if (signal) {
2011                 err = __f_setown(filp, task_pid(current), PIDTYPE_PID, 1);
2012                 if (err < 0)
2013                         goto error;
2014
2015                 filp->f_owner.signum = signal;
2016         }
2017
2018         q = kmalloc(sizeof(*q), GFP_KERNEL);
2019         if (!q) {
2020                 err = -ENOMEM;
2021                 goto error;
2022         }
2023         q->pi_state = NULL;
2024
2025         down_read(&current->mm->mmap_sem);
2026         err = get_futex_key(uaddr, &q->key);
2027
2028         if (unlikely(err != 0)) {
2029                 up_read(&current->mm->mmap_sem);
2030                 kfree(q);
2031                 goto error;
2032         }
2033
2034         /*
2035          * queue_me() must be called before releasing mmap_sem, because
2036          * key->shared.inode needs to be referenced while holding it.
2037          */
2038         filp->private_data = q;
2039
2040         queue_me(q, ret, filp);
2041         up_read(&current->mm->mmap_sem);
2042
2043         /* Now we map fd to filp, so userspace can access it */
2044         fd_install(ret, filp);
2045 out:
2046         return ret;
2047 error:
2048         put_unused_fd(ret);
2049         put_filp(filp);
2050         ret = err;
2051         goto out;
2052 }
2053
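/*
 * Illustration (hedged sketch): how a caller might drive the deprecated
 * FUTEX_FD interface with poll(), matching the one-shot semantics of
 * futex_poll() above. 'futex' names the shared word; <poll.h>,
 * <unistd.h>, <sys/syscall.h> and <linux/futex.h> are assumed:
 *
 *	int fd = syscall(SYS_futex, futex, FUTEX_FD, 0, NULL, NULL, 0);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns once the futex is woken
 *	close(fd);		// one-shot: get a new fd for the next wait
 */
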
2054 /*
2055  * Support for robust futexes: the kernel cleans up held futexes at
2056  * thread exit time.
2057  *
2058  * Implementation: user-space maintains a per-thread list of locks it
2059  * is holding. Upon do_exit(), the kernel carefully walks this list,
2060  * and marks all locks that are owned by this thread with the
2061  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2062  * always manipulated with the lock held, so the list is private and
2063  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2064  * field, to allow the kernel to clean up if the thread dies after
2065  * acquiring the lock, but just before it could have added itself to
2066  * the list. There can only be one such pending lock.
2067  */
2068
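/*
 * Illustration (hedged sketch): the user-space half of this contract,
 * which glibc normally hides. 'struct my_mutex' and its 'futex' member
 * are assumptions for the example; <stddef.h>, <unistd.h> and
 * <sys/syscall.h> are presumed:
 *
 *	static __thread struct robust_list_head head;
 *
 *	head.list.next = &head.list;	// empty list: points to itself
 *	head.futex_offset = offsetof(struct my_mutex, futex);
 *	head.list_op_pending = NULL;
 *	// once per thread, before taking the first robust lock:
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Lock and unlock then link/unlink the mutex into head.list, setting
 * list_op_pending across the window where the kernel could not yet
 * infer ownership from the list alone.
 */
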
2069 /**
2070  * sys_set_robust_list - set the robust-futex list head of a task
2071  * @head: pointer to the list-head
2072  * @len: length of the list-head, as userspace expects
2073  */
2074 asmlinkage long
2075 sys_set_robust_list(struct robust_list_head __user *head,
2076                     size_t len)
2077 {
2078         /*
2079          * The kernel knows only one size for now:
2080          */
2081         if (unlikely(len != sizeof(*head)))
2082                 return -EINVAL;
2083
2084         current->robust_list = head;
2085
2086         return 0;
2087 }
2088
2089 /**
2090  * sys_get_robust_list - get the robust-futex list head of a task
2091  * @pid: pid of the process [zero for current task]
2092  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2093  * @len_ptr: pointer to a length field, the kernel fills in the header size
2094  */
2095 asmlinkage long
2096 sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
2097                     size_t __user *len_ptr)
2098 {
2099         struct robust_list_head __user *head;
2100         unsigned long ret;
2101
2102         if (!pid)
2103                 head = current->robust_list;
2104         else {
2105                 struct task_struct *p;
2106
2107                 ret = -ESRCH;
2108                 rcu_read_lock();
2109                 p = find_task_by_pid(pid);
2110                 if (!p)
2111                         goto err_unlock;
2112                 ret = -EPERM;
2113                 if ((current->euid != p->euid) && (current->euid != p->uid) &&
2114                                 !capable(CAP_SYS_PTRACE))
2115                         goto err_unlock;
2116                 head = p->robust_list;
2117                 rcu_read_unlock();
2118         }
2119
2120         if (put_user(sizeof(*head), len_ptr))
2121                 return -EFAULT;
2122         return put_user(head, head_ptr);
2123
2124 err_unlock:
2125         rcu_read_unlock();
2126
2127         return ret;
2128 }
2129
2130 /*
2131  * Process a futex-list entry, check whether it's owned by the
2132  * dying task, and do notification if so:
2133  */
2134 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2135 {
2136         u32 uval, nval, mval;
2137
2138 retry:
2139         if (get_user(uval, uaddr))
2140                 return -1;
2141
2142         if ((uval & FUTEX_TID_MASK) == curr->pid) {
2143                 /*
2144                  * Ok, this dying thread is truly holding a futex
2145                  * of interest. Set the OWNER_DIED bit atomically
2146                  * via cmpxchg, and if the value had FUTEX_WAITERS
2147                  * set, wake up a waiter (if any). (We have to do a
2148                  * futex_wake() even if OWNER_DIED is already set -
2149                  * to handle the rare but possible case of recursive
2150                  * thread-death.) The rest of the cleanup is done in
2151                  * userspace.
2152                  */
2153                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2154                 /* Also keep the FUTEX_WAITER_REQUEUED flag if set */
2155                 mval |= (uval & FUTEX_WAITER_REQUEUED);
2156                 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2157
2158                 if (nval == -EFAULT)
2159                         return -1;
2160
2161                 if (nval != uval)
2162                         goto retry;
2163
2164                 /*
2165                  * Wake robust non-PI futexes here. The wakeup of
2166                  * PI futexes happens in exit_pi_state():
2167                  */
2168                 if (!pi) {
2169                         if (uval & FUTEX_WAITERS)
2170                                 futex_wake(uaddr, 1);
2171                 }
2172         }
2173         return 0;
2174 }
2175
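/*
 * Illustration (hedged sketch): how the OWNER_DIED notification set
 * above surfaces in user space. With glibc robust mutexes the next
 * locker sees EOWNERDEAD and must repair the protected state;
 * repair_shared_state() is an assumed application hook:
 *
 *	if (pthread_mutex_lock(&m) == EOWNERDEAD) {
 *		repair_shared_state();		 // application-defined
 *		pthread_mutex_consistent_np(&m); // mark the lock usable
 *	}
 */
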
2176 /*
2177  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2178  */
2179 static inline int fetch_robust_entry(struct robust_list __user **entry,
2180                                      struct robust_list __user * __user *head,
2181                                      int *pi)
2182 {
2183         unsigned long uentry;
2184
2185         if (get_user(uentry, (unsigned long __user *)head))
2186                 return -EFAULT;
2187
2188         *entry = (void __user *)(uentry & ~1UL);
2189         *pi = uentry & 1;
2190
2191         return 0;
2192 }
2193
2194 /*
2195  * Walk curr->robust_list (very carefully, it's a userspace list!)
2196  * and mark any locks found there dead, and notify any waiters.
2197  *
2198  * We silently return on any sign of list-walking problem.
2199  */
2200 void exit_robust_list(struct task_struct *curr)
2201 {
2202         struct robust_list_head __user *head = curr->robust_list;
2203         struct robust_list __user *entry, *pending;
2204         unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2205         unsigned long futex_offset;
2206
2207         /*
2208          * Fetch the list head (which was registered earlier, via
2209          * sys_set_robust_list()):
2210          */
2211         if (fetch_robust_entry(&entry, &head->list.next, &pi))
2212                 return;
2213         /*
2214          * Fetch the relative futex offset:
2215          */
2216         if (get_user(futex_offset, &head->futex_offset))
2217                 return;
2218         /*
2219          * Fetch any possibly pending lock-add first, and handle it
2220          * if it exists:
2221          */
2222         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2223                 return;
2224
2225         if (pending)
2226                 handle_futex_death((void __user *)pending + futex_offset, curr, pip);
2227
2228         while (entry != &head->list) {
2229                 /*
2230                  * A pending lock might already be on the list, so
2231                  * don't process it twice:
2232                  */
2233                 if (entry != pending)
2234                         if (handle_futex_death((void __user *)entry + futex_offset,
2235                                                 curr, pi))
2236                                 return;
2237                 /*
2238                  * Fetch the next entry in the list:
2239                  */
2240                 if (fetch_robust_entry(&entry, &entry->next, &pi))
2241                         return;
2242                 /*
2243                  * Avoid excessively long or circular lists:
2244                  */
2245                 if (!--limit)
2246                         break;
2247
2248                 cond_resched();
2249         }
2250 }
2251
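/*
 * do_futex() - multiplex the futex operations. Note that 'timeout' and
 * 'val2' arrive via the same syscall slot; see sys_futex() below.
 */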
2252 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2253                 u32 __user *uaddr2, u32 val2, u32 val3)
2254 {
2255         int ret;
2256
2257         switch (op) {
2258         case FUTEX_WAIT:
2259                 ret = futex_wait(uaddr, val, timeout);
2260                 break;
2261         case FUTEX_WAKE:
2262                 ret = futex_wake(uaddr, val);
2263                 break;
2264         case FUTEX_FD:
2265                 /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
2266                 ret = futex_fd(uaddr, val);
2267                 break;
2268         case FUTEX_REQUEUE:
2269                 ret = futex_requeue(uaddr, uaddr2, val, val2, NULL);
2270                 break;
2271         case FUTEX_CMP_REQUEUE:
2272                 ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
2273                 break;
2274         case FUTEX_WAKE_OP:
2275                 ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
2276                 break;
2277         case FUTEX_LOCK_PI:
2278                 ret = futex_lock_pi(uaddr, val, timeout, 0);
2279                 break;
2280         case FUTEX_UNLOCK_PI:
2281                 ret = futex_unlock_pi(uaddr);
2282                 break;
2283         case FUTEX_TRYLOCK_PI:
2284                 ret = futex_lock_pi(uaddr, 0, timeout, 1);
2285                 break;
2286         case FUTEX_CMP_REQUEUE_PI:
2287                 ret = futex_requeue_pi(uaddr, uaddr2, val, val2, &val3);
2288                 break;
2289         default:
2290                 ret = -ENOSYS;
2291         }
2292         return ret;
2293 }
2294
2295
2296 asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
2297                           struct timespec __user *utime, u32 __user *uaddr2,
2298                           u32 val3)
2299 {
2300         struct timespec ts;
2301         ktime_t t, *tp = NULL;
2302         u32 val2 = 0;
2303
2304         if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
2305                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2306                         return -EFAULT;
2307                 if (!timespec_valid(&ts))
2308                         return -EINVAL;
2309
2310                 t = timespec_to_ktime(ts);
2311                 if (op == FUTEX_WAIT)
2312                         t = ktime_add(ktime_get(), t);
2313                 tp = &t;
2314         }
2315         /*
2316          * Requeue-class ops pass their val2 count in 'utime'.
2317          */
2318         if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE
2319             || op == FUTEX_CMP_REQUEUE_PI)
2320                 val2 = (u32) (unsigned long) utime;
2321
2322         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2323 }
2324
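/*
 * Illustration (hedged sketch): the classic wait/wake pairing as it
 * arrives at the syscall above. Assumes <sys/syscall.h> and
 * <linux/futex.h>, with 'futex' naming the shared word:
 *
 *	// waiter: sleep only while *futex still equals val
 *	syscall(SYS_futex, futex, FUTEX_WAIT, val, &rel_timeout, NULL, 0);
 *
 *	// waker: wake up at most one waiter
 *	syscall(SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * The FUTEX_WAIT timespec is relative; sys_futex() above turns it into
 * an absolute ktime_t before handing it to do_futex().
 */
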
2325 static int futexfs_get_sb(struct file_system_type *fs_type,
2326                           int flags, const char *dev_name, void *data,
2327                           struct vfsmount *mnt)
2328 {
2329         return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
2330 }
2331
2332 static struct file_system_type futex_fs_type = {
2333         .name           = "futexfs",
2334         .get_sb         = futexfs_get_sb,
2335         .kill_sb        = kill_anon_super,
2336 };
2337
2338 static int __init init(void)
2339 {
2340         int i = register_filesystem(&futex_fs_type);
2341
2342         if (i)
2343                 return i;
2344
2345         futex_mnt = kern_mount(&futex_fs_type);
2346         if (IS_ERR(futex_mnt)) {
2347                 unregister_filesystem(&futex_fs_type);
2348                 return PTR_ERR(futex_mnt);
2349         }
2350
2351         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2352                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2353                 spin_lock_init(&futex_queues[i].lock);
2354         }
2355         return 0;
2356 }
2357 __initcall(init);