X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Ffutex.c;h=9dc591ab681a92c87209463e922f1a06e8d9bfa7;hb=b00296fb781acfafa93687000cdef72b8922bb40;hp=e45a65e41686c19e599140e0c4e04de5203f5bee;hpb=a9e82d3a02247af6b729be0a963862d70cb25bf9;p=linux-2.6

diff --git a/kernel/futex.c b/kernel/futex.c
index e45a65e416..9dc591ab68 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -53,6 +53,9 @@
 #include
 #include
 #include
+#include
+#include
+
 #include
 
 #include "rtmutex_common.h"
@@ -178,8 +181,8 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
  * For other futexes, it points to &current->mm->mmap_sem and
  * caller must have taken the reader lock. but NOT any spinlocks.
  */
-int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
-		  union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
+			 union futex_key *key)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
@@ -265,14 +268,13 @@ int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
 	}
 	return err;
 }
-EXPORT_SYMBOL_GPL(get_futex_key);
 
 /*
  * Take a reference to the resource addressed by a key.
  * Can be called while holding spinlocks.
  *
  */
-inline void get_futex_key_refs(union futex_key *key)
+static void get_futex_key_refs(union futex_key *key)
 {
 	if (key->both.ptr == 0)
 		return;
@@ -285,13 +287,12 @@ inline void get_futex_key_refs(union futex_key *key)
 			break;
 	}
 }
-EXPORT_SYMBOL_GPL(get_futex_key_refs);
 
 /*
  * Drop a reference to the resource addressed by a key.
  * The hash bucket spinlock must not be held.
  */
-void drop_futex_key_refs(union futex_key *key)
+static void drop_futex_key_refs(union futex_key *key)
 {
 	if (!key->both.ptr)
 		return;
@@ -304,7 +305,6 @@ void drop_futex_key_refs(union futex_key *key)
 			break;
 	}
 }
-EXPORT_SYMBOL_GPL(drop_futex_key_refs);
 
 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
 {
@@ -443,8 +443,7 @@ static struct task_struct * futex_find_get_task(pid_t pid)
 	struct task_struct *p;
 
 	rcu_read_lock();
-	p = find_task_by_pid(pid);
-
+	p = find_task_by_vpid(pid);
 	if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
 		p = ERR_PTR(-ESRCH);
 	else
@@ -653,7 +652,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	if (!(uval & FUTEX_OWNER_DIED)) {
 		int ret = 0;
 
-		newval = FUTEX_WAITERS | new_owner->pid;
+		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
 		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 
@@ -1106,7 +1105,7 @@ static void unqueue_me_pi(struct futex_q *q)
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 				struct task_struct *curr)
 {
-	u32 newtid = curr->pid | FUTEX_WAITERS;
+	u32 newtid = task_pid_vnr(curr) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
 	u32 uval, curval, newval;
 	int ret;
@@ -1368,7 +1367,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
 	 * the locks. It will most likely not succeed.
 	 */
-	newval = current->pid;
+	newval = task_pid_vnr(current);
 
 	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
 
@@ -1379,7 +1378,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 * Detect deadlocks. In case of REQUEUE_PI this is a valid
 	 * situation and we return success to user space.
 	 */
-	if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
+	if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
 		ret = -EDEADLK;
 		goto out_unlock_release_sem;
 	}
@@ -1408,7 +1407,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 */
 	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
 		/* Keep the OWNER_DIED bit */
-		newval = (curval & ~FUTEX_TID_MASK) | current->pid;
+		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
 		ownerdied = 0;
 		lock_taken = 1;
 	}
@@ -1587,7 +1586,7 @@ retry:
 	/*
 	 * We release only a lock we actually own:
 	 */
-	if ((uval & FUTEX_TID_MASK) != current->pid)
+	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
 		return -EPERM;
 	/*
 	 * First take all the futex related locks:
@@ -1608,7 +1607,7 @@ retry_unlocked:
 	 * anyone else up:
 	 */
 	if (!(uval & FUTEX_OWNER_DIED))
-		uval = cmpxchg_futex_value_locked(uaddr, current->pid, 0);
+		uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
 
 
 	if (unlikely(uval == -EFAULT))
@@ -1617,7 +1616,7 @@ retry_unlocked:
 	 * Rare case: we managed to release the lock atomically,
 	 * no need to wake anyone else up:
 	 */
-	if (unlikely(uval == current->pid))
+	if (unlikely(uval == task_pid_vnr(current)))
 		goto out_unlock;
 
 	/*
@@ -1854,7 +1853,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
 
 		ret = -ESRCH;
 		rcu_read_lock();
-		p = find_task_by_pid(pid);
+		p = find_task_by_vpid(pid);
 		if (!p)
 			goto err_unlock;
 		ret = -EPERM;
@@ -1887,7 +1886,7 @@ retry:
 	if (get_user(uval, uaddr))
 		return -1;
 
-	if ((uval & FUTEX_TID_MASK) == curr->pid) {
+	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
 		/*
 		 * Ok, this dying thread is truly holding a futex
 		 * of interest. Set the OWNER_DIED bit atomically
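
A note on the TID conversions above (not part of the patch itself): the PI-futex ABI has
userspace store the value returned by gettid() in the futex word on the uncontended fast
path, and gettid() reports the thread id as seen inside the caller's pid namespace. The
kernel-side comparisons therefore have to use task_pid_vnr()/find_task_by_vpid() instead
of the global ->pid, otherwise deadlock detection (-EDEADLK), the unlock ownership check
(-EPERM) and robust-list/OWNER_DIED cleanup would never match a task running inside a pid
namespace. The userspace sketch below only illustrates that protocol; the sys_futex()
wrapper and the pi_lock()/pi_unlock() helper names are hypothetical, not taken from any
kernel or glibc source, and error handling is omitted.

/* Illustrative userspace side of the PI-futex protocol. */
#include <linux/futex.h>
#include <stdatomic.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical thin wrapper around the futex syscall. */
static long sys_futex(uint32_t *uaddr, int op, uint32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/*
 * Fast path: cmpxchg our TID (gettid(), i.e. the namespace-local id)
 * into a free (0) futex word; on contention let the kernel queue us
 * with FUTEX_LOCK_PI.
 */
static void pi_lock(_Atomic uint32_t *futex)
{
	uint32_t expected = 0;
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	if (!atomic_compare_exchange_strong(futex, &expected, tid))
		sys_futex((uint32_t *)futex, FUTEX_LOCK_PI, 0);
}

/*
 * Fast path: clear the word if it still holds exactly our TID (no
 * FUTEX_WAITERS/FUTEX_OWNER_DIED bits set); otherwise ask the kernel
 * to hand the lock to the highest-priority waiter.
 */
static void pi_unlock(_Atomic uint32_t *futex)
{
	uint32_t expected = (uint32_t)syscall(SYS_gettid);

	if (!atomic_compare_exchange_strong(futex, &expected, 0))
		sys_futex((uint32_t *)futex, FUTEX_UNLOCK_PI, 0);
}

Because the futex word carries this namespace-local TID, every place the patch touches
(wake_futex_pi(), fixup_pi_state_owner(), futex_lock_pi(), futex_unlock_pi(),
handle_futex_death()) must write or compare the same value the task sees from gettid().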