diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7043db21bb..d046a345d3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -38,25 +39,27 @@
  *
  * It is not allowed to initialize an already locked mutex.
  */
-void fastcall __mutex_init(struct mutex *lock, const char *name)
+void
+__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
 
-	debug_mutex_init(lock, name);
+	debug_mutex_init(lock, name, key);
 }
 
 EXPORT_SYMBOL(__mutex_init);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
+static void noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock - acquire the mutex
@@ -79,7 +82,7 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void fastcall __sched mutex_lock(struct mutex *lock)
+void inline __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -90,9 +93,9 @@ void fastcall __sched mutex_lock(struct mutex *lock)
 }
 
 EXPORT_SYMBOL(mutex_lock);
+#endif
 
-static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
+static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -105,7 +108,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
  *
  * This function is similar to (but not equivalent to) up().
  */
-void fastcall __sched mutex_unlock(struct mutex *lock)
+void __sched mutex_unlock(struct mutex *lock)
 {
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
@@ -120,23 +123,30 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+		    unsigned long ip)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned int old_val;
 	unsigned long flags;
 
-	debug_mutex_init_waiter(&waiter);
-
 	spin_lock_mutex(&lock->wait_lock, flags);
 
-	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
+	debug_mutex_lock_common(lock, &waiter);
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
+	old_val = atomic_xchg(&lock->count, -1);
+	if (old_val == 1)
+		goto done;
+
+	lock_contended(&lock->dep_map, ip);
+
 	for (;;) {
 		/*
 		 * Let's try to take the lock again - this is needed even if
@@ -155,9 +165,13 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-					signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task->thread_info);
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+					signal_pending(task)) ||
+			     (state == TASK_KILLABLE &&
+					fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					    task_thread_info(task));
+			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
@@ -171,9 +185,11 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
+done:
+	lock_acquired(&lock->dep_map);
 	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, task->thread_info);
-	debug_mutex_set_owner(lock, task->thread_info __IP__);
+	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+	debug_mutex_set_owner(lock, task_thread_info(task));
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -183,32 +199,49 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 
 	debug_mutex_free_waiter(&waiter);
 
-	DEBUG_WARN_ON(list_empty(&lock->held_list));
-	DEBUG_WARN_ON(lock->owner != task->thread_info);
-
 	return 0;
 }
 
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __sched
+mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	might_sleep();
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
+}
+
+EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
+int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
 }
 
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+int __sched
+mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+}
+
+EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
+#endif
+
 /*
  * Release the lock, slowpath:
  */
-static fastcall noinline void
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
+static inline void
+__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	DEBUG_WARN_ON(lock->owner != current_thread_info());
-
 	spin_lock_mutex(&lock->wait_lock, flags);
+	mutex_release(&lock->dep_map, nested, _RET_IP_);
+	debug_mutex_unlock(lock);
 
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
@@ -218,8 +251,6 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
-	debug_mutex_unlock(lock);
-
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =
@@ -236,12 +267,25 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
+/*
+ * Release the lock, slowpath:
+ */
+static noinline void
+__mutex_unlock_slowpath(atomic_t *lock_count)
+{
+	__mutex_unlock_common_slowpath(lock_count, 1);
+}
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
  */
-static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
+static noinline int __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int __sched
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock_interruptible - acquire the mutex, interruptible
@@ -254,7 +298,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
  *
  * This function is similar to (but not equivalent to) down_interruptible().
  */
-int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
+int __sched mutex_lock_interruptible(struct mutex *lock)
 {
 	might_sleep();
 	return __mutex_fastpath_lock_retval
@@ -263,14 +307,39 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
-static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
+int __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+		(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
+static noinline void __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
+static noinline int __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
 }
 
+static noinline int __sched
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
+}
+#endif
+
 /*
  * Spinlock based trylock, we take the spinlock and check whether we
  * can get the lock:
@@ -284,8 +353,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	prev = atomic_xchg(&lock->count, -1);
-	if (likely(prev == 1))
-		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
+	if (likely(prev == 1)) {
+		debug_mutex_set_owner(lock, current_thread_info());
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	}
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -309,7 +380,7 @@
  * This function must not be used in interrupt context. The
  * mutex must be released by the same task that acquired it.
  */
-int fastcall mutex_trylock(struct mutex *lock)
+int __sched mutex_trylock(struct mutex *lock)
 {
 	return __mutex_fastpath_trylock(&lock->count,
 					__mutex_trylock_slowpath);
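
For reference, here is a minimal usage sketch of the interfaces this diff introduces: mutex_lock_killable(), whose wait is aborted by a fatal signal, and the lockdep-annotated mutex_lock_nested() for taking two locks of the same lock class in a fixed order. The sketch is not part of the patch; struct my_dev, my_dev_init(), my_dev_read() and my_dev_copy() are hypothetical client code.

/*
 * Illustrative only; "my_dev" and these functions are made-up names.
 */
#include <linux/mutex.h>
#include <linux/errno.h>

struct my_dev {
	struct mutex	lock;
	int		value;
};

static void my_dev_init(struct my_dev *dev)
{
	/*
	 * mutex_init() is a macro: it creates a static lock_class_key
	 * at the call site, which is the "key" argument that
	 * __mutex_init() gains in this diff.
	 */
	mutex_init(&dev->lock);
	dev->value = 0;
}

static int my_dev_read(struct my_dev *dev, int *out)
{
	/*
	 * TASK_KILLABLE wait: only a fatal signal (fatal_signal_pending())
	 * aborts the sleep, in which case we return -EINTR instead of
	 * leaving a SIGKILLed task stuck in uninterruptible sleep.
	 */
	if (mutex_lock_killable(&dev->lock))
		return -EINTR;
	*out = dev->value;
	mutex_unlock(&dev->lock);
	return 0;
}

/*
 * Both locks belong to the same lock class, so the second acquisition
 * must carry a subclass annotation; otherwise lockdep would report a
 * possible recursive deadlock. The caller must still guarantee a
 * consistent lock order between any two devices.
 */
static void my_dev_copy(struct my_dev *dst, struct my_dev *src)
{
	mutex_lock(&src->lock);
	mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
	dst->value = src->value;
	mutex_unlock(&dst->lock);
	mutex_unlock(&src->lock);
}

Note the design split visible in the diff: with CONFIG_DEBUG_LOCK_ALLOC enabled, the atomic fastpath wrappers are compiled out (the #ifndef CONFIG_DEBUG_LOCK_ALLOC blocks) and every acquisition goes through __mutex_lock_common(), so the validator can record it via mutex_acquire() and, with contention statistics, via lock_contended()/lock_acquired(). The _nested variants exist so a caller can pass the subclass through to that common slowpath.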