X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Flockdep.c;h=734da579ad134779685cbf0291fbc6e14e9cc133;hb=803dedb60849a8e4ec38e66ca41f51188c18a87d;hp=a8dc99d9fef7566a48884d7d9ae1b2a177c27906;hpb=c46261de0d98372112d8edf16f74ce418a268d46;p=linux-2.6

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index a8dc99d9fe..734da579ad 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -5,7 +5,8 @@
  *
  * Started by Ingo Molnar:
  *
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * this code maps all the lock dependencies as they occur in a live kernel
  * and will warn about the following classes of locking bugs:
@@ -37,6 +38,7 @@
 #include
 #include
 #include
+#include <linux/hash.h>
 #include
@@ -175,6 +177,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
 		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
 		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+
+		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
+			stats.bounces[i] += pcs->bounces[i];
 	}

 	return stats;
@@ -238,8 +243,7 @@ LIST_HEAD(all_lock_classes);
  */
 #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
 #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
-#define CLASSHASH_MASK		(CLASSHASH_SIZE - 1)
-#define __classhashfn(key)	((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
+#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)	(classhash_table + __classhashfn((key)))

 static struct list_head classhash_table[CLASSHASH_SIZE];
@@ -250,9 +254,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
  */
 #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
 #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
-#define CHAINHASH_MASK		(CHAINHASH_SIZE - 1)
-#define __chainhashfn(chain) \
-		(((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
+#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

 static struct list_head chainhash_table[CHAINHASH_SIZE];
@@ -373,6 +375,11 @@ unsigned int max_recursion_depth;
  * about it later on, in lockdep_info().
  */
 static int lockdep_init_error;
+static unsigned long lockdep_init_trace_data[20];
+static struct stack_trace lockdep_init_trace = {
+	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
+	.entries = lockdep_init_trace_data,
+};

 /*
  * Various lockdep statistics:
@@ -660,6 +667,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	if (unlikely(!lockdep_initialized)) {
 		lockdep_init();
 		lockdep_init_error = 1;
+		save_stack_trace(&lockdep_init_trace);
 	}
 #endif

@@ -676,7 +684,8 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 * (or spin_lock_init()) call - which acts as the key. For static
 	 * locks we use the lock object itself as the key.
 	 */
-	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
+	BUILD_BUG_ON(sizeof(struct lock_class_key) >
+			sizeof(struct lockdep_map));

 	key = lock->key->subkeys + subclass;

@@ -686,9 +695,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 * We can walk the hash lockfree, because the hash only
 	 * grows, and we are careful when adding entries to the end:
 	 */
-	list_for_each_entry(class, hash_head, hash_entry)
-		if (class->key == key)
+	list_for_each_entry(class, hash_head, hash_entry) {
+		if (class->key == key) {
+			WARN_ON_ONCE(class->name != lock->name);
 			return class;
+		}
+	}

 	return NULL;
 }
@@ -2322,6 +2334,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lock->name = name;
 	lock->key = key;
 	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
@@ -2772,6 +2787,8 @@ found_it:
 	stats = get_lock_stats(hlock->class);
 	if (point < ARRAY_SIZE(stats->contention_point))
 		stats->contention_point[i]++;
+	if (lock->cpu != smp_processor_id())
+		stats->bounces[bounce_contended + !!hlock->read]++;
 	put_lock_stats(stats);
 }

@@ -2783,8 +2800,8 @@ __lock_acquired(struct lockdep_map *lock)
 	struct lock_class_stats *stats;
 	unsigned int depth;
 	u64 now;
-	s64 waittime;
-	int i;
+	s64 waittime = 0;
+	int i, cpu;

 	depth = curr->lockdep_depth;
 	if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -2806,19 +2823,25 @@ __lock_acquired(struct lockdep_map *lock)
 	return;

 found_it:
-	if (!hlock->waittime_stamp)
-		return;
-
-	now = sched_clock();
-	waittime = now - hlock->waittime_stamp;
-	hlock->holdtime_stamp = now;
+	cpu = smp_processor_id();
+	if (hlock->waittime_stamp) {
+		now = sched_clock();
+		waittime = now - hlock->waittime_stamp;
+		hlock->holdtime_stamp = now;
+	}

 	stats = get_lock_stats(hlock->class);
-	if (hlock->read)
-		lock_time_inc(&stats->read_waittime, waittime);
-	else
-		lock_time_inc(&stats->write_waittime, waittime);
+	if (waittime) {
+		if (hlock->read)
+			lock_time_inc(&stats->read_waittime, waittime);
+		else
+			lock_time_inc(&stats->write_waittime, waittime);
+	}
+	if (lock->cpu != cpu)
+		stats->bounces[bounce_acquired + !!hlock->read]++;
 	put_lock_stats(stats);
+
+	lock->cpu = cpu;
 }

 void lock_contended(struct lockdep_map *lock, unsigned long ip)
@@ -3023,8 +3046,11 @@ void __init lockdep_info(void)
 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);

 #ifdef CONFIG_DEBUG_LOCKDEP
-	if (lockdep_init_error)
-		printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n");
+	if (lockdep_init_error) {
+		printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+		printk("Call stack leading to lockdep invocation was:\n");
+		print_stack_trace(&lockdep_init_trace, 0);
+	}
 #endif
 }
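For reference, the __classhashfn()/__chainhashfn() hunks above replace an open-coded shift-and-add hash with hash_long() from <linux/hash.h>. Below is a minimal, userspace-only sketch of that style of multiplicative hashing; the constant and the demo_* names are illustrative assumptions, not the kernel's actual <linux/hash.h> implementation:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative stand-in for hash_long(): multiply by a large odd
 * constant (2^64 divided by the golden ratio, a common choice for
 * multiplicative hashing) and keep only the top 'bits' bits.
 */
static unsigned long demo_hash_long(unsigned long val, unsigned int bits)
{
	uint64_t hash = (uint64_t)val * 0x9e3779b97f4a7c15ULL;

	return (unsigned long)(hash >> (64 - bits));
}

#define DEMO_CLASSHASH_BITS	12
#define DEMO_CLASSHASH_SIZE	(1UL << DEMO_CLASSHASH_BITS)

/* Analogue of __classhashfn(): derive a bucket index from a key address. */
static unsigned long demo_classhashfn(const void *key)
{
	return demo_hash_long((unsigned long)key, DEMO_CLASSHASH_BITS);
}

int main(void)
{
	static int some_lock_key;	/* stands in for a struct lock_class_key */

	printf("bucket %lu of %lu\n",
	       demo_classhashfn(&some_lock_key), DEMO_CLASSHASH_SIZE);
	return 0;
}

The multiply-based hash spreads pointer-valued keys (which tend to differ only in a few middle bits) across the table more evenly than folding the high bits into the low bits, which is presumably why the patch switches both the class and chain hashes to hash_long().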