X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Flockdep.c;h=0f389621bb6b42d803224db0f85d161a7c95188c;hb=cde898fa80a45bb23eab2a060fc79d0913081409;hp=55fe0c7cd95fc6776dda4f93d85fa1541200ebb1;hpb=37e58df30063e229ee5157f9d1c1fa1d749917c2;p=linux-2.6

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 55fe0c7cd9..0f389621bb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2424,7 +2424,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		return 0;
 
 	/*
-	 * Calculate the chain hash: it's the combined has of all the
+	 * Calculate the chain hash: it's the combined hash of all the
 	 * lock keys along the dependency chain. We save the hash value
 	 * at every step so that we can get the current hash easily
 	 * after unlock. The chain hash is then used to cache dependency
@@ -3054,11 +3054,6 @@ void __init lockdep_info(void)
 #endif
 }
 
-static inline int in_range(const void *start, const void *addr, const void *end)
-{
-	return addr >= start && addr <= end;
-}
-
 static void
 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
@@ -3080,6 +3075,13 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	dump_stack();
 }
 
+static inline int not_in_range(const void* mem_from, unsigned long mem_len,
+				const void* lock_from, unsigned long lock_len)
+{
+	return lock_from + lock_len <= mem_from ||
+		mem_from + mem_len <= lock_from;
+}
+
 /*
  * Called when kernel memory is freed (or unmapped), or if a lock
  * is destroyed or reinitialized - this code checks whether there is
@@ -3087,7 +3089,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
  */
 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 {
-	const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
 	struct task_struct *curr = current;
 	struct held_lock *hlock;
 	unsigned long flags;
@@ -3100,14 +3101,11 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		lock_from = (void *)hlock->instance;
-		lock_to = (void *)(hlock->instance + 1);
-
-		if (!in_range(mem_from, lock_from, mem_to) &&
-		    !in_range(mem_from, lock_to, mem_to))
+		if (not_in_range(mem_from, mem_len, hlock->instance,
+					sizeof(*hlock->instance)))
 			continue;
 
-		print_freed_lock_bug(curr, mem_from, mem_to, hlock);
+		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
 	local_irq_restore(flags);
@@ -3173,6 +3171,13 @@ retry:
 		printk(" locked it.\n");
 
 	do_each_thread(g, p) {
+		/*
+		 * It's not reliable to print a task's held locks
+		 * if it's not sleeping (or if it's not the current
+		 * task):
+		 */
+		if (p->state == TASK_RUNNING && p != current)
+			continue;
 		if (p->lockdep_depth)
 			lockdep_print_held_locks(p);
 		if (!unlock)
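Note (illustration only, not part of the patch above): the new not_in_range() helper replaces the two in_range() endpoint checks with a single interval-overlap test, so it also catches the case where the freed region [mem_from, mem_from + mem_len) contains or falls inside the lock instance, which the old endpoint-only checks could miss. Below is a minimal user-space sketch of the same test; the buffer, offsets and main() harness are invented for the example, and const char * is used instead of const void * so the pointer arithmetic is plain standard C:

#include <stdio.h>

/*
 * Two half-open ranges [mem_from, mem_from + mem_len) and
 * [lock_from, lock_from + lock_len) are disjoint exactly when
 * one of them ends at or before the start of the other.
 */
static inline int not_in_range(const char *mem_from, unsigned long mem_len,
			       const char *lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
	       mem_from + mem_len <= lock_from;
}

int main(void)
{
	char buf[64];

	/* lock object embedded in the freed buffer: ranges overlap, prints 0 */
	printf("%d\n", not_in_range(buf, sizeof(buf), buf + 16, 8));
	/* lock object outside the freed part of the buffer: disjoint, prints 1 */
	printf("%d\n", not_in_range(buf, 16, buf + 32, 8));
	return 0;
}

In debug_check_no_locks_freed() above, a 0 return (ranges overlap) means a currently held lock lives inside the memory that is about to be freed, so print_freed_lock_bug() is called for that held_lock.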