debugobjects: fix lockdep warning
author    Vegard Nossum <vegard.nossum@gmail.com>
          Sun, 31 Aug 2008 21:39:21 +0000 (23:39 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
          Mon, 1 Sep 2008 07:47:16 +0000 (09:47 +0200)
Daniel J. Blueman reported:
> =======================================================
> [ INFO: possible circular locking dependency detected ]
> 2.6.27-rc4-224c #1
> -------------------------------------------------------
> hald/4680 is trying to acquire lock:
>  (&n->list_lock){++..}, at: [<ffffffff802bfa26>] add_partial+0x26/0x80
>
> but task is already holding lock:
>  (&obj_hash[i].lock){++..}, at: [<ffffffff8041cfdc>]
> debug_object_free+0x5c/0x120

We fix it by moving the actual freeing outside the lock (the lock now
only protects the list).

The pool lock is also promoted to irq-safe (suggested by Dan). This is
necessary because free_object() is now called outside the irq-disabled
region, so we need to protect the pool against an interrupt handler
which calls debug_object_init().
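
For reference, the shape of the change boils down to the pattern below.
This is only a condensed sketch written against the structures already
in lib/debugobjects.c; the drain_bucket() wrapper is invented for
illustration and is not part of the patch.

	static void drain_bucket(struct debug_bucket *db)
	{
		struct hlist_node *node, *tmp;
		HLIST_HEAD(freelist);
		struct debug_obj *obj;
		unsigned long flags;

		/* Only the list manipulation happens under the bucket lock. */
		spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		spin_unlock_irqrestore(&db->lock, flags);

		/*
		 * The actual freeing runs with the bucket lock dropped and
		 * interrupts enabled, which is why free_object() now takes
		 * pool_lock with spin_lock_irqsave().
		 */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}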

[tglx@linutronix.de: added hlist_move_list helper to avoid looping
     through the list twice]
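
The helper moves the whole chain in one step; its pprev fixup is what
makes a later hlist_del() on the first moved entry update the new head
rather than the old, now empty one. A rough illustration (db and
freelist are placeholders, not code from the patch):

	HLIST_HEAD(freelist);

	hlist_move_list(&db->list, &freelist);
	/* freelist.first->pprev now points at &freelist.first ... */
	if (!hlist_empty(&freelist))
		hlist_del(freelist.first);	/* ... so this writes to the new head */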

Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/list.h
lib/debugobjects.c

diff --git a/include/linux/list.h b/include/linux/list.h
index db35ef02e74522881a1d5b1e00ce993280e2178b..969f6e92d0895a93b593829159bdef20220b0dcb 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -619,6 +619,19 @@ static inline void hlist_add_after(struct hlist_node *n,
                next->next->pprev  = &next->next;
 }
 
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+                                  struct hlist_head *new)
+{
+       new->first = old->first;
+       if (new->first)
+               new->first->pprev = &new->first;
+       old->first = NULL;
+}
+
 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 45a6bde762d12232fab2c1f422e398625bb35e37..e3ab374e1334ab80fc172c3d256d2eb4fddd665e 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 
 /*
  * Allocate a new object. If the pool is empty, switch off the debugger.
+ * Must be called with interrupts disabled.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 static void free_object(struct debug_obj *obj)
 {
        unsigned long idx = (unsigned long)(obj - obj_static_pool);
+       unsigned long flags;
 
        if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-               spin_lock(&pool_lock);
+               spin_lock_irqsave(&pool_lock, flags);
                hlist_add_head(&obj->node, &obj_pool);
                obj_pool_free++;
                obj_pool_used--;
-               spin_unlock(&pool_lock);
+               spin_unlock_irqrestore(&pool_lock, flags);
        } else {
-               spin_lock(&pool_lock);
+               spin_lock_irqsave(&pool_lock, flags);
                obj_pool_used--;
-               spin_unlock(&pool_lock);
+               spin_unlock_irqrestore(&pool_lock, flags);
                kmem_cache_free(obj_cache, obj);
        }
 }
@@ -171,6 +173,7 @@ static void debug_objects_oom(void)
 {
        struct debug_bucket *db = obj_hash;
        struct hlist_node *node, *tmp;
+       HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
        int i;
@@ -179,11 +182,14 @@ static void debug_objects_oom(void)
 
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                spin_lock_irqsave(&db->lock, flags);
-               hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+               hlist_move_list(&db->list, &freelist);
+               spin_unlock_irqrestore(&db->lock, flags);
+
+               /* Now free them */
+               hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
-               spin_unlock_irqrestore(&db->lock, flags);
        }
 }
 
@@ -498,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
                return;
        default:
                hlist_del(&obj->node);
+               spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
-               break;
+               return;
        }
 out_unlock:
        spin_unlock_irqrestore(&db->lock, flags);
@@ -510,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        struct hlist_node *node, *tmp;
+       HLIST_HEAD(freelist);
        struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
@@ -545,11 +553,18 @@ repeat:
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
-                               free_object(obj);
+                               hlist_add_head(&obj->node, &freelist);
                                break;
                        }
                }
                spin_unlock_irqrestore(&db->lock, flags);
+
+               /* Now free them */
+               hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+                       hlist_del(&obj->node);
+                       free_object(obj);
+               }
+
                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;
        }