[PATCH] slab: fix drain_array() so that it works correctly with the shared_array
author    Christoph Lameter <clameter@engr.sgi.com>
          Wed, 22 Mar 2006 08:09:07 +0000 (00:09 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
          Wed, 22 Mar 2006 15:54:06 +0000 (07:54 -0800)
The list_lock also protects the shared array, and we call drain_array() with
the shared array.  Therefore we cannot go as far as I wanted to, but have to
take the lock in such a way that it also protects the array_cache inside
drain_array().

(Note: maybe we should make the array_cache locking more consistent?  I.e.
always take the array-cache lock for shared arrays and disable interrupts
for the per-cpu arrays?)
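
For context, here is a minimal sketch of the two conventions that note refers
to, based on 2.6.16-era mm/slab.c.  cpu_cache_get(), kmem_list3 and
l3->list_lock are real identifiers from that code; the two function bodies
below are illustrative only, not actual kernel code.

/*
 * Per-cpu arrays are only ever touched by their owning CPU, so
 * disabling interrupts on that CPU is sufficient:
 */
static void *sketch_percpu_access(struct kmem_cache *cachep)
{
	struct array_cache *ac;
	void *objp = NULL;

	local_irq_disable();
	ac = cpu_cache_get(cachep);	/* this CPU's array_cache */
	if (ac->avail) {
		ac->touched = 1;
		objp = ac->entry[--ac->avail];
	}
	local_irq_enable();
	return objp;
}

/*
 * The shared array hangs off the per-node kmem_list3 and may be
 * touched by any CPU on the node, so it is serialized by the same
 * l3->list_lock that protects the slab lists:
 */
static void sketch_shared_access(struct kmem_list3 *l3)
{
	spin_lock_irq(&l3->list_lock);
	if (l3->shared && l3->shared->avail) {
		/* move objects between the shared array and the slabs */
	}
	spin_unlock_irq(&l3->list_lock);
}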

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
mm/slab.c

index 3274144c0d16b78c0fd5ffa265e17b9c07c2cb42..6b691ecbac44916d48a45c5e99505a38e7dff171 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3521,7 +3521,8 @@ static void enable_cpucache(struct kmem_cache *cachep)
 
 /*
  * Drain an array if it contains any elements taking the l3 lock only if
- * necessary.
+ * necessary. Note that the l3 listlock also protects the array_cache
+ * if drain_array() is used on the shared array.
  */
 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
                         struct array_cache *ac, int force, int node)
@@ -3532,16 +3533,18 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
                return;
        if (ac->touched && !force) {
                ac->touched = 0;
-       } else if (ac->avail) {
-               tofree = force ? ac->avail : (ac->limit + 4) / 5;
-               if (tofree > ac->avail)
-                       tofree = (ac->avail + 1) / 2;
+       } else {
                spin_lock_irq(&l3->list_lock);
-               free_block(cachep, ac->entry, tofree, node);
+               if (ac->avail) {
+                       tofree = force ? ac->avail : (ac->limit + 4) / 5;
+                       if (tofree > ac->avail)
+                               tofree = (ac->avail + 1) / 2;
+                       free_block(cachep, ac->entry, tofree, node);
+                       ac->avail -= tofree;
+                       memmove(ac->entry, &(ac->entry[tofree]),
+                               sizeof(void *) * ac->avail);
+               }
                spin_unlock_irq(&l3->list_lock);
-               ac->avail -= tofree;
-               memmove(ac->entry, &(ac->entry[tofree]),
-                       sizeof(void *) * ac->avail);
        }
 }
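
For reference, a hedged sketch of the call sites that make the shared-array
case matter.  cache_reap(), cpu_cache_get(), nodelists and l3->shared are
identifiers from 2.6.16-era mm/slab.c, but the body below is a simplified
illustration of cache_reap(), not the actual function.

static void sketch_cache_reap(struct kmem_cache *searchp, int node)
{
	struct kmem_list3 *l3 = searchp->nodelists[node];

	/*
	 * Per-cpu array: only this CPU touches the entries, but
	 * free_block() still needs l3->list_lock, which drain_array()
	 * takes internally.
	 */
	drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);

	/*
	 * Shared array: reachable from every CPU on the node and
	 * protected only by l3->list_lock.  After this patch the
	 * ->avail test, free_block() and the memmove() all run with
	 * the lock held, so a concurrent CPU cannot change ->avail
	 * between the check and the drain.
	 */
	if (l3->shared)
		drain_array(searchp, l3, l3->shared, 0, node);
}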