index ef9f60fe37d6fec194be570189c7063287c829a4..f055c14202161a7b561599536cfb269240bbd1dc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
                if (cache_cache.num)
                        break;
        }
-       if (!cache_cache.num)
-               BUG();
+       BUG_ON(!cache_cache.num);
        cache_cache.gfporder = order;
        cache_cache.colour = left_over / cache_cache.colour_off;
        cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
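
This hunk, and most of those below, mechanically convert the open-coded pattern "if (cond) BUG();" into the equivalent one-line BUG_ON(cond) macro, which reads as an assertion. The following is a minimal userspace sketch of that equivalence; the BUG() and BUG_ON() definitions here are simplified stand-ins for the kernel's real macros (which may also add unlikely() and richer crash reporting), and main() is purely illustrative.

        #include <stdio.h>
        #include <stdlib.h>

        /* Simplified stand-ins for the kernel's BUG()/BUG_ON() -- illustration only. */
        #define BUG() \
                do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
        #define BUG_ON(cond) do { if (cond) BUG(); } while (0)

        int main(void)
        {
                int num = 26;           /* pretend this is cache_cache.num after sizing */

                /* Old style, as removed by the patch: */
                if (!num)
                        BUG();

                /* New style, as added by the patch -- identical behaviour, one line: */
                BUG_ON(!num);

                printf("num = %d, no BUG triggered\n", num);
                return 0;
        }
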
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         * Always checks flags, a caller might be expecting debug support which
         * isn't available.
         */
-       if (flags & ~CREATE_MASK)
-               BUG();
+       BUG_ON(flags & ~CREATE_MASK);
 
        /*
         * Check that size is in terms of words.  This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
 
                slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-               if (slabp->inuse)
-                       BUG();
+               BUG_ON(slabp->inuse);
 #endif
                list_del(&slabp->list);
 
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-       if (!cachep || in_interrupt())
-               BUG();
+       BUG_ON(!cachep || in_interrupt());
 
        return __cache_shrink(cachep);
 }
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
        int i;
        struct kmem_list3 *l3;
 
-       if (!cachep || in_interrupt())
-               BUG();
+       BUG_ON(!cachep || in_interrupt());
 
        /* Don't let CPUs to come and go */
        lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
         * Be lazy and only check for valid flags here,  keeping it out of the
         * critical path in kmem_cache_alloc().
         */
-       if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-               BUG();
+       BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
        if (flags & SLAB_NO_GROW)
                return 0;
 
@@ -3311,7 +3305,7 @@ void *__alloc_percpu(size_t size)
         * and we have no way of figuring out how to fix the array
         * that we have allocated then....
         */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                int node = cpu_to_node(i);
 
                if (node_online(node))
@@ -3398,7 +3392,7 @@ void free_percpu(const void *objp)
        /*
         * We allocate for all cpus so we cannot use for online cpu here.
         */
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
            kfree(p->ptrs[i]);
        kfree(p);
 }
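
The two hunks above switch __alloc_percpu() and free_percpu() from for_each_cpu() to the more explicit for_each_possible_cpu(): the per-CPU pointer array is sized and filled for every possible CPU, so the teardown must walk the same set rather than only the CPUs currently online. Below is a rough userspace sketch of that allocate-for-all / free-for-all pairing; NR_POSSIBLE_CPUS, struct percpu_data and the *_sketch() helpers are invented stand-ins for illustration, not kernel API.

        #include <stdlib.h>

        #define NR_POSSIBLE_CPUS 4              /* stand-in for the kernel's possible-CPU mask */

        struct percpu_data {
                void *ptrs[NR_POSSIBLE_CPUS];
        };

        /* Allocate one object per *possible* CPU, loosely mirroring __alloc_percpu(). */
        static struct percpu_data *alloc_percpu_sketch(size_t size)
        {
                struct percpu_data *p = malloc(sizeof(*p));
                int i;

                if (!p)
                        return NULL;
                for (i = 0; i < NR_POSSIBLE_CPUS; i++) {        /* for_each_possible_cpu(i) */
                        p->ptrs[i] = calloc(1, size);
                        if (!p->ptrs[i])
                                goto unwind;
                }
                return p;

        unwind:
                /* Undo only the slots filled so far, then the descriptor itself. */
                while (--i >= 0)
                        free(p->ptrs[i]);
                free(p);
                return NULL;
        }

        /* The teardown must visit every possible CPU too, as free_percpu() now does. */
        static void free_percpu_sketch(struct percpu_data *p)
        {
                int i;

                for (i = 0; i < NR_POSSIBLE_CPUS; i++)          /* for_each_possible_cpu(i) */
                        free(p->ptrs[i]);
                free(p);
        }

        int main(void)
        {
                struct percpu_data *p = alloc_percpu_sketch(64);

                if (p)
                        free_percpu_sketch(p);
                return 0;
        }
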
@@ -3418,7 +3412,7 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
- * This initializes kmem_list3 for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
  */
 static int alloc_kmemlist(struct kmem_cache *cachep)
 {
@@ -3433,10 +3427,13 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
                if (!new_alien)
                        goto fail;
 
-               new_shared = alloc_arraycache(node, cachep->shared*cachep->batchcount,
+               new_shared = alloc_arraycache(node,
+                               cachep->shared*cachep->batchcount,
                                        0xbaadf00d);
-               if (!new_shared)
+               if (!new_shared) {
+                       free_alien_cache(new_alien);
                        goto fail;
+               }
 
                l3 = cachep->nodelists[node];
                if (l3) {
@@ -3445,7 +3442,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
                        spin_lock_irq(&l3->list_lock);
 
                        if (shared)
-                               free_block(cachep, shared->entry, shared->avail, node);
+                               free_block(cachep, shared->entry,
+                                               shared->avail, node);
 
                        l3->shared = new_shared;
                        if (!l3->alien) {
@@ -3460,8 +3458,11 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
                        continue;
                }
                l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
-               if (!l3)
+               if (!l3) {
+                       free_alien_cache(new_alien);
+                       kfree(new_shared);
                        goto fail;
+               }
 
                kmem_list3_init(l3);
                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
@@ -3473,7 +3474,23 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
                cachep->nodelists[node] = l3;
        }
        return 0;
+
 fail:
+       if (!cachep->next.next) {
+               /* Cache is not active yet. Roll back what we did */
+               node--;
+               while (node >= 0) {
+                       if (cachep->nodelists[node]) {
+                               l3 = cachep->nodelists[node];
+
+                               kfree(l3->shared);
+                               free_alien_cache(l3->alien);
+                               kfree(l3);
+                               cachep->nodelists[node] = NULL;
+                       }
+                       node--;
+               }
+       }
        return -ENOMEM;
 }
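
Beyond the style cleanups, the alloc_kmemlist() hunks tighten the error path: when alloc_arraycache() or kmalloc_node() fails part-way through the per-node loop, the objects already allocated in that iteration are freed before jumping to fail, and if the cache is not yet active (!cachep->next.next) the nodes set up by earlier iterations are rolled back in reverse. The following is a compressed userspace sketch of that partial-initialization rollback idiom; MAX_NODES, struct node_state and init_all_nodes() are invented names for illustration, not the kernel's.

        #include <stdlib.h>

        #define MAX_NODES 4                     /* stand-in for the number of NUMA nodes */

        struct node_state {                     /* stand-in for struct kmem_list3 + its caches */
                void *shared;
                void *alien;
        };

        static struct node_state *nodelists[MAX_NODES];

        static int init_all_nodes(void)
        {
                int node;

                for (node = 0; node < MAX_NODES; node++) {
                        struct node_state *l3 = calloc(1, sizeof(*l3));

                        if (!l3)
                                goto fail;
                        l3->shared = malloc(32);
                        l3->alien = malloc(32);
                        if (!l3->shared || !l3->alien) {
                                /* Free what this iteration grabbed before bailing out. */
                                free(l3->shared);
                                free(l3->alien);
                                free(l3);
                                goto fail;
                        }
                        nodelists[node] = l3;
                }
                return 0;

        fail:
                /* Roll back the nodes initialized by earlier iterations, newest first. */
                for (node--; node >= 0; node--) {
                        struct node_state *l3 = nodelists[node];

                        if (!l3)
                                continue;
                        free(l3->shared);
                        free(l3->alien);
                        free(l3);
                        nodelists[node] = NULL;
                }
                return -1;                      /* the kernel returns -ENOMEM here */
        }

        int main(void)
        {
                return init_all_nodes() ? 1 : 0;
        }
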