err.no Git - linux-2.6/commitdiff
[PATCH] slab: allocate larger cache_cache if order 0 fails
author Jack Steiner <steiner@sgi.com>
Wed, 8 Mar 2006 05:55:46 +0000 (21:55 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
Wed, 8 Mar 2006 22:15:04 +0000 (14:15 -0800)
kmem_cache_init() incorrectly assumes that the cache_cache object will fit
in an order 0 allocation.  On very large systems, this is not true.  Change
the code to try larger order allocations if order 0 fails.
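
For illustration only, a minimal user-space sketch of the same retry pattern.
The estimate_objects() helper, PAGE_SIZE, MAX_ORDER, and the object size below
are simplified stand-ins, not the kernel's cache_estimate() or its real
constants:

        #include <stdio.h>

        #define PAGE_SIZE 4096UL
        #define MAX_ORDER 11            /* stand-in for the kernel constant */

        /* Hypothetical stand-in for cache_estimate(): how many objects of
         * size obj_size fit in a slab of 2^order pages? */
        static unsigned long estimate_objects(int order, unsigned long obj_size)
        {
                return (PAGE_SIZE << order) / obj_size;
        }

        int main(void)
        {
                unsigned long obj_size = 6000;  /* larger than one page */
                unsigned long num = 0;
                int order;

                /* Walk up the allocation orders until at least one object fits. */
                for (order = 0; order < MAX_ORDER; order++) {
                        num = estimate_objects(order, obj_size);
                        if (num)
                                break;
                }

                if (!num)
                        printf("object does not fit in any order\n");
                else
                        printf("order %d holds %lu object(s)\n", order, num);
                return 0;
        }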

Signed-off-by: Jack Steiner <steiner@sgi.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
mm/slab.c

index 6ad6bd5a0b3ec929c0f7783332e5781ec6f147dd..61800b88e24159dd1cd3d193a5a824e0a9887dd3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1124,6 +1124,7 @@ void __init kmem_cache_init(void)
        struct cache_sizes *sizes;
        struct cache_names *names;
        int i;
+       int order;
 
        for (i = 0; i < NUM_INIT_LISTS; i++) {
                kmem_list3_init(&initkmem_list3[i]);
@@ -1167,11 +1168,15 @@ void __init kmem_cache_init(void)
 
        cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
 
-       cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
-                      &left_over, &cache_cache.num);
+       for (order = 0; order < MAX_ORDER; order++) {
+               cache_estimate(order, cache_cache.buffer_size,
+                       cache_line_size(), 0, &left_over, &cache_cache.num);
+               if (cache_cache.num)
+                       break;
+       }
        if (!cache_cache.num)
                BUG();
-
+       cache_cache.gfporder = order;
        cache_cache.colour = left_over / cache_cache.colour_off;
        cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
                                      sizeof(struct slab), cache_line_size());