X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fslab.c;h=e34bcb87a6ee19c7fadaa6a0f9ccbb5e956e8b79;hb=64c5e135bf5a2a7f0ededb3435a31adbe0202f0c;hp=6f6abef83a1af82432305245755ab690c8363081;hpb=0542170dec523d50e8bed5515e2f7314e738c8d8;p=linux-2.6

diff --git a/mm/slab.c b/mm/slab.c
index 6f6abef83a..e34bcb87a6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1568,7 +1568,7 @@ void __init kmem_cache_init(void)
 		/* Replace the static kmem_list3 structures for the boot cpu */
 		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
 
-		for_each_online_node(nid) {
+		for_each_node_state(nid, N_NORMAL_MEMORY) {
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
 
@@ -1643,6 +1643,8 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 #endif
 
 	flags |= cachep->gfpflags;
+	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
@@ -1944,7 +1946,7 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
 {
 	int node;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		cachep->nodelists[node] = &initkmem_list3[index + node];
 		cachep->nodelists[node]->next_reap = jiffies +
 		    REAPTIMEOUT_LIST3 +
@@ -2075,7 +2077,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 			g_cpucache_up = PARTIAL_L3;
 		} else {
 			int node;
-			for_each_online_node(node) {
+			for_each_node_state(node, N_NORMAL_MEMORY) {
 				cachep->nodelists[node] =
 				    kmalloc_node(sizeof(struct kmem_list3),
 						GFP_KERNEL, node);
@@ -2746,9 +2748,9 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
+	BUG_ON(flags & GFP_SLAB_BUG_MASK);
+	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
-	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
 	l3 = cachep->nodelists[nodeid];
@@ -2785,7 +2787,7 @@ static int cache_grow(struct kmem_cache *cachep,
 
 	/* Get slab management. */
 	slabp = alloc_slabmgmt(cachep, objp, offset,
-			local_flags & ~GFP_THISNODE, nodeid);
+			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
 	if (!slabp)
 		goto opps1;
 
@@ -3225,7 +3227,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 
 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
 			->node_zonelists[gfp_zone(flags)];
-	local_flags = (flags & GFP_LEVEL_MASK);
+	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry:
 	/*
@@ -3792,7 +3794,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 	struct array_cache *new_shared;
 	struct array_cache **new_alien = NULL;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 
 		if (use_alien_caches) {
 			new_alien = alloc_alien_cache(node, cachep->limit);
@@ -4446,7 +4448,8 @@ const struct seq_operations slabstats_op = {
  */
 size_t ksize(const void *objp)
 {
-	if (unlikely(ZERO_OR_NULL_PTR(objp)))
+	BUG_ON(!objp);
+	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
 	return obj_size(virt_to_cache(objp));
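
Editor's note, not part of the patch: the hunks above revolve around two ideas -- tagging page allocations from SLAB_RECLAIM_ACCOUNT caches as reclaimable, and masking an incoming gfp value down to just its reclaim-behaviour and placement-constraint bits before passing it to nested allocations. The following is a minimal, self-contained userspace sketch of that categorization under toy flag values; the T_* constants and helper names are illustrative stand-ins, not the kernel's real GFP definitions.

	/* build with: cc -Wall sketch.c -o sketch */
	#include <stdio.h>

	/* Toy "gfp" bits (illustrative only). */
	#define T_GFP_WAIT        0x01u  /* reclaim behaviour: may sleep          */
	#define T_GFP_IO          0x02u  /* reclaim behaviour: may start I/O      */
	#define T_GFP_THISNODE    0x04u  /* placement constraint: this node only  */
	#define T_GFP_HARDWALL    0x08u  /* placement constraint: cpuset hardwall */
	#define T_GFP_RECLAIMABLE 0x10u  /* mobility hint: reclaimable allocation */

	/* Categorized masks, in the spirit of GFP_RECLAIM_MASK / GFP_CONSTRAINT_MASK. */
	#define T_RECLAIM_MASK    (T_GFP_WAIT | T_GFP_IO)
	#define T_CONSTRAINT_MASK (T_GFP_THISNODE | T_GFP_HARDWALL)

	/* Cache attribute, in the spirit of SLAB_RECLAIM_ACCOUNT. */
	#define T_CACHE_RECLAIM_ACCOUNT 0x1u

	/* Mirrors the kmem_getpages() hunk: caches holding reclaimable objects
	 * tag their page allocations so the page allocator can group them. */
	static unsigned int page_alloc_flags(unsigned int cache_flags,
					     unsigned int gfp_flags)
	{
		if (cache_flags & T_CACHE_RECLAIM_ACCOUNT)
			gfp_flags |= T_GFP_RECLAIMABLE;
		return gfp_flags;
	}

	/* Mirrors the cache_grow()/fallback_alloc() hunks: keep only the bits
	 * describing reclaim behaviour and placement constraints when the slab
	 * code performs its own nested allocations. */
	static unsigned int local_alloc_flags(unsigned int gfp_flags)
	{
		return gfp_flags & (T_RECLAIM_MASK | T_CONSTRAINT_MASK);
	}

	int main(void)
	{
		/* 0x80u stands in for an unrelated bit that must not leak through. */
		unsigned int gfp = T_GFP_WAIT | T_GFP_IO | T_GFP_THISNODE | 0x80u;

		printf("page alloc flags: %#x\n",
		       page_alloc_flags(T_CACHE_RECLAIM_ACCOUNT, gfp));
		printf("local flags:      %#x\n", local_alloc_flags(gfp));
		return 0;
	}

Running the sketch prints 0x97 (the reclaimable tag added) and 0x07 (only reclaim and constraint bits retained), which is the same filtering the patch applies with GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK in place of the old GFP_LEVEL_MASK.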