X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fslab.c;h=e34bcb87a6ee19c7fadaa6a0f9ccbb5e956e8b79;hb=64c5e135bf5a2a7f0ededb3435a31adbe0202f0c;hp=a453383333fc02f4c9bfc342bedb88eeb454f685;hpb=02b2318e07f98a7cdf7089a4457a8d62424aa824;p=linux-2.6 diff --git a/mm/slab.c b/mm/slab.c index a453383333..e34bcb87a6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -775,6 +775,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size, */ BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); #endif + if (!size) + return ZERO_SIZE_PTR; + while (size > csizep->cs_size) csizep++; @@ -880,6 +883,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep, */ static int use_alien_caches __read_mostly = 1; +static int numa_platform __read_mostly = 1; static int __init noaliencache_setup(char *s) { use_alien_caches = 0; @@ -1160,7 +1164,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, struct kmem_cache *cachep; struct kmem_list3 *l3 = NULL; int node = cpu_to_node(cpu); - int memsize = sizeof(struct kmem_list3); + const int memsize = sizeof(struct kmem_list3); switch (action) { case CPU_LOCK_ACQUIRE: @@ -1396,8 +1400,10 @@ void __init kmem_cache_init(void) int order; int node; - if (num_possible_nodes() == 1) + if (num_possible_nodes() == 1) { use_alien_caches = 0; + numa_platform = 0; + } for (i = 0; i < NUM_INIT_LISTS; i++) { kmem_list3_init(&initkmem_list3[i]); @@ -1481,7 +1487,7 @@ void __init kmem_cache_init(void) sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL, NULL); + NULL); if (INDEX_AC != INDEX_L3) { sizes[INDEX_L3].cs_cachep = @@ -1489,7 +1495,7 @@ void __init kmem_cache_init(void) sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL, NULL); + NULL); } slab_early_init = 0; @@ -1507,7 +1513,7 @@ void __init kmem_cache_init(void) sizes->cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL, NULL); + NULL); } #ifdef CONFIG_ZONE_DMA sizes->cs_dmacachep = kmem_cache_create( @@ -1516,7 +1522,7 @@ void __init kmem_cache_init(void) ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC, - NULL, NULL); + NULL); #endif sizes++; names++; @@ -1562,7 +1568,7 @@ void __init kmem_cache_init(void) /* Replace the static kmem_list3 structures for the boot cpu */ init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node); - for_each_online_node(nid) { + for_each_node_state(nid, N_NORMAL_MEMORY) { init_list(malloc_sizes[INDEX_AC].cs_cachep, &initkmem_list3[SIZE_AC + nid], nid); @@ -1637,6 +1643,8 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) #endif flags |= cachep->gfpflags; + if (cachep->flags & SLAB_RECLAIM_ACCOUNT) + flags |= __GFP_RECLAIMABLE; page = alloc_pages_node(nodeid, flags, cachep->gfporder); if (!page) @@ -1938,7 +1946,7 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index) { int node; - for_each_online_node(node) { + for_each_node_state(node, N_NORMAL_MEMORY) { cachep->nodelists[node] = &initkmem_list3[index + node]; cachep->nodelists[node]->next_reap = jiffies + REAPTIMEOUT_LIST3 + @@ -2069,7 +2077,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) g_cpucache_up = PARTIAL_L3; } else { int node; - for_each_online_node(node) { + for_each_node_state(node, N_NORMAL_MEMORY) { cachep->nodelists[node] = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); @@ -2098,12 +2106,10 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) * @align: The required 
alignment for the objects. * @flags: SLAB flags * @ctor: A constructor for the objects. - * @dtor: A destructor for the objects (not implemented anymore). * * Returns a ptr to the cache on success, NULL on failure. * Cannot be called within a int, but can be interrupted. - * The @ctor is run when new pages are allocated by the cache - * and the @dtor is run before the pages are handed back. + * The @ctor is run when new pages are allocated by the cache. * * @name must be valid until the cache is destroyed. This implies that * the module calling this has to destroy the cache before getting unloaded. @@ -2123,8 +2129,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) struct kmem_cache * kmem_cache_create (const char *name, size_t size, size_t align, unsigned long flags, - void (*ctor)(void*, struct kmem_cache *, unsigned long), - void (*dtor)(void*, struct kmem_cache *, unsigned long)) + void (*ctor)(void*, struct kmem_cache *, unsigned long)) { size_t left_over, slab_size, ralign; struct kmem_cache *cachep = NULL, *pc; @@ -2133,7 +2138,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, * Sanity checks... these are all serious usage bugs. */ if (!name || in_interrupt() || (size < BYTES_PER_WORD) || - size > KMALLOC_MAX_SIZE || dtor) { + size > KMALLOC_MAX_SIZE) { printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, name); BUG(); @@ -2351,7 +2356,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, * this should not happen at all. * But leave a BUG_ON for some lucky dude. */ - BUG_ON(!cachep->slabp_cache); + BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); } cachep->ctor = ctor; cachep->name = name; @@ -2743,9 +2748,9 @@ static int cache_grow(struct kmem_cache *cachep, * Be lazy and only check for valid flags here, keeping it out of the * critical path in kmem_cache_alloc(). */ - BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); + BUG_ON(flags & GFP_SLAB_BUG_MASK); + local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); - local_flags = (flags & GFP_LEVEL_MASK); /* Take the l3 list lock to change the colour_next on this node */ check_irq_off(); l3 = cachep->nodelists[nodeid]; @@ -2776,13 +2781,13 @@ static int cache_grow(struct kmem_cache *cachep, * 'nodeid'. */ if (!objp) - objp = kmem_getpages(cachep, flags, nodeid); + objp = kmem_getpages(cachep, local_flags, nodeid); if (!objp) goto failed; /* Get slab management. 
*/ slabp = alloc_slabmgmt(cachep, objp, offset, - local_flags & ~GFP_THISNODE, nodeid); + local_flags & ~GFP_CONSTRAINT_MASK, nodeid); if (!slabp) goto opps1; @@ -3222,7 +3227,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) zonelist = &NODE_DATA(slab_node(current->mempolicy)) ->node_zonelists[gfp_zone(flags)]; - local_flags = (flags & GFP_LEVEL_MASK); + local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); retry: /* @@ -3389,6 +3394,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, local_irq_restore(save_flags); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); + if (unlikely((flags & __GFP_ZERO) && ptr)) + memset(ptr, 0, obj_size(cachep)); + return ptr; } @@ -3440,6 +3448,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); prefetchw(objp); + if (unlikely((flags & __GFP_ZERO) && objp)) + memset(objp, 0, obj_size(cachep)); + return objp; } @@ -3552,7 +3563,14 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp) check_irq_off(); objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); - if (cache_free_alien(cachep, objp)) + /* + * Skip calling cache_free_alien() when the platform is not numa. + * This will avoid cache misses that happen while accessing slabp (which + * is per page memory reference) to get nodeid. Instead use a global + * variable to skip the call, which is mostly likely to be present in + * the cache. + */ + if (numa_platform && cache_free_alien(cachep, objp)) return; if (likely(ac->avail < ac->limit)) { @@ -3580,23 +3598,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) } EXPORT_SYMBOL(kmem_cache_alloc); -/** - * kmem_cache_zalloc - Allocate an object. The memory is set to zero. - * @cache: The cache to allocate from. - * @flags: See kmalloc(). - * - * Allocate an object from this cache and set the allocated memory to zero. - * The flags are only relevant if the cache has no available objects. - */ -void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags) -{ - void *ret = __cache_alloc(cache, flags, __builtin_return_address(0)); - if (ret) - memset(ret, 0, obj_size(cache)); - return ret; -} -EXPORT_SYMBOL(kmem_cache_zalloc); - /** * kmem_ptr_validate - check if an untrusted pointer might * be a slab entry. @@ -3653,8 +3654,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) struct kmem_cache *cachep; cachep = kmem_find_general_cachep(size, flags); - if (unlikely(cachep == NULL)) - return NULL; + if (unlikely(ZERO_OR_NULL_PTR(cachep))) + return cachep; return kmem_cache_alloc_node(cachep, flags, node); } @@ -3698,8 +3699,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, * functions. */ cachep = __find_general_cachep(size, flags); - if (unlikely(cachep == NULL)) - return NULL; + if (unlikely(ZERO_OR_NULL_PTR(cachep))) + return cachep; return __cache_alloc(cachep, flags, caller); } @@ -3725,52 +3726,6 @@ void *__kmalloc(size_t size, gfp_t flags) EXPORT_SYMBOL(__kmalloc); #endif -/** - * krealloc - reallocate memory. The contents will remain unchanged. - * @p: object to reallocate memory for. - * @new_size: how many bytes of memory are required. - * @flags: the type of memory to allocate. - * - * The contents of the object pointed to are preserved up to the - * lesser of the new and old sizes. If @p is %NULL, krealloc() - * behaves exactly like kmalloc(). 
If @size is 0 and @p is not a - * %NULL pointer, the object pointed to is freed. - */ -void *krealloc(const void *p, size_t new_size, gfp_t flags) -{ - struct kmem_cache *cache, *new_cache; - void *ret; - - if (unlikely(!p)) - return kmalloc_track_caller(new_size, flags); - - if (unlikely(!new_size)) { - kfree(p); - return NULL; - } - - cache = virt_to_cache(p); - new_cache = __find_general_cachep(new_size, flags); - - /* - * If new size fits in the current cache, bail out. - */ - if (likely(cache == new_cache)) - return (void *)p; - - /* - * We are on the slow-path here so do not use __cache_alloc - * because it bloats kernel text. - */ - ret = kmalloc_track_caller(new_size, flags); - if (ret) { - memcpy(ret, p, min(new_size, ksize(p))); - kfree(p); - } - return ret; -} -EXPORT_SYMBOL(krealloc); - /** * kmem_cache_free - Deallocate an object * @cachep: The cache the allocation was from. @@ -3806,7 +3761,7 @@ void kfree(const void *objp) struct kmem_cache *c; unsigned long flags; - if (unlikely(!objp)) + if (unlikely(ZERO_OR_NULL_PTR(objp))) return; local_irq_save(flags); kfree_debugcheck(objp); @@ -3839,7 +3794,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep) struct array_cache *new_shared; struct array_cache **new_alien = NULL; - for_each_online_node(node) { + for_each_node_state(node, N_NORMAL_MEMORY) { if (use_alien_caches) { new_alien = alloc_alien_cache(node, cachep->limit); @@ -4398,7 +4353,7 @@ static void show_symbol(struct seq_file *m, unsigned long address) { #ifdef CONFIG_KALLSYMS unsigned long offset, size; - char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1]; + char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN]; if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) { seq_printf(m, "%s+%#lx/%#lx", name, offset, size); @@ -4493,7 +4448,8 @@ const struct seq_operations slabstats_op = { */ size_t ksize(const void *objp) { - if (unlikely(objp == NULL)) + BUG_ON(!objp); + if (unlikely(objp == ZERO_SIZE_PTR)) return 0; return obj_size(virt_to_cache(objp));
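
The ZERO_SIZE_PTR hunks above change what zero-length allocations return: __find_general_cachep() now hands back ZERO_SIZE_PTR for a zero size, __do_kmalloc()/__do_kmalloc_node() propagate it, kfree() ignores it via ZERO_OR_NULL_PTR(), and ksize() now BUGs on NULL but returns 0 for the sentinel. A hedged sketch of the resulting caller-visible behaviour, not part of the patch (zero_size_demo() is a made-up name for illustration):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int zero_size_demo(void)
{
        void *p = kmalloc(0, GFP_KERNEL);

        /*
         * A zero-byte request now "succeeds" with a small non-NULL
         * sentinel (ZERO_SIZE_PTR) instead of NULL, so the usual
         * NULL check does not mistake it for allocation failure.
         */
        if (!p)
                return -ENOMEM;         /* not taken for a size of 0 */

        WARN_ON(ksize(p) != 0);         /* ksize(ZERO_SIZE_PTR) is 0 */

        /* The sentinel must never be dereferenced, but kfree() accepts it. */
        kfree(p);
        return 0;
}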
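
A second caller-visible change in the hunks above: kmem_cache_create() drops its long-dead destructor argument, and the new memset in __cache_alloc()/__cache_alloc_node() lets callers request a zeroed object with __GFP_ZERO, which is why the slab.c definition of kmem_cache_zalloc() can go away. A minimal sketch of an adapted caller, not part of the patch (struct foo, foo_cachep, foo_cache_init() and foo_alloc_zeroed() are made-up names):

#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
        int refcount;
        char name[16];
};

static struct kmem_cache *foo_cachep;

static int foo_cache_init(void)
{
        /*
         * Old call sites ended in "..., ctor, NULL);" -- the trailing
         * dtor argument is simply dropped now.
         */
        foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
                                       SLAB_HWCACHE_ALIGN, NULL);
        if (!foo_cachep)
                return -ENOMEM;
        return 0;
}

static struct foo *foo_alloc_zeroed(void)
{
        /*
         * The allocator now clears the object itself when __GFP_ZERO
         * is passed, which is what the removed slab.c version of
         * kmem_cache_zalloc() did by hand with memset().
         */
        return kmem_cache_alloc(foo_cachep, GFP_KERNEL | __GFP_ZERO);
}

Note that the __GFP_ZERO memset runs on every allocation, after any constructor has already initialized the slab's objects, so it only makes sense for caches created without a ctor, as in this sketch.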