X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fslab.c;h=2e71a328aa09540a75f83ab289a3da8931920914;hb=c71cd01989b417317f4d22ccf4ce112859671b64;hp=acda7e2d66e4e9f4f6b7513a7c00cf91bf0873c4;hpb=18062a91d2ddc40e19fc674afeb7cad58cfa23ab;p=linux-2.6

diff --git a/mm/slab.c b/mm/slab.c
index acda7e2d66..2e71a328aa 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -409,9 +409,6 @@ struct kmem_cache {
 	/* constructor func */
 	void (*ctor) (void *, struct kmem_cache *, unsigned long);
 
-	/* de-constructor func */
-	void (*dtor) (void *, struct kmem_cache *, unsigned long);
-
 /* 5) cache creation/removal */
 	const char *name;
 	struct list_head next;
@@ -571,21 +568,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-/*
- * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
- * order.
- */
-#if defined(CONFIG_LARGE_ALLOCS)
-#define MAX_OBJ_ORDER	13	/* up to 32Mb */
-#define MAX_GFP_ORDER	13	/* up to 32Mb */
-#elif defined(CONFIG_MMU)
-#define MAX_OBJ_ORDER	5	/* 32 pages */
-#define MAX_GFP_ORDER	5	/* 32 pages */
-#else
-#define MAX_OBJ_ORDER	8	/* up to 1Mb */
-#define MAX_GFP_ORDER	8	/* up to 1Mb */
-#endif
-
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -792,6 +774,7 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	WARN_ON_ONCE(size == 0);
 	while (size > csizep->cs_size)
 		csizep++;
 
@@ -928,12 +911,6 @@ static void next_reap_node(void)
 {
 	int node = __get_cpu_var(reap_node);
 
-	/*
-	 * Also drain per cpu pages on remote zones
-	 */
-	if (node != numa_node_id())
-		drain_node_pages(node);
-
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
@@ -1186,8 +1163,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	int memsize = sizeof(struct kmem_list3);
 
 	switch (action) {
-	case CPU_UP_PREPARE:
+	case CPU_LOCK_ACQUIRE:
 		mutex_lock(&cache_chain_mutex);
+		break;
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		/*
 		 * We need to do this right in the beginning since
 		 * alloc_arraycache's are going to use this list.
@@ -1274,17 +1254,28 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		}
 		break;
 	case CPU_ONLINE:
-		mutex_unlock(&cache_chain_mutex);
+	case CPU_ONLINE_FROZEN:
 		start_cpu_timer(cpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DOWN_PREPARE:
-		mutex_lock(&cache_chain_mutex);
-		break;
-	case CPU_DOWN_FAILED:
-		mutex_unlock(&cache_chain_mutex);
-		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		/*
+		 * Shutdown cache reaper. Note that the cache_chain_mutex is
+		 * held so that if cache_reap() is invoked it cannot do
+		 * anything expensive but will only modify reap_work
+		 * and reschedule the timer.
+		 */
+		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+		/* Now the cache_reaper is guaranteed to be not running. */
+		per_cpu(reap_work, cpu).work.func = NULL;
+		break;
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		start_cpu_timer(cpu);
+		break;
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		/*
 		 * Even if all the cpus of a node are down, we don't free the
 		 * kmem_list3 of any cache. This to avoid a race between
@@ -1296,6 +1287,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		/* fall thru */
 #endif
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
@@ -1354,6 +1346,8 @@ free_array_cache:
 				continue;
 			drain_freelist(cachep, l3, l3->free_objects);
 		}
+		break;
+	case CPU_LOCK_RELEASE:
 		mutex_unlock(&cache_chain_mutex);
 		break;
 	}
@@ -1900,20 +1894,11 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 				slab_error(cachep, "end of a freed object "
 					   "was overwritten");
 		}
-		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
 	}
 }
 #else
 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
-	if (cachep->dtor) {
-		int i;
-		for (i = 0; i < cachep->num; i++) {
-			void *objp = index_to_obj(cachep, slabp, i);
-			(cachep->dtor) (objp, cachep, 0);
-		}
-	}
 }
 #endif
@@ -2002,7 +1987,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	size_t left_over = 0;
 	int gfporder;
 
-	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
+	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
 		unsigned int num;
 		size_t remainder;
@@ -2052,7 +2037,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static int setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 {
 	if (g_cpucache_up == FULL)
 		return enable_cpucache(cachep);
@@ -2113,7 +2098,7 @@
  * @align: The required alignment for the objects.
  * @flags: SLAB flags
  * @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects.
+ * @dtor: A destructor for the objects (not implemented anymore).
  *
  * Returns a ptr to the cache on success, NULL on failure.
  * Cannot be called within a int, but can be interrupted.
@@ -2148,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Sanity checks... these are all serious usage bugs.
 	 */
 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
-	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+	    size > KMALLOC_MAX_SIZE || dtor) {
 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
 				name);
 		BUG();
@@ -2202,9 +2187,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	if (flags & SLAB_DESTROY_BY_RCU)
-		BUG_ON(dtor);
-
 	/*
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
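After the kmem_cache_create() hunks above, the dtor parameter is still present in the prototype but is effectively dead: any non-NULL value now trips the BUG() in the sanity checks, and constructors are invoked with a flags argument of 0 rather than SLAB_CTOR_CONSTRUCTOR. The patch can do this because no remaining in-tree cache registered a destructor. A minimal caller sketch under those assumptions; the cache name, object type, and constructor are illustrative, not part of the patch:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	struct my_obj {			/* hypothetical cached object */
		int refcount;
	};

	/*
	 * The ctor keeps its three-argument signature at this point in
	 * the tree; after this patch its flags argument is always 0.
	 */
	static void my_ctor(void *obj, struct kmem_cache *cachep,
			    unsigned long flags)
	{
		struct my_obj *o = obj;

		o->refcount = 0;
	}

	static struct kmem_cache *my_cache;

	static int __init my_module_init(void)
	{
		/* The dtor argument must now be NULL; passing one hits BUG(). */
		my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
					     0, SLAB_HWCACHE_ALIGN,
					     my_ctor, NULL);
		return my_cache ? 0 : -ENOMEM;
	}

Callers that previously relied on a destructor have to release per-object resources explicitly before kmem_cache_free() instead.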
@@ -2359,7 +2341,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG_ON(!cachep->slabp_cache);
 	}
 	cachep->ctor = ctor;
-	cachep->dtor = dtor;
 	cachep->name = name;
 
 	if (setup_cpu_cache(cachep)) {
@@ -2614,7 +2595,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct slab *slabp, unsigned long ctor_flags)
+			    struct slab *slabp)
 {
 	int i;
 
@@ -2638,7 +2619,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
 			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     ctor_flags);
+				     0);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2654,7 +2635,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					   cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, ctor_flags);
+			cachep->ctor(objp, cachep, 0);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -2743,7 +2724,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	struct slab *slabp;
 	size_t offset;
 	gfp_t local_flags;
-	unsigned long ctor_flags;
 	struct kmem_list3 *l3;
 
 	/*
@@ -2752,7 +2732,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	 */
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
-	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
@@ -2797,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
-	cache_init_objs(cachep, slabp, ctor_flags);
+	cache_init_objs(cachep, slabp);
 
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -2824,7 +2803,6 @@ failed:
  * Perform extra freeing checks:
  * - detect bad pointers.
  * - POISON/RED_ZONE checking
- * - destructor calls, for caches with POISON+dtor
  */
 static void kfree_debugcheck(const void *objp)
 {
@@ -2883,12 +2861,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-	if (cachep->flags & SLAB_POISON && cachep->dtor) {
-		/* we want to cache poison the object,
-		 * call the destruction callback
-		 */
-		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
-	}
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
 #endif
@@ -3088,7 +3060,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+		cachep->ctor(objp, cachep, 0);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
@@ -3742,7 +3714,6 @@ EXPORT_SYMBOL(__kmalloc);
 
 /**
  * krealloc - reallocate memory. The contents will remain unchanged.
- *
  * @p: object to reallocate memory for.
  * @new_size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
@@ -4140,7 +4111,6 @@ next:
 	check_irq_on();
 	mutex_unlock(&cache_chain_mutex);
 	next_reap_node();
-	refresh_cpu_vm_stats(smp_processor_id());
 out:
 	/* Set up the next iteration */
 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
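Taken together, the hotplug hunks above leave cpuup_callback() with the overall shape sketched below: the cpu hotplug core now sends CPU_LOCK_ACQUIRE before and CPU_LOCK_RELEASE after each state transition, so cache_chain_mutex brackets the whole operation instead of being taken and dropped inside individual cases, and every action also handles its _FROZEN (suspend/resume) twin identically. This is a condensed sketch, not a verbatim copy of the function; the allocation, teardown, and error paths are elided:

	static int __cpuinit cpuup_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
	{
		long cpu = (long)hcpu;

		switch (action) {
		case CPU_LOCK_ACQUIRE:		/* sent before any transition */
			mutex_lock(&cache_chain_mutex);
			break;
		case CPU_UP_PREPARE:
		case CPU_UP_PREPARE_FROZEN:
			/* set up per-cpu array caches and kmem_list3s */
			break;
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			start_cpu_timer(cpu);	/* arm the cache reaper */
			break;
	#ifdef CONFIG_HOTPLUG_CPU
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			/* stop the reaper; cache_chain_mutex is already held */
			cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
			per_cpu(reap_work, cpu).work.func = NULL;
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			start_cpu_timer(cpu);	/* the cpu stays up after all */
			break;
		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			/* fall thru */
	#endif
		case CPU_UP_CANCELED:
		case CPU_UP_CANCELED_FROZEN:
			/* free array caches, drain freelists */
			break;
		case CPU_LOCK_RELEASE:		/* sent after the transition */
			mutex_unlock(&cache_chain_mutex);
			break;
		}
		return NOTIFY_OK;
	}

Holding the mutex across the transition is what makes the CPU_DOWN_PREPARE case safe: a concurrently running cache_reap() fails its mutex_trylock() on cache_chain_mutex and merely reschedules itself, so cancelling the delayed work here cannot race with an expensive reap.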