X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fslab.c;h=6d65cf4e4b2e03fb1cbd860a1932ce0cd5cb2621;hb=8dab5241d06bfc9ee141ea78c56cde5070d7460d;hp=944b20581f8c421369828fb2090381ad53aa3969;hpb=2ecf042ef530dd0943e41d84b6344f507941af3e;p=linux-2.6 diff --git a/mm/slab.c b/mm/slab.c index 944b20581f..6d65cf4e4b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -409,9 +409,6 @@ struct kmem_cache { /* constructor func */ void (*ctor) (void *, struct kmem_cache *, unsigned long); - /* de-constructor func */ - void (*dtor) (void *, struct kmem_cache *, unsigned long); - /* 5) cache creation/removal */ const char *name; struct list_head next; @@ -571,21 +568,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp) #endif -/* - * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp - * order. - */ -#if defined(CONFIG_LARGE_ALLOCS) -#define MAX_OBJ_ORDER 13 /* up to 32Mb */ -#define MAX_GFP_ORDER 13 /* up to 32Mb */ -#elif defined(CONFIG_MMU) -#define MAX_OBJ_ORDER 5 /* 32 pages */ -#define MAX_GFP_ORDER 5 /* 32 pages */ -#else -#define MAX_OBJ_ORDER 8 /* up to 1Mb */ -#define MAX_GFP_ORDER 8 /* up to 1Mb */ -#endif - /* * Do not go above this order unless 0 objects fit into the slab. */ @@ -792,6 +774,7 @@ static inline struct kmem_cache *__find_general_cachep(size_t size, */ BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); #endif + WARN_ON_ONCE(size == 0); while (size > csizep->cs_size) csizep++; @@ -1911,20 +1894,11 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) slab_error(cachep, "end of a freed object " "was overwritten"); } - if (cachep->dtor && !(cachep->flags & SLAB_POISON)) - (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); } } #else static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) { - if (cachep->dtor) { - int i; - for (i = 0; i < cachep->num; i++) { - void *objp = index_to_obj(cachep, slabp, i); - (cachep->dtor) (objp, cachep, 0); - } - } } #endif @@ -2013,7 +1987,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, size_t left_over = 0; int gfporder; - for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) { + for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { unsigned int num; size_t remainder; @@ -2063,7 +2037,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, return left_over; } -static int setup_cpu_cache(struct kmem_cache *cachep) +static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) { if (g_cpucache_up == FULL) return enable_cpucache(cachep); @@ -2124,7 +2098,7 @@ static int setup_cpu_cache(struct kmem_cache *cachep) * @align: The required alignment for the objects. * @flags: SLAB flags * @ctor: A constructor for the objects. - * @dtor: A destructor for the objects. + * @dtor: A destructor for the objects (not implemented anymore). * * Returns a ptr to the cache on success, NULL on failure. * Cannot be called within a int, but can be interrupted. @@ -2159,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, * Sanity checks... these are all serious usage bugs. 
*/ if (!name || in_interrupt() || (size < BYTES_PER_WORD) || - (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { + size > KMALLOC_MAX_SIZE || dtor) { printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, name); BUG(); @@ -2213,9 +2187,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (flags & SLAB_DESTROY_BY_RCU) BUG_ON(flags & SLAB_POISON); #endif - if (flags & SLAB_DESTROY_BY_RCU) - BUG_ON(dtor); - /* * Always checks flags, a caller might be expecting debug support which * isn't available. @@ -2370,7 +2341,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, BUG_ON(!cachep->slabp_cache); } cachep->ctor = ctor; - cachep->dtor = dtor; cachep->name = name; if (setup_cpu_cache(cachep)) { @@ -2625,7 +2595,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) } static void cache_init_objs(struct kmem_cache *cachep, - struct slab *slabp, unsigned long ctor_flags) + struct slab *slabp) { int i; @@ -2649,7 +2619,7 @@ static void cache_init_objs(struct kmem_cache *cachep, */ if (cachep->ctor && !(cachep->flags & SLAB_POISON)) cachep->ctor(objp + obj_offset(cachep), cachep, - ctor_flags); + 0); if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) @@ -2665,7 +2635,7 @@ static void cache_init_objs(struct kmem_cache *cachep, cachep->buffer_size / PAGE_SIZE, 0); #else if (cachep->ctor) - cachep->ctor(objp, cachep, ctor_flags); + cachep->ctor(objp, cachep, 0); #endif slab_bufctl(slabp)[i] = i + 1; } @@ -2754,7 +2724,6 @@ static int cache_grow(struct kmem_cache *cachep, struct slab *slabp; size_t offset; gfp_t local_flags; - unsigned long ctor_flags; struct kmem_list3 *l3; /* @@ -2763,7 +2732,6 @@ static int cache_grow(struct kmem_cache *cachep, */ BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); - ctor_flags = SLAB_CTOR_CONSTRUCTOR; local_flags = (flags & GFP_LEVEL_MASK); /* Take the l3 list lock to change the colour_next on this node */ check_irq_off(); @@ -2808,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep, slabp->nodeid = nodeid; slab_map_pages(cachep, slabp, objp); - cache_init_objs(cachep, slabp, ctor_flags); + cache_init_objs(cachep, slabp); if (local_flags & __GFP_WAIT) local_irq_disable(); @@ -2835,7 +2803,6 @@ failed: * Perform extra freeing checks: * - detect bad pointers. 
* - POISON/RED_ZONE checking - * - destructor calls, for caches with POISON+dtor */ static void kfree_debugcheck(const void *objp) { @@ -2894,12 +2861,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, BUG_ON(objnr >= cachep->num); BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); - if (cachep->flags & SLAB_POISON && cachep->dtor) { - /* we want to cache poison the object, - * call the destruction callback - */ - cachep->dtor(objp + obj_offset(cachep), cachep, 0); - } #ifdef CONFIG_DEBUG_SLAB_LEAK slab_bufctl(slabp)[objnr] = BUFCTL_FREE; #endif @@ -3099,7 +3060,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, #endif objp += obj_offset(cachep); if (cachep->ctor && cachep->flags & SLAB_POISON) - cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR); + cachep->ctor(objp, cachep, 0); #if ARCH_SLAB_MINALIGN if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", @@ -3578,7 +3539,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp) check_irq_off(); objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); - if (use_alien_caches && cache_free_alien(cachep, objp)) + if (cache_free_alien(cachep, objp)) return; if (likely(ac->avail < ac->limit)) {
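
For context on the new size check and loop bound above: the removed MAX_OBJ_ORDER/MAX_GFP_ORDER table (including its CONFIG_LARGE_ALLOCS case) is superseded by allocator-independent limits from include/linux/slab.h. A sketch of those definitions, assuming the 2.6.22-era header:

	/*
	 * Common limits shared by the slab allocators: the largest kmalloc
	 * size is 32 MB (2^25), or the largest allocatable page order if
	 * that is smaller.
	 */
	#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
					(MAX_ORDER + PAGE_SHIFT - 1) : 25)

	#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
	#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)

With this, the size > KMALLOC_MAX_SIZE check in kmem_cache_create() and the gfporder <= KMALLOC_MAX_ORDER bound in calculate_slab_order() track MAX_ORDER on every configuration instead of a hand-maintained per-config order.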
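
On the API change: kmem_cache_create() still takes a dtor argument in this tree, but passing anything other than NULL now hits the BUG() in the sanity checks, and constructors are always invoked with a flags argument of 0 now that SLAB_CTOR_CONSTRUCTOR is gone. A minimal sketch of an updated caller; struct foo, foo_cache and foo_ctor are hypothetical names, not part of this patch:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	struct foo {
		int bar;
		void *buf;
	};

	static struct kmem_cache *foo_cache;

	/* The flags argument is now always 0, so nothing tests it. */
	static void foo_ctor(void *obj, struct kmem_cache *cachep,
			     unsigned long flags)
	{
		struct foo *f = obj;

		f->bar = 0;
		f->buf = NULL;
	}

	static int __init foo_init(void)
	{
		foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
					      0, SLAB_HWCACHE_ALIGN, foo_ctor,
					      NULL);	/* dtor must now be NULL */
		return foo_cache ? 0 : -ENOMEM;
	}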
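
Since destructors are no longer invoked anywhere (slab_destroy_objs() and cache_free_debugcheck() lose their dtor calls above), any per-object teardown a dtor used to perform must happen explicitly before the object is returned to the cache. Continuing the hypothetical foo example:

	/*
	 * Whatever a destructor once did (no real users remained in-tree)
	 * now happens explicitly before the object goes back to the cache.
	 */
	static void foo_destroy(struct foo *f)
	{
		kfree(f->buf);
		kmem_cache_free(foo_cache, f);
	}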