X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fslub.c;h=fa28b16236442d80408c792f0140c26015846eca;hb=06b32f3ab6df4c7489729f94bdc7093c72681d4b;hp=e7ad123bb6a745be4ac1a62dd6f23152a3ff0536;hpb=a35afb830f8d71ec211531aeb9a621b09a2efb39;p=linux-2.6 diff --git a/mm/slub.c b/mm/slub.c index e7ad123bb6..fa28b16236 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -742,6 +742,22 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search) return search == NULL; } +static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) +{ + if (s->flags & SLAB_TRACE) { + printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", + s->name, + alloc ? "alloc" : "free", + object, page->inuse, + page->freelist); + + if (!alloc) + print_section("Object", (void *)object, s->objsize); + + dump_stack(); + } +} + /* * Tracking of fully allocated slabs for debugging purposes. */ @@ -766,8 +782,18 @@ static void remove_full(struct kmem_cache *s, struct page *page) spin_unlock(&n->list_lock); } -static int alloc_object_checks(struct kmem_cache *s, struct page *page, - void *object) +static void setup_object_debug(struct kmem_cache *s, struct page *page, + void *object) +{ + if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) + return; + + init_object(s, object, 0); + init_tracking(s, object); +} + +static int alloc_debug_processing(struct kmem_cache *s, struct page *page, + void *object, void *addr) { if (!check_slab(s, page)) goto bad; @@ -782,13 +808,16 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page, goto bad; } - if (!object) - return 1; - - if (!check_object(s, page, object, 0)) + if (object && !check_object(s, page, object, 0)) goto bad; + /* Success perform special debug activities for allocs */ + if (s->flags & SLAB_STORE_USER) + set_track(s, object, TRACK_ALLOC, addr); + trace(s, page, object, 1); + init_object(s, object, 1); return 1; + bad: if (PageSlab(page)) { /* @@ -806,8 +835,8 @@ bad: return 0; } -static int free_object_checks(struct kmem_cache *s, struct page *page, - void *object) +static int free_debug_processing(struct kmem_cache *s, struct page *page, + void *object, void *addr) { if (!check_slab(s, page)) goto fail; @@ -841,29 +870,22 @@ static int free_object_checks(struct kmem_cache *s, struct page *page, "to slab %s", object, page->slab->name); goto fail; } + + /* Special debug activities for freeing objects */ + if (!SlabFrozen(page) && !page->freelist) + remove_full(s, page); + if (s->flags & SLAB_STORE_USER) + set_track(s, object, TRACK_FREE, addr); + trace(s, page, object, 0); + init_object(s, object, 0); return 1; + fail: printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", s->name, page, object); return 0; } -static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) -{ - if (s->flags & SLAB_TRACE) { - printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", - s->name, - alloc ? "alloc" : "free", - object, page->inuse, - page->freelist); - - if (!alloc) - print_section("Object", (void *)object, s->objsize); - - dump_stack(); - } -} - static int __init setup_slub_debug(char *str) { if (!str || *str != '=') @@ -917,7 +939,7 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s) * Debugging or ctor may create a need to move the free * pointer. Fail if this happens. 
*/ - if (s->size >= 65535 * sizeof(void *)) { + if (s->objsize >= 65535 * sizeof(void *)) { BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); BUG_ON(s->ctor); @@ -932,26 +954,20 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s) s->flags |= slub_debug; } #else +static inline void setup_object_debug(struct kmem_cache *s, + struct page *page, void *object) {} -static inline int alloc_object_checks(struct kmem_cache *s, - struct page *page, void *object) { return 0; } +static inline int alloc_debug_processing(struct kmem_cache *s, + struct page *page, void *object, void *addr) { return 0; } -static inline int free_object_checks(struct kmem_cache *s, - struct page *page, void *object) { return 0; } +static inline int free_debug_processing(struct kmem_cache *s, + struct page *page, void *object, void *addr) { return 0; } -static inline void add_full(struct kmem_cache_node *n, struct page *page) {} -static inline void remove_full(struct kmem_cache *s, struct page *page) {} -static inline void trace(struct kmem_cache *s, struct page *page, - void *object, int alloc) {} -static inline void init_object(struct kmem_cache *s, - void *object, int active) {} -static inline void init_tracking(struct kmem_cache *s, void *object) {} static inline int slab_pad_check(struct kmem_cache *s, struct page *page) { return 1; } static inline int check_object(struct kmem_cache *s, struct page *page, void *object, int active) { return 1; } -static inline void set_track(struct kmem_cache *s, void *object, - enum track_item alloc, void *addr) {} +static inline void add_full(struct kmem_cache_node *n, struct page *page) {} static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {} #define slub_debug 0 #endif @@ -988,11 +1004,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) static void setup_object(struct kmem_cache *s, struct page *page, void *object) { - if (SlabDebug(page)) { - init_object(s, object, 0); - init_tracking(s, object); - } - + setup_object_debug(s, page, object); if (unlikely(s->ctor)) s->ctor(object, s, 0); } @@ -1449,12 +1461,8 @@ new_slab: return NULL; debug: object = page->freelist; - if (!alloc_object_checks(s, page, object)) + if (!alloc_debug_processing(s, page, object, addr)) goto another_slab; - if (s->flags & SLAB_STORE_USER) - set_track(s, object, TRACK_ALLOC, addr); - trace(s, page, object, 1); - init_object(s, object, 1); page->inuse++; page->freelist = object[page->offset]; @@ -1561,14 +1569,8 @@ slab_empty: return; debug: - if (!free_object_checks(s, page, x)) + if (!free_debug_processing(s, page, x, addr)) goto out_unlock; - if (!SlabFrozen(page) && !page->freelist) - remove_full(s, page); - if (s->flags & SLAB_STORE_USER) - set_track(s, x, TRACK_FREE, addr); - trace(s, page, object, 0); - init_object(s, object, 0); goto checks_ok; } @@ -1805,7 +1807,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag page->freelist = get_freepointer(kmalloc_caches, n); page->inuse++; kmalloc_caches->node[node] = n; - init_object(kmalloc_caches, n, 1); + setup_object_debug(kmalloc_caches, page, n); init_kmem_cache_node(n); atomic_long_inc(&n->nr_slabs); add_partial(n, page); @@ -1915,7 +1917,6 @@ static int calculate_sizes(struct kmem_cache *s) */ s->inuse = size; -#ifdef CONFIG_SLUB_DEBUG if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || s->ctor)) { /* @@ -1930,6 +1931,7 @@ static int calculate_sizes(struct kmem_cache *s) size += sizeof(void *); } +#ifdef 
CONFIG_SLUB_DEBUG if (flags & SLAB_STORE_USER) /* * Need to store information about allocs and frees after @@ -2239,7 +2241,7 @@ void *__kmalloc(size_t size, gfp_t flags) if (s) return slab_alloc(s, flags, -1, __builtin_return_address(0)); - return NULL; + return ZERO_SIZE_PTR; } EXPORT_SYMBOL(__kmalloc); @@ -2250,16 +2252,20 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) if (s) return slab_alloc(s, flags, node, __builtin_return_address(0)); - return NULL; + return ZERO_SIZE_PTR; } EXPORT_SYMBOL(__kmalloc_node); #endif size_t ksize(const void *object) { - struct page *page = get_object_page(object); + struct page *page; struct kmem_cache *s; + if (object == ZERO_SIZE_PTR) + return 0; + + page = get_object_page(object); BUG_ON(!page); s = page->slab; BUG_ON(!s); @@ -2291,7 +2297,13 @@ void kfree(const void *x) struct kmem_cache *s; struct page *page; - if (!x) + /* + * This has to be an unsigned comparison. According to Linus + * some gcc version treat a pointer as a signed entity. Then + * this comparison would be true for all "negative" pointers + * (which would cover the whole upper half of the address space). + */ + if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR) return; page = virt_to_head_page(x); @@ -2396,12 +2408,12 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) void *ret; size_t ks; - if (unlikely(!p)) + if (unlikely(!p || p == ZERO_SIZE_PTR)) return kmalloc(new_size, flags); if (unlikely(!new_size)) { kfree(p); - return NULL; + return ZERO_SIZE_PTR; } ks = ksize(p); @@ -2424,6 +2436,7 @@ EXPORT_SYMBOL(krealloc); void __init kmem_cache_init(void) { int i; + int caches = 0; #ifdef CONFIG_NUMA /* @@ -2433,20 +2446,30 @@ void __init kmem_cache_init(void) */ create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", sizeof(struct kmem_cache_node), GFP_KERNEL); + kmalloc_caches[0].refcount = -1; + caches++; #endif /* Able to allocate the per node structures */ slab_state = PARTIAL; /* Caches that are not of the two-to-the-power-of size */ - create_kmalloc_cache(&kmalloc_caches[1], + if (KMALLOC_MIN_SIZE <= 64) { + create_kmalloc_cache(&kmalloc_caches[1], "kmalloc-96", 96, GFP_KERNEL); - create_kmalloc_cache(&kmalloc_caches[2], + caches++; + } + if (KMALLOC_MIN_SIZE <= 128) { + create_kmalloc_cache(&kmalloc_caches[2], "kmalloc-192", 192, GFP_KERNEL); + caches++; + } - for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) + for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { create_kmalloc_cache(&kmalloc_caches[i], "kmalloc", 1 << i, GFP_KERNEL); + caches++; + } slab_state = UP; @@ -2463,8 +2486,8 @@ void __init kmem_cache_init(void) nr_cpu_ids * sizeof(struct page *); printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," - " Processors=%d, Nodes=%d\n", - KMALLOC_SHIFT_HIGH, cache_line_size(), + " CPUs=%d, Nodes=%d\n", + caches, cache_line_size(), slub_min_order, slub_max_order, slub_min_objects, nr_cpu_ids, nr_node_ids); } @@ -2480,6 +2503,12 @@ static int slab_unmergeable(struct kmem_cache *s) if (s->ctor) return 1; + /* + * We may have set a slab to be unmergeable during bootstrap. + */ + if (s->refcount < 0) + return 1; + return 0; } @@ -2598,6 +2627,19 @@ static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu) up_read(&slub_lock); } +/* + * Version of __flush_cpu_slab for the case that interrupts + * are enabled. 
+ */ +static void cpu_slab_flush(struct kmem_cache *s, int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + __flush_cpu_slab(s, cpu); + local_irq_restore(flags); +} + /* * Use the cpu notifier to insure that the cpu slabs are flushed when * necessary. @@ -2612,7 +2654,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: - for_all_slabs(__flush_cpu_slab, cpu); + for_all_slabs(cpu_slab_flush, cpu); break; default: break; @@ -2630,7 +2672,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) struct kmem_cache *s = get_slab(size, gfpflags); if (!s) - return NULL; + return ZERO_SIZE_PTR; return slab_alloc(s, gfpflags, -1, caller); } @@ -2641,7 +2683,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, struct kmem_cache *s = get_slab(size, gfpflags); if (!s) - return NULL; + return ZERO_SIZE_PTR; return slab_alloc(s, gfpflags, node, caller); } @@ -2835,7 +2877,7 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max) order = get_order(sizeof(struct location) * max); - l = (void *)__get_free_pages(GFP_KERNEL, order); + l = (void *)__get_free_pages(GFP_ATOMIC, order); if (!l) return 0;
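Note on the ZERO_SIZE_PTR changes above: with this patch, __kmalloc(), __kmalloc_node(), krealloc() and the *_track_caller variants return ZERO_SIZE_PTR rather than NULL for zero-size requests, and kfree()/ksize() accept that sentinel. The sketch below is an illustrative userspace model only, not kernel code: toy_kmalloc()/toy_kfree() are hypothetical stand-ins, and ZERO_SIZE_PTR mirrors the small non-NULL constant defined in include/linux/slab.h (historically (void *)16). It shows why the single unsigned comparison in the patched kfree() covers both NULL and the sentinel in one test.

/*
 * Illustrative sketch of the ZERO_SIZE_PTR idea; assumes ZERO_SIZE_PTR
 * is a small non-NULL sentinel as in include/linux/slab.h.  Not kernel
 * code -- toy_kmalloc()/toy_kfree() exist only for this example.
 */
#include <stdio.h>
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)

static void *toy_kmalloc(size_t size)
{
	if (size == 0)
		return ZERO_SIZE_PTR;	/* unique, non-NULL, never dereferenced */
	return malloc(size);
}

static void toy_kfree(const void *x)
{
	/*
	 * Unsigned comparison, as in the patched kfree(): it catches both
	 * NULL (0) and ZERO_SIZE_PTR in a single test, and does not
	 * misfire on "negative" pointers in the upper half of the
	 * address space the way a signed comparison could.
	 */
	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
		return;
	free((void *)x);
}

int main(void)
{
	void *p = toy_kmalloc(0);

	printf("zero-size alloc -> %p (non-NULL, so failure checks do not trip)\n", p);
	toy_kfree(p);		/* no-op, like kfree(ZERO_SIZE_PTR) */
	toy_kfree(NULL);	/* also a no-op */
	return 0;
}

Callers keep using the normal allocation and free interfaces unchanged; the only visible difference after the patch is that a zero-byte allocation no longer looks like an allocation failure, while any attempt to dereference the returned sentinel still faults immediately.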