X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fslub.c;h=07492a83b46e12018a9eb2738a1d2d073df1d67e;hb=b345970905e34c1b632fe4d80e2af14c7de99b45;hp=beac34a5e4fdee52a9d1019c40f669103bb6c881;hpb=be7b3fbcef34452127bed93632b8e788f685d70e;p=linux-2.6 diff --git a/mm/slub.c b/mm/slub.c index beac34a5e4..07492a83b4 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -66,11 +66,11 @@ * SLUB assigns one slab for allocation to each processor. * Allocations only occur from these slabs called cpu slabs. * - * Slabs with free elements are kept on a partial list. - * There is no list for full slabs. If an object in a full slab is + * Slabs with free elements are kept on a partial list and during regular + * operations no list for full slabs is used. If an object in a full slab is * freed then the slab will show up again on the partial lists. - * Otherwise there is no need to track full slabs unless we have to - * track full slabs for debugging purposes. + * We track full slabs for debugging purposes though because otherwise we + * cannot scan all objects. * * Slabs are freed when they become empty. Teardown and setup is * minimal so we rely on the page allocators per cpu caches for @@ -92,8 +92,8 @@ * * - The per cpu array is updated for each new slab and and is a remote * cacheline for most nodes. This could become a bouncing cacheline given - * enough frequent updates. There are 16 pointers in a cacheline.so at - * max 16 cpus could compete. Likely okay. + * enough frequent updates. There are 16 pointers in a cacheline, so at + * max 16 cpus could compete for the cacheline which may be okay. * * - Support PAGE_ALLOC_DEBUG. Should be easy to do. * @@ -137,6 +137,7 @@ #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \ SLAB_POISON | SLAB_STORE_USER) + /* * Set of flags that will prevent slab merging */ @@ -171,7 +172,7 @@ static struct notifier_block slab_notifier; static enum { DOWN, /* No slab functionality available */ PARTIAL, /* kmem_cache_open() works but kmalloc does not */ - UP, /* Everything works */ + UP, /* Everything works but does not show up in sysfs */ SYSFS /* Sysfs up */ } slab_state = DOWN; @@ -207,6 +208,38 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) #endif } +/* + * Slow version of get and set free pointer. + * + * This version requires touching the cache lines of kmem_cache which + * we avoid to do in the fast alloc free paths. There we obtain the offset + * from the page struct. + */ +static inline void *get_freepointer(struct kmem_cache *s, void *object) +{ + return *(void **)(object + s->offset); +} + +static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) +{ + *(void **)(object + s->offset) = fp; +} + +/* Loop over all objects in a slab */ +#define for_each_object(__p, __s, __addr) \ + for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\ + __p += (__s)->size) + +/* Scan freelist */ +#define for_each_free_object(__p, __s, __free) \ + for (__p = (__free); __p; __p = get_freepointer((__s), __p)) + +/* Determine object index from a given position */ +static inline int slab_index(void *p, struct kmem_cache *s, void *addr) +{ + return (p - addr) / s->size; +} + /* * Object debugging */ @@ -242,23 +275,6 @@ static void print_section(char *text, u8 *addr, unsigned int length) } } -/* - * Slow version of get and set free pointer. - * - * This requires touching the cache lines of kmem_cache. - * The offset can also be obtained from the page. 
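The get_freepointer()/set_freepointer() helpers and the for_each_object()/for_each_free_object()/slab_index() iterators introduced above are easiest to picture outside the kernel. The following stand-alone sketch (illustrative only; the demo_* names are not kernel code) carves a buffer into fixed size objects, threads a freelist through them at a configurable offset and then walks that list the way the macros do.

/* Build: cc -o freelist_demo freelist_demo.c */
#include <stdio.h>
#include <stdlib.h>

struct demo_cache {
        size_t size;    /* full object size including metadata */
        size_t offset;  /* where the free pointer lives inside a free object */
        int objects;    /* objects per slab */
};

/*
 * Same idea as get_freepointer()/set_freepointer(): the link is stored
 * inside the free object itself, at cache->offset.
 */
static void *demo_get_freepointer(struct demo_cache *s, void *object)
{
        return *(void **)((char *)object + s->offset);
}

static void demo_set_freepointer(struct demo_cache *s, void *object, void *fp)
{
        *(void **)((char *)object + s->offset) = fp;
}

/* Counterpart of slab_index(): position of an object within the slab. */
static int demo_slab_index(struct demo_cache *s, void *addr, void *p)
{
        return (int)(((char *)p - (char *)addr) / s->size);
}

int main(void)
{
        struct demo_cache s = { .size = 64, .offset = 0, .objects = 8 };
        char *slab = calloc(s.objects, s.size);
        char *p, *last;
        void *freelist, *q;

        /*
         * Thread a freelist through all objects, as new_slab() does:
         * each object points at the next one, the last points at NULL.
         */
        last = slab;
        for (p = slab + s.size; p < slab + s.objects * s.size; p += s.size) {
                demo_set_freepointer(&s, last, p);
                last = p;
        }
        demo_set_freepointer(&s, last, NULL);
        freelist = slab;

        /* Walk it the way for_each_free_object() does. */
        for (q = freelist; q; q = demo_get_freepointer(&s, q))
                printf("free object at index %d\n", demo_slab_index(&s, slab, q));

        free(slab);
        return 0;
}

Keeping the link inside the free object is what lets a free object carry no extra space overhead; the offset only moves away from zero when the object has to survive the free untouched, as the layout comment later in this patch describes.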
In that - * case it is in the cacheline that we already need to touch. - */ -static void *get_freepointer(struct kmem_cache *s, void *object) -{ - return *(void **)(object + s->offset); -} - -static void set_freepointer(struct kmem_cache *s, void *object, void *fp) -{ - *(void **)(object + s->offset) = fp; -} - /* * Tracking user of a slab. */ @@ -405,9 +421,8 @@ static int check_bytes(u8 *start, unsigned int value, unsigned int bytes) return 1; } - -static int check_valid_pointer(struct kmem_cache *s, struct page *page, - void *object) +static inline int check_valid_pointer(struct kmem_cache *s, + struct page *page, const void *object) { void *base; @@ -430,26 +445,34 @@ static int check_valid_pointer(struct kmem_cache *s, struct page *page, * Bytes of the object to be managed. * If the freepointer may overlay the object then the free * pointer is the first word of the object. + * * Poisoning uses 0x6b (POISON_FREE) and the last byte is * 0xa5 (POISON_END) * * object + s->objsize * Padding to reach word boundary. This is also used for Redzoning. - * Padding is extended to word size if Redzoning is enabled - * and objsize == inuse. + * Padding is extended by another word if Redzoning is enabled and + * objsize == inuse. + * * We fill with 0xbb (RED_INACTIVE) for inactive objects and with * 0xcc (RED_ACTIVE) for objects in use. * * object + s->inuse + * Meta data starts here. + * * A. Free pointer (if we cannot overwrite object on free) * B. Tracking data for SLAB_STORE_USER - * C. Padding to reach required alignment boundary - * Padding is done using 0x5a (POISON_INUSE) + * C. Padding to reach required alignment boundary or at mininum + * one word if debuggin is on to be able to detect writes + * before the word boundary. + * + * Padding is done using 0x5a (POISON_INUSE) * * object + s->size + * Nothing is used beyond s->size. * - * If slabcaches are merged then the objsize and inuse boundaries are to - * be ignored. And therefore no slab options that rely on these boundaries + * If slabcaches are merged then the objsize and inuse boundaries are mostly + * ignored. And therefore no slab options that rely on these boundaries * may be used with merged slabcaches. */ @@ -575,8 +598,7 @@ static int check_object(struct kmem_cache *s, struct page *page, /* * No choice but to zap it and thus loose the remainder * of the free objects in this slab. May cause - * another error because the object count maybe - * wrong now. + * another error because the object count is now wrong. */ set_freepointer(s, p, NULL); return 0; @@ -616,9 +638,8 @@ static int check_slab(struct kmem_cache *s, struct page *page) } /* - * Determine if a certain object on a page is on the freelist and - * therefore free. Must hold the slab lock for cpu slabs to - * guarantee that the chains are consistent. + * Determine if a certain object on a page is on the freelist. Must hold the + * slab lock to guarantee that the chains are in a consistent state. */ static int on_freelist(struct kmem_cache *s, struct page *page, void *search) { @@ -664,7 +685,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search) } /* - * Tracking of fully allocated slabs for debugging + * Tracking of fully allocated slabs for debugging purposes. */ static void add_full(struct kmem_cache_node *n, struct page *page) { @@ -715,7 +736,7 @@ bad: /* * If this is a slab page then lets do the best we can * to avoid issues in the future. Marking all objects - * as used avoids touching the remainder. 
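The layout rules spelled out above can be condensed into a small calculation. The sketch below is a simplified user-space model of how objsize grows into inuse, offset and size; it follows the description rather than the exact calculate_sizes() code further down, and the 24 byte track record size as well as the single must_relocate_fp switch are assumptions made purely for illustration.

#include <stdio.h>
#include <stddef.h>

#define WORD            sizeof(void *)
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

struct layout {
        size_t objsize; /* bytes handed to the user */
        size_t inuse;   /* end of payload plus optional redzone word */
        size_t offset;  /* where the free pointer is kept */
        size_t size;    /* full per object footprint */
};

/*
 * Model of the layout described above: payload, an extra word for the
 * Redzone when the payload is already word aligned, then metadata
 * (relocated free pointer, optional tracking data), then padding up to
 * the requested alignment.
 */
static struct layout demo_layout(size_t objsize, int red_zone,
                                 int store_user, int must_relocate_fp,
                                 size_t align)
{
        struct layout l = { .objsize = objsize };
        size_t size = ALIGN_UP(objsize, WORD);

        if (red_zone && size == objsize)
                size += WORD;           /* room for Redzone bytes */
        l.inuse = size;

        if (must_relocate_fp) {
                l.offset = size;        /* free pointer lives after the payload */
                size += WORD;
        } else {
                l.offset = 0;           /* free pointer overlays the object */
        }
        if (store_user)
                size += 2 * 24;         /* alloc + free track records (assumed size) */

        l.size = ALIGN_UP(size, align);
        return l;
}

int main(void)
{
        struct layout l = demo_layout(30, 1, 1, 1, WORD);

        printf("objsize=%zu inuse=%zu offset=%zu size=%zu\n",
               l.objsize, l.inuse, l.offset, l.size);
        return 0;
}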
+ * as used avoids touching the remaining objects. */ printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n", s->name, page); @@ -846,7 +867,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) memset(start, POISON_INUSE, PAGE_SIZE << s->order); last = start; - for (p = start + s->size; p < end; p += s->size) { + for_each_object(p, s, start) { setup_object(s, page, last); set_freepointer(s, last, p); last = p; @@ -867,12 +888,10 @@ static void __free_slab(struct kmem_cache *s, struct page *page) int pages = 1 << s->order; if (unlikely(PageError(page) || s->dtor)) { - void *start = page_address(page); - void *end = start + (pages << PAGE_SHIFT); void *p; slab_pad_check(s, page); - for (p = start; p <= end - s->size; p += s->size) { + for_each_object(p, s, page_address(page)) { if (s->dtor) s->dtor(p, s, 0); check_object(s, page, p, 0); @@ -971,9 +990,9 @@ static void remove_partial(struct kmem_cache *s, } /* - * Lock page and remove it from the partial list + * Lock slab and remove from the partial list. * - * Must hold list_lock + * Must hold list_lock. */ static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page) { @@ -986,7 +1005,7 @@ static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page) } /* - * Try to get a partial slab from a specific node + * Try to allocate a partial slab from a specific node. */ static struct page *get_partial_node(struct kmem_cache_node *n) { @@ -995,7 +1014,8 @@ static struct page *get_partial_node(struct kmem_cache_node *n) /* * Racy check. If we mistakenly see no partial slabs then we * just allocate an empty slab. If we mistakenly try to get a - * partial slab then get_partials() will return NULL. + * partial slab and there is none available then get_partials() + * will return NULL. */ if (!n || !n->nr_partial) return NULL; @@ -1011,8 +1031,7 @@ out: } /* - * Get a page from somewhere. Search in increasing NUMA - * distances. + * Get a page from somewhere. Search in increasing NUMA distances. */ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) { @@ -1022,24 +1041,22 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) struct page *page; /* - * The defrag ratio allows to configure the tradeoffs between - * inter node defragmentation and node local allocations. - * A lower defrag_ratio increases the tendency to do local - * allocations instead of scanning throught the partial - * lists on other nodes. - * - * If defrag_ratio is set to 0 then kmalloc() always - * returns node local objects. If its higher then kmalloc() - * may return off node objects in order to avoid fragmentation. + * The defrag ratio allows a configuration of the tradeoffs between + * inter node defragmentation and node local allocations. A lower + * defrag_ratio increases the tendency to do local allocations + * instead of attempting to obtain partial slabs from other nodes. * - * A higher ratio means slabs may be taken from other nodes - * thus reducing the number of partial slabs on those nodes. + * If the defrag_ratio is set to 0 then kmalloc() always + * returns node local objects. If the ratio is higher then kmalloc() + * may return off node objects because partial slabs are obtained + * from other nodes and filled up. * * If /sys/slab/xx/defrag_ratio is set to 100 (which makes - * defrag_ratio = 1000) then every (well almost) allocation - * will first attempt to defrag slab caches on other nodes. 
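The defrag_ratio check that follows this comment is a cheap probabilistic gate: the sysfs value N is stored internally as 10 * N, and a pseudo random number modulo 1024 decides whether this particular allocation is willing to pull partial slabs off remote nodes. A user-space sketch of that gate, with rand() standing in for get_cycles():

#include <stdio.h>
#include <stdlib.h>

/*
 * Mirror of the check in get_any_partial():
 *      if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
 *              return NULL;
 * Returns 1 when the allocation should go looking for partial slabs on
 * other nodes, 0 when it should stay node local.
 */
static int search_remote_nodes(int defrag_ratio, unsigned int pseudo_random)
{
        if (!defrag_ratio || pseudo_random % 1024 > defrag_ratio)
                return 0;
        return 1;
}

int main(void)
{
        /* /sys/slab/<cache>/defrag_ratio = 100 -> internal ratio 1000 */
        int ratio = 10 * 100;
        int remote = 0, i;

        srand(1);
        for (i = 0; i < 100000; i++)
                remote += search_remote_nodes(ratio, (unsigned int)rand());

        /* Roughly (ratio + 1) / 1024 of the allocations accept remote slabs. */
        printf("remote searches: %d of 100000 (~%.1f%%)\n",
               remote, 100.0 * remote / 100000);
        return 0;
}

With the internal ratio at 1000 almost every allocation is willing to search remote nodes, while small ratios keep allocations node local most of the time.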
This - * means scanning over all nodes to look for partial slabs which - * may be a bit expensive to do on every slab allocation. + * defrag_ratio = 1000) then every (well almost) allocation will + * first attempt to defrag slab caches on other nodes. This means + * scanning over all nodes to look for partial slabs which may be + * expensive if we do it every time we are trying to find a slab + * with available objects. */ if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio) return NULL; @@ -1099,11 +1116,12 @@ static void putback_slab(struct kmem_cache *s, struct page *page) } else { if (n->nr_partial < MIN_PARTIAL) { /* - * Adding an empty page to the partial slabs in order - * to avoid page allocator overhead. This page needs to - * come after all the others that are not fully empty - * in order to make sure that we do maximum - * defragmentation. + * Adding an empty slab to the partial slabs in order + * to avoid page allocator overhead. This slab needs + * to come after the other slabs with objects in + * order to fill them up. That way the size of the + * partial list stays small. kmem_cache_shrink can + * reclaim empty slabs from the partial list. */ add_partial_tail(n, page); slab_unlock(page); @@ -1171,7 +1189,7 @@ static void flush_all(struct kmem_cache *s) * 1. The page struct * 2. The first cacheline of the object to be allocated. * - * The only cache lines that are read (apart from code) is the + * The only other cache lines that are read (apart from code) is the * per cpu array in the kmem_cache struct. * * Fastpath is not possible if we need to get a new slab or have @@ -1225,9 +1243,11 @@ have_slab: cpu = smp_processor_id(); if (s->cpu_slab[cpu]) { /* - * Someone else populated the cpu_slab while we enabled - * interrupts, or we have got scheduled on another cpu. - * The page may not be on the requested node. + * Someone else populated the cpu_slab while we + * enabled interrupts, or we have gotten scheduled + * on another cpu. The page may not be on the + * requested node even if __GFP_THISNODE was + * specified. So we need to recheck. */ if (node == -1 || page_to_nid(s->cpu_slab[cpu]) == node) { @@ -1240,7 +1260,7 @@ have_slab: slab_lock(page); goto redo; } - /* Dump the current slab */ + /* New slab does not fit our expectations */ flush_slab(s, s->cpu_slab[cpu], cpu); } slab_lock(page); @@ -1281,7 +1301,8 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); * The fastpath only writes the cacheline of the page struct and the first * cacheline of the object. * - * No special cachelines need to be read + * We read the cpu_slab cacheline to check if the slab is the per cpu + * slab for this processor. */ static void slab_free(struct kmem_cache *s, struct page *page, void *x, void *addr) @@ -1326,7 +1347,7 @@ out_unlock: slab_empty: if (prior) /* - * Slab on the partial list. + * Slab still on the partial list. */ remove_partial(s, page); @@ -1375,22 +1396,16 @@ static struct page *get_object_page(const void *x) } /* - * kmem_cache_open produces objects aligned at "size" and the first object - * is placed at offset 0 in the slab (We have no metainformation on the - * slab, all slabs are in essence "off slab"). - * - * In order to get the desired alignment one just needs to align the - * size. + * Object placement in a slab is made very easy because we always start at + * offset 0. If we tune the size of the object to the alignment then we can + * get the required alignment by putting one properly sized object after + * another. 
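The add_partial_tail() comment above is one branch of a small placement policy for a cpu slab that is being given back. A hedged sketch of the whole decision follows; the value of MIN_PARTIAL and the exact treatment of completely full slabs are not part of this hunk, so they are modelled loosely here.

#include <stdio.h>

#define DEMO_MIN_PARTIAL 2      /* assumed value, for illustration only */

enum placement {
        PARTIAL_HEAD,   /* has free objects: fill it up before emptier slabs */
        FULLY_USED,     /* no free objects: not kept on the partial list */
        PARTIAL_TAIL,   /* empty, kept as a cheap reserve, used last */
        DISCARD         /* empty and enough partial slabs around: free the page */
};

/*
 * inuse:      objects currently allocated from the slab
 * nr_free:    objects still available in the slab
 * nr_partial: partial slabs the node already has
 */
static enum placement place_deactivated_slab(int inuse, int nr_free, int nr_partial)
{
        if (inuse && nr_free)
                return PARTIAL_HEAD;
        if (inuse)
                return FULLY_USED;
        if (nr_partial < DEMO_MIN_PARTIAL)
                return PARTIAL_TAIL;
        return DISCARD;
}

int main(void)
{
        printf("%d\n", place_deactivated_slab(3, 5, 4));        /* PARTIAL_HEAD */
        printf("%d\n", place_deactivated_slab(8, 0, 4));        /* FULLY_USED */
        printf("%d\n", place_deactivated_slab(0, 8, 1));        /* PARTIAL_TAIL */
        printf("%d\n", place_deactivated_slab(0, 8, 4));        /* DISCARD */
        return 0;
}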
* * Notice that the allocation order determines the sizes of the per cpu * caches. Each processor has always one slab available for allocations. * Increasing the allocation order reduces the number of times that slabs - * must be moved on and off the partial lists and therefore may influence + * must be moved on and off the partial lists and is therefore a factor in * locking overhead. - * - * The offset is used to relocate the free list link in each object. It is - * therefore possible to move the free list link behind the object. This - * is necessary for RCU to work properly and also useful for debugging. */ /* @@ -1401,15 +1416,11 @@ static struct page *get_object_page(const void *x) */ static int slub_min_order; static int slub_max_order = DEFAULT_MAX_ORDER; - -/* - * Minimum number of objects per slab. This is necessary in order to - * reduce locking overhead. Similar to the queue size in SLAB. - */ static int slub_min_objects = DEFAULT_MIN_OBJECTS; /* * Merge control. If this is set then no merging of slab caches will occur. + * (Could be removed. This was introduced to pacify the merge skeptics.) */ static int slub_nomerge; @@ -1423,23 +1434,27 @@ static char *slub_debug_slabs; /* * Calculate the order of allocation given an slab object size. * - * The order of allocation has significant impact on other elements - * of the system. Generally order 0 allocations should be preferred - * since they do not cause fragmentation in the page allocator. Larger - * objects may have problems with order 0 because there may be too much - * space left unused in a slab. We go to a higher order if more than 1/8th - * of the slab would be wasted. + * The order of allocation has significant impact on performance and other + * system components. Generally order 0 allocations should be preferred since + * order 0 does not cause fragmentation in the page allocator. Larger objects + * be problematic to put into order 0 slabs because there may be too much + * unused space left. We go to a higher order if more than 1/8th of the slab + * would be wasted. + * + * In order to reach satisfactory performance we must ensure that a minimum + * number of objects is in one slab. Otherwise we may generate too much + * activity on the partial lists which requires taking the list_lock. This is + * less a concern for large slabs though which are rarely used. * - * In order to reach satisfactory performance we must ensure that - * a minimum number of objects is in one slab. Otherwise we may - * generate too much activity on the partial lists. This is less a - * concern for large slabs though. slub_max_order specifies the order - * where we begin to stop considering the number of objects in a slab. + * slub_max_order specifies the order where we begin to stop considering the + * number of objects in a slab as critical. If we reach slub_max_order then + * we try to keep the page order as low as possible. So we accept more waste + * of space in favor of a small page order. * - * Higher order allocations also allow the placement of more objects - * in a slab and thereby reduce object handling overhead. If the user - * has requested a higher mininum order then we start with that one - * instead of zero. + * Higher order allocations also allow the placement of more objects in a + * slab and thereby reduce object handling overhead. If the user has + * requested a higher mininum order then we start with that one instead of + * the smallest order which will fit the object. 
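The trade-off described above can be modelled directly: take the smallest order whose leftover space is at most an eighth of the slab, as long as the slab still holds a reasonable number of objects. This is a simplified stand-in for calculate_order() with example values for the limits; the real function has additional guards that are not reproduced here.

#include <stdio.h>

#define DEMO_PAGE_SIZE   4096UL
#define DEMO_MAX_ORDER   4      /* example limit, not the kernel's value */
#define DEMO_MIN_OBJECTS 4      /* example minimum, not the kernel's value */

/*
 * Pick an allocation order for objects of the given size: prefer the
 * smallest order that wastes no more than slab_size / 8 bytes and still
 * holds a reasonable number of objects.
 */
static int demo_calculate_order(unsigned long size)
{
        int order;

        for (order = 0; order <= DEMO_MAX_ORDER; order++) {
                unsigned long slab_size = DEMO_PAGE_SIZE << order;
                unsigned long rem;

                if (slab_size < size || slab_size < DEMO_MIN_OBJECTS * size)
                        continue;

                rem = slab_size % size;
                if (rem <= slab_size / 8)
                        return order;
        }
        return -1;      /* nothing fit; the kernel returns -E2BIG here */
}

int main(void)
{
        /*
         * size 700: order 0 leaves 596 of 4096 bytes unused (> 512, reject),
         * order 1 leaves 492 of 8192 bytes unused (<= 1024, accept).
         */
        printf("size 700 -> order %d\n", demo_calculate_order(700));
        printf("size 256 -> order %d\n", demo_calculate_order(256));
        return 0;
}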
*/ static int calculate_order(int size) { @@ -1459,18 +1474,18 @@ static int calculate_order(int size) rem = slab_size % size; - if (rem <= (PAGE_SIZE << order) / 8) + if (rem <= slab_size / 8) break; } if (order >= MAX_ORDER) return -E2BIG; + return order; } /* - * Function to figure out which alignment to use from the - * various ways of specifying it. + * Figure out what the alignment of the objects will be. */ static unsigned long calculate_alignment(unsigned long flags, unsigned long align, unsigned long size) @@ -1625,18 +1640,16 @@ static int calculate_sizes(struct kmem_cache *s) size = ALIGN(size, sizeof(void *)); /* - * If we are redzoning then check if there is some space between the + * If we are Redzoning then check if there is some space between the * end of the object and the free pointer. If not then add an - * additional word, so that we can establish a redzone between - * the object and the freepointer to be able to check for overwrites. + * additional word to have some bytes to store Redzone information. */ if ((flags & SLAB_RED_ZONE) && size == s->objsize) size += sizeof(void *); /* - * With that we have determined how much of the slab is in actual - * use by the object. This is the potential offset to the free - * pointer. + * With that we have determined the number of bytes in actual use + * by the object. This is the potential offset to the free pointer. */ s->inuse = size; @@ -1670,6 +1683,7 @@ static int calculate_sizes(struct kmem_cache *s) * of the object. */ size += sizeof(void *); + /* * Determine the alignment based on various parameters that the * user specified and the dynamic determination of cache line size @@ -1705,23 +1719,6 @@ static int calculate_sizes(struct kmem_cache *s) } -static int __init finish_bootstrap(void) -{ - struct list_head *h; - int err; - - slab_state = SYSFS; - - list_for_each(h, &slab_caches) { - struct kmem_cache *s = - container_of(h, struct kmem_cache, list); - - err = sysfs_slab_add(s); - BUG_ON(err); - } - return 0; -} - static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, const char *name, size_t size, size_t align, unsigned long flags, @@ -1788,7 +1785,6 @@ EXPORT_SYMBOL(kmem_cache_open); int kmem_ptr_validate(struct kmem_cache *s, const void *object) { struct page * page; - void *addr; page = get_object_page(object); @@ -1796,13 +1792,7 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object) /* No slab or wrong slab */ return 0; - addr = page_address(page); - if (object < addr || object >= addr + s->objects * s->size) - /* Out of bounds */ - return 0; - - if ((object - addr) % s->size) - /* Improperly aligned */ + if (!check_valid_pointer(s, page, object)) return 0; /* @@ -1831,7 +1821,8 @@ const char *kmem_cache_name(struct kmem_cache *s) EXPORT_SYMBOL(kmem_cache_name); /* - * Attempt to free all slabs on a node + * Attempt to free all slabs on a node. Return the number of slabs we + * were unable to free. */ static int free_list(struct kmem_cache *s, struct kmem_cache_node *n, struct list_head *list) @@ -1852,7 +1843,7 @@ static int free_list(struct kmem_cache *s, struct kmem_cache_node *n, } /* - * Release all resources used by slab cache + * Release all resources used by a slab cache. */ static int kmem_cache_close(struct kmem_cache *s) { @@ -2113,13 +2104,14 @@ void kfree(const void *x) EXPORT_SYMBOL(kfree); /* - * kmem_cache_shrink removes empty slabs from the partial lists - * and then sorts the partially allocated slabs by the number - * of items in use. 
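kmem_ptr_validate() above now delegates to check_valid_pointer(), but the test is still the two step check that the removed lines spelled out: the pointer has to fall inside the slab and has to sit at an exact multiple of the object size from its base. A stand-alone sketch of that check (the demo_* name is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Returns 1 when ptr points at the start of one of the `objects`
 * objects of `size` bytes that make up the slab at `base`.
 */
static int demo_valid_object_pointer(const void *base, int objects,
                                     size_t size, const void *ptr)
{
        uintptr_t b = (uintptr_t)base;
        uintptr_t p = (uintptr_t)ptr;

        if (p < b || p >= b + (uintptr_t)objects * size)
                return 0;       /* out of bounds */
        if ((p - b) % size)
                return 0;       /* improperly aligned: points inside an object */
        return 1;
}

int main(void)
{
        char *slab = malloc(8 * 64);

        printf("%d\n", demo_valid_object_pointer(slab, 8, 64, slab + 128)); /* 1 */
        printf("%d\n", demo_valid_object_pointer(slab, 8, 64, slab + 130)); /* 0 */
        printf("%d\n", demo_valid_object_pointer(slab, 8, 64, slab + 512)); /* 0 */
        free(slab);
        return 0;
}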
The slabs with the most items in use - * come first. New allocations will remove these from the - * partial list because they are full. The slabs with the - * least items are placed last. If it happens that the objects - * are freed then the page can be returned to the page allocator. + * kmem_cache_shrink removes empty slabs from the partial lists and sorts + * the remaining slabs by the number of items in use. The slabs with the + * most items in use come first. New allocations will then fill those up + * and thus they can be removed from the partial lists. + * + * The slabs with the least items are placed last. This results in them + * being allocated from last increasing the chance that the last objects + * are freed in them. */ int kmem_cache_shrink(struct kmem_cache *s) { @@ -2148,12 +2140,10 @@ int kmem_cache_shrink(struct kmem_cache *s) spin_lock_irqsave(&n->list_lock, flags); /* - * Build lists indexed by the items in use in - * each slab or free slabs if empty. + * Build lists indexed by the items in use in each slab. * - * Note that concurrent frees may occur while - * we hold the list_lock. page->inuse here is - * the upper limit. + * Note that concurrent frees may occur while we hold the + * list_lock. page->inuse here is the upper limit. */ list_for_each_entry_safe(page, t, &n->partial, lru) { if (!page->inuse && slab_trylock(page)) { @@ -2177,8 +2167,8 @@ int kmem_cache_shrink(struct kmem_cache *s) goto out; /* - * Rebuild the partial list with the slabs filled up - * most first and the least used slabs at the end. + * Rebuild the partial list with the slabs filled up most + * first and the least used slabs at the end. */ for (i = s->objects - 1; i >= 0; i--) list_splice(slabs_by_inuse + i, n->partial.prev); @@ -2206,9 +2196,8 @@ EXPORT_SYMBOL(kmem_cache_shrink); */ void *krealloc(const void *p, size_t new_size, gfp_t flags) { - struct kmem_cache *new_cache; void *ret; - struct page *page; + size_t ks; if (unlikely(!p)) return kmalloc(new_size, flags); @@ -2218,19 +2207,13 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) return NULL; } - page = virt_to_head_page(p); - - new_cache = get_slab(new_size, flags); - - /* - * If new size fits in the current cache, bail out. - */ - if (likely(page->slab == new_cache)) + ks = ksize(p); + if (ks >= new_size) return (void *)p; ret = kmalloc(new_size, flags); if (ret) { - memcpy(ret, p, min(new_size, ksize(p))); + memcpy(ret, p, min(new_size, ks)); kfree(p); } return ret; @@ -2248,7 +2231,7 @@ void __init kmem_cache_init(void) #ifdef CONFIG_NUMA /* * Must first have the slab cache available for the allocations of the - * struct kmalloc_cache_node's. There is special bootstrap code in + * struct kmem_cache_node's. There is special bootstrap code in * kmem_cache_open for slab_state == DOWN. */ create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", @@ -2420,8 +2403,8 @@ static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu) } /* - * Use the cpu notifier to insure that the slab are flushed - * when necessary. + * Use the cpu notifier to insure that the cpu slabs are flushed when + * necessary. 
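The rewritten kmem_cache_shrink() comment describes a counting sort style pass: empty slabs are handed back to the page allocator, the remaining partial slabs are bucketed by the number of objects in use, and the partial list is rebuilt with the fullest slabs first. The sketch below models that ordering on plain arrays; the real code moves list_heads under list_lock, which is not reproduced here.

#include <stdio.h>

#define OBJECTS 4       /* objects per slab in this model */
#define NSLABS  7

int main(void)
{
        /* inuse count of each partial slab on one node */
        int inuse[NSLABS] = { 1, 0, 3, 2, 0, 3, 1 };
        int buckets[OBJECTS][NSLABS];   /* slabs indexed by items in use */
        int count[OBJECTS] = { 0 };
        int i, n;

        /* Free empty slabs, bucket the rest by inuse (1 .. OBJECTS - 1). */
        for (i = 0; i < NSLABS; i++) {
                if (!inuse[i]) {
                        printf("slab %d is empty: give the page back\n", i);
                        continue;
                }
                buckets[inuse[i]][count[inuse[i]]++] = i;
        }

        /* Rebuild the partial list, fullest slabs first, emptiest last. */
        printf("new partial list order:");
        for (n = OBJECTS - 1; n >= 1; n--)
                for (i = 0; i < count[n]; i++)
                        printf(" slab %d (inuse %d)", buckets[n][i], n);
        printf("\n");
        return 0;
}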
*/ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) @@ -2529,68 +2512,6 @@ static int __init cpucache_init(void) __initcall(cpucache_init); #endif -#ifdef SLUB_RESILIENCY_TEST -static unsigned long validate_slab_cache(struct kmem_cache *s); - -static void resiliency_test(void) -{ - u8 *p; - - printk(KERN_ERR "SLUB resiliency testing\n"); - printk(KERN_ERR "-----------------------\n"); - printk(KERN_ERR "A. Corruption after allocation\n"); - - p = kzalloc(16, GFP_KERNEL); - p[16] = 0x12; - printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" - " 0x12->0x%p\n\n", p + 16); - - validate_slab_cache(kmalloc_caches + 4); - - /* Hmmm... The next two are dangerous */ - p = kzalloc(32, GFP_KERNEL); - p[32 + sizeof(void *)] = 0x34; - printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" - " 0x34 -> -0x%p\n", p); - printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); - - validate_slab_cache(kmalloc_caches + 5); - p = kzalloc(64, GFP_KERNEL); - p += 64 + (get_cycles() & 0xff) * sizeof(void *); - *p = 0x56; - printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", - p); - printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); - validate_slab_cache(kmalloc_caches + 6); - - printk(KERN_ERR "\nB. Corruption after free\n"); - p = kzalloc(128, GFP_KERNEL); - kfree(p); - *p = 0x78; - printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); - validate_slab_cache(kmalloc_caches + 7); - - p = kzalloc(256, GFP_KERNEL); - kfree(p); - p[50] = 0x9a; - printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); - validate_slab_cache(kmalloc_caches + 8); - - p = kzalloc(512, GFP_KERNEL); - kfree(p); - p[512] = 0xab; - printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); - validate_slab_cache(kmalloc_caches + 9); -} -#else -static void resiliency_test(void) {}; -#endif - -/* - * These are not as efficient as kmalloc for the non debug case. - * We do not have the page struct available so we have to touch one - * cacheline in struct kmem_cache to check slab flags. - */ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) { struct kmem_cache *s = get_slab(size, gfpflags); @@ -2618,7 +2539,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page) { void *p; void *addr = page_address(page); - unsigned long map[BITS_TO_LONGS(s->objects)]; + DECLARE_BITMAP(map, s->objects); if (!check_slab(s, page) || !on_freelist(s, page, NULL)) @@ -2627,14 +2548,14 @@ static int validate_slab(struct kmem_cache *s, struct page *page) /* Now we know that a valid freelist exists */ bitmap_zero(map, s->objects); - for(p = page->freelist; p; p = get_freepointer(s, p)) { - set_bit((p - addr) / s->size, map); + for_each_free_object(p, s, page->freelist) { + set_bit(slab_index(p, s, addr), map); if (!check_object(s, page, p, 0)) return 0; } - for(p = addr; p < addr + s->objects * s->size; p += s->size) - if (!test_bit((p - addr) / s->size, map)) + for_each_object(p, s, addr) + if (!test_bit(slab_index(p, s, addr), map)) if (!check_object(s, page, p, 1)) return 0; return 1; @@ -2707,8 +2628,63 @@ static unsigned long validate_slab_cache(struct kmem_cache *s) return count; } +#ifdef SLUB_RESILIENCY_TEST +static void resiliency_test(void) +{ + u8 *p; + + printk(KERN_ERR "SLUB resiliency testing\n"); + printk(KERN_ERR "-----------------------\n"); + printk(KERN_ERR "A. 
Corruption after allocation\n"); + + p = kzalloc(16, GFP_KERNEL); + p[16] = 0x12; + printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" + " 0x12->0x%p\n\n", p + 16); + + validate_slab_cache(kmalloc_caches + 4); + + /* Hmmm... The next two are dangerous */ + p = kzalloc(32, GFP_KERNEL); + p[32 + sizeof(void *)] = 0x34; + printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" + " 0x34 -> -0x%p\n", p); + printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); + + validate_slab_cache(kmalloc_caches + 5); + p = kzalloc(64, GFP_KERNEL); + p += 64 + (get_cycles() & 0xff) * sizeof(void *); + *p = 0x56; + printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", + p); + printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n"); + validate_slab_cache(kmalloc_caches + 6); + + printk(KERN_ERR "\nB. Corruption after free\n"); + p = kzalloc(128, GFP_KERNEL); + kfree(p); + *p = 0x78; + printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); + validate_slab_cache(kmalloc_caches + 7); + + p = kzalloc(256, GFP_KERNEL); + kfree(p); + p[50] = 0x9a; + printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); + validate_slab_cache(kmalloc_caches + 8); + + p = kzalloc(512, GFP_KERNEL); + kfree(p); + p[512] = 0xab; + printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); + validate_slab_cache(kmalloc_caches + 9); +} +#else +static void resiliency_test(void) {}; +#endif + /* - * Generate lists of locations where slabcache objects are allocated + * Generate lists of code addresses where slabcache objects are allocated * and freed. */ @@ -2787,7 +2763,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s, } /* - * Not found. Insert new tracking element + * Not found. Insert new tracking element. */ if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max)) return 0; @@ -2806,15 +2782,15 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s, struct page *page, enum track_item alloc) { void *addr = page_address(page); - unsigned long map[BITS_TO_LONGS(s->objects)]; + DECLARE_BITMAP(map, s->objects); void *p; bitmap_zero(map, s->objects); - for (p = page->freelist; p; p = get_freepointer(s, p)) - set_bit((p - addr) / s->size, map); + for_each_free_object(p, s, page->freelist) + set_bit(slab_index(p, s, addr), map); - for (p = addr; p < addr + s->objects * s->size; p += s->size) - if (!test_bit((p - addr) / s->size, map)) { + for_each_object(p, s, addr) + if (!test_bit(slab_index(p, s, addr), map)) { void *addr = get_track(s, p, alloc)->addr; add_location(t, s, addr); @@ -3496,6 +3472,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) static int __init slab_sysfs_init(void) { + struct list_head *h; int err; err = subsystem_register(&slab_subsys); @@ -3504,7 +3481,15 @@ static int __init slab_sysfs_init(void) return -ENOSYS; } - finish_bootstrap(); + slab_state = SYSFS; + + list_for_each(h, &slab_caches) { + struct kmem_cache *s = + container_of(h, struct kmem_cache, list); + + err = sysfs_slab_add(s); + BUG_ON(err); + } while (alias_list) { struct saved_alias *al = alias_list; @@ -3520,6 +3505,4 @@ static int __init slab_sysfs_init(void) } __initcall(slab_sysfs_init); -#else -__initcall(finish_bootstrap); #endif
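validate_slab() and process_slab() above share one technique: walk the freelist once, mark every free object in a bitmap indexed by slab_index(), and then treat every object that is not marked as allocated. The stand-alone sketch below shows that two pass scan, with a plain unsigned char array standing in for DECLARE_BITMAP().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SIZE    64      /* object size */
#define OBJECTS  8      /* objects per slab */
#define OFFSET   0      /* free pointer kept at the start of free objects */

static void *get_fp(void *object)
{
        return *(void **)((char *)object + OFFSET);
}

static void set_fp(void *object, void *fp)
{
        *(void **)((char *)object + OFFSET) = fp;
}

int main(void)
{
        char *slab = calloc(OBJECTS, SIZE);
        unsigned char map[OBJECTS];     /* stands in for DECLARE_BITMAP(map, s->objects) */
        void *freelist = NULL;
        int freed[] = { 1, 4, 6 };
        char *p;
        int i;

        /* Pretend objects 1, 4 and 6 were freed: push them on the freelist. */
        for (i = 0; i < 3; i++) {
                p = slab + freed[i] * SIZE;
                set_fp(p, freelist);
                freelist = p;
        }

        /* Pass 1: mark every object reachable from the freelist as free. */
        memset(map, 0, sizeof(map));
        for (p = freelist; p; p = get_fp(p))
                map[(p - slab) / SIZE] = 1;

        /* Pass 2: everything that is not marked is currently allocated. */
        for (i = 0; i < OBJECTS; i++)
                if (!map[i])
                        printf("object %d is allocated\n", i);

        free(slab);
        return 0;
}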