X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=mm%2Fslab.c;h=f1b644eb39d816b3865400caf8d589d458756a17;hb=af57d238aa2107e1b45d8dacad6e50db938f0567;hp=f055c14202161a7b561599536cfb269240bbd1dc;hpb=63589ed0785ffc715777a54ccb96cdfaea9edbc0;p=linux-2.6

diff --git a/mm/slab.c b/mm/slab.c
index f055c14202..f1b644eb39 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
 #define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
 
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
 /*
  * struct slab
  *
@@ -420,6 +415,7 @@ struct kmem_cache {
 	unsigned long max_freeable;
 	unsigned long node_allocs;
 	unsigned long node_frees;
+	unsigned long node_overflow;
 	atomic_t allochit;
 	atomic_t allocmiss;
 	atomic_t freehit;
@@ -465,6 +461,7 @@ struct kmem_cache {
 #define STATS_INC_ERR(x) ((x)->errors++)
 #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
 #define STATS_INC_NODEFREES(x) ((x)->node_frees++)
+#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
 #define STATS_SET_FREEABLE(x, i) \
 	do { \
 		if ((x)->max_freeable < i) \
@@ -484,6 +481,7 @@ struct kmem_cache {
 #define STATS_INC_ERR(x) do { } while (0)
 #define STATS_INC_NODEALLOCS(x) do { } while (0)
 #define STATS_INC_NODEFREES(x) do { } while (0)
+#define STATS_INC_ACOVERFLOW(x) do { } while (0)
 #define STATS_SET_FREEABLE(x, i) do { } while (0)
 #define STATS_INC_ALLOCHIT(x) do { } while (0)
 #define STATS_INC_ALLOCMISS(x) do { } while (0)
@@ -697,6 +695,14 @@ static enum {
 	FULL
 } g_cpucache_up;
 
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -976,7 +982,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 		 * That way we could avoid the overhead of putting the objects
 		 * into the free lists and getting them back later.
 		 */
-		transfer_objects(rl3->shared, ac, ac->limit);
+		if (rl3->shared)
+			transfer_objects(rl3->shared, ac, ac->limit);
 
 		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
@@ -1033,7 +1040,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1344,12 +1351,6 @@ void __init kmem_cache_init(void)
 					NULL, NULL);
 		}
 
-		/* Inc off-slab bufctl limit until the ceiling is hit. */
-		if (!(OFF_SLAB(sizes->cs_cachep))) {
-			offslab_limit = sizes->cs_size - sizeof(struct slab);
-			offslab_limit /= sizeof(kmem_bufctl_t);
-		}
-
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -1453,7 +1454,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	int i;
 
 	flags |= cachep->gfpflags;
+#ifndef CONFIG_MMU
+	/* nommu uses slab's for process anonymous memory allocations, so
+	 * requires __GFP_COMP to properly refcount higher order allocations"
+	 */
+	page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder);
+#else
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+#endif
 	if (!page)
 		return NULL;
 	addr = page_address(page);
@@ -1761,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
 			size_t size, size_t align, unsigned long flags)
 {
+	unsigned long offslab_limit;
 	size_t left_over = 0;
 	int gfporder;
 
@@ -1772,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
-		/* More than offslab_limit objects will cause problems */
-		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
-			break;
+		if (flags & CFLGS_OFF_SLAB) {
+			/*
+			 * Max number of objs-per-slab for caches which
+			 * use off-slab slabs. Needed to avoid a possible
+			 * looping condition in cache_grow().
+			 */
+			offslab_limit = size - sizeof(struct slab);
+			offslab_limit /= sizeof(kmem_bufctl_t);
+
+			if (num > offslab_limit)
+				break;
+		}
 
 		/* Found something acceptable - save it away */
 		cachep->num = num;
@@ -2181,11 +2199,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3) {
+		if (l3 && l3->alien)
+			drain_alien_cache(cachep, l3->alien);
+	}
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (l3)
 			drain_array(cachep, l3, l3->shared, 1, node);
-			if (l3->alien)
-				drain_alien_cache(cachep, l3->alien);
-		}
 	}
 }
 
@@ -2318,13 +2339,15 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
-				   int colour_off, gfp_t local_flags)
+				   int colour_off, gfp_t local_flags,
+				   int nodeid)
 {
 	struct slab *slabp;
 
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
-		slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
+		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
+					      local_flags, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2334,6 +2357,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	slabp->inuse = 0;
 	slabp->colouroff = colour_off;
 	slabp->s_mem = objp + colour_off;
+	slabp->nodeid = nodeid;
 
 	return slabp;
 }
@@ -2519,7 +2543,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		goto failed;
 
 	/* Get slab management. */
-	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags);
+	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
 	if (!slabp)
 		goto opps1;
 
@@ -3080,9 +3104,11 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 			if (l3->alien && l3->alien[nodeid]) {
 				alien = l3->alien[nodeid];
 				spin_lock(&alien->lock);
-				if (unlikely(alien->avail == alien->limit))
+				if (unlikely(alien->avail == alien->limit)) {
+					STATS_INC_ACOVERFLOW(cachep);
 					__drain_alien_cache(cachep, alien, nodeid);
+				}
 				alien->entry[alien->avail++] = objp;
 				spin_unlock(&alien->lock);
 			} else {
@@ -3760,7 +3786,7 @@ static void print_slabinfo_header(struct seq_file *m)
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #if STATS
 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
-		 "<error> <maxfreeable> <nodeallocs> <remotefrees>");
+		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
 	seq_putc(m, '\n');
@@ -3874,11 +3900,12 @@ static int s_show(struct seq_file *m, void *p)
 		unsigned long max_freeable = cachep->max_freeable;
 		unsigned long node_allocs = cachep->node_allocs;
 		unsigned long node_frees = cachep->node_frees;
+		unsigned long overflows = cachep->node_overflow;
 
 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
-				%4lu %4lu %4lu %4lu", allocs, high, grown,
+				%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
 				reaped, errors, max_freeable, node_allocs,
-				node_frees);
+				node_frees, overflows);
 	}
 	/* cpu stats */
 	{
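
For context on the slab_is_available() helper added above: a minimal sketch, outside the patch, of how early-boot code can pick between the bootmem allocator and the slab allocator once g_cpucache_up reaches FULL. The caller early_alloc() is hypothetical; slab_is_available(), kmalloc() and alloc_bootmem() are the kernel interfaces of this era.

```c
/*
 * Illustrative sketch only -- not part of the patch above.
 * early_alloc() is a hypothetical caller used for demonstration.
 */
#include <linux/slab.h>
#include <linux/bootmem.h>

static void *early_alloc(unsigned long size)
{
	/*
	 * Once kmem_cache_init() has completed, slab_is_available()
	 * returns true and kmalloc() may be used; before that, fall
	 * back to the boot-time allocator.
	 */
	if (slab_is_available())
		return kmalloc(size, GFP_KERNEL);

	return alloc_bootmem(size);
}
```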
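The off-slab bound that moves into calculate_slab_order() is plain integer arithmetic: the off-slab management buffer must hold one struct slab followed by one kmem_bufctl_t per object. A stand-alone sketch of the same calculation, using made-up sizes (the real sizeof(struct slab) differs):

```c
#include <stdio.h>

typedef unsigned int kmem_bufctl_t;	/* matches the typedef in mm/slab.c */

int main(void)
{
	/* Example numbers only; the real sizeof(struct slab) differs. */
	unsigned long cache_size = 4096;	/* size of the off-slab slabp cache */
	unsigned long slab_header = 64;		/* stand-in for sizeof(struct slab) */
	unsigned long offslab_limit;

	/* The same per-cache calculation the patch performs. */
	offslab_limit = (cache_size - slab_header) / sizeof(kmem_bufctl_t);
	printf("at most %lu objects per off-slab slab\n", offslab_limit);
	return 0;
}
```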