unsigned long max_freeable;
unsigned long node_allocs;
unsigned long node_frees;
+ unsigned long node_overflow;
atomic_t allochit;
atomic_t allocmiss;
atomic_t freehit;
#define STATS_INC_ERR(x) ((x)->errors++)
#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
+#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
#define STATS_SET_FREEABLE(x, i) \
do { \
if ((x)->max_freeable < i) \
#define STATS_INC_ERR(x) do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
#define STATS_INC_NODEFREES(x) do { } while (0)
+#define STATS_INC_ACOVERFLOW(x) do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x) do { } while (0)
#define STATS_INC_ALLOCMISS(x) do { } while (0)
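For context, the STATS counters follow the standard two-sided pattern: when the cache is built with STATS enabled each macro bumps a counter on the kmem_cache, and otherwise it compiles away to an empty statement so the fast paths carry no cost. A minimal userspace sketch of that pattern, assuming a simplified stats struct (the names here are stand-ins, not the kernel's):

#include <stdio.h>

#define STATS 1

struct cache_stats {
	unsigned long node_overflow;
};

#if STATS
#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
#else
#define STATS_INC_ACOVERFLOW(x) do { } while (0)
#endif

int main(void)
{
	struct cache_stats stats = { 0 };

	STATS_INC_ACOVERFLOW(&stats);	/* counts one alien-cache overflow */
	printf("overflows: %lu\n", stats.node_overflow);
	return 0;
}

The empty do { } while (0) form keeps the no-op variant usable as a single statement, for example as the sole body of an if.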
if (cache_cache.num)
break;
}
- if (!cache_cache.num)
- BUG();
+ BUG_ON(!cache_cache.num);
cache_cache.gfporder = order;
cache_cache.colour = left_over / cache_cache.colour_off;
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
int i;
flags |= cachep->gfpflags;
+#ifndef CONFIG_MMU
+ /* nommu uses slabs for process anonymous memory allocations, so
+ * requires __GFP_COMP to properly refcount higher order allocations
+ */
+ page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder);
+#else
page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+#endif
if (!page)
return NULL;
addr = page_address(page);
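The nommu case needs __GFP_COMP because without an MMU, slab memory can directly back user mappings, and refcounting a higher-order allocation through get_page()/put_page() only works if its pages are tied together as a compound page. A sketch of the same #ifdef folded into a helper so both configurations share one allocation call (slab_gfp() is a hypothetical name, not part of the patch):

static inline gfp_t slab_gfp(gfp_t flags)
{
#ifndef CONFIG_MMU
	/* nommu: make higher-order slab allocations compound pages so
	 * the whole block is refcounted via its head page. */
	flags |= __GFP_COMP;
#endif
	return flags;
}

	page = alloc_pages_node(nodeid, slab_gfp(flags), cachep->gfporder);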
* Always checks flags, a caller might be expecting debug support which
* isn't available.
*/
- if (flags & ~CREATE_MASK)
- BUG();
+ BUG_ON(flags & ~CREATE_MASK);
/*
* Check that size is in terms of words. This is needed to avoid
slabp = list_entry(l3->slabs_free.prev, struct slab, list);
#if DEBUG
- if (slabp->inuse)
- BUG();
+ BUG_ON(slabp->inuse);
#endif
list_del(&slabp->list);
*/
int kmem_cache_shrink(struct kmem_cache *cachep)
{
- if (!cachep || in_interrupt())
- BUG();
+ BUG_ON(!cachep || in_interrupt());
return __cache_shrink(cachep);
}
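The BUG_ON() conversions in these hunks are purely cosmetic. At the time, include/asm-generic/bug.h defined the generic fallback roughly as:

#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (unlikely((condition) != 0)) BUG(); } while (0)
#endif

so BUG_ON(cond) is exactly the open-coded "if (cond) BUG();" with an unlikely() hint added, and it reads as a single statement.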
int i;
struct kmem_list3 *l3;
- if (!cachep || in_interrupt())
- BUG();
+ BUG_ON(!cachep || in_interrupt());
/* Don't let CPUs to come and go */
lock_cpu_hotplug();
/* Get the memory for a slab management obj. */
static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
- int colour_off, gfp_t local_flags)
+ int colour_off, gfp_t local_flags,
+ int nodeid)
{
struct slab *slabp;
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
- slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
+ slabp = kmem_cache_alloc_node(cachep->slabp_cache,
+ local_flags, nodeid);
if (!slabp)
return NULL;
} else {
slabp->inuse = 0;
slabp->colouroff = colour_off;
slabp->s_mem = objp + colour_off;
+ slabp->nodeid = nodeid;
return slabp;
}
* Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc().
*/
- if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
- BUG();
+ BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
if (flags & SLAB_NO_GROW)
return 0;
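The test relies on the usual mask idiom: and-ing flags with the complement of the allowed set leaves exactly the unknown bits, so any unexpected flag makes the expression nonzero. A tiny userspace sketch with made-up flag values:

#include <assert.h>

#define F_DMA     0x01u
#define F_NOGROW  0x02u
#define F_ALLOWED (F_DMA | F_NOGROW)	/* plays the role of the allowed mask */

int main(void)
{
	assert((F_DMA & ~F_ALLOWED) == 0);	/* known flag passes */
	assert((0x80u & ~F_ALLOWED) != 0);	/* unknown bit would trip BUG_ON() */
	return 0;
}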
goto failed;
/* Get slab management. */
- slabp = alloc_slabmgmt(cachep, objp, offset, local_flags);
+ slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
if (!slabp)
goto opps1;
if (l3->alien && l3->alien[nodeid]) {
alien = l3->alien[nodeid];
spin_lock(&alien->lock);
- if (unlikely(alien->avail == alien->limit))
+ if (unlikely(alien->avail == alien->limit)) {
+ STATS_INC_ACOVERFLOW(cachep);
__drain_alien_cache(cachep,
alien, nodeid);
+ }
alien->entry[alien->avail++] = objp;
spin_unlock(&alien->lock);
} else {
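The behaviour here is unchanged: a full alien array is still drained back to the object's home node before the new pointer is stashed; the patch only adds accounting for how often that drain happens. A self-contained userspace sketch of the drain-on-full pattern with the counter (all names are simplified stand-ins):

#include <stdio.h>

#define LIMIT 4

struct alien_cache {
	unsigned int avail;
	unsigned int limit;
	void *entry[LIMIT];
};

static unsigned long overflows;	/* stands in for cachep->node_overflow */

static void drain(struct alien_cache *ac)
{
	/* The real __drain_alien_cache() hands the entries back to their
	 * home node; here we simply empty the array. */
	ac->avail = 0;
}

static void alien_free(struct alien_cache *ac, void *objp)
{
	if (ac->avail == ac->limit) {
		overflows++;		/* STATS_INC_ACOVERFLOW() in the patch */
		drain(ac);
	}
	ac->entry[ac->avail++] = objp;
}

int main(void)
{
	struct alien_cache ac = { .avail = 0, .limit = LIMIT };
	int obj[6];

	for (int i = 0; i < 6; i++)
		alien_free(&ac, &obj[i]);
	printf("overflows: %lu, avail now: %u\n", overflows, ac.avail);
	return 0;
}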
* and we have no way of figuring out how to fix the array
* that we have allocated then....
*/
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
int node = cpu_to_node(i);
if (node_online(node))
/*
* We allocate for all cpus so we cannot use for online cpu here.
*/
- for_each_cpu(i)
+ for_each_possible_cpu(i)
kfree(p->ptrs[i]);
kfree(p);
}
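Both loops are part of the tree-wide rename of the ambiguous for_each_cpu() to for_each_possible_cpu(); the semantics are unchanged. The distinction matters here because the per-CPU pointer arrays are allocated and freed for every CPU that could ever come online, not just those online now. For reference, the macro of that era expanded roughly to:

#define for_each_possible_cpu(cpu)  for_each_cpu_mask((cpu), cpu_possible_map)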
EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
- * This initializes kmem_list3 for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
*/
static int alloc_kmemlist(struct kmem_cache *cachep)
{
int node;
struct kmem_list3 *l3;
- int err = 0;
+ struct array_cache *new_shared;
+ struct array_cache **new_alien;
for_each_online_node(node) {
- struct array_cache *nc = NULL, *new;
- struct array_cache **new_alien = NULL;
-#ifdef CONFIG_NUMA
+
new_alien = alloc_alien_cache(node, cachep->limit);
if (!new_alien)
goto fail;
-#endif
- new = alloc_arraycache(node, cachep->shared*cachep->batchcount,
+
+ new_shared = alloc_arraycache(node,
+ cachep->shared*cachep->batchcount,
0xbaadf00d);
- if (!new)
+ if (!new_shared) {
+ free_alien_cache(new_alien);
goto fail;
+ }
+
l3 = cachep->nodelists[node];
if (l3) {
+ struct array_cache *shared = l3->shared;
+
spin_lock_irq(&l3->list_lock);
- nc = cachep->nodelists[node]->shared;
- if (nc)
- free_block(cachep, nc->entry, nc->avail, node);
+ if (shared)
+ free_block(cachep, shared->entry,
+ shared->avail, node);
- l3->shared = new;
- if (!cachep->nodelists[node]->alien) {
+ l3->shared = new_shared;
+ if (!l3->alien) {
l3->alien = new_alien;
new_alien = NULL;
}
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
spin_unlock_irq(&l3->list_lock);
- kfree(nc);
+ kfree(shared);
free_alien_cache(new_alien);
continue;
}
l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
- if (!l3)
+ if (!l3) {
+ free_alien_cache(new_alien);
+ kfree(new_shared);
goto fail;
+ }
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
- l3->shared = new;
+ l3->shared = new_shared;
l3->alien = new_alien;
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
cachep->nodelists[node] = l3;
}
- return err;
+ return 0;
+
fail:
- err = -ENOMEM;
- return err;
+ if (!cachep->next.next) {
+ /* Cache is not active yet. Roll back what we did */
+ node--;
+ while (node >= 0) {
+ if (cachep->nodelists[node]) {
+ l3 = cachep->nodelists[node];
+
+ kfree(l3->shared);
+ free_alien_cache(l3->alien);
+ kfree(l3);
+ cachep->nodelists[node] = NULL;
+ }
+ node--;
+ }
+ }
+ return -ENOMEM;
}
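The fail path is the substantive change: previously a mid-loop allocation failure simply returned -ENOMEM and left the nodelists already set up half-initialized. Now, if the cache has not yet been linked into the cache chain (cachep->next.next is still NULL), everything done so far is unwound. A self-contained userspace sketch of that unwind pattern (names are made up):

#include <stdlib.h>

#define NODES 4

static void *nodelists[NODES];

static int setup_nodelists(void)
{
	int node;

	for (node = 0; node < NODES; node++) {
		nodelists[node] = malloc(64);
		if (!nodelists[node])
			goto fail;
	}
	return 0;

fail:
	/* Walk back over the nodes already set up and undo them,
	 * mirroring the alloc_kmemlist() fail path. */
	for (node--; node >= 0; node--) {
		free(nodelists[node]);
		nodelists[node] = NULL;
	}
	return -1;	/* stands in for -ENOMEM */
}

int main(void)
{
	return setup_nodelists() ? 1 : 0;
}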
struct ccupdate_struct {
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
- "<error> <maxfreeable> <nodeallocs> <remotefrees>");
+ "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
seq_putc(m, '\n');
unsigned long max_freeable = cachep->max_freeable;
unsigned long node_allocs = cachep->node_allocs;
unsigned long node_frees = cachep->node_frees;
+ unsigned long overflows = cachep->node_overflow;
seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
- %4lu %4lu %4lu %4lu", allocs, high, grown,
+ %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
reaped, errors, max_freeable, node_allocs,
- node_frees);
+ node_frees, overflows);
}
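With the extra column, the STATS-enabled globalstat line in /proc/slabinfo gains a ninth field, <alienoverflow>, matching the header change above. A userspace sketch showing just the widened format string shape, with all values zeroed for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long allocs = 0, high = 0, grown = 0, reaped = 0,
		      errors = 0, max_freeable = 0, node_allocs = 0,
		      node_frees = 0, overflows = 0;

	/* One extra %4lu at the end for the new <alienoverflow> column. */
	printf(" : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu\n",
	       allocs, high, grown, reaped, errors, max_freeable,
	       node_allocs, node_frees, overflows);
	return 0;
}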
/* cpu stats */
{