X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmempool.c;h=65f2957b8d518b7a6d244a339fd4dc5ce85ab9a9;hb=2a0445158192246c421467320af0d2f45a98f02c;hp=d691b5cb8022c640a1b4fc7538cccfce24970003;hpb=b84a35be0285229b0a8a5e2e04d79360c5b75562;p=linux-2.6

diff --git a/mm/mempool.c b/mm/mempool.c
index d691b5cb80..65f2957b8d 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -51,16 +51,23 @@ static void free_pool(mempool_t *pool)
  * functions might sleep - as long as the mempool_alloc function is not called
  * from IRQ contexts.
  */
-mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 				mempool_free_t *free_fn, void *pool_data)
 {
-	mempool_t *pool;
+	return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1);
+}
+EXPORT_SYMBOL(mempool_create);
 
-	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
+			mempool_free_t *free_fn, void *pool_data, int node_id)
+{
+	mempool_t *pool;
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
 	if (!pool)
 		return NULL;
 	memset(pool, 0, sizeof(*pool));
-	pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
+	pool->elements = kmalloc_node(min_nr * sizeof(void *),
+					GFP_KERNEL, node_id);
 	if (!pool->elements) {
 		kfree(pool);
 		return NULL;
@@ -87,7 +94,7 @@ mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 	}
 	return pool;
 }
-EXPORT_SYMBOL(mempool_create);
+EXPORT_SYMBOL(mempool_create_node);
 
 /**
  * mempool_resize - resize an existing memory pool
@@ -197,37 +204,23 @@ void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
 {
 	void *element;
 	unsigned long flags;
-	DEFINE_WAIT(wait);
-	int gfp_nowait;
+	wait_queue_t wait;
+	unsigned int gfp_temp;
+
+	might_sleep_if(gfp_mask & __GFP_WAIT);
 
 	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
 	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
 	gfp_mask |= __GFP_NOWARN;	/* failures are OK */
-	gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
 
-	might_sleep_if(gfp_mask & __GFP_WAIT);
+	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);
+
 repeat_alloc:
-	element = pool->alloc(gfp_nowait, pool->pool_data);
+
+	element = pool->alloc(gfp_temp, pool->pool_data);
 	if (likely(element != NULL))
 		return element;
 
-	/*
-	 * If the pool is less than 50% full and we can perform effective
-	 * page reclaim then try harder to allocate an element.
-	 */
-	mb();
-	if ((gfp_mask & __GFP_FS) && (gfp_mask != gfp_nowait) &&
-			(pool->curr_nr <= pool->min_nr/2)) {
-		element = pool->alloc(gfp_mask, pool->pool_data);
-		if (likely(element != NULL))
-			return element;
-	}
-
-	/*
-	 * Kick the VM at this point.
-	 */
-	wakeup_bdflush(0);
-
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
 		element = remove_element(pool);
@@ -240,8 +233,11 @@ repeat_alloc:
 	if (!(gfp_mask & __GFP_WAIT))
 		return NULL;
 
+	/* Now start performing page reclaim */
+	gfp_temp = gfp_mask;
+
+	init_wait(&wait);
 	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
-	mb();
+	smp_mb();
 	if (!pool->curr_nr)
 		io_schedule();
 	finish_wait(&pool->wait, &wait);
@@ -262,7 +258,7 @@ void mempool_free(void *element, mempool_t *pool)
 {
 	unsigned long flags;
 
-	mb();
+	smp_mb();
 	if (pool->curr_nr < pool->min_nr) {
 		spin_lock_irqsave(&pool->lock, flags);
 		if (pool->curr_nr < pool->min_nr) {
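
Usage note (not part of the patch above): a minimal sketch of how a caller might use the new mempool_create_node() entry point. The names MY_MIN_POOL, elem_cachep, nid and create_pool_on_node() are hypothetical placeholders; mempool_alloc_slab()/mempool_free_slab() are the stock slab-backed helpers already exported by mm/mempool.c, and passing a node_id of -1 keeps the old node-agnostic behaviour, exactly as the reworked mempool_create() wrapper does.

	#include <linux/mempool.h>
	#include <linux/slab.h>

	#define MY_MIN_POOL	4		/* hypothetical: elements kept in reserve */

	static kmem_cache_t *elem_cachep;	/* assumed: slab cache the pool draws from */

	/* Build a pool whose control structure and element array live on NUMA node 'nid'. */
	static mempool_t *create_pool_on_node(int nid)
	{
		return mempool_create_node(MY_MIN_POOL, mempool_alloc_slab,
					   mempool_free_slab, elem_cachep, nid);
	}

The point of the node-aware variant is only where the pool's own bookkeeping is allocated; the elements themselves still come from whatever the supplied alloc_fn returns.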