From e00946fe2351307eb3eda7a3343530f6d2d1af2e Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sat, 25 Mar 2006 03:06:45 -0800
Subject: [PATCH] [PATCH] slab: Bypass free lists for __drain_alien_cache()

__drain_alien_cache() currently drains objects by freeing them to the
(remote) freelists of the original node.  However, each node also has a
shared list containing objects to be used on any processor of that node.
We can avoid a number of remote node accesses by copying the pointers to
the free objects directly into the remote shared array.

And while we are at it: Skip alien draining if the alien cache spinlock
is already taken.

Kiran reported that this is a performance benefit.

Signed-off-by: Christoph Lameter
Cc: Pekka Enberg
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index dee857a868..351aa6c587 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -971,6 +971,13 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 
 	if (ac->avail) {
 		spin_lock(&rl3->list_lock);
+		/*
+		 * Stuff objects into the remote nodes shared array first.
+		 * That way we could avoid the overhead of putting the objects
+		 * into the free lists and getting them back later.
+		 */
+		transfer_objects(rl3->shared, ac, ac->limit);
+
 		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
 		spin_unlock(&rl3->list_lock);
@@ -986,8 +993,8 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
-		if (ac && ac->avail) {
-			spin_lock_irq(&ac->lock);
+
+		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
 			__drain_alien_cache(cachep, ac, node);
 			spin_unlock_irq(&ac->lock);
 		}
-- 
2.39.5
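
For orientation, the whole of __drain_alien_cache() after this patch
applies reads roughly as below. The body of the if-block comes straight
from the first hunk; the function header and the rl3 declaration are
reconstructed from the mm/slab.c of the same era, so treat this as a
sketch rather than a quotation.

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];	/* reconstructed, not in the hunk */

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

The companion change in reap_alien() turns the unconditional
spin_lock_irq() into spin_trylock_irq(): if another CPU already holds
the alien cache lock, the periodic reaper simply skips this drain
instead of spinning, which is the contention win the changelog
attributes to Kiran's report.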
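transfer_objects() itself is called by the hunk above but is not part of
this diff. A minimal sketch of the bulk move it performs, assuming the
array_cache layout of the same era (avail, limit, touched, and an
entry[] flexible array of object pointers), looks like this:

/*
 * Sketch: move up to max object pointers from one array_cache to
 * another with a single memcpy(), bounded by what the source has
 * available and what the destination can still hold.
 */
static int transfer_objects(struct array_cache *to,
			    struct array_cache *from, unsigned int max)
{
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	/* Take the topmost pointers from the source... */
	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
	       sizeof(void *) * nr);

	/* ...and account for them in both caches. */
	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}

Because this is a single memcpy() of pointers taken under
rl3->list_lock, the transferred objects never touch the remote node's
slab freelists. Whatever does not fit into the shared array is still
handed to free_block() afterwards: transfer_objects() decrements
ac->avail, so free_block() only sees the entries that remain.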