From: Nick Piggin
Date: Thu, 17 May 2007 05:10:49 +0000 (-0700)
Subject: slob: implement RCU freeing
X-Git-Tag: v2.6.22-rc2~63
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=afc0cedbe9138e3e8b38bfa1e4dfd01a2c537d62;p=linux-2.6

slob: implement RCU freeing

The SLOB allocator should implement SLAB_DESTROY_BY_RCU correctly, because
even on UP, RCU freeing semantics are not equivalent to simply freeing
immediately.  This also allows SLOB to be used on SMP.

Signed-off-by: Nick Piggin
Acked-by: Matt Mackall
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
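A note on the semantics: SLAB_DESTROY_BY_RCU only guarantees that the memory
backing a freed object stays in the cache, as the same type, until an RCU
grace period elapses; the object itself may be recycled immediately. Readers
under rcu_read_lock() may therefore dereference a stale pointer, but must
revalidate the object's identity afterwards. A minimal sketch of that lookup
pattern, assuming a hypothetical "struct obj" with an embedded refcount (the
names and refcount protocol below are illustrative, not from this patch):

#include <linux/rcupdate.h>
#include <asm/atomic.h>

struct obj {
	atomic_t refcnt;	/* drops to zero when the object is freed */
	int key;		/* identity, re-checked after the lookup */
};

static struct obj *obj_lookup(struct obj **slot, int key)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(*slot);
	/* the memory is guaranteed valid; the object's identity is not */
	if (o && !atomic_inc_not_zero(&o->refcnt))
		o = NULL;			/* being freed under us */
	rcu_read_unlock();

	if (o && o->key != key) {		/* recycled for another object */
		atomic_dec(&o->refcnt);		/* simplified "put" */
		o = NULL;
	}
	return o;
}

This is why freeing immediately is wrong even on UP: without the grace period
the memory could leave the cache entirely, and the speculative dereference
above would be a use-after-free.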
diff --git a/init/Kconfig b/init/Kconfig
index 4e009fde4b..9264895ab3 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -577,14 +577,11 @@ config SLUB
 	   and has enhanced diagnostics.
 
 config SLOB
-#
-#	SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
-#
-	depends on EMBEDDED && !SMP && !SPARSEMEM
+	depends on EMBEDDED && !SPARSEMEM
 	bool "SLOB (Simple Allocator)"
 	help
 	   SLOB replaces the SLAB allocator with a drastically simpler
-	   allocator.  SLOB is more space efficient that SLAB but does not
+	   allocator.  SLOB is more space efficient than SLAB but does not
 	   scale well (single lock for all operations) and is also highly
 	   susceptible to fragmentation. SLUB can accomplish a higher object
 	   density. It is usually better to use SLUB instead of SLOB.
diff --git a/mm/slob.c b/mm/slob.c
index c6933bc19b..57bb72ed0d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/timer.h>
+#include <linux/rcupdate.h>
 
 struct slob_block {
 	int units;
@@ -53,6 +54,16 @@ struct bigblock {
 };
 typedef struct bigblock bigblock_t;
 
+/*
+ * struct slob_rcu is inserted at the tail of allocated slob blocks, which
+ * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * the block using call_rcu.
+ */
+struct slob_rcu {
+	struct rcu_head head;
+	int size;
+};
+
 static slob_t arena = { .next = &arena, .units = 1 };
 static slob_t *slobfree = &arena;
 static bigblock_t *bigblocks;
@@ -266,6 +277,7 @@ size_t ksize(const void *block)
 
 struct kmem_cache {
 	unsigned int size, align;
+	unsigned long flags;
 	const char *name;
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
 	void (*dtor)(void *, struct kmem_cache *, unsigned long);
@@ -283,6 +295,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	if (c) {
 		c->name = name;
 		c->size = size;
+		if (flags & SLAB_DESTROY_BY_RCU) {
+			BUG_ON(dtor);
+			/* leave room for rcu footer at the end of object */
+			c->size += sizeof(struct slob_rcu);
+		}
+		c->flags = flags;
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
@@ -328,15 +346,35 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_zalloc);
 
-void kmem_cache_free(struct kmem_cache *c, void *b)
+static void __kmem_cache_free(void *b, int size)
 {
-	if (c->dtor)
-		c->dtor(b, c, 0);
-
-	if (c->size < PAGE_SIZE)
-		slob_free(b, c->size);
+	if (size < PAGE_SIZE)
+		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(c->size));
+		free_pages((unsigned long)b, get_order(size));
+}
+
+static void kmem_rcu_free(struct rcu_head *head)
+{
+	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+	__kmem_cache_free(b, slob_rcu->size);
+}
+
+void kmem_cache_free(struct kmem_cache *c, void *b)
+{
+	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+		struct slob_rcu *slob_rcu;
+		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+		INIT_RCU_HEAD(&slob_rcu->head);
+		slob_rcu->size = c->size;
+		call_rcu(&slob_rcu->head, kmem_rcu_free);
+	} else {
+		if (c->dtor)
+			c->dtor(b, c, 0);
+		__kmem_cache_free(b, c->size);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_free);
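A note on the footer arithmetic: kmem_cache_create() grows c->size so that the
struct slob_rcu footer lands past the caller-visible object, kmem_cache_free()
locates it at b + c->size - sizeof(struct slob_rcu), and kmem_rcu_free() inverts
that offset to recover b (casting the rcu_head pointer to slob_rcu works because
head is the first member). A standalone userspace model of the same arithmetic,
illustrative only and not kernel code:

#include <assert.h>
#include <stdlib.h>

struct slob_rcu {
	void *head;	/* stand-in for struct rcu_head */
	int size;
};

int main(void)
{
	size_t obj_size = 128;	/* size the cache user asked for */
	/* kmem_cache_create(): leave room for the rcu footer */
	size_t size = obj_size + sizeof(struct slob_rcu);
	char *b = malloc(size);

	/* kmem_cache_free(): footer sits at the tail of the block */
	struct slob_rcu *slob_rcu =
		(struct slob_rcu *)(b + size - sizeof(struct slob_rcu));
	slob_rcu->size = size;

	/* kmem_rcu_free(): recover the base pointer from the footer */
	char *base = (char *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
	assert(base == b);

	free(b);
	return 0;
}

Placing the footer at the tail rather than the head leaves the object's address
and alignment untouched, which is why the allocation paths need no changes and
only kmem_cache_free() has to know about the footer.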