From aa137f9d29d30592774c727ec5cfcf9891e576fa Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 31 Aug 2007 00:48:45 -0700
Subject: [PATCH] SLUB: Force inlining for functions in slub_def.h

Some compilers (especially older gcc releases) may sometimes skip
inlining, which leads to link failures. Force the inlining of key
functions in slub_def.h to avoid these issues.

Signed-off-by: Christoph Lameter
Acked-by: Jan Dittmer
Signed-off-by: Linus Torvalds
---
 include/linux/slub_def.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 124270df87..74962077f6 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -78,7 +78,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  * Sorry that the following has to be that ugly but some versions of GCC
  * have trouble with constant propagation and loops.
  */
-static inline int kmalloc_index(size_t size)
+static __always_inline int kmalloc_index(size_t size)
 {
 	if (!size)
 		return 0;
@@ -133,7 +133,7 @@ static inline int kmalloc_index(size_t size)
  * This ought to end up with a global pointer to the right cache
  * in kmalloc_caches.
  */
-static inline struct kmem_cache *kmalloc_slab(size_t size)
+static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 {
 	int index = kmalloc_index(size);
 
@@ -166,7 +166,7 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
@@ -183,7 +183,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
-- 
2.39.5
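
Note: below is a standalone C sketch of how a skipped inline turns into a
link failure, the failure mode the patch guards against. Every sketch_*
name is made up for illustration and does not appear in slub_def.h; only
the always_inline attribute (what the kernel's __always_inline macro
expands to under gcc) is real. Built with optimization (e.g. gcc -O2),
the constant-folded branch disappears and the undefined symbol is never
referenced; if the compiler declined to inline, the reference would
survive and the link would fail.

/*
 * sketch.c - illustration only; all sketch_* names are hypothetical
 * and not taken from slub_def.h.
 */
#include <stddef.h>
#include <stdlib.h>

/* What the kernel's __always_inline macro boils down to under gcc. */
#define force_inline inline __attribute__((__always_inline__))

/*
 * Deliberately declared but never defined: the program only links if
 * every call to it is proven dead at compile time.
 */
extern void sketch_bad_size(void);

static force_inline int sketch_index(size_t size)
{
	if (size <= 8)
		return 3;
	if (size <= 16)
		return 4;
	return -1;			/* "too large" for this sketch */
}

static void *sketch_cache_alloc(int index)
{
	return malloc((size_t)1 << index);	/* stand-in for a cache lookup */
}

static force_inline void *sketch_alloc(size_t size)
{
	if (__builtin_constant_p(size)) {
		int index = sketch_index(size);

		if (index < 0)
			sketch_bad_size();	/* dead once folding succeeds */
		else
			return sketch_cache_alloc(index);
	}
	return malloc(size);			/* generic fallback path */
}

int main(void)
{
	/*
	 * With "gcc -O2 sketch.c", sketch_index(16) folds to 4, the
	 * sketch_bad_size() branch is eliminated and the link succeeds.
	 * Had the compiler skipped inlining sketch_index(), its result
	 * would no longer be a compile-time constant, the dead branch
	 * would survive, and linking would fail on sketch_bad_size().
	 */
	void *p = sketch_alloc(16);

	free(p);
	return 0;
}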