kfree(NULL) would normally occur only in error paths, and
kfree(ZERO_SIZE_PTR) is uncommon as well, so use unlikely() for the
condition check in SLUB's and SLOB's kfree() to optimize for the common
case. SLAB already does this.
Signed-off-by: Satyam Sharma <satyam@infradead.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
slobidx_t units;
unsigned long flags;
slobidx_t units;
unsigned long flags;
- if (ZERO_OR_NULL_PTR(block))
+ if (unlikely(ZERO_OR_NULL_PTR(block)))
- if (ZERO_OR_NULL_PTR(block))
+ if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
sp = (struct slob_page *)virt_to_page(block);
return;
sp = (struct slob_page *)virt_to_page(block);
- if (ZERO_OR_NULL_PTR(block))
+ if (unlikely(ZERO_OR_NULL_PTR(block)))
return 0;
sp = (struct slob_page *)virt_to_page(block);
return 0;
sp = (struct slob_page *)virt_to_page(block);
struct page *page;
struct kmem_cache *s;
struct page *page;
struct kmem_cache *s;
- if (ZERO_OR_NULL_PTR(object))
+ if (unlikely(ZERO_OR_NULL_PTR(object)))
return 0;
page = get_object_page(object);
return 0;
page = get_object_page(object);
- if (ZERO_OR_NULL_PTR(x))
+ if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
page = virt_to_head_page(x);
return;
page = virt_to_head_page(x);
get_order(size));
s = get_slab(size, gfpflags);
get_order(size));
s = get_slab(size, gfpflags);
- if (ZERO_OR_NULL_PTR(s))
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, gfpflags, -1, caller);
return s;
return slab_alloc(s, gfpflags, -1, caller);
get_order(size));
s = get_slab(size, gfpflags);
get_order(size));
s = get_slab(size, gfpflags);
- if (ZERO_OR_NULL_PTR(s))
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, gfpflags, node, caller);
return s;
return slab_alloc(s, gfpflags, node, caller);