err.no Git - linux-2.6/commitdiff
SLUB: clean up krealloc
author    Christoph Lameter <clameter@sgi.com>
          Wed, 9 May 2007 09:32:38 +0000 (02:32 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
          Wed, 9 May 2007 19:30:45 +0000 (12:30 -0700)
We really do not need all this extra machinery here.

ksize gives us all the information we need to figure out if the object can
cope with the new size.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
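
For illustration only, not part of the patch below: a minimal userspace sketch of the same ksize()-based fast path, using glibc's malloc_usable_size() as a stand-in for ksize(). The helper name my_krealloc is hypothetical. The point is that the allocator rounds a request up to its size class, so the existing object is often already large enough and can simply be returned unchanged, and a copy is only needed when it is not.

/*
 * Userspace sketch of the ksize()-style fast path.
 * malloc_usable_size() plays the role of ksize(); my_krealloc is a
 * hypothetical helper, not kernel code.
 */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *my_krealloc(void *p, size_t new_size)
{
	size_t ks;
	void *ret;

	if (!p)
		return malloc(new_size);
	if (!new_size) {
		free(p);
		return NULL;
	}

	/* If the current allocation already fits, reuse it in place. */
	ks = malloc_usable_size(p);
	if (ks >= new_size)
		return p;

	/* Otherwise allocate a bigger block and copy the old contents. */
	ret = malloc(new_size);
	if (ret) {
		memcpy(ret, p, ks < new_size ? ks : new_size);
		free(p);
	}
	return ret;
}

int main(void)
{
	char *buf = malloc(30);

	if (!buf)
		return 1;
	/* 30 bytes is rounded up to a size class, so growing to 40 bytes
	 * will often be satisfied without allocating or copying. */
	printf("usable size: %zu\n", malloc_usable_size(buf));
	buf = my_krealloc(buf, 40);
	free(buf);
	return 0;
}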
mm/slub.c

index 1832ae1ea5366ed62a5ad80c8b667633f6472d06..5d425d7116e81bba42facc8b4b781f481f31a7e4 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2199,9 +2199,8 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  */
 void *krealloc(const void *p, size_t new_size, gfp_t flags)
 {
-       struct kmem_cache *new_cache;
        void *ret;
-       struct page *page;
+       size_t ks;
 
        if (unlikely(!p))
                return kmalloc(new_size, flags);
@@ -2211,19 +2210,13 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
                return NULL;
        }
 
-       page = virt_to_head_page(p);
-
-       new_cache = get_slab(new_size, flags);
-
-       /*
-        * If new size fits in the current cache, bail out.
-        */
-       if (likely(page->slab == new_cache))
+       ks = ksize(p);
+       if (ks >= new_size)
                return (void *)p;
 
        ret = kmalloc(new_size, flags);
        if (ret) {
-               memcpy(ret, p, min(new_size, ksize(p)));
+               memcpy(ret, p, min(new_size, ks));
                kfree(p);
        }
        return ret;