[POWERPC] rheap - eliminates internal fragments caused by alignment
author    Li Yang <leoli@freescale.com>
          Mon, 18 Jun 2007 11:29:21 +0000 (19:29 +0800)
committer Kumar Gala <galak@kernel.crashing.org>
          Wed, 20 Jun 2007 03:35:53 +0000 (22:35 -0500)
The patch returns the fragments created by rh_alloc_align()'s alignment to the
free list, instead of allocating the whole padded chunk of memory.  This
greatly improves the utilization of memory managed by rheap.

It solves the problem of running out of MURAM when 3 UCCs are enabled on the MPC8323.

Signed-off-by: Li Yang <leoli@freescale.com>
Acked-by: Joakim Tjernlund <joakim.tjernlund@transmode.se>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
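
The heart of the change in the diff below: instead of padding the request by
(alignment - 1) and handing out the whole enlarged chunk, rh_alloc_align() now
aligns the start inside a suitable free block and gives the leading and
trailing remainders back to the free list.  The following is a minimal
standalone sketch of that split; the struct free_block type and the
alloc_aligned() helper are illustrative only, not the kernel's rheap interface.

#include <stdio.h>

struct free_block {
	unsigned long start;
	unsigned long size;
};

/*
 * Carve an aligned region out of 'blk': the leading fragment (if any)
 * is reported via 'lead', and 'blk' shrinks to the trailing fragment.
 * Returns the aligned start, or (unsigned long)-1 if it does not fit.
 */
static unsigned long alloc_aligned(struct free_block *blk,
				   unsigned long size, unsigned long alignment,
				   struct free_block *lead)
{
	unsigned long start = (blk->start + alignment - 1) & ~(alignment - 1);

	if (start + size > blk->start + blk->size)
		return (unsigned long)-1;

	lead->start = blk->start;
	lead->size = start - blk->start;	/* 0 if already aligned */

	/* Trailing fragment stays in 'blk' (may end up with size 0). */
	blk->size -= lead->size + size;
	blk->start = start + size;

	return start;
}

int main(void)
{
	struct free_block blk = { .start = 0x104, .size = 0x200 };
	struct free_block lead;
	unsigned long start = alloc_aligned(&blk, 0x80, 0x100, &lead);

	/* Expect start=0x200, lead=0x104+0xfc, tail=0x280+0x84:
	 * both fragments remain available for later allocations. */
	printf("start=%#lx lead=%#lx+%#lx tail=%#lx+%#lx\n",
	       start, lead.start, lead.size, blk.start, blk.size);
	return 0;
}
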
arch/powerpc/lib/rheap.c

index 180ee2933ab96ffacd076c1e0ae67aac57e002c1..2f24ea0d723afdb4f4b96916f72bf33d2d91639f 100644 (file)
@@ -437,27 +437,26 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch
        struct list_head *l;
        rh_block_t *blk;
        rh_block_t *newblk;
-       unsigned long start;
+       unsigned long start, sp_size;
 
        /* Validate size, and alignment must be power of two */
        if (size <= 0 || (alignment & (alignment - 1)) != 0)
                return (unsigned long) -EINVAL;
 
-       /* given alignment larger that default rheap alignment */
-       if (alignment > info->alignment)
-               size += alignment - 1;
-
        /* Align to configured alignment */
        size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
 
-       if (assure_empty(info, 1) < 0)
+       if (assure_empty(info, 2) < 0)
                return (unsigned long) -ENOMEM;
 
        blk = NULL;
        list_for_each(l, &info->free_list) {
                blk = list_entry(l, rh_block_t, list);
-               if (size <= blk->size)
-                       break;
+               if (size <= blk->size) {
+                       start = (blk->start + alignment - 1) & ~(alignment - 1);
+                       if (start + size <= blk->start + blk->size)
+                               break;
+               }
                blk = NULL;
        }
 
@@ -470,25 +469,36 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch
                list_del(&blk->list);
                newblk = blk;
        } else {
+               /* Fragment caused, split if needed */
+               /* Create block for fragment in the beginning */
+               sp_size = start - blk->start;
+               if (sp_size) {
+                       rh_block_t *spblk;
+
+                       spblk = get_slot(info);
+                       spblk->start = blk->start;
+                       spblk->size = sp_size;
+                       /* add before the blk */
+                       list_add(&spblk->list, blk->list.prev);
+               }
                newblk = get_slot(info);
-               newblk->start = blk->start;
+               newblk->start = start;
                newblk->size = size;
 
-               /* blk still in free list, with updated start, size */
-               blk->start += size;
-               blk->size -= size;
+               /* blk still in free list, with updated start and size
+                * for fragment in the end */
+               blk->start = start + size;
+               blk->size -= sp_size + size;
+               /* No fragment in the end, remove blk */
+               if (blk->size == 0) {
+                       list_del(&blk->list);
+                       release_slot(info, blk);
+               }
        }
 
        newblk->owner = owner;
-       start = newblk->start;
        attach_taken_block(info, newblk);
 
-       /* for larger alignment return fixed up pointer  */
-       /* this is no problem with the deallocator since */
-       /* we scan for pointers that lie in the blocks   */
-       if (alignment > info->alignment)
-               start = (start + alignment - 1) & ~(alignment - 1);
-
        return start;
 }