[PATCH] optimize follow_hugetlb_page
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 27fad5d9bcf648b27564465b35c3cd72969257fa..06699d871a8e568325361f5935ea72dacbd4f635 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -88,6 +88,17 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
        return page;
 }
 
+static void free_huge_page(struct page *page)
+{
+       BUG_ON(page_count(page));
+
+       INIT_LIST_HEAD(&page->lru);
+
+       spin_lock(&hugetlb_lock);
+       enqueue_huge_page(page);
+       spin_unlock(&hugetlb_lock);
+}
+
 static int alloc_fresh_huge_page(void)
 {
        static int nid = 0;
@@ -107,18 +118,8 @@ static int alloc_fresh_huge_page(void)
        return 0;
 }
 
-void free_huge_page(struct page *page)
-{
-       BUG_ON(page_count(page));
-
-       INIT_LIST_HEAD(&page->lru);
-
-       spin_lock(&hugetlb_lock);
-       enqueue_huge_page(page);
-       spin_unlock(&hugetlb_lock);
-}
-
-struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
+static struct page *alloc_huge_page(struct vm_area_struct *vma,
+                                   unsigned long addr)
 {
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct page *page;
@@ -660,10 +661,10 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
 {
-       unsigned long vpfn, vaddr = *position;
+       unsigned long pfn_offset;
+       unsigned long vaddr = *position;
        int remainder = *length;
 
-       vpfn = vaddr/PAGE_SIZE;
        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
@@ -691,19 +692,28 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        break;
                }
 
-               if (pages) {
-                       page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
-                       get_page(page);
-                       pages[i] = page;
-               }
+               pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
+               page = pte_page(*pte);
+same_page:
+               get_page(page);
+               if (pages)
+                       pages[i] = page + pfn_offset;
 
                if (vmas)
                        vmas[i] = vma;
 
                vaddr += PAGE_SIZE;
-               ++vpfn;
+               ++pfn_offset;
                --remainder;
                ++i;
+               if (vaddr < vma->vm_end && remainder &&
+                               pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
+                       /*
+                        * We use pfn_offset to avoid touching the pageframes
+                        * of this compound page.
+                        */
+                       goto same_page;
+               }
        }
        spin_unlock(&mm->page_table_lock);
        *length = remainder;
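
The key change in the final hunk is that follow_hugetlb_page() now resolves the huge PTE once per huge page and then jumps back to the same_page label for the remaining 4 KiB slots, using pfn_offset to index into the compound page and calling get_page() on the head page returned by pte_page() rather than on each subpage. Below is a minimal userspace sketch of that iteration pattern, assuming x86-style 4 KiB base pages and 2 MiB huge pages; the toy struct page, lookup_head_page() and the refcount field are illustrative stand-ins, not kernel interfaces.

/*
 * Hypothetical sketch of the iteration pattern in the patched
 * follow_hugetlb_page(): one huge-PTE lookup per huge page, with
 * pfn_offset selecting the small-page slot inside the compound page.
 * PAGE_SHIFT/HPAGE_SHIFT assume x86 4 KiB / 2 MiB pages; struct page,
 * lookup_head_page() and refcount are stand-ins, not kernel APIs.
 */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define HPAGE_SHIFT  21
#define HPAGE_SIZE   (1UL << HPAGE_SHIFT)
#define HPAGE_MASK   (~(HPAGE_SIZE - 1))

struct page { unsigned long refcount; };   /* toy head-page descriptor */

static unsigned long pte_lookups;          /* counts simulated page-table walks */

/* Stand-in for huge_pte_offset() + pte_page(): one "walk" per call. */
static struct page *lookup_head_page(struct page *pool, unsigned long vaddr)
{
	pte_lookups++;
	return &pool[vaddr >> HPAGE_SHIFT];
}

int main(void)
{
	struct page pool[4] = { { 0 } };        /* one head page per 2 MiB region */
	unsigned long vaddr = 3 * PAGE_SIZE;    /* start partway into huge page 0 */
	unsigned long end = 2 * HPAGE_SIZE;     /* walk across two huge pages */
	int remainder = (int)((end - vaddr) >> PAGE_SHIFT);

	while (vaddr < end && remainder) {
		struct page *page = lookup_head_page(pool, vaddr);
		unsigned long pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;

same_page:
		page->refcount++;               /* reference taken on the head page only */

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		/* Still inside the same compound page: no new lookup needed. */
		if (vaddr < end && remainder && pfn_offset < HPAGE_SIZE / PAGE_SIZE)
			goto same_page;
	}

	printf("small pages referenced: %lu + %lu, huge-PTE lookups: %lu\n",
	       pool[0].refcount, pool[1].refcount, pte_lookups);
	return 0;
}

With the toy constants above this reports 509 + 512 small-page references taken against only two simulated huge-PTE lookups, which is the effect of the same_page loop: the tail pages' pageframes are never touched and the page tables are not re-walked for every 4 KiB step.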