diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 20e04c64468dd9b42b32d9af70a4386bd8336786..74c1b6b0b37b82dce75e06533c989ab73001afeb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -71,7 +71,25 @@ static void enqueue_huge_page(struct page *page)
        free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+static struct page *dequeue_huge_page(void)
+{
+       int nid;
+       struct page *page = NULL;
+
+       for (nid = 0; nid < MAX_NUMNODES; ++nid) {
+               if (!list_empty(&hugepage_freelists[nid])) {
+                       page = list_entry(hugepage_freelists[nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       free_huge_pages--;
+                       free_huge_pages_node[nid]--;
+                       break;
+               }
+       }
+       return page;
+}
+
+static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
                                unsigned long address)
 {
        int nid;
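
[Note on the hunk above: dequeue_huge_page() becomes the policy-free variant that simply takes the first free huge page from any node, while the old VMA/mempolicy-aware behaviour keeps living under the new name dequeue_huge_page_vma(), with callers updated in the hunks below. The new helper is the exact inverse of enqueue_huge_page(), whose tail is visible at the top of this hunk; a paraphrase of that helper from the same file and era follows, as an approximation rather than a quote:]

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);

	/* put the page on its node's free list and bump the counters */
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}
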
@@ -268,6 +286,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 
        spin_lock(&hugetlb_lock);
        if (page) {
+               /*
+                * This page is now managed by the hugetlb allocator and has
+                * no users -- drop the buddy allocator's reference.
+                */
+               put_page_testzero(page);
+               VM_BUG_ON(page_count(page));
                nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
                /*
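
[Note on the hunk above: a fresh compound page from the buddy allocator arrives with a reference count of 1. Once the page is handed to the hugetlb pool it has no users, so that reference is dropped immediately with put_page_testzero(), and VM_BUG_ON(page_count(page)) documents the invariant that the count is now zero. For reference, put_page_testzero() was roughly the following at the time (paraphrased from include/linux/mm.h; treat it as an approximation):]

static inline int put_page_testzero(struct page *page)
{
	/* dropping the last reference of an already-free page is a bug */
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}
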
@@ -351,13 +375,14 @@ free:
                        enqueue_huge_page(page);
                else {
                        /*
-                        * Decrement the refcount and free the page using its
-                        * destructor.  This must be done with hugetlb_lock
+                        * The page has a reference count of zero already, so
+                        * call free_huge_page directly instead of using
+                        * put_page.  This must be done with hugetlb_lock
                         * unlocked which is safe because free_huge_page takes
                         * hugetlb_lock before deciding how to free the page.
                         */
                        spin_unlock(&hugetlb_lock);
-                       put_page(page);
+                       free_huge_page(page);
                        spin_lock(&hugetlb_lock);
                }
        }
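
[Note on the hunk above: because alloc_buddy_huge_page() now hands over surplus pages with a reference count of zero, put_page() is no longer a valid way to dispose of one. put_page() on a compound page goes through put_compound_page(), which drops a reference before invoking the destructor, and at count zero that drop would underflow (or trip the VM_BUG_ON in put_page_testzero()); hence the compound destructor, free_huge_page(), is now called directly. A paraphrase of the generic path from mm/swap.c of the same era, as an approximation, not a quote:]

static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {	/* underflows if the count is already 0 */
		compound_page_dtor *dtor = get_compound_page_dtor(page);

		(*dtor)(page);		/* free_huge_page() for hugetlb pages */
	}
}
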
@@ -410,7 +435,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
        struct page *page;
 
        spin_lock(&hugetlb_lock);
-       page = dequeue_huge_page(vma, addr);
+       page = dequeue_huge_page_vma(vma, addr);
        spin_unlock(&hugetlb_lock);
        return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
@@ -425,7 +450,7 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
 
        spin_lock(&hugetlb_lock);
        if (free_huge_pages > resv_huge_pages)
-               page = dequeue_huge_page(vma, addr);
+               page = dequeue_huge_page_vma(vma, addr);
        spin_unlock(&hugetlb_lock);
        if (!page) {
                page = alloc_buddy_huge_page(vma, addr);
@@ -578,7 +603,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
        min_count = max(count, min_count);
        try_to_free_low(min_count);
        while (min_count < persistent_huge_pages) {
-               struct page *page = dequeue_huge_page(NULL, 0);
+               struct page *page = dequeue_huge_page();
                if (!page)
                        break;
                update_and_free_page(page);