SLUB: direct pass through of page size or higher kmalloc requests
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 15fc7b00077287665fc5e65d2d41ac87c4fffc47..eab8c428cc932028e202b4a8b8d547642fd10fa6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
        might_sleep();
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
                cond_resched();
-               clear_user_highpage(page + i, addr);
+               clear_user_highpage(page + i, addr + i * PAGE_SIZE);
        }
 }
 
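The hunk above makes clear_huge_page() hand each subpage its own user virtual address instead of the huge page's base address; on architectures whose clear_user_highpage() looks at the virtual address for cache handling, reusing the base address for every subpage is wrong. A minimal userspace sketch of just the addressing arithmetic, with illustrative sizes and names that are not the kernel's:

#include <stdio.h>

#define SKETCH_PAGE_SIZE   4096UL        /* assumed 4 KiB base page */
#define SKETCH_HPAGE_SIZE  (2UL << 20)   /* assumed 2 MiB huge page */

/* Stand-in for clear_user_highpage(): the per-subpage address matters. */
static void clear_subpage(unsigned long uaddr)
{
        printf("clearing subpage mapped at %#lx\n", uaddr);
}

int main(void)
{
        unsigned long addr = 0x40000000UL;   /* hypothetical mapping start */

        for (unsigned long i = 0; i < SKETCH_HPAGE_SIZE / SKETCH_PAGE_SIZE; i++)
                clear_subpage(addr + i * SKETCH_PAGE_SIZE);  /* not just addr */
        return 0;
}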
@@ -71,8 +71,9 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 {
        int nid;
        struct page *page = NULL;
+       struct mempolicy *mpol;
        struct zonelist *zonelist = huge_zonelist(vma, address,
-                                               htlb_alloc_mask);
+                                       htlb_alloc_mask, &mpol);
        struct zone **z;
 
        for (z = zonelist->zones; *z; z++) {
@@ -84,8 +85,10 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                        list_del(&page->lru);
                        free_huge_pages--;
                        free_huge_pages_node[nid]--;
+                       break;
                }
        }
+       mpol_free(mpol);        /* unref if mpol !NULL */
        return page;
 }
 
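The two hunks above change dequeue_huge_page() in two ways: huge_zonelist() now also hands back a reference-counted mempolicy through &mpol, which the caller must drop with mpol_free() once the zonelist is no longer needed, and the scan now breaks out as soon as a page has been found. A rough userspace model of that take-a-reference, use, then unref pattern; the types and helpers here are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct sketch_policy { int refcnt; };

/* Stand-in for huge_zonelist(..., &mpol): picks a zonelist and gives the
 * caller one reference on the policy object that produced it. */
static int pick_zonelist(struct sketch_policy **mpolp)
{
        struct sketch_policy *p = calloc(1, sizeof(*p));
        p->refcnt = 1;
        *mpolp = p;
        return 0;                    /* pretend zonelist id */
}

/* Stand-in for mpol_free(): drop the reference, free at zero. */
static void policy_put(struct sketch_policy *p)
{
        if (p && --p->refcnt == 0)
                free(p);
}

int main(void)
{
        struct sketch_policy *mpol;
        int zl = pick_zonelist(&mpol);
        int page = -1;

        for (int node = 0; node < 4; node++) {
                if (node == 2) {     /* pretend node 2 has a free huge page */
                        page = node;
                        break;       /* stop scanning once we have one */
                }
        }
        policy_put(mpol);            /* always unref before returning */
        printf("zonelist %d gave a page from node %d\n", zl, page);
        return 0;
}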
@@ -104,15 +107,19 @@ static int alloc_fresh_huge_page(void)
 {
        static int prev_nid;
        struct page *page;
-       static DEFINE_SPINLOCK(nid_lock);
        int nid;
 
-       spin_lock(&nid_lock);
+       /*
+        * Copy static prev_nid to local nid, work on that, then copy it
+        * back to prev_nid afterwards: otherwise there's a window in which
+        * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
+        * But we don't need to use a spin_lock here: it really doesn't
+        * matter if occasionally a racer chooses the same nid as we do.
+        */
        nid = next_node(prev_nid, node_online_map);
        if (nid == MAX_NUMNODES)
                nid = first_node(node_online_map);
        prev_nid = nid;
-       spin_unlock(&nid_lock);
 
        page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
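The comment added above explains why the spinlock can go: the static prev_nid is copied into a local, advanced and wrapped while still local, and only the final, in-range value is written back, so a racing caller can never read MAX_NUMNODES; at worst two callers pick the same node, which is harmless here. A small userspace sketch of that copy-local-then-publish round robin, with a made-up node count and names:

#include <stdio.h>

#define SKETCH_NR_NODES 4    /* assumed number of online NUMA nodes */

static int next_alloc_node(void)
{
        static int prev_nid;
        int nid;

        nid = prev_nid + 1;          /* work on a local copy ...      */
        if (nid >= SKETCH_NR_NODES)
                nid = 0;             /* ... wrap it while still local */
        prev_nid = nid;              /* publish only a valid value    */
        return nid;
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                printf("alloc_fresh_huge_page() would try node %d\n",
                       next_alloc_node());
        return 0;
}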
@@ -204,7 +211,7 @@ static void update_and_free_page(struct page *page)
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1<< PG_writeback);
        }
-       page[1].lru.next = NULL;
+       set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
 }
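The hunk above drops the open-coded write to page[1].lru.next in favour of set_compound_page_dtor(page, NULL), the helper that records a compound page's destructor in its first tail page; the behaviour is the same, the hook is simply cleared through the proper accessor before __free_pages(). A toy userspace model of stashing and clearing such a destructor hook; the struct and helper names are illustrative only:

#include <stdio.h>

typedef void (*compound_dtor_t)(void *page);

struct sketch_page {
        compound_dtor_t dtor;   /* the kernel keeps this in the first tail page */
};

static void set_compound_dtor(struct sketch_page *p, compound_dtor_t d)
{
        p->dtor = d;
}

static void free_huge_page_dtor(void *page)
{
        (void)page;             /* would hand the huge page back to its pool */
}

int main(void)
{
        struct sketch_page page = { 0 };

        set_compound_dtor(&page, free_huge_page_dtor);  /* when the huge page is set up   */
        set_compound_dtor(&page, NULL);                 /* before freeing the pages again */
        printf("destructor %s\n", page.dtor ? "still set" : "cleared");
        return 0;
}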
@@ -638,7 +645,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, 0);
                        spin_lock(&mm->page_table_lock);
-                       if (!(ret & VM_FAULT_MAJOR))
+                       if (!(ret & VM_FAULT_ERROR))
                                continue;
 
                        remainder = 0;
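The last hunk changes the retry test in follow_hugetlb_page(): after hugetlb_fault() returns, the loop now keeps going unless the result carries an error bit (VM_FAULT_ERROR), instead of giving up whenever the fault merely happened to be a major one. A tiny sketch of testing such a bitmask result; the flag values below are invented and only the test itself matters:

#include <stdio.h>

/* Invented flag values - the point is the bitmask test, not the numbers. */
#define SKETCH_FAULT_MAJOR  0x01   /* fault had to block or do I/O */
#define SKETCH_FAULT_OOM    0x02
#define SKETCH_FAULT_SIGBUS 0x04
#define SKETCH_FAULT_ERROR  (SKETCH_FAULT_OOM | SKETCH_FAULT_SIGBUS)

static const char *verdict(int ret)
{
        return (ret & SKETCH_FAULT_ERROR) ? "give up" : "retry the page";
}

int main(void)
{
        printf("major fault: %s\n", verdict(SKETCH_FAULT_MAJOR));
        printf("oom:         %s\n", verdict(SKETCH_FAULT_OOM));
        printf("sigbus:      %s\n", verdict(SKETCH_FAULT_SIGBUS));
        return 0;
}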