diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index efd78527ad1ee62159d8955069756fdaddf893a1..8b809ecefa39e4b54f4bfbed8830009a9e260e96 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -32,6 +32,7 @@ static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
 int hugetlb_dynamic_pool;
+static int hugetlb_next_nid;
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
@@ -165,36 +166,56 @@ static int adjust_pool_surplus(int delta)
        return ret;
 }
 
-static int alloc_fresh_huge_page(void)
+static struct page *alloc_fresh_huge_page_node(int nid)
 {
-       static int prev_nid;
        struct page *page;
-       int nid;
 
-       /*
-        * Copy static prev_nid to local nid, work on that, then copy it
-        * back to prev_nid afterwards: otherwise there's a window in which
-        * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
-        * But we don't need to use a spin_lock here: it really doesn't
-        * matter if occasionally a racer chooses the same nid as we do.
-        */
-       nid = next_node(prev_nid, node_online_map);
-       if (nid == MAX_NUMNODES)
-               nid = first_node(node_online_map);
-       prev_nid = nid;
-
-       page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
-                                       HUGETLB_PAGE_ORDER);
+       page = alloc_pages_node(nid,
+               htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
+               HUGETLB_PAGE_ORDER);
        if (page) {
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
-               nr_huge_pages_node[page_to_nid(page)]++;
+               nr_huge_pages_node[nid]++;
                spin_unlock(&hugetlb_lock);
                put_page(page); /* free it into the hugepage allocator */
-               return 1;
        }
-       return 0;
+
+       return page;
+}
+
+static int alloc_fresh_huge_page(void)
+{
+       struct page *page;
+       int start_nid;
+       int next_nid;
+       int ret = 0;
+
+       start_nid = hugetlb_next_nid;
+
+       do {
+               page = alloc_fresh_huge_page_node(hugetlb_next_nid);
+               if (page)
+                       ret = 1;
+               /*
+                * Use a helper variable to find the next node and then
+                * copy it back to hugetlb_next_nid afterwards:
+                * otherwise there's a window in which a racer might
+                * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+                * But we don't need to use a spin_lock here: it really
+                * doesn't matter if occasionally a racer chooses the
+                * same nid as we do.  Move nid forward in the mask even
+                * if we just successfully allocated a hugepage so that
+                * the next caller gets hugepages on the next node.
+                */
+               next_nid = next_node(hugetlb_next_nid, node_online_map);
+               if (next_nid == MAX_NUMNODES)
+                       next_nid = first_node(node_online_map);
+               hugetlb_next_nid = next_nid;
+       } while (!page && hugetlb_next_nid != start_nid);
+
+       return ret;
 }
 
 static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
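
Note: the round-robin interleave that alloc_fresh_huge_page() implements above can be
illustrated with a small standalone sketch. Everything in it (MAX_NODES, online_mask,
next_node_sketch(), first_node_sketch()) is made up for the example and only mimics
node_online_map, next_node() and first_node(); it is not kernel API.

/* Standalone C sketch of the hugetlb_next_nid round-robin, using a
 * plain bitmask in place of node_online_map. */
#include <stdio.h>

#define MAX_NODES 8                     /* stand-in for MAX_NUMNODES */
static unsigned int online_mask = 0x0b; /* nodes 0, 1 and 3 "online" */
static int next_nid;                    /* stand-in for hugetlb_next_nid */

/* First online node strictly after nid, or MAX_NODES if there is none. */
static int next_node_sketch(int nid)
{
	for (nid = nid + 1; nid < MAX_NODES; nid++)
		if (online_mask & (1u << nid))
			return nid;
	return MAX_NODES;
}

static int first_node_sketch(void)
{
	return next_node_sketch(-1);
}

int main(void)
{
	int i, nid;

	next_nid = first_node_sketch();
	for (i = 0; i < 8; i++) {
		/* "Allocate" on next_nid, then advance the cursor exactly as
		 * the patch does, wrapping at the end of the mask, so that
		 * successive calls spread allocations over nodes 0, 1, 3, ... */
		printf("allocate huge page on node %d\n", next_nid);
		nid = next_node_sketch(next_nid);
		if (nid == MAX_NODES)
			nid = first_node_sketch();
		next_nid = nid;
	}
	return 0;
}
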
@@ -281,8 +302,17 @@ free:
                list_del(&page->lru);
                if ((--needed) >= 0)
                        enqueue_huge_page(page);
-               else
-                       update_and_free_page(page);
+               else {
+                       /*
+                        * Decrement the refcount and free the page using its
+                        * destructor.  This must be done with hugetlb_lock
+                        * unlocked which is safe because free_huge_page takes
+                        * hugetlb_lock before deciding how to free the page.
+                        */
+                       spin_unlock(&hugetlb_lock);
+                       put_page(page);
+                       spin_lock(&hugetlb_lock);
+               }
        }
 
        return ret;
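
Note: the unlock/put_page/lock sequence above is needed because free_huge_page(), the
compound-page destructor that put_page() may invoke, takes hugetlb_lock itself; calling
it with the lock held would deadlock on the non-recursive spinlock. A minimal userspace
sketch of the same pattern, using a pthread mutex purely as an analogy for the spinlock
(pool_lock, pool_size and destructor() are invented names, not kernel code):

/* Sketch: a destructor that takes the same lock as its caller must only
 * be invoked after the caller has dropped that lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_size;

/* Stand-in for free_huge_page(): decides how to free under pool_lock. */
static void destructor(void)
{
	pthread_mutex_lock(&pool_lock);
	pool_size--;
	printf("freed, pool_size=%d\n", pool_size);
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	pthread_mutex_lock(&pool_lock);
	pool_size = 1;
	/*
	 * As in the patch: drop the lock, let the "destructor" take it
	 * itself, then re-acquire before continuing to walk the list.
	 * Calling destructor() while still holding pool_lock would
	 * deadlock, since the mutex (like a spinlock) is not recursive.
	 */
	pthread_mutex_unlock(&pool_lock);
	destructor();
	pthread_mutex_lock(&pool_lock);
	pthread_mutex_unlock(&pool_lock);
	return 0;
}
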
@@ -365,6 +395,8 @@ static int __init hugetlb_init(void)
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);
 
+       hugetlb_next_nid = first_node(node_online_map);
+
        for (i = 0; i < max_huge_pages; ++i) {
                if (!alloc_fresh_huge_page())
                        break;
@@ -403,14 +435,14 @@ static void try_to_free_low(unsigned long count)
        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
+                       if (count >= nr_huge_pages)
+                               return;
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(page);
                        free_huge_pages--;
                        free_huge_pages_node[page_to_nid(page)]--;
-                       if (count >= nr_huge_pages)
-                               return;
                }
        }
 }
@@ -450,8 +482,6 @@ static unsigned long set_max_huge_pages(unsigned long count)
                        goto out;
 
        }
-       if (count >= persistent_huge_pages)
-               goto out;
 
        /*
         * Decrease the pool size
@@ -460,7 +490,8 @@ static unsigned long set_max_huge_pages(unsigned long count)
         * pages into surplus state as needed so the pool will shrink
         * to the desired size as pages become free.
         */
-       min_count = max(count, resv_huge_pages);
+       min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
+       min_count = max(count, min_count);
        try_to_free_low(min_count);
        while (min_count < persistent_huge_pages) {
                struct page *page = dequeue_huge_page(NULL, 0);
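
Note: the new lower bound keeps every huge page that is either reserved or currently
handed out to a mapping: nr_huge_pages - free_huge_pages is the number of in-use pages,
so min_count = resv_huge_pages + (nr_huge_pages - free_huge_pages), and never below the
requested count. With illustrative numbers, say nr_huge_pages = 10, free_huge_pages = 4
and resv_huge_pages = 2, the pool may shrink to at most max(count, 2 + 10 - 4) =
max(count, 8) pages, whereas the old max(count, resv_huge_pages) bound could ask for a
size below the 6 pages already in use.
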
@@ -986,10 +1017,10 @@ static long region_chg(struct list_head *head, long f, long t)
 
        /* If we are below the current region then a new region is required.
         * Subtle, allocate a new region at the position but make it zero
-        * size such that we can guarentee to record the reservation. */
+        * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
-               if (nrg == 0)
+               if (!nrg)
                        return -ENOMEM;
                nrg->from = f;
                nrg->to   = f;