Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8768e5250323fddff14c38ee2734e0dc90b31cfa..8b809ecefa39e4b54f4bfbed8830009a9e260e96 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -31,6 +31,8 @@ static unsigned int free_huge_pages_node[MAX_NUMNODES];
 static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
+int hugetlb_dynamic_pool;
+static int hugetlb_next_nid;
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
@@ -87,6 +89,8 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                        list_del(&page->lru);
                        free_huge_pages--;
                        free_huge_pages_node[nid]--;
+                       if (vma && vma->vm_flags & VM_MAYSHARE)
+                               resv_huge_pages--;
                        break;
                }
        }
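
The hunk above moves reservation accounting into dequeue_huge_page(): a shared (VM_MAYSHARE) mapping now consumes its reserved page at the moment one is actually dequeued, instead of alloc_huge_page() decrementing resv_huge_pages up front and undoing it on failure. A minimal userspace sketch of that accounting pattern, with plain counters standing in for the kernel's free_huge_pages/resv_huge_pages globals (illustration only, not the kernel code):

#include <stdio.h>
#include <stdbool.h>

static int free_pages = 4;	/* stands in for free_huge_pages */
static int resv_pages = 2;	/* stands in for resv_huge_pages */

/* Dequeue one page; a shared mapping consumes its reservation here. */
static bool dequeue_page(bool shared)
{
	if (free_pages == 0)
		return false;
	free_pages--;
	if (shared)
		resv_pages--;	/* reservation is spent only on success */
	return true;
}

int main(void)
{
	dequeue_page(true);
	printf("free=%d resv=%d\n", free_pages, resv_pages);	/* free=3 resv=1 */
	return 0;
}
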
@@ -162,36 +166,56 @@ static int adjust_pool_surplus(int delta)
        return ret;
 }
 
-static int alloc_fresh_huge_page(void)
+static struct page *alloc_fresh_huge_page_node(int nid)
 {
-       static int prev_nid;
        struct page *page;
-       int nid;
-
-       /*
-        * Copy static prev_nid to local nid, work on that, then copy it
-        * back to prev_nid afterwards: otherwise there's a window in which
-        * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
-        * But we don't need to use a spin_lock here: it really doesn't
-        * matter if occasionally a racer chooses the same nid as we do.
-        */
-       nid = next_node(prev_nid, node_online_map);
-       if (nid == MAX_NUMNODES)
-               nid = first_node(node_online_map);
-       prev_nid = nid;
 
-       page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
-                                       HUGETLB_PAGE_ORDER);
+       page = alloc_pages_node(nid,
+               htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
+               HUGETLB_PAGE_ORDER);
        if (page) {
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
-               nr_huge_pages_node[page_to_nid(page)]++;
+               nr_huge_pages_node[nid]++;
                spin_unlock(&hugetlb_lock);
                put_page(page); /* free it into the hugepage allocator */
-               return 1;
        }
-       return 0;
+
+       return page;
+}
+
+static int alloc_fresh_huge_page(void)
+{
+       struct page *page;
+       int start_nid;
+       int next_nid;
+       int ret = 0;
+
+       start_nid = hugetlb_next_nid;
+
+       do {
+               page = alloc_fresh_huge_page_node(hugetlb_next_nid);
+               if (page)
+                       ret = 1;
+               /*
+                * Use a helper variable to find the next node and then
+                * copy it back to hugetlb_next_nid afterwards:
+                * otherwise there's a window in which a racer might
+                * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+                * But we don't need to use a spin_lock here: it really
+                * doesn't matter if occasionally a racer chooses the
+                * same nid as we do.  Move nid forward in the mask even
+                * if we just successfully allocated a hugepage so that
+                * the next caller gets hugepages on the next node.
+                */
+               next_nid = next_node(hugetlb_next_nid, node_online_map);
+               if (next_nid == MAX_NUMNODES)
+                       next_nid = first_node(node_online_map);
+               hugetlb_next_nid = next_nid;
+       } while (!page && hugetlb_next_nid != start_nid);
+
+       return ret;
 }
 
 static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
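
With this rewrite, alloc_fresh_huge_page() walks the online nodes round-robin: hugetlb_next_nid advances even after a successful allocation so that successive callers spread huge pages across nodes, and the do/while gives up once every node has been visited. A hedged userspace sketch of the same advance-even-on-success loop, using a plain bitmask in place of node_online_map (try_alloc_on() is a made-up stand-in for alloc_pages_node()):

#include <stdio.h>
#include <stdbool.h>

#define MAX_NODES 8
static unsigned online_mask = 0x0b;	/* nodes 0, 1 and 3 "online" */
static int next_nid;			/* stands in for hugetlb_next_nid */

static int next_online(int nid)
{
	do {
		nid = (nid + 1) % MAX_NODES;	/* wrap like first_node() */
	} while (!(online_mask & (1u << nid)));
	return nid;
}

static bool try_alloc_on(int nid)	/* pretend only node 3 has memory */
{
	return nid == 3;
}

static bool alloc_round_robin(void)
{
	int start = next_nid;
	bool ok = false;

	do {
		if (try_alloc_on(next_nid))
			ok = true;
		/* advance even on success so the next caller moves on */
		next_nid = next_online(next_nid);
	} while (!ok && next_nid != start);

	return ok;
}

int main(void)
{
	printf("allocated: %d, next_nid now %d\n", alloc_round_robin(), next_nid);
	return 0;
}
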
@@ -199,6 +223,10 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 {
        struct page *page;
 
+       /* Check if the dynamic pool is enabled */
+       if (!hugetlb_dynamic_pool)
+               return NULL;
+
        page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
        if (page) {
@@ -214,15 +242,125 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
        return page;
 }
 
+/*
+ * Increase the hugetlb pool such that it can accommodate a reservation
+ * of size 'delta'.
+ */
+static int gather_surplus_pages(int delta)
+{
+       struct list_head surplus_list;
+       struct page *page, *tmp;
+       int ret, i;
+       int needed, allocated;
+
+       needed = (resv_huge_pages + delta) - free_huge_pages;
+       if (needed <= 0)
+               return 0;
+
+       allocated = 0;
+       INIT_LIST_HEAD(&surplus_list);
+
+       ret = -ENOMEM;
+retry:
+       spin_unlock(&hugetlb_lock);
+       for (i = 0; i < needed; i++) {
+               page = alloc_buddy_huge_page(NULL, 0);
+               if (!page) {
+                       /*
+                        * We were not able to allocate enough pages to
+                        * satisfy the entire reservation so we free what
+                        * we've allocated so far.
+                        */
+                       spin_lock(&hugetlb_lock);
+                       needed = 0;
+                       goto free;
+               }
+
+               list_add(&page->lru, &surplus_list);
+       }
+       allocated += needed;
+
+       /*
+        * After retaking hugetlb_lock, we need to recalculate 'needed'
+        * because either resv_huge_pages or free_huge_pages may have changed.
+        */
+       spin_lock(&hugetlb_lock);
+       needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
+       if (needed > 0)
+               goto retry;
+
+       /*
+        * The surplus_list now contains _at_least_ the number of extra pages
+        * needed to accommodate the reservation.  Add the appropriate number
+        * of pages to the hugetlb pool and free the extras back to the buddy
+        * allocator.
+        */
+       needed += allocated;
+       ret = 0;
+free:
+       list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+               list_del(&page->lru);
+               if ((--needed) >= 0)
+                       enqueue_huge_page(page);
+               else {
+                       /*
+                        * Decrement the refcount and free the page using its
+                        * destructor.  This must be done with hugetlb_lock
+                        * unlocked which is safe because free_huge_page takes
+                        * hugetlb_lock before deciding how to free the page.
+                        */
+                       spin_unlock(&hugetlb_lock);
+                       put_page(page);
+                       spin_lock(&hugetlb_lock);
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * When releasing a hugetlb pool reservation, any surplus pages that were
+ * allocated to satisfy the reservation must be explicitly freed if they were
+ * never used.
+ */
+void return_unused_surplus_pages(unsigned long unused_resv_pages)
+{
+       static int nid = -1;
+       struct page *page;
+       unsigned long nr_pages;
+
+       nr_pages = min(unused_resv_pages, surplus_huge_pages);
+
+       while (nr_pages) {
+               nid = next_node(nid, node_online_map);
+               if (nid == MAX_NUMNODES)
+                       nid = first_node(node_online_map);
+
+               if (!surplus_huge_pages_node[nid])
+                       continue;
+
+               if (!list_empty(&hugepage_freelists[nid])) {
+                       page = list_entry(hugepage_freelists[nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       update_and_free_page(page);
+                       free_huge_pages--;
+                       free_huge_pages_node[nid]--;
+                       surplus_huge_pages--;
+                       surplus_huge_pages_node[nid]--;
+                       nr_pages--;
+               }
+       }
+}
+
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr)
 {
        struct page *page = NULL;
+       int use_reserved_page = vma->vm_flags & VM_MAYSHARE;
 
        spin_lock(&hugetlb_lock);
-       if (vma->vm_flags & VM_MAYSHARE)
-               resv_huge_pages--;
-       else if (free_huge_pages <= resv_huge_pages)
+       if (!use_reserved_page && (free_huge_pages <= resv_huge_pages))
                goto fail;
 
        page = dequeue_huge_page(vma, addr);
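
gather_surplus_pages() above is the heart of the dynamic pool: it drops hugetlb_lock while calling the allocator, then retakes it and recomputes 'needed', retrying because the counters may have moved while the lock was not held; anything over-allocated goes back to the buddy allocator. A simplified userspace sketch of that drop-lock/recompute-retry pattern (a pthread mutex plays hugetlb_lock; the counters and alloc_one() are stand-ins, allocation failure and the free-list handling are omitted):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int resv = 5, free_pages = 2;

static int alloc_one(void) { return 1; }	/* pretend allocation succeeds */

static void gather(int delta)
{
	int needed, allocated = 0;

	pthread_mutex_lock(&lock);
	needed = (resv + delta) - free_pages;
	while (needed > 0) {
		pthread_mutex_unlock(&lock);	/* never allocate under the lock */
		for (int i = 0; i < needed; i++)
			allocated += alloc_one();
		pthread_mutex_lock(&lock);
		/* counters may have moved while the lock was dropped: recompute */
		needed = (resv + delta) - (free_pages + allocated);
	}
	free_pages += allocated;	/* surplus pages join the pool */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	gather(3);
	printf("free=%d\n", free_pages);	/* free=8 now covers resv+delta */
	return 0;
}
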
@@ -234,8 +372,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        return page;
 
 fail:
-       if (vma->vm_flags & VM_MAYSHARE)
-               resv_huge_pages++;
        spin_unlock(&hugetlb_lock);
 
        /*
@@ -243,7 +379,7 @@ fail:
         * may have failed due to an undersized hugetlb pool.  Try to grab a
         * surplus huge page from the buddy allocator.
         */
-       if (!(vma->vm_flags & VM_MAYSHARE))
+       if (!use_reserved_page)
                page = alloc_buddy_huge_page(vma, addr);
 
        return page;
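
Taken together, the last two hunks turn alloc_huge_page() into a two-tier allocator: shared mappings draw on the pre-reserved pool, while private mappings that find the static pool exhausted fall back to a dynamically allocated surplus page. A rough sketch of that control flow (from_pool() and from_buddy() are hypothetical stand-ins, not kernel API):

#include <stdio.h>
#include <stdbool.h>

static bool from_pool(void)  { return false; }	/* pretend the pool is empty */
static bool from_buddy(void) { return true; }	/* dynamic surplus allocation */

/* Mirrors alloc_huge_page(): only non-shared mappings may overflow
 * into the buddy allocator; shared ones rely on their reservation. */
static bool alloc_huge(bool shared)
{
	if (from_pool())
		return true;
	if (!shared)
		return from_buddy();	/* surplus fallback, private only */
	return false;
}

int main(void)
{
	printf("private: %d, shared: %d\n", alloc_huge(false), alloc_huge(true));
	return 0;
}
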
@@ -259,6 +395,8 @@ static int __init hugetlb_init(void)
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);
 
+       hugetlb_next_nid = first_node(node_online_map);
+
        for (i = 0; i < max_huge_pages; ++i) {
                if (!alloc_fresh_huge_page())
                        break;
@@ -297,14 +435,14 @@ static void try_to_free_low(unsigned long count)
        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
+                       if (count >= nr_huge_pages)
+                               return;
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(page);
                        free_huge_pages--;
                        free_huge_pages_node[page_to_nid(page)]--;
-                       if (count >= nr_huge_pages)
-                               return;
                }
        }
 }
@@ -344,8 +482,6 @@ static unsigned long set_max_huge_pages(unsigned long count)
                        goto out;
 
        }
-       if (count >= persistent_huge_pages)
-               goto out;
 
        /*
         * Decrease the pool size
@@ -354,7 +490,8 @@ static unsigned long set_max_huge_pages(unsigned long count)
         * pages into surplus state as needed so the pool will shrink
         * to the desired size as pages become free.
         */
-       min_count = max(count, resv_huge_pages);
+       min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
+       min_count = max(count, min_count);
        try_to_free_low(min_count);
        while (min_count < persistent_huge_pages) {
                struct page *page = dequeue_huge_page(NULL, 0);
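
The new min_count computation above keeps the pool from shrinking below what is already spoken for: nr_huge_pages - free_huge_pages pages are in use, and resv_huge_pages more are promised, hence min_count = resv_huge_pages + nr_huge_pages - free_huge_pages. As a worked example (numbers made up for illustration): with nr_huge_pages = 10, free_huge_pages = 4 and resv_huge_pages = 3, six pages are in use, so min_count = 3 + 10 - 4 = 9; a request to shrink to count = 5 releases only one page immediately, and the pool drifts down further as in-use pages are freed and marked surplus.
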
@@ -880,10 +1017,10 @@ static long region_chg(struct list_head *head, long f, long t)
 
        /* If we are below the current region then a new region is required.
         * Subtle, allocate a new region at the position but make it zero
-        * size such that we can guarentee to record the reservation. */
+        * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
-               if (nrg == 0)
+               if (!nrg)
                        return -ENOMEM;
                nrg->from = f;
                nrg->to   = f;
@@ -952,21 +1089,6 @@ static int hugetlb_acct_memory(long delta)
        int ret = -ENOMEM;
 
        spin_lock(&hugetlb_lock);
-       if ((delta + resv_huge_pages) <= free_huge_pages) {
-               resv_huge_pages += delta;
-               ret = 0;
-       }
-       spin_unlock(&hugetlb_lock);
-       return ret;
-}
-
-int hugetlb_reserve_pages(struct inode *inode, long from, long to)
-{
-       long ret, chg;
-
-       chg = region_chg(&inode->i_mapping->private_list, from, to);
-       if (chg < 0)
-               return chg;
        /*
         * When cpuset is configured, it breaks the strict hugetlb page
         * reservation as the accounting is done on a global variable. Such
@@ -984,8 +1106,31 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
         * a best attempt and hopefully to minimize the impact of changing
         * semantics that cpuset has.
         */
-       if (chg > cpuset_mems_nr(free_huge_pages_node))
-               return -ENOMEM;
+       if (delta > 0) {
+               if (gather_surplus_pages(delta) < 0)
+                       goto out;
+
+               if (delta > cpuset_mems_nr(free_huge_pages_node))
+                       goto out;
+       }
+
+       ret = 0;
+       resv_huge_pages += delta;
+       if (delta < 0)
+               return_unused_surplus_pages((unsigned long) -delta);
+
+out:
+       spin_unlock(&hugetlb_lock);
+       return ret;
+}
+
+int hugetlb_reserve_pages(struct inode *inode, long from, long to)
+{
+       long ret, chg;
+
+       chg = region_chg(&inode->i_mapping->private_list, from, to);
+       if (chg < 0)
+               return chg;
 
        ret = hugetlb_acct_memory(chg);
        if (ret < 0)