git.err.no Git - linux-2.6/blobdiff - mm/hugetlb.c
[PATCH] Add NUMA policy support for huge pages.
[linux-2.6] / mm / hugetlb.c
index da8a211414c94950014365f745f86c5e3e67dfc9..eb405565949da5bd2bee78df2ea2b6dbb478c64c 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/highmem.h>
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
+#include <linux/mempolicy.h>
+
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -36,18 +38,21 @@ static void enqueue_huge_page(struct page *page)
        free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(void)
+static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+                               unsigned long address)
 {
        int nid = numa_node_id();
        struct page *page = NULL;
+       struct zonelist *zonelist = huge_zonelist(vma, address);
+       struct zone **z;
 
-       if (list_empty(&hugepage_freelists[nid])) {
-               for (nid = 0; nid < MAX_NUMNODES; ++nid)
-                       if (!list_empty(&hugepage_freelists[nid]))
-                               break;
+       for (z = zonelist->zones; *z; z++) {
+               nid = (*z)->zone_pgdat->node_id;
+               if (!list_empty(&hugepage_freelists[nid]))
+                       break;
        }
-       if (nid >= 0 && nid < MAX_NUMNODES &&
-           !list_empty(&hugepage_freelists[nid])) {
+
+       if (*z) {
                page = list_entry(hugepage_freelists[nid].next,
                                  struct page, lru);
                list_del(&page->lru);
@@ -85,13 +90,13 @@ void free_huge_page(struct page *page)
        spin_unlock(&hugetlb_lock);
 }
 
-struct page *alloc_huge_page(void)
+struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 {
        struct page *page;
        int i;
 
        spin_lock(&hugetlb_lock);
-       page = dequeue_huge_page();
+       page = dequeue_huge_page(vma, addr);
        if (!page) {
                spin_unlock(&hugetlb_lock);
                return NULL;
@@ -194,7 +199,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
        spin_lock(&hugetlb_lock);
        try_to_free_low(count);
        while (count < nr_huge_pages) {
-               struct page *page = dequeue_huge_page();
+               struct page *page = dequeue_huge_page(NULL, 0);
                if (!page)
                        break;
                update_and_free_page(page);
@@ -363,8 +368,9 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        flush_tlb_range(vma, start, end);
 }
 
-static struct page *find_or_alloc_huge_page(struct address_space *mapping,
-                               unsigned long idx, int shared)
+static struct page *find_or_alloc_huge_page(struct vm_area_struct *vma,
+                       unsigned long addr, struct address_space *mapping,
+                       unsigned long idx, int shared)
 {
        struct page *page;
        int err;
@@ -376,7 +382,7 @@ retry:
 
        if (hugetlb_get_quota(mapping))
                goto out;
-       page = alloc_huge_page();
+       page = alloc_huge_page(vma, addr);
        if (!page) {
                hugetlb_put_quota(mapping);
                goto out;
@@ -416,7 +422,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        page_cache_get(old_page);
-       new_page = alloc_huge_page();
+       new_page = alloc_huge_page(vma, address);
 
        if (!new_page) {
                page_cache_release(old_page);
@@ -465,7 +471,7 @@ int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
-       page = find_or_alloc_huge_page(mapping, idx,
+       page = find_or_alloc_huge_page(vma, address, mapping, idx,
                        vma->vm_flags & VM_SHARED);
        if (!page)
                goto out;