Linux 2.6.27-rc6: mm/hugetlb.c

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2a2f6e869401c8cbcd1b7f0a4763a047a0194f88..67a71191136ed8f6ad9dad03b472440baa322210 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/sysctl.h>
 #include <linux/highmem.h>
+#include <linux/mmu_notifier.h>
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
@@ -19,6 +20,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/io.h>
 
 #include <linux/hugetlb.h>
 #include "internal.h"
@@ -31,6 +33,8 @@ static int max_hstate;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
 
+__initdata LIST_HEAD(huge_boot_pages);
+
 /* for command line parsing */
 static struct hstate * __initdata parsed_hstate;
 static unsigned long __initdata default_hstate_max_huge_pages;
@@ -340,13 +344,13 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 }
 
 /* Returns true if the VMA has associated reserve pages */
-static int vma_has_private_reserves(struct vm_area_struct *vma)
+static int vma_has_reserves(struct vm_area_struct *vma)
 {
        if (vma->vm_flags & VM_SHARED)
-               return 0;
-       if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-               return 0;
-       return 1;
+               return 1;
+       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+               return 1;
+       return 0;
 }
 
 static void clear_huge_page(struct page *page,
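Aside, not part of the diff: the rename from vma_has_private_reserves() to vma_has_reserves() also inverts the predicate, so VM_SHARED mappings are now treated as having reserves. A minimal userspace model of how the check in dequeue_huge_page_vma() (next hunk) consumes the result; the struct and the numbers below are made up purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the two hstate counters the real check reads. */
struct pool {
	long free_huge_pages;
	long resv_huge_pages;
};

/* Mirrors the dequeue check: a VMA without reserves must not take the
 * last free pages, because those back someone else's reservation. */
static bool may_dequeue(bool vma_has_reserves, const struct pool *p)
{
	if (!vma_has_reserves &&
			p->free_huge_pages - p->resv_huge_pages == 0)
		return false;
	return true;
}

int main(void)
{
	struct pool p = { .free_huge_pages = 4, .resv_huge_pages = 4 };

	printf("%d\n", may_dequeue(false, &p)); /* 0: all free pages reserved */
	printf("%d\n", may_dequeue(true, &p));  /* 1: owner or shared mapping */
	return 0;
}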
@@ -418,7 +422,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
         * have no page reserves. This check ensures that reservations are
         * not "stolen". The child may still get SIGKILLed
         */
-       if (!vma_has_private_reserves(vma) &&
+       if (!vma_has_reserves(vma) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                return NULL;
 
@@ -561,7 +565,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
                huge_page_order(h));
        if (page) {
                if (arch_prepare_hugepage(page)) {
-                       __free_pages(page, HUGETLB_PAGE_ORDER);
+                       __free_pages(page, huge_page_order(h));
                        return NULL;
                }
                prep_new_huge_page(h, page, nid);
@@ -661,6 +665,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
                                        __GFP_REPEAT|__GFP_NOWARN,
                                        huge_page_order(h));
 
+       if (page && arch_prepare_hugepage(page)) {
+               __free_pages(page, huge_page_order(h));
+               return NULL;
+       }
+
        spin_lock(&hugetlb_lock);
        if (page) {
                /*
@@ -925,14 +934,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        return page;
 }
 
-static __initdata LIST_HEAD(huge_boot_pages);
-
-struct huge_bootmem_page {
-       struct list_head list;
-       struct hstate *hstate;
-};
-
-static int __init alloc_bootmem_huge_page(struct hstate *h)
+__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
 {
        struct huge_bootmem_page *m;
        int nr_nodes = nodes_weight(node_online_map);
@@ -1031,7 +1033,6 @@ static void __init report_hugepages(void)
        }
 }
 
-#ifdef CONFIG_SYSCTL
 #ifdef CONFIG_HIGHMEM
 static void try_to_free_low(struct hstate *h, unsigned long count)
 {
@@ -1287,7 +1288,12 @@ module_exit(hugetlb_exit);
 
 static int __init hugetlb_init(void)
 {
-       BUILD_BUG_ON(HPAGE_SHIFT == 0);
+       /* Some platforms decide whether they support huge pages at boot
+        * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+        * there is no such support.
+        */
+       if (HPAGE_SHIFT == 0)
+               return 0;
 
        if (!size_to_hstate(default_hstate_size)) {
                default_hstate_size = HPAGE_SIZE;
@@ -1391,6 +1397,7 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
        return nr;
 }
 
+#ifdef CONFIG_SYSCTL
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
@@ -1557,8 +1564,10 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 
                kref_put(&reservations->refs, resv_map_release);
 
-               if (reserve)
+               if (reserve) {
                        hugetlb_acct_memory(h, -reserve);
+                       hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
+               }
        }
 }
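Aside, not part of the diff: the added hugetlb_put_quota() balances the quota charge made when the reservation was created. Roughly, and assuming the 2.6.27-era reservation path elsewhere in this file, the pairing looks like this sketch (error unwinding trimmed):

	/* at reserve time (hugetlb_reserve_pages(), assumed shape): */
	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;
	ret = hugetlb_acct_memory(h, chg);

	/* at close time, the hunk above now releases what was never used: */
	hugetlb_acct_memory(h, -reserve);
	hugetlb_put_quota(vma->vm_file->f_mapping, reserve);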
 
@@ -1675,6 +1684,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        BUG_ON(start & ~huge_page_mask(h));
        BUG_ON(end & ~huge_page_mask(h));
 
+       mmu_notifier_invalidate_range_start(mm, start, end);
        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += sz) {
                ptep = huge_pte_offset(mm, address);
@@ -1716,6 +1726,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
+       mmu_notifier_invalidate_range_end(mm, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);
                put_page(page);
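Aside, not part of the diff: the two added calls bracket the entire teardown. Condensed from the lines of these two hunks, __unmap_hugepage_range() now reads roughly:

	mmu_notifier_invalidate_range_start(mm, start, end);
	spin_lock(&mm->page_table_lock);
	/* ... clear the huge PTEs and gather their pages on page_list ... */
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	/* pages gathered above are only released after the notifier end call */
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}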
@@ -1931,6 +1942,18 @@ retry:
                        lock_page(page);
        }
 
+       /*
+        * If we are going to COW a private mapping later, we examine the
+        * pending reservations for this page now. This will ensure that
+        * any allocations necessary to record that reservation occur outside
+        * the spinlock.
+        */
+       if (write_access && !(vma->vm_flags & VM_SHARED))
+               if (vma_needs_reservation(h, vma, address) < 0) {
+                       ret = VM_FAULT_OOM;
+                       goto backout_unlocked;
+               }
+
        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> huge_page_shift(h);
        if (idx >= size)
@@ -1956,6 +1979,7 @@ out:
 
 backout:
        spin_unlock(&mm->page_table_lock);
+backout_unlocked:
        unlock_page(page);
        put_page(page);
        goto out;
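Aside, not part of the diff: the reservation check added above runs before mm->page_table_lock is taken, so its failure path needs an unwind that skips the spin_unlock(). Reassembled from the '+' and context lines, the unwind in hugetlb_no_page() becomes:

backout:
	spin_unlock(&mm->page_table_lock);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;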
@@ -1967,6 +1991,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pte_t *ptep;
        pte_t entry;
        int ret;
+       struct page *pagecache_page = NULL;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
 
@@ -1983,25 +2008,44 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        entry = huge_ptep_get(ptep);
        if (huge_pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-               mutex_unlock(&hugetlb_instantiation_mutex);
-               return ret;
+               goto out_unlock;
        }
 
        ret = 0;
 
+       /*
+        * If we are going to COW the mapping later, we examine the pending
+        * reservations for this page now. This will ensure that any
+        * allocations necessary to record that reservation occur outside the
+        * spinlock. For private mappings, we also look up the pagecache
+        * page now as it is used to determine if a reservation has been
+        * consumed.
+        */
+       if (write_access && !pte_write(entry)) {
+               if (vma_needs_reservation(h, vma, address) < 0) {
+                       ret = VM_FAULT_OOM;
+                       goto out_unlock;
+               }
+
+               if (!(vma->vm_flags & VM_SHARED))
+                       pagecache_page = hugetlbfs_pagecache_page(h,
+                                                               vma, address);
+       }
+
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (likely(pte_same(entry, huge_ptep_get(ptep))))
-               if (write_access && !pte_write(entry)) {
-                       struct page *page;
-                       page = hugetlbfs_pagecache_page(h, vma, address);
-                       ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
-                       if (page) {
-                               unlock_page(page);
-                               put_page(page);
-                       }
-               }
+               if (write_access && !pte_write(entry))
+                       ret = hugetlb_cow(mm, vma, address, ptep, entry,
+                                                       pagecache_page);
        spin_unlock(&mm->page_table_lock);
+
+       if (pagecache_page) {
+               unlock_page(pagecache_page);
+               put_page(pagecache_page);
+       }
+
+out_unlock:
        mutex_unlock(&hugetlb_instantiation_mutex);
 
        return ret;
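Aside, not part of the diff: read as a whole, the tail of hugetlb_fault() after this hunk applies looks roughly as follows (reassembled from the '+' and context lines; the pagecache page is now looked up and released outside mm->page_table_lock):

	if (write_access && !pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_unlock;
		}

		if (!(vma->vm_flags & VM_SHARED))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	spin_lock(&mm->page_table_lock);
	/* check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, huge_ptep_get(ptep))))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
							pagecache_page);
	spin_unlock(&mm->page_table_lock);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}

out_unlock:
	mutex_unlock(&hugetlb_instantiation_mutex);
	return ret;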