[PATCH] hugetlb: prepare_hugepage_range check offset too
author	Hugh Dickins <hugh@veritas.com>
	Tue, 14 Nov 2006 10:03:32 +0000 (02:03 -0800)
committer	Linus Torvalds <torvalds@woody.osdl.org>
	Tue, 14 Nov 2006 17:09:27 +0000 (09:09 -0800)
(David:)

If hugetlbfs_file_mmap() returns a failure to do_mmap_pgoff() - for example,
because the given file offset is not hugepage aligned - then do_mmap_pgoff
will go to the unmap_and_free_vma backout path.

But at this stage the vma hasn't been marked as hugepage, and the backout path
will call unmap_region() on it.  That will eventually call down to the
non-hugepage version of unmap_page_range().  On ppc64, at least, that will
cause serious problems if there are any existing hugepage pagetable entries in
the vicinity - for example if there are any other hugepage mappings under the
same PUD.  unmap_page_range() will trigger a bad_pud() on the hugepage pud
entries.  I suspect this will also cause bad problems on ia64, though I don't
have a machine to test it on.
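
For reference, the backout path in question looks roughly like this (a simplified
paraphrase of the do_mmap_pgoff() error unwind in mm/mmap.c, not the exact source):

	error = file->f_op->mmap(file, vma);	/* hugetlbfs_file_mmap() */
	if (error)
		goto unmap_and_free_vma;	/* VM_HUGETLB was never set on vma */
	...
unmap_and_free_vma:
	vma->vm_file = NULL;
	fput(file);
	/* Undo any partial mapping: walks page tables the non-huge way,
	 * which is what trips bad_pud() next to existing hugepage entries. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);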

(Hugh:)

prepare_hugepage_range() should check file offset alignment when it checks
virtual address and length, to stop MAP_FIXED with a bad huge offset from
unmapping before it fails further down.  PowerPC should apply the same
prepare_hugepage_range alignment checks as ia64 and all the others do.
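
The offset check each architecture gains below rejects a pgoff that is not a whole
number of hugepages.  A worked example, assuming for illustration ppc64's 16MB
hugepages (HPAGE_SHIFT 24, PAGE_SHIFT 12):

	/* ~HPAGE_MASK is the low-bit part of a hugepage: HPAGE_SIZE - 1 = 0xffffff.
	 * Shifting right by PAGE_SHIFT expresses that in PAGE_SIZE units:
	 *   ~HPAGE_MASK >> PAGE_SHIFT == 0xfff
	 * so pgoff (the file offset in small pages) must be a multiple of
	 * HPAGE_SIZE / PAGE_SIZE == 4096, i.e. the offset is hugepage aligned.
	 */
	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
		return -EINVAL;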

Then none of the alignment checks in hugetlbfs_file_mmap are required (nor
is the check for too small a mapping); but even so, move up setting of
VM_HUGETLB and add a comment to warn of what David Gibson discovered - if
hugetlbfs_file_mmap fails before setting it, do_mmap_pgoff's unmap_region
when unwinding from error will go the non-huge way, which may cause bad
behaviour on architectures (powerpc and ia64) which segregate their huge
mappings into a separate region of the address space.
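
A minimal user-space sketch of the failing case (the mount point, file name and
16MB hugepage size are illustrative assumptions, not part of the patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		long hpage_size = 16 * 1024 * 1024;	/* e.g. ppc64's 16MB hugepages */
		int fd = open("/mnt/huge/file", O_RDWR | O_CREAT, 0600);
		if (fd < 0)
			return 1;
		/* File offset of one small page: not hugepage aligned.  Before the
		 * patch this was only rejected in hugetlbfs_file_mmap(), after
		 * do_mmap_pgoff() had already committed to the mapping; now
		 * get_unmapped_area() fails it up front with -EINVAL. */
		void *p = mmap(NULL, hpage_size, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 4096);
		if (p == MAP_FAILED)
			perror("mmap");
		return 0;
	}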

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/ia64/mm/hugetlbpage.c
arch/powerpc/mm/hugetlbpage.c
fs/hugetlbfs/inode.c
include/linux/hugetlb.h
mm/mmap.c

index eee5c1cfbe3223c1376c4d0a9239d4def44be824..f3a9585e98a8337a8cbd8b06c6916d5a8b3a8612 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -70,8 +70,10 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
+       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+               return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
index fd68b74c07c3c4e5c457371c1dd54c52767cec8a..506d89768d455ba8625ed480d4122c7d1c8264c9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -491,11 +491,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
        return 0;
 }
 
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
        int err = 0;
 
-       if ( (addr+len) < addr )
+       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+               return -EINVAL;
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
                return -EINVAL;
 
        if (addr < 0x100000000UL)
index 0bea6a619e100e0baa4eee8bc689c935b88ff2cc..7f4756963d05e067464dcb9570db9ebba54b1170 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -62,24 +62,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        loff_t len, vma_len;
        int ret;
 
-       if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
-               return -EINVAL;
-
-       if (vma->vm_start & ~HPAGE_MASK)
-               return -EINVAL;
-
-       if (vma->vm_end & ~HPAGE_MASK)
-               return -EINVAL;
-
-       if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
-               return -EINVAL;
+       /*
+        * vma alignment has already been checked by prepare_hugepage_range.
+        * If you add any error returns here, do so after setting VM_HUGETLB,
+        * so is_vm_hugetlb_page tests below unmap_region go the right way
+        * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+        */
+       vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+       vma->vm_ops = &hugetlb_vm_ops;
 
        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
        mutex_lock(&inode->i_mutex);
        file_accessed(file);
-       vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
-       vma->vm_ops = &hugetlb_vm_ops;
 
        ret = -ENOMEM;
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
index 5081d27bfa27ac22979dd4a2ad7a865b81701893..ace64e57e17f4291a813c7a9f61d1779fb560d43 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -60,8 +60,11 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
+                                               pgoff_t pgoff)
 {
+       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+               return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
@@ -69,7 +72,8 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
        return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len);
+int prepare_hugepage_range(unsigned long addr, unsigned long len,
+                                               pgoff_t pgoff);
 #endif
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
@@ -107,7 +111,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)            0
 #define hugetlb_report_node_meminfo(n, buf)    0
 #define follow_huge_pmd(mm, addr, pmd, write)  NULL
-#define prepare_hugepage_range(addr, len)      (-EINVAL)
+#define prepare_hugepage_range(addr,len,pgoff) (-EINVAL)
 #define pmd_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
index 497e502dfd6b6e54fc2bf902ea55fac36e70ac72..bdace87d7c019a75c0dbb26e29548bbd888869ed 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1379,7 +1379,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                 * Check if the given range is hugepage aligned, and
                 * can be made suitable for hugepages.
                 */
-               ret = prepare_hugepage_range(addr, len);
+               ret = prepare_hugepage_range(addr, len, pgoff);
        } else {
                /*
                 * Ensure that a normal request is not falling in a