diff --git a/mm/mmap.c b/mm/mmap.c
index 0fa87a5ae2ccd80bde1403171022c4643dbb41f5..de54acd9942f9929004921042721df5cdfe2b6c7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -937,9 +937,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
-               locked = mm->locked_vm << PAGE_SHIFT;
+               locked = len >> PAGE_SHIFT;
+               locked += mm->locked_vm;
                lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
-               locked += len;
+               lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }
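
The hunk above redoes the MCL_FUTURE test in page units: the request length and
mm->locked_vm are summed as pages and compared against RLIMIT_MEMLOCK shifted
down to pages. A minimal sketch of the resulting test as a helper, assuming the
same fields as in the hunk (not part of the patch):

        /* Hedged sketch only: the page-based mlock limit test. */
        static int mlock_future_ok(struct mm_struct *mm, unsigned long len)
        {
                unsigned long locked = (len >> PAGE_SHIFT) + mm->locked_vm;   /* pages */
                unsigned long lock_limit =
                        current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;

                return locked <= lock_limit || capable(CAP_IPC_LOCK);
        }
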
@@ -1009,8 +1010,7 @@ munmap_back:
        }
 
        /* Check against address space limit. */
-       if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->signal->rlim[RLIMIT_AS].rlim_cur)
+       if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        if (accountable && (!(flags & MAP_NORESERVE) ||
@@ -1244,7 +1244,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        addr = mm->free_area_cache;
 
        /* make sure it can fit in the remaining address space */
-       if (addr >= len) {
+       if (addr > len) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start)
                        /* remember the address as a hint for next time */
@@ -1266,7 +1266,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
-       } while (len <= vma->vm_start);
+       } while (len < vma->vm_start);
 
        /*
         * A failed mmap() very likely causes application failure,
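
Both tightened comparisons in this function guard the same boundary case: the
top-down search proposes a base of "addr - len" (or "vma->vm_start - len"), and
with the old non-strict tests a base of exactly 0 could be handed out. A toy
sketch of the boundary, not from the patch:

        /* Toy sketch only: the strict test keeps the proposed base non-zero. */
        static int fits_above_zero(unsigned long addr, unsigned long len)
        {
                return addr > len;      /* old "addr >= len" also accepted base == 0 */
        }
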
@@ -1302,37 +1302,40 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
 {
-       if (flags & MAP_FIXED) {
-               unsigned long ret;
+       unsigned long ret;
 
-               if (addr > TASK_SIZE - len)
-                       return -ENOMEM;
-               if (addr & ~PAGE_MASK)
-                       return -EINVAL;
-               if (file && is_file_hugepages(file))  {
-                       /*
-                        * Check if the given range is hugepage aligned, and
-                        * can be made suitable for hugepages.
-                        */
-                       ret = prepare_hugepage_range(addr, len);
-               } else {
-                       /*
-                        * Ensure that a normal request is not falling in a
-                        * reserved hugepage range.  For some archs like IA-64,
-                        * there is a separate region for hugepages.
-                        */
-                       ret = is_hugepage_only_range(current->mm, addr, len);
-               }
-               if (ret)
-                       return -EINVAL;
-               return addr;
-       }
+       if (!(flags & MAP_FIXED)) {
+               unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-       if (file && file->f_op && file->f_op->get_unmapped_area)
-               return file->f_op->get_unmapped_area(file, addr, len,
-                                               pgoff, flags);
+               get_area = current->mm->get_unmapped_area;
+               if (file && file->f_op && file->f_op->get_unmapped_area)
+                       get_area = file->f_op->get_unmapped_area;
+               addr = get_area(file, addr, len, pgoff, flags);
+               if (IS_ERR_VALUE(addr))
+                       return addr;
+       }
 
-       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+       if (addr > TASK_SIZE - len)
+               return -ENOMEM;
+       if (addr & ~PAGE_MASK)
+               return -EINVAL;
+       if (file && is_file_hugepages(file))  {
+               /*
+                * Check if the given range is hugepage aligned, and
+                * can be made suitable for hugepages.
+                */
+               ret = prepare_hugepage_range(addr, len);
+       } else {
+               /*
+                * Ensure that a normal request is not falling in a
+                * reserved hugepage range.  For some archs like IA-64,
+                * there is a separate region for hugepages.
+                */
+               ret = is_hugepage_only_range(current->mm, addr, len);
+       }
+       if (ret)
+               return -EINVAL;
+       return addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
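
After this rewrite, get_unmapped_area() picks one get_area callback (the file's
own ->get_unmapped_area if it provides one, otherwise the mm default) and then
runs the TASK_SIZE, alignment and hugepage checks on whatever address results;
previously those checks ran only for MAP_FIXED, and a file-supplied callback
returned its address directly. A hedged caller's-eye sketch, with a hypothetical
helper name, of what the unified tail now guarantees:

        /*
         * Hedged usage sketch (hypothetical caller, not from the patch): on a
         * non-error return the address is page aligned, below TASK_SIZE, and
         * has passed the hugepage range checks, MAP_FIXED or not.
         */
        static unsigned long pick_mapping_address(struct file *file, unsigned long hint,
                                                  unsigned long len, unsigned long flags)
        {
                unsigned long addr = get_unmapped_area(file, hint, len, 0, flags);

                if (IS_ERR_VALUE(addr))
                        return addr;    /* -ENOMEM or -EINVAL from the checks */
                return addr;            /* aligned and within the task's address range */
        }
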
@@ -1421,7 +1424,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
        struct rlimit *rlim = current->signal->rlim;
 
        /* address space limit tests */
-       if (mm->total_vm + grow > rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT)
+       if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
@@ -1630,7 +1633,7 @@ static void unmap_region(struct mm_struct *mm,
        tlb = tlb_gather_mmu(mm, 0);
        unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(&tlb, vma, prev? prev->vm_end: 0,
+       free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
                                 next? next->vm_start: 0);
        tlb_finish_mmu(tlb, start, end);
        spin_unlock(&mm->page_table_lock);
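
free_pgtables() is bounded by a floor and a ceiling: the lowest and highest
addresses whose page-table pages it may free. Using FIRST_USER_ADDRESS rather
than 0 as the floor when there is no previous vma leaves the page tables below
the start of the user range untouched on architectures where that constant is
non-zero (it is 0 on most). A hedged restatement of the two bounds passed
above, not new code:

        /* Hedged restatement of the call above: bounds for freeing page tables. */
        unsigned long floor   = prev ? prev->vm_end   : FIRST_USER_ADDRESS;
        unsigned long ceiling = next ? next->vm_start : 0;   /* 0 is treated as "no ceiling" */
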
@@ -1823,9 +1826,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
         */
        if (mm->def_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
-               locked = mm->locked_vm << PAGE_SHIFT;
+               locked = len >> PAGE_SHIFT;
+               locked += mm->locked_vm;
                lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
-               locked += len;
+               lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }
@@ -1848,8 +1852,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        }
 
        /* Check against address space limits *after* clearing old maps... */
-       if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->signal->rlim[RLIMIT_AS].rlim_cur)
+       if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        if (mm->map_count > sysctl_max_map_count)
@@ -1910,7 +1913,7 @@ void exit_mmap(struct mm_struct *mm)
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(&tlb, vma, 0, 0);
+       free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(tlb, 0, end);
 
        mm->mmap = mm->mmap_cache = NULL;
@@ -1931,7 +1934,7 @@ void exit_mmap(struct mm_struct *mm)
                vma = next;
        }
 
-       BUG_ON(mm->nr_ptes);    /* This is just debugging */
+       BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address
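
The relaxed BUG_ON in exit_mmap() tolerates exactly the page tables that now
survive below FIRST_USER_ADDRESS, rounded up to whole PMD entries: with
FIRST_USER_ADDRESS == 0 the bound is 0 and the check is as strict as before,
while on an (illustrative) configuration with a one-page FIRST_USER_ADDRESS and
PMD_SHIFT == 21 it is (4096 + 2097152 - 1) >> 21 == 1, so a single leftover
page table is allowed.
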
@@ -2019,3 +2022,19 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        }
        return new_vma;
 }
+
+/*
+ * Return true if the calling process may expand its vm space by the passed
+ * number of pages
+ */
+int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+{
+       unsigned long cur = mm->total_vm;       /* pages */
+       unsigned long lim;
+
+       lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+
+       if (cur + npages > lim)
+               return 0;
+       return 1;
+}
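
may_expand_vm() gathers the RLIMIT_AS comparison, done in pages, into one
place; the converted call sites above shift their byte length down to pages
before calling it. A hedged sketch of such a call site, mirroring the ones in
this patch:

        /* Hedged sketch only: ask whether "len" more bytes of vm space fit. */
        static int vm_growth_allowed(struct mm_struct *mm, unsigned long len)
        {
                if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                        return -ENOMEM;         /* would exceed RLIMIT_AS */
                return 0;
        }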