X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmmap.c;h=de54acd9942f9929004921042721df5cdfe2b6c7;hb=8e2894e51d6407e47226a60c0d19bf384642c55a;hp=f8c61b2385ff4100979927d8169240920a659d9b;hpb=ee39b37b23da0b6ec53a8ebe90ff41c016f8ae27;p=linux-2.6 diff --git a/mm/mmap.c b/mm/mmap.c index f8c61b2385..de54acd994 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -937,9 +937,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, /* mlock MCL_FUTURE? */ if (vm_flags & VM_LOCKED) { unsigned long locked, lock_limit; - locked = mm->locked_vm << PAGE_SHIFT; + locked = len >> PAGE_SHIFT; + locked += mm->locked_vm; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; - locked += len; + lock_limit >>= PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; } @@ -1009,8 +1010,7 @@ munmap_back: } /* Check against address space limit. */ - if ((mm->total_vm << PAGE_SHIFT) + len - > current->signal->rlim[RLIMIT_AS].rlim_cur) + if (!may_expand_vm(mm, len >> PAGE_SHIFT)) return -ENOMEM; if (accountable && (!(flags & MAP_NORESERVE) || @@ -1244,7 +1244,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = mm->free_area_cache; /* make sure it can fit in the remaining address space */ - if (addr >= len) { + if (addr > len) { vma = find_vma(mm, addr-len); if (!vma || addr <= vma->vm_start) /* remember the address as a hint for next time */ @@ -1266,7 +1266,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* try just below the current vma->vm_start */ addr = vma->vm_start-len; - } while (len <= vma->vm_start); + } while (len < vma->vm_start); /* * A failed mmap() very likely causes application failure, @@ -1302,37 +1302,40 @@ unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { - if (flags & MAP_FIXED) { - unsigned long ret; + unsigned long ret; - if (addr > TASK_SIZE - len) - return -ENOMEM; - if (addr & ~PAGE_MASK) - return -EINVAL; - if (file && is_file_hugepages(file)) { - /* - * Check if the given range is hugepage aligned, and - * can be made suitable for hugepages. - */ - ret = prepare_hugepage_range(addr, len); - } else { - /* - * Ensure that a normal request is not falling in a - * reserved hugepage range. For some archs like IA-64, - * there is a separate region for hugepages. - */ - ret = is_hugepage_only_range(current->mm, addr, len); - } - if (ret) - return -EINVAL; - return addr; - } + if (!(flags & MAP_FIXED)) { + unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); - if (file && file->f_op && file->f_op->get_unmapped_area) - return file->f_op->get_unmapped_area(file, addr, len, - pgoff, flags); + get_area = current->mm->get_unmapped_area; + if (file && file->f_op && file->f_op->get_unmapped_area) + get_area = file->f_op->get_unmapped_area; + addr = get_area(file, addr, len, pgoff, flags); + if (IS_ERR_VALUE(addr)) + return addr; + } - return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); + if (addr > TASK_SIZE - len) + return -ENOMEM; + if (addr & ~PAGE_MASK) + return -EINVAL; + if (file && is_file_hugepages(file)) { + /* + * Check if the given range is hugepage aligned, and + * can be made suitable for hugepages. + */ + ret = prepare_hugepage_range(addr, len); + } else { + /* + * Ensure that a normal request is not falling in a + * reserved hugepage range. For some archs like IA-64, + * there is a separate region for hugepages. 
+ */
+ ret = is_hugepage_only_range(current->mm, addr, len);
+ }
+ if (ret)
+ return -EINVAL;
+ return addr;
 }
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1421,7 +1424,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 struct rlimit *rlim = current->signal->rlim;
 /* address space limit tests */
- if (mm->total_vm + grow > rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT)
+ if (!may_expand_vm(mm, grow))
 return -ENOMEM;
 /* Stack limit test */
@@ -1602,14 +1605,13 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 * Ok - we have the memory areas we should free on the 'free' list,
 * so release them, and do the vma updates.
 */
-static void unmap_vma_list(struct mm_struct *mm,
- struct vm_area_struct *mpnt)
+static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
 do {
- struct vm_area_struct *next = mpnt->vm_next;
- unmap_vma(mm, mpnt);
- mpnt = next;
- } while (mpnt != NULL);
+ struct vm_area_struct *next = vma->vm_next;
+ unmap_vma(mm, vma);
+ vma = next;
+ } while (vma);
 validate_mm(mm);
 }
@@ -1631,7 +1633,7 @@ static void unmap_region(struct mm_struct *mm,
 tlb = tlb_gather_mmu(mm, 0);
 unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
 vm_unacct_memory(nr_accounted);
- free_pgtables(&tlb, vma, prev? prev->vm_end: 0,
+ free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 next? next->vm_start: 0);
 tlb_finish_mmu(tlb, start, end);
 spin_unlock(&mm->page_table_lock);
@@ -1720,7 +1722,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 {
 unsigned long end;
- struct vm_area_struct *mpnt, *prev, *last;
+ struct vm_area_struct *vma, *prev, *last;
 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
 return -EINVAL;
@@ -1729,14 +1731,14 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 return -EINVAL;
 /* Find the first overlapping VMA */
- mpnt = find_vma_prev(mm, start, &prev);
- if (!mpnt)
+ vma = find_vma_prev(mm, start, &prev);
+ if (!vma)
 return 0;
- /* we have start < mpnt->vm_end */
+ /* we have start < vma->vm_end */
 /* if it doesn't overlap, we have nothing.. */
 end = start + len;
- if (mpnt->vm_start >= end)
+ if (vma->vm_start >= end)
 return 0;
 /*
@@ -1746,11 +1748,11 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 * unmapped vm_area_struct will remain in use: so lower split_vma
 * places tmp vma above, and higher split_vma places tmp vma below.
 */
- if (start > mpnt->vm_start) {
- int error = split_vma(mm, mpnt, start, 0);
+ if (start > vma->vm_start) {
+ int error = split_vma(mm, vma, start, 0);
 if (error)
 return error;
- prev = mpnt;
+ prev = vma;
 }
 /* Does it split the last one? */
@@ -1760,16 +1762,16 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 if (error)
 return error;
 }
- mpnt = prev? prev->vm_next: mm->mmap;
+ vma = prev? prev->vm_next: mm->mmap;
 /*
 * Remove the vma's, and unmap the actual pages
 */
- detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
- unmap_region(mm, mpnt, prev, start, end);
+ detach_vmas_to_be_unmapped(mm, vma, prev, end);
+ unmap_region(mm, vma, prev, start, end);
 /* Fix up all other VM information */
- unmap_vma_list(mm, mpnt);
+ unmap_vma_list(mm, vma);
 return 0;
 }
@@ -1824,9 +1826,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 */
 if (mm->def_flags & VM_LOCKED) {
 unsigned long locked, lock_limit;
- locked = mm->locked_vm << PAGE_SHIFT;
+ locked = len >> PAGE_SHIFT;
+ locked += mm->locked_vm;
 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
- locked += len;
+ lock_limit >>= PAGE_SHIFT;
 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 return -EAGAIN;
 }
@@ -1849,8 +1852,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 }
 /* Check against address space limits *after* clearing old maps... */
- if ((mm->total_vm << PAGE_SHIFT) + len
- > current->signal->rlim[RLIMIT_AS].rlim_cur)
+ if (!may_expand_vm(mm, len >> PAGE_SHIFT))
 return -ENOMEM;
 if (mm->map_count > sysctl_max_map_count)
@@ -1911,7 +1913,7 @@ void exit_mmap(struct mm_struct *mm)
 /* Use -1 here to ensure all VMAs in the mm are unmapped */
 end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
 vm_unacct_memory(nr_accounted);
- free_pgtables(&tlb, vma, 0, 0);
+ free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 tlb_finish_mmu(tlb, 0, end);
 mm->mmap = mm->mmap_cache = NULL;
@@ -1932,7 +1934,7 @@ void exit_mmap(struct mm_struct *mm)
 vma = next;
 }
- BUG_ON(mm->nr_ptes); /* This is just debugging */
+ BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 /* Insert vm structure into process list sorted by address
@@ -2020,3 +2022,19 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 }
 return new_vma;
 }
+
+/*
+ * Return true if the calling process may expand its vm space by the passed
+ * number of pages
+ */
+int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+{
+ unsigned long cur = mm->total_vm; /* pages */
+ unsigned long lim;
+
+ lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+
+ if (cur + npages > lim)
+ return 0;
+ return 1;
+}
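
The common thread in the limit checks above is that the arithmetic now stays in page units: RLIMIT_MEMLOCK and RLIMIT_AS are shifted down by PAGE_SHIFT once, instead of shifting mm->locked_vm or mm->total_vm up into bytes, where the left shift can wrap a 32-bit unsigned long and let an over-limit request through. Below is a standalone userspace sketch of that failure mode and of the page-unit check that may_expand_vm() now centralizes. It is illustrative only: the helper names, the 4 KB page size and the sample numbers are invented for the example, and uint32_t stands in for a 32-bit unsigned long.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12			/* assume 4 KB pages */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/*
 * Old-style check, modelled on the code this patch removes: convert the
 * mapped pages to bytes and compare against a byte limit.  With a 32-bit
 * unsigned long (emulated here by uint32_t) the shift wraps once the
 * mapping reaches 4 GB, so the comparison can falsely succeed.
 */
static int may_expand_bytes(uint32_t total_vm_pages, uint32_t len_bytes,
			    uint32_t rlim_bytes)
{
	uint32_t total_bytes = total_vm_pages << PAGE_SHIFT;	/* may wrap */

	return total_bytes + len_bytes <= rlim_bytes;
}

/*
 * New-style check, the shape of may_expand_vm(): shift the limit down to
 * pages once and keep every operand in page units, which cannot overflow.
 */
static int may_expand_pages(uint32_t total_vm_pages, uint32_t npages,
			    uint32_t rlim_bytes)
{
	uint32_t lim_pages = rlim_bytes >> PAGE_SHIFT;

	return total_vm_pages + npages <= lim_pages;
}

int main(void)
{
	uint32_t total_vm = 0x100000;		/* 1 Mi pages = 4 GB already mapped */
	uint32_t len = 64 * PAGE_SIZE;		/* new request, in bytes */
	uint32_t rlim = 0xC0000000u;		/* RLIMIT_AS of 3 GB */

	/* The byte-based check wraps to a tiny value and allows the request. */
	printf("byte check allows: %d\n", may_expand_bytes(total_vm, len, rlim));
	/* The page-based check stays in range and correctly refuses it. */
	printf("page check allows: %d\n",
	       may_expand_pages(total_vm, len >> PAGE_SHIFT, rlim));
	return 0;
}

The same per-page arithmetic is what lets the do_mmap_pgoff(), do_brk() and acct_stack_growth() hunks above all share the single may_expand_vm() helper added at the end of the patch.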