X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmmap.c;h=d2b6d44962b7c7b28b89e20cdbf1af0b0c6ada31;hb=77b4c255af34e73ea1efd1c3384bbe91361c81e6;hp=4275e81e25ba69cf49b777ca2819fb6295d7b084;hpb=fe537c0ee86b27fbe0690a7869815da80f492dbd;p=linux-2.6

diff --git a/mm/mmap.c b/mm/mmap.c
index 4275e81e25..d2b6d44962 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -251,7 +251,8 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 	 * not page aligned  -Ram Gupta
 	 */
 	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+			(mm->end_data - mm->start_data) > rlim)
 		goto out;
 
 	newbrk = PAGE_ALIGN(brk);
@@ -912,6 +913,9 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	if (!len)
 		return -EINVAL;
 
+	if (!(flags & MAP_FIXED))
+		addr = round_hint_to_min(addr);
+
 	error = arch_mmap_check(addr, len, flags);
 	if (error)
 		return error;
@@ -1048,8 +1052,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 
 	/* The open routine did something to the protections already? */
 	if (pgprot_val(vma->vm_page_prot) !=
-	    pgprot_val(protection_map[vm_flags &
-		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+	    pgprot_val(vm_get_page_prot(vm_flags)))
 		return 0;
 
 	/* Specialty mapping? */
@@ -1130,8 +1133,7 @@ munmap_back:
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 	vma->vm_flags = vm_flags;
-	vma->vm_page_prot = protection_map[vm_flags &
-				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	vma->vm_page_prot = vm_get_page_prot(vm_flags);
 	vma->vm_pgoff = pgoff;
 
 	if (file) {
@@ -1173,8 +1175,7 @@ munmap_back:
 	vm_flags = vma->vm_flags;
 
 	if (vma_wants_writenotify(vma))
-		vma->vm_page_prot =
-			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
 	if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
@@ -1618,6 +1619,12 @@ static inline int expand_downwards(struct vm_area_struct *vma,
 	 */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
+
+	address &= PAGE_MASK;
+	error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+	if (error)
+		return error;
+
 	anon_vma_lock(vma);
 
 	/*
@@ -1625,8 +1632,6 @@ static inline int expand_downwards(struct vm_area_struct *vma,
 	 * is required to hold the mmap_sem in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
-	address &= PAGE_MASK;
-	error = 0;
 
 	/* Somebody else might have raced and expanded it already */
 	if (address < vma->vm_start) {
@@ -1937,6 +1942,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (is_hugepage_only_range(mm, addr, len))
 		return -EINVAL;
 
+	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
+	if (error)
+		return error;
+
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
 	error = arch_mmap_check(addr, len, flags);
@@ -2002,8 +2011,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	vma->vm_end = addr + len;
 	vma->vm_pgoff = pgoff;
 	vma->vm_flags = flags;
-	vma->vm_page_prot = protection_map[flags &
-				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	vma->vm_page_prot = vm_get_page_prot(flags);
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
@@ -2209,7 +2217,7 @@ int install_special_mapping(struct mm_struct *mm,
 	vma->vm_end = addr + len;
 
 	vma->vm_flags = vm_flags | mm->def_flags;
-	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
 	vma->vm_ops = &special_mapping_vmops;
 	vma->vm_private_data = pages;
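
Note on the first hunk: the old RLIMIT_DATA test charged the whole span from mm->start_data up to the requested brk against the limit, which also counts the unrelated gap between the end of the data segment and the start of the heap. The new test charges the data segment and the brk area separately. A quick illustration with made-up addresses (not taken from the patch):

	/* hypothetical layout, for illustration only */
	unsigned long start_data = 0x08048000;	/* data segment start  */
	unsigned long end_data   = 0x08050000;	/* 32 KiB of data      */
	unsigned long start_brk  = 0x09000000;	/* heap base           */
	unsigned long brk        = 0x09010000;	/* requested new break */

	unsigned long old_charge = brk - start_data;		/* ~15.8 MiB */
	unsigned long new_charge = (brk - start_brk) +
				   (end_data - start_data);	/* 96 KiB    */

Under the old check a modest RLIMIT_DATA could be exhausted by address-space layout alone; the new arithmetic only counts memory the process actually asked for.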
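The round_hint_to_min()/security_file_mmap() hunks keep mappings away from very low addresses: do_mmap_pgoff() bumps a non-MAP_FIXED hint before the architecture check, and expand_downwards()/do_brk() now ask the security hook (NULL file, addr_only set) whether the stack or brk area may reach the requested address. round_hint_to_min() is not defined in this file; assuming it enforces the usual mmap_min_addr floor, it plausibly behaves like this sketch:

	/* Sketch only: assumed behaviour of round_hint_to_min() as used in the
	 * do_mmap_pgoff() hunk -- a non-NULL hint below the minimum mappable
	 * address is rounded up to that floor, a NULL (0) hint is left alone.
	 */
	static unsigned long round_hint_to_min(unsigned long hint)
	{
		hint &= PAGE_MASK;
		if (hint && hint < mmap_min_addr)
			return PAGE_ALIGN(mmap_min_addr);
		return hint;
	}

As far as this diff shows, the effect is twofold: stack expansion and brk() can no longer grow into the protected low region without the security module's consent, and an ordinary mmap() hint that happens to fall below the floor is quietly moved up instead of (presumably) failing the same check later.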
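The remaining hunks all replace open-coded protection_map[] lookups with vm_get_page_prot(). The helper itself is not part of this diff; judging purely from the expressions being removed, it presumably reduces to the same four-bit table lookup, kept in one place:

	/* Sketch only: assumed shape of the helper the hunks converge on,
	 * reconstructed from the protection_map[] expressions removed above.
	 */
	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		return protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
	}

Read that way, vm_get_page_prot(vm_flags & ~VM_SHARED) in the vma_wants_writenotify() path preserves the old behaviour of indexing with VM_READ|VM_WRITE|VM_EXEC only, and the install_special_mapping() hunk swaps the magic '& 7' mask for the named helper.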