X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmmap.c;h=7be110e98d4c845e66ab29f8b804cf9a9298a5c9;hb=940cc2df935d00e704c7b8366bdaedda379abf85;hp=c1868ecdbc5facfd8ba25835e71e05cfee9d9680;hpb=347ce434d57da80fd5809c0c836f206a50999c26;p=linux-2.6

diff --git a/mm/mmap.c b/mm/mmap.c
index c1868ecdbc..7be110e98d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,10 @@
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
 
+#ifndef arch_mmap_check
+#define arch_mmap_check(addr, len, flags)	(0)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -60,6 +64,13 @@ pgprot_t protection_map[16] = {
 	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	return protection_map[vm_flags &
+				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;	/* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
@@ -105,7 +116,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 		 * which are reclaimable, under pressure.  The dentry
 		 * cache and most inode caches should fall into this
 		 */
-		free += atomic_read(&slab_reclaim_pages);
+		free += global_page_state(NR_SLAB_RECLAIMABLE);
 
 		/*
 		 * Leave the last 3% for root
@@ -889,17 +900,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	int accountable = 1;
 	unsigned long charged = 0, reqprot = prot;
 
-	if (file) {
-		if (is_file_hugepages(file))
-			accountable = 0;
-
-		if (!file->f_op || !file->f_op->mmap)
-			return -ENODEV;
-
-		if ((prot & PROT_EXEC) &&
-		    (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
-			return -EPERM;
-	}
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC?
 	 *
@@ -913,6 +913,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	if (!len)
 		return -EINVAL;
 
+	error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
 	if (!len || len > TASK_SIZE)
@@ -985,6 +989,16 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 		case MAP_PRIVATE:
 			if (!(file->f_mode & FMODE_READ))
 				return -EACCES;
+			if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
+				if (vm_flags & VM_EXEC)
+					return -EPERM;
+				vm_flags &= ~VM_MAYEXEC;
+			}
+			if (is_file_hugepages(file))
+				accountable = 0;
+
+			if (!file->f_op || !file->f_op->mmap)
+				return -ENODEV;
 			break;
 
 		default:
@@ -1090,12 +1104,6 @@ munmap_back:
 			goto free_vma;
 	}
 
-	/* Don't make the VMA automatically writable if it's shared, but the
-	 * backer wishes to know when pages are first written to */
-	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-		vma->vm_page_prot =
-			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-
 	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
 	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
 	 * that memory reservation must be checked; but that reservation
@@ -1113,6 +1121,10 @@ munmap_back:
 	pgoff = vma->vm_pgoff;
 	vm_flags = vma->vm_flags;
 
+	if (vma_wants_writenotify(vma))
+		vma->vm_page_prot =
+			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
 	if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
 		file = vma->vm_file;
@@ -1367,7 +1379,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		 * Check if the given range is hugepage aligned, and
 		 * can be made suitable for hugepages.
 		 */
-		ret = prepare_hugepage_range(addr, len);
+		ret = prepare_hugepage_range(addr, len, pgoff);
 	} else {
 		/*
 		 * Ensure that a normal request is not falling in a
@@ -1724,7 +1736,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	if (mm->map_count >= sysctl_max_map_count)
 		return -ENOMEM;
 
-	new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 
@@ -1859,6 +1871,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	unsigned long flags;
 	struct rb_node ** rb_link, * rb_parent;
 	pgoff_t pgoff = addr >> PAGE_SHIFT;
+	int error;
 
 	len = PAGE_ALIGN(len);
 	if (!len)
@@ -1867,6 +1880,15 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
 		return -EINVAL;
 
+	if (is_hugepage_only_range(mm, addr, len))
+		return -EINVAL;
+
+	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
+	error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
 	/*
 	 * mlock MCL_FUTURE?
 	 */
@@ -1907,8 +1929,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (security_vm_enough_memory(len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
 	/* Can we just expand an old private anonymous mapping? */
 	if (vma_merge(mm, prev, addr, addr + len, flags,
 					NULL, NULL, pgoff, NULL))
@@ -2037,7 +2057,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		    vma_start < new_vma->vm_end)
 			*vmap = new_vma;
 	} else {
-		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (new_vma) {
 			*new_vma = *vma;
 			pol = mpol_copy(vma_policy(vma));
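
The first hunk above relies on the kernel's fallback-macro pattern: generic code (here mm/mmap.c) defines arch_mmap_check() as a no-op unless an architecture header has already defined it, so an architecture can veto an mmap() or brk() request before any VMA work is done. The stand-alone C sketch below only illustrates that pattern; the HYPOTHETICAL_ARCH guard, the below-64KiB policy and do_mmap_like() are invented for the example and are not taken from the kernel.

/* Minimal user-space sketch of the fallback-macro pattern used by
 * arch_mmap_check() in the patch above.  HYPOTHETICAL_ARCH, the
 * below-64KiB policy and do_mmap_like() are illustrative only. */
#include <stdio.h>
#include <errno.h>

/* Pretend this block lives in an architecture header included first. */
#ifdef HYPOTHETICAL_ARCH
#define arch_mmap_check(addr, len, flags) \
	(((addr) < 0x10000) ? -EINVAL : 0)	/* refuse mappings below 64 KiB */
#endif

/* Generic code supplies the no-op default, as the patch does. */
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

/* Stand-in for the arch_mmap_check() call sites the patch adds to
 * do_mmap_pgoff() and do_brk(). */
static int do_mmap_like(unsigned long addr, unsigned long len,
			unsigned long flags)
{
	int error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;	/* architecture vetoed the request */
	return 0;		/* continue with the normal mapping path */
}

int main(void)
{
	/* Built with -DHYPOTHETICAL_ARCH the first call returns -EINVAL (-22);
	 * without it the no-op default accepts both addresses. */
	printf("addr 0x0000:  %d\n", do_mmap_like(0x0000, 4096, 0));
	printf("addr 0x20000: %d\n", do_mmap_like(0x20000, 4096, 0));
	return 0;
}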