[PATCH] make 1-bit bitfields unsigned
index 497e502dfd6b6e54fc2bf902ea55fac36e70ac72..7be110e98d4c845e66ab29f8b804cf9a9298a5c9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1379,7 +1379,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                 * Check if the given range is hugepage aligned, and
                 * can be made suitable for hugepages.
                 */
-               ret = prepare_hugepage_range(addr, len);
+               ret = prepare_hugepage_range(addr, len, pgoff);
        } else {
                /*
                 * Ensure that a normal request is not falling in a
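
For context: the extra pgoff argument lets prepare_hugepage_range() reject a mapping whose file offset is misaligned, not just a bad address or length. A minimal sketch of the shape of that check follows; the PAGE_SHIFT/HPAGE_SHIFT values are illustrative assumptions (the kernel defines them per architecture), and the function name is marked as a sketch.

#include <errno.h>

/* Sketch only: the constants below are assumptions, the kernel's versions
 * are per-architecture. Models a huge-page alignment check for address,
 * length, and file offset (pgoff, counted in base pages). */
#define PAGE_SHIFT      12UL                    /* assume 4 KiB base pages */
#define HPAGE_SHIFT     21UL                    /* assume 2 MiB huge pages */
#define HPAGE_MASK      (~((1UL << HPAGE_SHIFT) - 1))

static int prepare_hugepage_range_sketch(unsigned long addr,
                                         unsigned long len,
                                         unsigned long pgoff)
{
        if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))  /* offset hugepage-aligned? */
                return -EINVAL;
        if (len & ~HPAGE_MASK)                    /* length a hugepage multiple? */
                return -EINVAL;
        if (addr & ~HPAGE_MASK)                   /* address hugepage-aligned? */
                return -EINVAL;
        return 0;
}
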
@@ -1736,7 +1736,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;
 
-       new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
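Note on this hunk: SLAB_KERNEL was a legacy alias kept in slab.h for the old kmem_cache API, historically just

#define SLAB_KERNEL     GFP_KERNEL      /* historical alias, since removed */

so the substitution is purely mechanical; the allocation still uses the may-sleep GFP_KERNEL context, which is fine here because split_vma() runs in process context.
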
@@ -1880,6 +1880,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if ((addr + len) > TASK_SIZE || (addr + len) < addr)
                return -EINVAL;
 
+       if (is_hugepage_only_range(mm, addr, len))
+               return -EINVAL;
+
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = arch_mmap_check(addr, len, flags);
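
For context: the new check stops brk() from growing the heap into an address range that an architecture reserves exclusively for huge-page mappings; on most architectures is_hugepage_only_range() is a constant 0 and the branch compiles away. A sketch of the shape of such a check, with hypothetical region boundaries:

/* Sketch only: the boundaries are hypothetical. Architectures with
 * segmented huge-page areas (e.g. ppc64) test whether [addr, addr + len)
 * overlaps a region where only huge-page mappings may live. */
static int is_hugepage_only_range_sketch(unsigned long addr,
                                         unsigned long len)
{
        const unsigned long huge_start = 0x100000000UL; /* hypothetical */
        const unsigned long huge_end   = 0x140000000UL; /* hypothetical */

        return addr < huge_end && addr + len > huge_start;
}
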
@@ -2054,7 +2057,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                    vma_start < new_vma->vm_end)
                        *vmap = new_vma;
        } else {
-               new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+               new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
                        pol = mpol_copy(vma_policy(vma));
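
Same mechanical SLAB_KERNEL -> GFP_KERNEL substitution as in split_vma() above. The surrounding idiom is worth a note: copy_vma() duplicates the whole vm_area_struct by struct assignment (*new_vma = *vma) and then deep-copies the pointer members that must not be shared, here the NUMA policy via mpol_copy(). A self-contained userspace sketch of that copy-then-fix-up idiom (every name below is illustrative, not the kernel's):

#include <stdlib.h>

struct policy { int data; };

struct vma_like {
        unsigned long  vm_start, vm_end;
        struct policy *vm_policy;       /* must not be shared after a copy */
};

/* Assumes pol is non-NULL; stands in for mpol_copy(). */
static struct policy *policy_dup(const struct policy *pol)
{
        struct policy *copy = malloc(sizeof(*copy));

        if (copy)
                *copy = *pol;
        return copy;
}

static struct vma_like *vma_dup(const struct vma_like *vma)
{
        /* malloc() here models kmem_cache_alloc(vm_area_cachep, GFP_KERNEL) */
        struct vma_like *new_vma = malloc(sizeof(*new_vma));

        if (!new_vma)
                return NULL;
        *new_vma = *vma;                /* struct assignment: shallow copy of every field */
        new_vma->vm_policy = policy_dup(vma->vm_policy);
        if (!new_vma->vm_policy) {      /* undo the partial copy on failure */
                free(new_vma);
                return NULL;
        }
        return new_vma;
}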