From 4d7672b46244abffea1953e55688c0ea143dd617 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 16 Dec 2005 10:21:23 -0800
Subject: [PATCH] Make sure we copy pages inserted with "vm_insert_page()" on
 fork

The logic that decides that a fork() might be able to avoid copying a
VM area when it can be re-created by page faults didn't know about the
new vm_insert_page() case.

Also make some things a bit more anal wrt VM_PFNMAP.

Pointed out by Hugh Dickins

Signed-off-by: Linus Torvalds
---
 include/linux/mm.h | 1 +
 mm/memory.c        | 3 ++-
 mm/mmap.c          | 2 +-
 mm/mremap.c        | 2 +-
 4 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index e5677f4567..a06a84d347 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -163,6 +163,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
diff --git a/mm/memory.c b/mm/memory.c
index d22f78c8a3..d8dde07a36 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -574,7 +574,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
 		if (!vma->anon_vma)
 			return 0;
 	}
@@ -1228,6 +1228,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 		return -EFAULT;
 	if (!page_count(page))
 		return -EINVAL;
+	vma->vm_flags |= VM_INSERTPAGE;
 	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
diff --git a/mm/mmap.c b/mm/mmap.c
index 11ca5927d5..64ba4dbcb7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -611,7 +611,7 @@ again:			remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
diff --git a/mm/mremap.c b/mm/mremap.c
index b535438c36..ddaeee9a0b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -323,7 +323,7 @@ unsigned long do_mremap(unsigned long addr,
 	/* We can't remap across vm area boundaries */
 	if (old_len > vma->vm_end - addr)
 		goto out;
-	if (vma->vm_flags & VM_DONTEXPAND) {
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
 		if (new_len > old_len)
 			goto out;
 	}
-- 
2.39.5
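
Illustrative note (not part of the patch): the case this fixes is a driver that
populates a user mapping with vm_insert_page() from its mmap handler. Such a
vma has no anon_vma and, before this change, carried no flag telling
copy_page_range() that its contents cannot be re-created by a later page
fault, so fork() could skip the page-table copy and leave the child with an
empty mapping. Below is a minimal sketch of such a driver; the names
exdev_mmap, exdev_page and exdev_fops are hypothetical, and only
vm_insert_page() and the file_operations hookup are the real kernel
interfaces being discussed.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical kernel page shared with userspace; assumed to be
 * allocated elsewhere (e.g. with alloc_page()) before mmap is called. */
static struct page *exdev_page;

static int exdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start < PAGE_SIZE)
		return -EINVAL;
	/*
	 * Insert the kernel page directly into the user page tables.
	 * No fault handler exists that could bring it back on demand,
	 * so with this patch the vma is marked VM_INSERTPAGE and
	 * fork() copies its page tables instead of relying on faults.
	 */
	return vm_insert_page(vma, vma->vm_start, exdev_page);
}

static const struct file_operations exdev_fops = {
	.mmap = exdev_mmap,
};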