err.no Git - linux-2.6/commitdiff
[PATCH] mm: tracking shared dirty pages
author     Peter Zijlstra <a.p.zijlstra@chello.nl>
           Tue, 26 Sep 2006 06:30:57 +0000 (23:30 -0700)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Tue, 26 Sep 2006 15:48:44 +0000 (08:48 -0700)

Tracking of dirty pages in shared writeable mmap()s.

The idea is simple: write protect clean shared writeable pages, catch the
write-fault, make the page writeable and set it dirty.  On page write-back,
clean all the PTE dirty bits and write protect them once again.
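
A minimal sketch of the two PTE transitions (helper names invented for
illustration only; the real code is in do_wp_page() and page_mkclean_one()
in the diff below):

    /* on a write fault: make the PTE writeable and dirty */
    static inline pte_t sketch_pte_mkwritable(pte_t pte)
    {
        return pte_mkdirty(pte_mkwrite(pte));
    }

    /* on page write-back: clean the PTE and write protect it again */
    static inline pte_t sketch_pte_mkclean(pte_t pte)
    {
        return pte_wrprotect(pte_mkclean(pte));
    }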

The implementation is a tad harder, mainly because the default
backing_dev_info capabilities were too loosely maintained.  Hence it is not
enough to test the backing_dev_info for cap_account_dirty.

The current heuristic is as follows; a VMA is eligible when (a condensed
sketch of the combined check follows this list):
 - it is shared writeable
    (vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)
 - it is not a 'special' mapping
    (vm_flags & (VM_PFNMAP|VM_INSERTPAGE)) == 0
 - the backing_dev_info is cap_account_dirty
    mapping_cap_account_dirty(vma->vm_file->f_mapping)
 - f_op->mmap() didn't change the default page protection
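
Condensed, the combined check reads roughly as below; the patch implements it
as the new vma_wants_writenotify() helper in include/linux/mm.h, which in
addition returns 1 right away when the backer supplies ->page_mkwrite:

    int wants_writenotify =
        (vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED) &&
        !(vma->vm_flags & (VM_PFNMAP|VM_INSERTPAGE)) &&
        vma->vm_file && vma->vm_file->f_mapping &&
        mapping_cap_account_dirty(vma->vm_file->f_mapping) &&
        pgprot_val(vma->vm_page_prot) ==
            pgprot_val(protection_map[vma->vm_flags &
                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);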

Pages from remap_pfn_range() are explicitly excluded because their COW
semantics are already horrid enough (see vm_normal_page() in do_wp_page()) and
because they don't have a backing store anyway.

mprotect() is taught about the new behaviour as well.  However, it overrides
the last condition.

Cleaning the pages on write-back is done with page_mkclean(), a new rmap call.
It can be called on any page, but is currently only implemented for mapped
pages; if the page is found to belong to a VMA that accounts dirty pages, it
will also wrprotect the PTE.
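
The write-back paths then use it as in this condensed excerpt from the
clear_page_dirty_for_io() change in the mm/page-writeback.c hunk below
(page_mkclean() expects a locked page, see the BUG_ON() in mm/rmap.c):

    if (TestClearPageDirty(page)) {
        if (mapping_cap_account_dirty(mapping)) {
            page_mkclean(page);     /* clean and wrprotect all PTEs */
            dec_zone_page_state(page, NR_FILE_DIRTY);
        }
        return 1;
    }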

Finally, in fs/buffer.c:try_to_free_buffers(), remove clear_page_dirty() from
under ->private_lock.  This seems to be safe, since ->private_lock is used to
serialize access to the buffers, not the page itself.  This is needed because
clear_page_dirty() will call into page_mkclean() and would thereby violate
locking order.
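
The resulting ordering in try_to_free_buffers(), condensed from the
fs/buffer.c hunk below:

    spin_lock(&mapping->private_lock);
    ret = drop_buffers(page, &buffers_to_free);
    spin_unlock(&mapping->private_lock);    /* drop the lock first ... */
    if (ret)
        clear_page_dirty(page);             /* ... may call page_mkclean() */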

[dhowells@redhat.com: Provide a page_mkclean() implementation for NOMMU]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
fs/buffer.c
include/linux/mm.h
include/linux/rmap.h
mm/memory.c
mm/mmap.c
mm/mprotect.c
mm/page-writeback.c
mm/rmap.c

index 71649ef9b6586696c695e2d80c000ceedaa51282..3b6d701073e7f09a311f10297a58d8c06e715df5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2987,6 +2987,7 @@ int try_to_free_buffers(struct page *page)
 
        spin_lock(&mapping->private_lock);
        ret = drop_buffers(page, &buffers_to_free);
+       spin_unlock(&mapping->private_lock);
        if (ret) {
                /*
                 * If the filesystem writes its buffers by hand (eg ext3)
@@ -2998,7 +2999,6 @@ int try_to_free_buffers(struct page *page)
                 */
                clear_page_dirty(page);
        }
-       spin_unlock(&mapping->private_lock);
 out:
        if (buffers_to_free) {
                struct buffer_head *bh = buffers_to_free;
index 7d20b25c58fcb055f1644759af6552dd394f3636..449841413cf194145a8d7a965737a8e76829992f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
 #include <linux/fs.h>
 #include <linux/mutex.h>
 #include <linux/debug_locks.h>
+#include <linux/backing-dev.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -810,6 +811,39 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+static inline int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+       unsigned int vm_flags = vma->vm_flags;
+
+       /* If it was private or non-writable, the write bit is already clear */
+       if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+               return 0;
+
+       /* The backer wishes to know when pages are first written to? */
+       if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+               return 1;
+
+       /* The open routine did something to the protections already? */
+       if (pgprot_val(vma->vm_page_prot) !=
+           pgprot_val(protection_map[vm_flags &
+                   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+               return 0;
+
+       /* Specialty mapping? */
+       if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+               return 0;
+
+       /* Can the mapping track the dirty pages? */
+       return vma->vm_file && vma->vm_file->f_mapping &&
+               mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
 extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
 
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
index bf97b0900014ae204869139f0febb671a7988df6..db2c1df4fef96acdc9715b5fb7baaa88f27c8e0f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -103,6 +103,14 @@ pte_t *page_check_address(struct page *, struct mm_struct *,
  */
 unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 
+/*
+ * Cleans the PTEs of shared mappings.
+ * (and since clean PTEs should also be readonly, write protects them too)
+ *
+ * returns the number of cleaned PTEs.
+ */
+int page_mkclean(struct page *);
+
 #else  /* !CONFIG_MMU */
 
 #define anon_vma_init()                do {} while (0)
@@ -112,6 +120,12 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 #define page_referenced(page,l) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
+static inline int page_mkclean(struct page *page)
+{
+       return 0;
+}
+
+
 #endif /* CONFIG_MMU */
 
 /*
index 109e9866237ec7992daa1e7c441e07dbfe976151..fa941b169071d4573b3deb6bb0dd0cb2f2856c17 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1458,14 +1458,19 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct page *old_page, *new_page;
        pte_t entry;
-       int reuse, ret = VM_FAULT_MINOR;
+       int reuse = 0, ret = VM_FAULT_MINOR;
+       struct page *dirty_page = NULL;
 
        old_page = vm_normal_page(vma, address, orig_pte);
        if (!old_page)
                goto gotten;
 
-       if (unlikely((vma->vm_flags & (VM_SHARED|VM_WRITE)) ==
-                               (VM_SHARED|VM_WRITE))) {
+       /*
+        * Only catch write-faults on shared writable pages, read-only
+        * shared pages can get COWed by get_user_pages(.write=1, .force=1).
+        */
+       if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+                                       (VM_WRITE|VM_SHARED))) {
                if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
                        /*
                         * Notify the address space that the page is about to
@@ -1494,13 +1499,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        if (!pte_same(*page_table, orig_pte))
                                goto unlock;
                }
-
+               dirty_page = old_page;
+               get_page(dirty_page);
                reuse = 1;
        } else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
                reuse = can_share_swap_page(old_page);
                unlock_page(old_page);
-       } else {
-               reuse = 0;
        }
 
        if (reuse) {
@@ -1566,6 +1570,10 @@ gotten:
                page_cache_release(old_page);
 unlock:
        pte_unmap_unlock(page_table, ptl);
+       if (dirty_page) {
+               set_page_dirty(dirty_page);
+               put_page(dirty_page);
+       }
        return ret;
 oom:
        if (old_page)
@@ -2098,6 +2106,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned int sequence = 0;
        int ret = VM_FAULT_MINOR;
        int anon = 0;
+       struct page *dirty_page = NULL;
 
        pte_unmap(page_table);
        BUG_ON(vma->vm_flags & VM_PFNMAP);
@@ -2192,6 +2201,10 @@ retry:
                } else {
                        inc_mm_counter(mm, file_rss);
                        page_add_file_rmap(new_page);
+                       if (write_access) {
+                               dirty_page = new_page;
+                               get_page(dirty_page);
+                       }
                }
        } else {
                /* One of our sibling threads was faster, back out. */
@@ -2204,6 +2217,10 @@ retry:
        lazy_mmu_prot_update(entry);
 unlock:
        pte_unmap_unlock(page_table, ptl);
+       if (dirty_page) {
+               set_page_dirty(dirty_page);
+               put_page(dirty_page);
+       }
        return ret;
 oom:
        page_cache_release(new_page);
index d799d896d74aae5f19c604928eb6a52738676d66..8507ee9cd573c3e99b859e910d76c6e397fe2cdd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1105,12 +1105,6 @@ munmap_back:
                        goto free_vma;
        }
 
-       /* Don't make the VMA automatically writable if it's shared, but the
-        * backer wishes to know when pages are first written to */
-       if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-               vma->vm_page_prot =
-                       protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-
        /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
         * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
         * that memory reservation must be checked; but that reservation
@@ -1128,6 +1122,10 @@ munmap_back:
        pgoff = vma->vm_pgoff;
        vm_flags = vma->vm_flags;
 
+       if (vma_wants_writenotify(vma))
+               vma->vm_page_prot =
+                       protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
        if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
                file = vma->vm_file;
index 638edabaff7118206b379c0436ffd6aa3e2fda40..367b7f6c063736958e7f266e3a21fd32c3613245 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -123,8 +123,6 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
-       unsigned int mask;
-       pgprot_t newprot;
        pgoff_t pgoff;
        int error;
 
@@ -176,24 +174,21 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        }
 
 success:
-       /* Don't make the VMA automatically writable if it's shared, but the
-        * backer wishes to know when pages are first written to */
-       mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
-       if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-               mask &= ~VM_SHARED;
-
-       newprot = protection_map[newflags & mask];
-
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
-       vma->vm_page_prot = newprot;
+       vma->vm_page_prot = protection_map[newflags &
+               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+       if (vma_wants_writenotify(vma))
+               vma->vm_page_prot = protection_map[newflags &
+                       (VM_READ|VM_WRITE|VM_EXEC)];
+
        if (is_vm_hugetlb_page(vma))
-               hugetlb_change_protection(vma, start, end, newprot);
+               hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
        else
-               change_protection(vma, start, end, newprot);
+               change_protection(vma, start, end, vma->vm_page_prot);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        return 0;
index 77a0bc4e261aba1a9290a389f99f299251cc38c8..1c87430b7a25ea40f74397502e285d5b1fed671f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -23,6 +23,7 @@
 #include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 #include <linux/mpage.h>
+#include <linux/rmap.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
@@ -550,7 +551,7 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
-               ret =  mapping->a_ops->writepages(mapping, wbc);
+               ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
@@ -712,9 +713,15 @@ int test_clear_page_dirty(struct page *page)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
-                       if (mapping_cap_account_dirty(mapping))
-                               __dec_zone_page_state(page, NR_FILE_DIRTY);
                        write_unlock_irqrestore(&mapping->tree_lock, flags);
+                       /*
+                        * We can continue to use `mapping' here because the
+                        * page is locked, which pins the address_space
+                        */
+                       if (mapping_cap_account_dirty(mapping)) {
+                               page_mkclean(page);
+                               dec_zone_page_state(page, NR_FILE_DIRTY);
+                       }
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -744,8 +751,10 @@ int clear_page_dirty_for_io(struct page *page)
 
        if (mapping) {
                if (TestClearPageDirty(page)) {
-                       if (mapping_cap_account_dirty(mapping))
+                       if (mapping_cap_account_dirty(mapping)) {
+                               page_mkclean(page);
                                dec_zone_page_state(page, NR_FILE_DIRTY);
+                       }
                        return 1;
                }
                return 0;
index 40158b59729ec3f6dac4390ecd5a57db7c719b52..e2155d791d9967a6e1bbf496ee9b8728dd9bb4c5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -434,6 +434,71 @@ int page_referenced(struct page *page, int is_locked)
        return referenced;
 }
 
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address;
+       pte_t *pte, entry;
+       spinlock_t *ptl;
+       int ret = 0;
+
+       address = vma_address(page, vma);
+       if (address == -EFAULT)
+               goto out;
+
+       pte = page_check_address(page, mm, address, &ptl);
+       if (!pte)
+               goto out;
+
+       if (!pte_dirty(*pte) && !pte_write(*pte))
+               goto unlock;
+
+       entry = ptep_get_and_clear(mm, address, pte);
+       entry = pte_mkclean(entry);
+       entry = pte_wrprotect(entry);
+       ptep_establish(vma, address, pte, entry);
+       lazy_mmu_prot_update(entry);
+       ret = 1;
+
+unlock:
+       pte_unmap_unlock(pte, ptl);
+out:
+       return ret;
+}
+
+static int page_mkclean_file(struct address_space *mapping, struct page *page)
+{
+       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       struct vm_area_struct *vma;
+       struct prio_tree_iter iter;
+       int ret = 0;
+
+       BUG_ON(PageAnon(page));
+
+       spin_lock(&mapping->i_mmap_lock);
+       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               if (vma->vm_flags & VM_SHARED)
+                       ret += page_mkclean_one(page, vma);
+       }
+       spin_unlock(&mapping->i_mmap_lock);
+       return ret;
+}
+
+int page_mkclean(struct page *page)
+{
+       int ret = 0;
+
+       BUG_ON(!PageLocked(page));
+
+       if (page_mapped(page)) {
+               struct address_space *mapping = page_mapping(page);
+               if (mapping)
+                       ret = page_mkclean_file(mapping, page);
+       }
+
+       return ret;
+}
+
 /**
  * page_set_anon_rmap - setup new anonymous rmap
  * @page:      the page to add the mapping to