diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 199088ee969b1ec956fb85337e614c9ec42c92ab..c24d81a5a040e2a91825673434339e3c2f1dcdda 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -3,6 +3,7 @@
 #include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
+#include <linux/ptrace.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
 
@@ -142,6 +143,9 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
        dev_t dev = 0;
        int len;
 
+       if (maps_protect && !ptrace_may_attach(task))
+               return -EACCES;
+
        if (file) {
                struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
                dev = inode->i_sb->s_dev;
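The maps_protect flag tested by the new check is not defined in any hunk of this file; it is presumably an integer knob set from user space elsewhere in this series. A minimal kernel-context sketch of that assumption, using only the ptrace_may_attach() helper that the new <linux/ptrace.h> include provides (the helper name check_maps_access is illustrative, not part of the patch):

	/* Sketch only: maps_protect is assumed to be defined, and made
	 * user-tunable, outside this file.  When it is set, reading another
	 * task's map files requires the same privilege as ptrace-attaching
	 * to it; otherwise the read is denied outright. */
	extern int maps_protect;

	static inline int check_maps_access(struct task_struct *task)
	{
		if (maps_protect && !ptrace_may_attach(task))
			return -EACCES;
		return 0;
	}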
@@ -195,7 +199,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
                           "Shared_Dirty:   %8lu kB\n"
                           "Private_Clean:  %8lu kB\n"
                           "Private_Dirty:  %8lu kB\n"
-                          "Pgs_Referenced: %8lu kB\n",
+                          "Referenced:     %8lu kB\n",
                           (vma->vm_end - vma->vm_start) >> 10,
                           mss->resident >> 10,
                           mss->shared_clean  >> 10,
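The renamed Referenced: field is printed from the mem_size_stats structure, but the code that fills it lies outside the hunks shown here. A hedged sketch of what that accounting presumably looks like inside the per-PTE loop of smaps_pte_range(); the member name mss->referenced and its exact placement are assumptions:

	/* Count pages whose hardware accessed bit or PG_referenced flag
	 * indicates they were touched since the bits were last cleared. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += PAGE_SIZE;

	/* ...later emitted by show_map_internal(), presumably as
	 * mss->referenced >> 10, like the other kB counters above. */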
@@ -214,9 +218,9 @@ static int show_map(struct seq_file *m, void *v)
        return show_map_internal(m, v, NULL);
 }
 
-static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-                         unsigned long addr, unsigned long end,
-                         void *private)
+static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+                           unsigned long addr, unsigned long end,
+                           void *private)
 {
        struct mem_size_stats *mss = private;
        pte_t *pte, ptent;
@@ -254,8 +258,34 @@ static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        cond_resched();
 }
 
-static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
-                                      unsigned long addr, unsigned long end)
+static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+                                unsigned long addr, unsigned long end,
+                                void *private)
+{
+       pte_t *pte, ptent;
+       spinlock_t *ptl;
+       struct page *page;
+
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
+               ptent = *pte;
+               if (!pte_present(ptent))
+                       continue;
+
+               page = vm_normal_page(vma, addr, ptent);
+               if (!page)
+                       continue;
+
+               /* Clear accessed and referenced bits. */
+               ptep_test_and_clear_young(vma, addr, pte);
+               ClearPageReferenced(page);
+       }
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
+}
+
+static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
+                                 unsigned long addr, unsigned long end)
 {
        pmd_t *pmd;
        unsigned long next;
@@ -269,8 +299,8 @@ static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
        }
 }
 
-static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
-                                      unsigned long addr, unsigned long end)
+static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
+                                 unsigned long addr, unsigned long end)
 {
        pud_t *pud;
        unsigned long next;
@@ -280,15 +310,24 @@ static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               for_each_pmd_in_pud(walker, pud, addr, next);
+               walk_pmd_range(walker, pud, addr, next);
        }
 }
 
-static inline void for_each_pmd(struct vm_area_struct *vma,
-                               void (*action)(struct vm_area_struct *, pmd_t *,
-                                              unsigned long, unsigned long,
-                                              void *),
-                               void *private)
+/*
+ * walk_page_range - walk the page tables of a VMA with a callback
+ * @vma - VMA to walk
+ * @action - callback invoked for every bottom-level (PTE) page table
+ * @private - private data passed to the callback function
+ *
+ * Recursively walk the page table for the memory area in a VMA, calling
+ * a callback for every bottom-level (PTE) page table.
+ */
+static inline void walk_page_range(struct vm_area_struct *vma,
+                                  void (*action)(struct vm_area_struct *,
+                                                 pmd_t *, unsigned long,
+                                                 unsigned long, void *),
+                                  void *private)
 {
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
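The comment above describes walk_page_range() as a generic walker: the callback receives the VMA, the pmd covering the range, the [addr, end) span, and the caller's private pointer, exactly as smaps_pte_range() and clear_refs_pte_range() do. A hedged sketch of a third, hypothetical client that counts present PTEs; the callback name and counter are illustrative only:

	/* Callback in the shape walk_page_range() expects: invoked once per
	 * bottom-level (PTE) page table range, using the same lock pattern
	 * as the callbacks added in this patch. */
	static void count_present_ptes(struct vm_area_struct *vma, pmd_t *pmd,
				       unsigned long addr, unsigned long end,
				       void *private)
	{
		unsigned long *present = private;
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
		for (; addr != end; pte++, addr += PAGE_SIZE)
			if (pte_present(*pte))
				(*present)++;
		pte_unmap_unlock(pte - 1, ptl);
	}

	/* Call site, mirroring show_smap() below:
	 *	unsigned long present = 0;
	 *	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
	 *		walk_page_range(vma, count_present_ptes, &present);
	 */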
@@ -305,7 +344,7 @@ static inline void for_each_pmd(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               for_each_pud_in_pgd(&walker, pgd, addr, next);
+               walk_pud_range(&walker, pgd, addr, next);
        }
 }
 
@@ -316,10 +355,22 @@ static int show_smap(struct seq_file *m, void *v)
 
        memset(&mss, 0, sizeof mss);
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-               for_each_pmd(vma, smaps_one_pmd, &mss);
+               walk_page_range(vma, smaps_pte_range, &mss);
        return show_map_internal(m, v, &mss);
 }
 
+void clear_refs_smap(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma;
+
+       down_read(&mm->mmap_sem);
+       for (vma = mm->mmap; vma; vma = vma->vm_next)
+               if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+                       walk_page_range(vma, clear_refs_pte_range, NULL);
+       flush_tlb_mm(mm);
+       up_read(&mm->mmap_sem);
+}
+
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
        struct proc_maps_private *priv = m->private;
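Taken together, the Referenced: counter and clear_refs_smap() allow simple working-set sampling from user space, assuming the rest of this series exposes the clear operation as a write-only /proc/<pid>/clear_refs file (as the function name suggests). A stand-alone user-space sketch; the 5-second interval and the "1" written to the file are illustrative:

	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		char path[64], line[256];
		unsigned long kb, total = 0;
		FILE *f;

		if (argc != 2)
			return 1;

		/* 1. Clear accessed/referenced bits for every mapping. */
		snprintf(path, sizeof(path), "/proc/%s/clear_refs", argv[1]);
		f = fopen(path, "w");
		if (!f)
			return 1;
		fputs("1\n", f);
		fclose(f);

		/* 2. Let the task touch its memory for a while. */
		sleep(5);

		/* 3. Sum the per-VMA Referenced: counters from smaps. */
		snprintf(path, sizeof(path), "/proc/%s/smaps", argv[1]);
		f = fopen(path, "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (sscanf(line, "Referenced: %lu kB", &kb) == 1)
				total += kb;
		fclose(f);

		printf("~%lu kB referenced in the last interval\n", total);
		return 0;
	}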
@@ -465,11 +516,22 @@ const struct file_operations proc_maps_operations = {
 #ifdef CONFIG_NUMA
 extern int show_numa_map(struct seq_file *m, void *v);
 
+static int show_numa_map_checked(struct seq_file *m, void *v)
+{
+       struct proc_maps_private *priv = m->private;
+       struct task_struct *task = priv->task;
+
+       if (maps_protect && !ptrace_may_attach(task))
+               return -EACCES;
+
+       return show_numa_map(m, v);
+}
+
 static struct seq_operations proc_pid_numa_maps_op = {
         .start  = m_start,
         .next   = m_next,
         .stop   = m_stop,
-        .show   = show_numa_map
+        .show   = show_numa_map_checked
 };
 
 static int numa_maps_open(struct inode *inode, struct file *file)