err.no Git - linux-2.6/commitdiff
[PATCH] zoned vm counters: split NR_ANON_PAGES off from NR_FILE_MAPPED
author Christoph Lameter <clameter@sgi.com>
Fri, 30 Jun 2006 08:55:36 +0000 (01:55 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Fri, 30 Jun 2006 18:25:35 +0000 (11:25 -0700)
The current NR_FILE_MAPPED is used by zone reclaim and the dirty load
calculation as the number of mapped pagecache pages.  However, that is not
true.  NR_FILE_MAPPED includes the mapped anonymous pages.  This patch
separates those and therefore allows an accurate tracking of the anonymous
pages per zone.

It then becomes possible to determine the number of unmapped pages per zone
and we can avoid scanning for unmapped pages if there are none.

Also it may now be possible to determine the mapped/unmapped ratio in
get_dirty_limit.  Isn't the number of anonymous pages irrelevant in that
calculation?

Note that this will change the meaning of the number of mapped pages reported
in /proc/vmstat, /proc/meminfo, and in the per-node statistics.  This may affect
user space tools that monitor these counters!  NR_FILE_MAPPED works like
NR_FILE_DIRTY.  It is only valid for pagecache pages.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
drivers/base/node.c
fs/proc/proc_misc.c
include/linux/mmzone.h
mm/page-writeback.c
mm/rmap.c
mm/vmscan.c
mm/vmstat.c

index ae9e3fea4b31263c24328320e9bc6241306ebf75..c3bf05158c6d1bba7afb9e05e145980f5596bb77 100644 (file)
@@ -71,6 +71,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
                       "Node %d Writeback:    %8lu kB\n"
                       "Node %d FilePages:    %8lu kB\n"
                       "Node %d Mapped:       %8lu kB\n"
+                      "Node %d AnonPages:    %8lu kB\n"
                       "Node %d Slab:         %8lu kB\n",
                       nid, K(i.totalram),
                       nid, K(i.freeram),
@@ -85,6 +86,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
                       nid, K(ps.nr_writeback),
                       nid, K(node_page_state(nid, NR_FILE_PAGES)),
                       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
+                      nid, K(node_page_state(nid, NR_ANON_PAGES)),
                       nid, K(ps.nr_slab));
        n += hugetlb_report_node_meminfo(nid, buf + n);
        return n;
index 1af12fd77fe6869030640b03dce00999e7e0e2bc..ff809656ce3153ff24ede45510fdea713e2468c6 100644 (file)
@@ -168,6 +168,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                "SwapFree:     %8lu kB\n"
                "Dirty:        %8lu kB\n"
                "Writeback:    %8lu kB\n"
+               "AnonPages:    %8lu kB\n"
                "Mapped:       %8lu kB\n"
                "Slab:         %8lu kB\n"
                "CommitLimit:  %8lu kB\n"
@@ -191,6 +192,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                K(i.freeswap),
                K(ps.nr_dirty),
                K(ps.nr_writeback),
+               K(global_page_state(NR_ANON_PAGES)),
                K(global_page_state(NR_FILE_MAPPED)),
                K(ps.nr_slab),
                K(allowed),
index 08be91e6cecf9fe05d6ae95a3234f42d95ae9ff2..4833abd4458bec70b2d6573a4d26ed1ac2441677 100644 (file)
@@ -47,7 +47,8 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
-       NR_FILE_MAPPED, /* mapped into pagetables.
+       NR_ANON_PAGES,  /* Mapped anonymous pages */
+       NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
                           only modified from process context */
        NR_FILE_PAGES,
        NR_VM_ZONE_STAT_ITEMS };
index 60c7244c42e48760700840debe0b61408fcac7c9..0faacfe189093b88c0cb2b23d904639e29e0e72f 100644 (file)
@@ -111,7 +111,8 @@ static void get_writeback_state(struct writeback_state *wbs)
 {
        wbs->nr_dirty = read_page_state(nr_dirty);
        wbs->nr_unstable = read_page_state(nr_unstable);
-       wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
+       wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
+                               global_page_state(NR_ANON_PAGES);
        wbs->nr_writeback = read_page_state(nr_writeback);
 }
 
index af5e9808e65dc1ab3ecfb79ed5cd8b78c24955ed..40158b59729ec3f6dac4390ecd5a57db7c719b52 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
         * nr_mapped state can be updated without turning off
         * interrupts because it is not modified via interrupt.
         */
-       __inc_zone_page_state(page, NR_FILE_MAPPED);
+       __inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
 /**
@@ -531,7 +531,8 @@ void page_remove_rmap(struct page *page)
                 */
                if (page_test_and_clear_dirty(page))
                        set_page_dirty(page);
-               __dec_zone_page_state(page, NR_FILE_MAPPED);
+               __dec_zone_page_state(page,
+                               PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
        }
 }
 
index 08bc54e808627f9351960c887aaf1bb4046f18b1..2f0390161c0e65e4f3f801e15875664d8c5578ee 100644 (file)
@@ -742,7 +742,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * how much memory
                 * is mapped.
                 */
-               mapped_ratio = (global_page_state(NR_FILE_MAPPED) * 100) /
+               mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
+                               global_page_state(NR_ANON_PAGES)) * 100) /
                                        vm_total_pages;
 
                /*
index f16b33eb6d5cf94640b150cb1e7fe53b3c5f8efb..3baf4dffa62a57c3c5c96fa5657ee22d8e7e63c3 100644 (file)
@@ -395,6 +395,7 @@ struct seq_operations fragmentation_op = {
 
 static char *vmstat_text[] = {
        /* Zoned VM counters */
+       "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",