From: Christoph Lameter
Date: Fri, 30 Jun 2006 08:55:36 +0000 (-0700)
Subject: [PATCH] zoned vm counters: split NR_ANON_PAGES off from NR_FILE_MAPPED
X-Git-Tag: v2.6.18-rc1~310
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f3dbd34460ff54962d3e3244b6bcb7f5295356e6;p=linux-2.6

[PATCH] zoned vm counters: split NR_ANON_PAGES off from NR_FILE_MAPPED

Zone reclaim and the dirty load calculation currently use NR_FILE_MAPPED
as the number of mapped pagecache pages.  However, that is not accurate:
NR_FILE_MAPPED also includes mapped anonymous pages.  This patch separates
the two counters and therefore allows accurate tracking of anonymous pages
per zone.

It then becomes possible to determine the number of unmapped pages per
zone, so we can avoid scanning for unmapped pages if there are none.

It may also now be possible to determine the mapped/unmapped ratio in
get_dirty_limit.  Isn't the number of anonymous pages irrelevant in that
calculation?

Note that this changes the meaning of the number of mapped pages reported
in /proc/vmstat, /proc/meminfo and in the per-node statistics.  This may
affect user space tools that monitor these counters!

NR_FILE_MAPPED works like NR_FILE_DIRTY: it is only valid for pagecache
pages.

Signed-off-by: Christoph Lameter
Cc: Trond Myklebust
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
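
To illustrate the zone reclaim point above: with NR_FILE_MAPPED now
counting only pagecache pages, the number of unmapped pagecache pages in
a zone falls out of two ZVC counters.  A minimal sketch, assuming the
zone_page_state() accessor from linux/vmstat.h introduced earlier in this
series; the helper itself is hypothetical and not part of this patch:

	/*
	 * Hypothetical helper, for illustration only (assumes
	 * linux/mmzone.h and linux/vmstat.h): NR_FILE_PAGES counts the
	 * pagecache pages in a zone and NR_FILE_MAPPED now counts only
	 * those mapped into pagetables, so the difference is the number
	 * of unmapped pagecache pages that reclaim could skip scanning
	 * for when it is zero.
	 */
	static unsigned long zone_unmapped_file_pages(struct zone *zone)
	{
		return zone_page_state(zone, NR_FILE_PAGES) -
			zone_page_state(zone, NR_FILE_MAPPED);
	}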
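
For the user space tools warned about above: a monitor that tracked the
old Mapped value can recover the previous semantics by summing the two
fields this patch exports.  A small illustrative sketch, not an
authoritative tool; it assumes the /proc/meminfo field names added here
and keeps the parsing deliberately naive:

	#include <stdio.h>

	/*
	 * Illustration only: before this patch "Mapped:" also counted
	 * anonymous pages; adding "AnonPages:" restores the old meaning.
	 */
	int main(void)
	{
		char line[128];
		unsigned long mapped = 0, anon = 0;
		FILE *f = fopen("/proc/meminfo", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f)) {
			sscanf(line, "Mapped: %lu kB", &mapped);
			sscanf(line, "AnonPages: %lu kB", &anon);
		}
		fclose(f);
		printf("old-style Mapped: %lu kB\n", mapped + anon);
		return 0;
	}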
---

diff --git a/drivers/base/node.c b/drivers/base/node.c
index ae9e3fea4b..c3bf05158c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -71,6 +71,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       "Node %d Writeback: %8lu kB\n"
 		       "Node %d FilePages: %8lu kB\n"
 		       "Node %d Mapped: %8lu kB\n"
+		       "Node %d AnonPages: %8lu kB\n"
 		       "Node %d Slab: %8lu kB\n",
 		       nid, K(i.totalram),
 		       nid, K(i.freeram),
@@ -85,6 +86,7 @@
 		       nid, K(ps.nr_writeback),
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
+		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
 		       nid, K(ps.nr_slab));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 1af12fd77f..ff809656ce 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -168,6 +168,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		"SwapFree: %8lu kB\n"
 		"Dirty: %8lu kB\n"
 		"Writeback: %8lu kB\n"
+		"AnonPages: %8lu kB\n"
 		"Mapped: %8lu kB\n"
 		"Slab: %8lu kB\n"
 		"CommitLimit: %8lu kB\n"
@@ -191,6 +192,7 @@
 		K(i.freeswap),
 		K(ps.nr_dirty),
 		K(ps.nr_writeback),
+		K(global_page_state(NR_ANON_PAGES)),
 		K(global_page_state(NR_FILE_MAPPED)),
 		K(ps.nr_slab),
 		K(allowed),
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 08be91e6ce..4833abd445 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,7 +47,8 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
-	NR_FILE_MAPPED,	/* mapped into pagetables.
+	NR_ANON_PAGES,	/* Mapped anonymous pages */
+	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 60c7244c42..0faacfe189 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -111,7 +111,8 @@ static void get_writeback_state(struct writeback_state *wbs)
 {
 	wbs->nr_dirty = read_page_state(nr_dirty);
 	wbs->nr_unstable = read_page_state(nr_unstable);
-	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
+	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
+			global_page_state(NR_ANON_PAGES);
 	wbs->nr_writeback = read_page_state(nr_writeback);
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index af5e9808e6..40158b5972 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
 	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
-	__inc_zone_page_state(page, NR_FILE_MAPPED);
+	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
 /**
@@ -531,7 +531,8 @@ void page_remove_rmap(struct page *page)
	 */
	if (page_test_and_clear_dirty(page))
		set_page_dirty(page);
-	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	__dec_zone_page_state(page,
+		PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
 	}
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 08bc54e808..2f0390161c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -742,7 +742,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
	 * how much memory
	 * is mapped.
	 */
-	mapped_ratio = (global_page_state(NR_FILE_MAPPED) * 100) /
+	mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
+			global_page_state(NR_ANON_PAGES)) * 100) /
 				vm_total_pages;
 
	/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f16b33eb6d..3baf4dffa6 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -395,6 +395,7 @@ struct seq_operations fragmentation_op = {
 
 static char *vmstat_text[] = {
	/* Zoned VM counters */
+	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",