X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=mm%2Fvmstat.c;h=c64d169537bfcfe65fc963b009779de85c9b051a;hb=50ef295119645215c076700e7ddf4869df5f56ce;hp=9d824643a22f167800864a5f24b86645a25cfcdb;hpb=d1187ed21026fd512b87851d0ca26d9ae16f9059;p=linux-2.6

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9d824643a2..c64d169537 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -10,8 +10,10 @@
  */

 #include <linux/mm.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>

 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -281,6 +283,17 @@ EXPORT_SYMBOL(dec_zone_page_state);

 /*
  * Update the zone counters for one cpu.
+ *
+ * Note that refresh_cpu_vm_stats strives to only access
+ * node local memory. The per cpu pagesets on remote zones are placed
+ * in the memory local to the processor using that pageset. So the
+ * loop over all zones will access a series of cachelines local to
+ * the processor.
+ *
+ * The call to zone_page_state_add updates the cachelines with the
+ * statistics in the remote zone struct as well as the global cachelines
+ * with the global counters. These could cause remote node cache line
+ * bouncing and will have to be only done when necessary.
  */
 void refresh_cpu_vm_stats(int cpu)
 {
@@ -289,21 +302,54 @@ void refresh_cpu_vm_stats(int cpu)
 	unsigned long flags;

 	for_each_zone(zone) {
-		struct per_cpu_pageset *pcp;
+		struct per_cpu_pageset *p;

 		if (!populated_zone(zone))
 			continue;

-		pcp = zone_pcp(zone, cpu);
+		p = zone_pcp(zone, cpu);

 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-			if (pcp->vm_stat_diff[i]) {
+			if (p->vm_stat_diff[i]) {
 				local_irq_save(flags);
-				zone_page_state_add(pcp->vm_stat_diff[i],
+				zone_page_state_add(p->vm_stat_diff[i],
 					zone, i);
-				pcp->vm_stat_diff[i] = 0;
+				p->vm_stat_diff[i] = 0;
+#ifdef CONFIG_NUMA
+				/* 3 seconds idle till flush */
+				p->expire = 3;
+#endif
 				local_irq_restore(flags);
 			}
+#ifdef CONFIG_NUMA
+		/*
+		 * Deal with draining the remote pageset of this
+		 * processor
+		 *
+		 * Check if there are pages remaining in this pageset
+		 * if not then there is nothing to expire.
+		 */
+		if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
+			continue;
+
+		/*
+		 * We never drain zones local to this processor.
+		 */
+		if (zone_to_nid(zone) == numa_node_id()) {
+			p->expire = 0;
+			continue;
+		}
+
+		p->expire--;
+		if (p->expire)
+			continue;
+
+		if (p->pcp[0].count)
+			drain_zone_pages(zone, p->pcp + 0);
+
+		if (p->pcp[1].count)
+			drain_zone_pages(zone, p->pcp + 1);
+#endif
 	}
 }

@@ -427,13 +473,13 @@ const struct seq_operations fragmentation_op = {
 #endif

 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
-					TEXT_FOR_HIGHMEM(xx)
+					TEXT_FOR_HIGHMEM(xx) xx "_movable",

 static const char * const vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_free_pages",
-	"nr_active",
 	"nr_inactive",
+	"nr_active",
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
@@ -641,18 +687,20 @@ const struct seq_operations vmstat_op = {
 #ifdef CONFIG_SMP

 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
+int sysctl_stat_interval __read_mostly = HZ;

 static void vmstat_update(struct work_struct *w)
 {
 	refresh_cpu_vm_stats(smp_processor_id());
-	schedule_delayed_work(&__get_cpu_var(vmstat_work), HZ);
+	schedule_delayed_work(&__get_cpu_var(vmstat_work),
+		sysctl_stat_interval);
 }

 static void __devinit start_cpu_timer(int cpu)
 {
 	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

-	INIT_DELAYED_WORK(vmstat_work, vmstat_update);
+	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
 	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
 }
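
Note on the NUMA draining logic: the core of this change is the expire countdown that
refresh_cpu_vm_stats() now keeps per pageset. Every time local counter deltas are folded,
the countdown is re-armed to 3 sampling intervals, and only a pageset that then stays idle
for all 3 intervals, and that belongs to a zone on a remote node, has its cached pages
drained back to the page allocator. The user-space C sketch below models just that state
machine so it can be traced in isolation; struct pageset, refresh_tick() and drain() are
hypothetical stand-ins for the kernel's per_cpu_pageset, refresh_cpu_vm_stats() and
drain_zone_pages(), not real kernel APIs.

/*
 * Minimal user-space model of the expire-based drain policy in the
 * patch above. All names here (struct pageset, refresh_tick, drain)
 * are hypothetical stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

struct pageset {
	int count[2];		/* pages held in the hot and cold pcp lists */
	int vm_stat_diff;	/* pending local counter delta (one item) */
	int expire;		/* sampling intervals left until a drain */
};

static void drain(struct pageset *p, int list)
{
	printf("  drained %d pages from pcp list %d\n",
	       p->count[list], list);
	p->count[list] = 0;
}

/* One pass of the refresh loop over a single pageset. */
static void refresh_tick(struct pageset *p, bool remote)
{
	if (p->vm_stat_diff) {
		/* Folding a delta re-arms the countdown: 3 idle passes. */
		p->vm_stat_diff = 0;
		p->expire = 3;
	}

	/* Nothing armed, or nothing cached: nothing to expire. */
	if (!p->expire || (!p->count[0] && !p->count[1]))
		return;

	/* Zones local to this processor are never drained. */
	if (!remote) {
		p->expire = 0;
		return;
	}

	if (--p->expire)
		return;

	if (p->count[0])
		drain(p, 0);
	if (p->count[1])
		drain(p, 1);
}

int main(void)
{
	/* A remote pageset with cached pages and one pending delta. */
	struct pageset p = { .count = { 5, 2 }, .vm_stat_diff = 1 };
	int tick;

	for (tick = 1; tick <= 4; tick++) {
		printf("tick %d: expire=%d\n", tick, p.expire);
		refresh_tick(&p, true);
	}
	return 0;
}

Running the model shows the cached pages surviving two idle passes and being flushed on
the third. With vmstat_update() re-arming itself every sysctl_stat_interval ticks (HZ,
i.e. one second, by default), that matches the "3 seconds idle till flush" comment: a
processor still allocating from a remote zone keeps its cached pages, while a pageset
that goes quiet is emptied within about three seconds instead of holding remote pages
indefinitely. The switch to INIT_DELAYED_WORK_DEFERRABLE is in the same spirit of
avoiding needless work: the statistics timer no longer forces an otherwise idle CPU to
wake up just to fold counters.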