X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fvmstat.c;h=c64d169537bfcfe65fc963b009779de85c9b051a;hb=75ecb1a4d148b274aa9acd1d6ccaca0a4654784e;hp=2386716f1754ed0867a9f6430dc3b7acd75ce8b7;hpb=d23ad42324cc4378132e51f2fc5c9ba6cbe75182;p=linux-2.6

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 2386716f17..c64d169537 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -10,24 +10,10 @@
  */
 
 #include <linux/mm.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
-
-void __get_zone_counts(unsigned long *active, unsigned long *inactive,
-			unsigned long *free, struct pglist_data *pgdat)
-{
-	*active = node_page_state(pgdat->node_id, NR_ACTIVE);
-	*inactive = node_page_state(pgdat->node_id, NR_INACTIVE);
-	*free = node_page_state(pgdat->node_id, NR_FREE_PAGES);
-}
-
-void get_zone_counts(unsigned long *active,
-		unsigned long *inactive, unsigned long *free)
-{
-	*active = global_page_state(NR_ACTIVE);
-	*inactive = global_page_state(NR_INACTIVE);
-	*free = global_page_state(NR_FREE_PAGES);
-}
+#include <linux/sched.h>
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -297,6 +283,17 @@ EXPORT_SYMBOL(dec_zone_page_state);
 
 /*
  * Update the zone counters for one cpu.
+ *
+ * Note that refresh_cpu_vm_stats strives to only access
+ * node local memory. The per cpu pagesets on remote zones are placed
+ * in the memory local to the processor using that pageset. So the
+ * loop over all zones will access a series of cachelines local to
+ * the processor.
+ *
+ * The call to zone_page_state_add updates the cachelines with the
+ * statistics in the remote zone struct as well as the global cachelines
+ * with the global counters. These could cause remote node cache line
+ * bouncing and will have to be only done when necessary.
  */
 void refresh_cpu_vm_stats(int cpu)
 {
@@ -305,21 +302,54 @@ void refresh_cpu_vm_stats(int cpu)
 	unsigned long flags;
 
 	for_each_zone(zone) {
-		struct per_cpu_pageset *pcp;
+		struct per_cpu_pageset *p;
 
 		if (!populated_zone(zone))
 			continue;
 
-		pcp = zone_pcp(zone, cpu);
+		p = zone_pcp(zone, cpu);
 
 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-			if (pcp->vm_stat_diff[i]) {
+			if (p->vm_stat_diff[i]) {
 				local_irq_save(flags);
-				zone_page_state_add(pcp->vm_stat_diff[i],
+				zone_page_state_add(p->vm_stat_diff[i],
 					zone, i);
-				pcp->vm_stat_diff[i] = 0;
+				p->vm_stat_diff[i] = 0;
+#ifdef CONFIG_NUMA
+				/* 3 seconds idle till flush */
+				p->expire = 3;
+#endif
 				local_irq_restore(flags);
 			}
+#ifdef CONFIG_NUMA
+		/*
+		 * Deal with draining the remote pageset of this
+		 * processor
+		 *
+		 * Check if there are pages remaining in this pageset
+		 * if not then there is nothing to expire.
+		 */
+		if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
+			continue;
+
+		/*
+		 * We never drain zones local to this processor.
+		 */
+		if (zone_to_nid(zone) == numa_node_id()) {
+			p->expire = 0;
+			continue;
+		}
+
+		p->expire--;
+		if (p->expire)
+			continue;
+
+		if (p->pcp[0].count)
+			drain_zone_pages(zone, p->pcp + 0);
+
+		if (p->pcp[1].count)
+			drain_zone_pages(zone, p->pcp + 1);
+#endif
 	}
 }
 
@@ -424,6 +454,12 @@ const struct seq_operations fragmentation_op = {
 	.show	= frag_show,
 };
 
+#ifdef CONFIG_ZONE_DMA
+#define TEXT_FOR_DMA(xx) xx "_dma",
+#else
+#define TEXT_FOR_DMA(xx)
+#endif
+
 #ifdef CONFIG_ZONE_DMA32
 #define TEXT_FOR_DMA32(xx) xx "_dma32",
 #else
@@ -436,22 +472,22 @@ const struct seq_operations fragmentation_op = {
 #define TEXT_FOR_HIGHMEM(xx)
 #endif
 
-#define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
-					TEXT_FOR_HIGHMEM(xx)
+#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
+					TEXT_FOR_HIGHMEM(xx) xx "_movable",
 
 static const char * const vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_free_pages",
-	"nr_active",
 	"nr_inactive",
+	"nr_active",
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
+	"nr_dirty",
+	"nr_writeback",
 	"nr_slab_reclaimable",
 	"nr_slab_unreclaimable",
 	"nr_page_table_pages",
-	"nr_dirty",
-	"nr_writeback",
 	"nr_unstable",
 	"nr_bounce",
 	"nr_vmscan_write",
@@ -650,6 +686,24 @@ const struct seq_operations vmstat_op = {
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
+int sysctl_stat_interval __read_mostly = HZ;
+
+static void vmstat_update(struct work_struct *w)
+{
+	refresh_cpu_vm_stats(smp_processor_id());
+	schedule_delayed_work(&__get_cpu_var(vmstat_work),
+		sysctl_stat_interval);
+}
+
+static void __devinit start_cpu_timer(int cpu)
+{
+	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+
+	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
+	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
+}
+
 /*
  * Use the cpu notifier to insure that the thresholds are recalculated
  * when necessary.
@@ -658,10 +712,24 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
+	long cpu = (long)hcpu;
+
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_CANCELED:
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		start_cpu_timer(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
+		per_cpu(vmstat_work, cpu).work.func = NULL;
+		break;
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		start_cpu_timer(cpu);
+		break;
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		refresh_zone_stat_thresholds();
 		break;
 	default:
@@ -675,8 +743,13 @@ static struct notifier_block __cpuinitdata vmstat_notifier =
 
 int __init setup_vmstat(void)
 {
+	int cpu;
+
 	refresh_zone_stat_thresholds();
 	register_cpu_notifier(&vmstat_notifier);
+
+	for_each_online_cpu(cpu)
+		start_cpu_timer(cpu);
 	return 0;
 }
 module_init(setup_vmstat)
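
A note on the new per-CPU statistics timer: vmstat_update() and start_cpu_timer() above give each CPU a self-rearming deferrable work item, so folding the vm_stat_diff deltas no longer wakes an idle processor just to run a timer. The stand-alone module below is a minimal sketch of that same pattern, written against the 2.6.23-era workqueue API used in this diff; the demo_* identifiers and the printk body are illustrative stand-ins, not part of the patch.

/*
 * Sketch of the per-CPU deferrable-work pattern used by
 * vmstat_update()/start_cpu_timer() in the patch above.
 * All demo_* names are hypothetical.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/jiffies.h>

static DEFINE_PER_CPU(struct delayed_work, demo_work);

static void demo_update(struct work_struct *w)
{
	/*
	 * The work was queued with schedule_delayed_work_on(), so it
	 * runs on the CPU whose per-CPU state it should touch; this is
	 * why vmstat_update() can call smp_processor_id() safely.
	 */
	printk(KERN_INFO "demo_update on cpu %d\n", smp_processor_id());

	/*
	 * Re-arm ourselves on the local CPU. Because the work was
	 * initialized as deferrable, an idle CPU is not woken just to
	 * run the next round.
	 */
	schedule_delayed_work(&__get_cpu_var(demo_work), HZ);
}

static void start_demo_timer(int cpu)
{
	struct delayed_work *work = &per_cpu(demo_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(work, demo_update);
	/*
	 * Stagger the first expiry by cpu so all processors do not
	 * fire in the same tick, mirroring the HZ + cpu offset in
	 * start_cpu_timer() above.
	 */
	schedule_delayed_work_on(cpu, work, HZ + cpu);
}

static int __init demo_init(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		start_demo_timer(cpu);
	return 0;
}

static void __exit demo_exit(void)
{
	int cpu;

	/*
	 * Tear down the self-rearming work, as the patch does in its
	 * CPU_DOWN_PREPARE handler. CPU hotplug handling is omitted
	 * here for brevity; the patch covers it with a cpu notifier.
	 */
	for_each_online_cpu(cpu)
		cancel_rearming_delayed_work(&per_cpu(demo_work, cpu));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The design choice worth noting is the deferrable initialization: a plain delayed work item would arm an ordinary timer and drag idle CPUs out of low-power states once per interval, while the deferrable variant lets the flush piggyback on the next natural wakeup, which is acceptable for statistics that only need to be approximately current.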