/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *              Christoph Lameter <christoph@lameter.com>
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
        int cpu;
        int i;

        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

        for_each_cpu_mask(cpu, *cpumask) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                        ret[i] += this->event[i];
        }
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
        get_online_cpus();
        sum_vm_events(ret, &cpu_online_map);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
        struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
        int i;

        for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
                count_vm_events(i, fold_state->event[i]);
                fold_state->event[i] = 0;
        }
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
        int threshold;
        int mem;        /* memory in 128 MB units */

        /*
         * The threshold scales with the number of processors and the amount
         * of memory per zone. More memory means that we can defer updates
         * for longer; more processors could lead to more contention.
         * fls() is used to have a cheap way of logarithmic scaling.
         *
         * Some sample thresholds:
         *
         * Threshold    Processors      (fls)   Zonesize        fls(mem)+1
         * ------------------------------------------------------------------
         * 8            1               1       0.9-1 GB        4
         * 16           2               2       0.9-1 GB        4
         * 20           2               2       1-2 GB          5
         * 24           2               2       2-4 GB          6
         * 28           2               2       4-8 GB          7
         * 32           2               2       8-16 GB         8
         * 4            2               2       <128M           1
         * 30           4               3       2-4 GB          5
         * 48           4               3       8-16 GB         8
         * 32           8               4       1-2 GB          4
         * 32           8               4       0.9-1GB         4
         * 10           16              5       <128M           1
         * 40           16              5       900M            4
         * 70           64              7       2-4 GB          5
         * 84           64              7       4-8 GB          6
         * 108          512             9       4-8 GB          6
         * 125          1024            10      8-16 GB         8
         * 125          1024            10      16-32 GB        9
         */

        mem = zone->present_pages >> (27 - PAGE_SHIFT);

        threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

        /*
         * Maximum threshold is 125
         */
        threshold = min(125, threshold);

        return threshold;
}
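
/*
 * Worked example (illustrative): for a 4 GB zone on a 2 cpu system,
 * mem = 4096 MB / 128 MB = 32, so
 *
 *      threshold = 2 * fls(2) * (1 + fls(32)) = 2 * 2 * 7 = 28
 *
 * which matches the 4-8 GB / 2 processor row of the table above.
 */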

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
        struct zone *zone;
        int cpu;
        int threshold;

        for_each_zone(zone) {

                if (!zone->present_pages)
                        continue;

                threshold = calculate_threshold(zone);

                for_each_online_cpu(cpu)
                        zone_pcp(zone, cpu)->stat_threshold = threshold;
        }
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                int delta)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;
        long x;

        x = delta + *p;

        if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }
        *p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
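
/*
 * Illustrative call (a sketch, not taken from this file): a caller that
 * already runs with interrupts disabled and wants to account one new
 * file page could do
 *
 *      __mod_zone_page_state(page_zone(page), NR_FILE_PAGES, 1);
 *
 * The delta is kept in the per cpu differential and only folded into
 * the zone and global counters once it exceeds stat_threshold.
 */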

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                        int delta)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_zone_page_state(zone, item, delta);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place, which may allow the compiler to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;

        (*p)++;

        if (unlikely(*p > pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p + overstep, zone, item);
                *p = -overstep;
        }
}
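
/*
 * Example of the overstep logic (illustrative): with stat_threshold 32,
 * the increment that takes the local differential to 33 folds
 * 33 + 16 = 49 into the zone counter and leaves the differential at
 * -16, so the next 48 increments again complete without touching the
 * shared cachelines.
 */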

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;

        (*p)--;

        if (unlikely(*p < -pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p - overstep, zone, item);
                *p = overstep;
        }
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;
        struct zone *zone;

        zone = page_zone(page);
        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_zone_page_state(page, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These can cause remote node cache line
 * bouncing, so they are only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
        struct zone *zone;
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

        for_each_zone(zone) {
                struct per_cpu_pageset *p;

                if (!populated_zone(zone))
                        continue;

                p = zone_pcp(zone, cpu);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        if (p->vm_stat_diff[i]) {
                                unsigned long flags;
                                int v;

                                local_irq_save(flags);
                                v = p->vm_stat_diff[i];
                                p->vm_stat_diff[i] = 0;
                                local_irq_restore(flags);
                                atomic_long_add(v, &zone->vm_stat[i]);
                                global_diff[i] += v;
#ifdef CONFIG_NUMA
                                /* 3 seconds idle till flush */
                                p->expire = 3;
#endif
                        }
                cond_resched();
#ifdef CONFIG_NUMA
                /*
                 * Deal with draining the remote pageset of this
                 * processor.
                 *
                 * Check if there are pages remaining in this pageset;
                 * if not then there is nothing to expire.
                 */
                if (!p->expire || !p->pcp.count)
                        continue;

                /*
                 * We never drain zones local to this processor.
                 */
                if (zone_to_nid(zone) == numa_node_id()) {
                        p->expire = 0;
                        continue;
                }

                p->expire--;
                if (p->expire)
                        continue;

                if (p->pcp.count)
                        drain_zone_pages(zone, &p->pcp);
#endif
        }

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                if (global_diff[i])
                        atomic_long_add(global_diff[i], &vm_stat[i]);
}
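
/*
 * refresh_cpu_vm_stats() is driven once per sysctl_stat_interval from
 * vmstat_update() below. Per the rules in the comment above, it may
 * also be called for a cpu that is no longer online, e.g. during cpu
 * hotplug teardown.
 */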

#endif

#ifdef CONFIG_NUMA
/*
 * preferred_zone = the zone the allocator preferred
 * z              = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
        if (z->zone_pgdat == preferred_zone->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(preferred_zone, NUMA_FOREIGN);
        }
        if (z->node == numa_node_id())
                __inc_zone_state(z, NUMA_LOCAL);
        else
                __inc_zone_state(z, NUMA_OTHER);
}
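
/*
 * Example (illustrative): a task running on node 0 whose allocation
 * falls back to a node 1 zone records NUMA_MISS on the node 1 zone,
 * NUMA_FOREIGN on the preferred node 0 zone and, because the zone is
 * not local to the executing cpu, NUMA_OTHER on the node 1 zone.
 */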
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable",
        "Reclaimable",
        "Movable",
        "Reserve",
        "Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
        pg_data_t *pgdat;
        loff_t node = *pos;
        for (pgdat = first_online_pgdat();
             pgdat && node;
             pgdat = next_online_pgdat(pgdat))
                --node;

        return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        (*pos)++;
        return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
                void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                print(m, pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                struct zone *zone)
{
        int order;

        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
        seq_putc(m, '\n');
}
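
/*
 * One output line of the callback above (illustrative values):
 *
 *      Node 0, zone   Normal     23     17     10      4 ...
 *
 * one column of free block counts per order, the format exposed
 * through /proc/buddyinfo via fragmentation_op below.
 */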

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, frag_show_print);
        return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int order, mtype;

        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
                seq_printf(m, "Node %4d, zone %8s, type %12s ",
                                        pgdat->node_id,
                                        zone->name,
                                        migratetype_names[mtype]);
                for (order = 0; order < MAX_ORDER; ++order) {
                        unsigned long freecount = 0;
                        struct free_area *area;
                        struct list_head *curr;

                        area = &(zone->free_area[order]);

                        list_for_each(curr, &area->free_list[mtype])
                                freecount++;
                        seq_printf(m, "%6lu ", freecount);
                }
                seq_putc(m, '\n');
        }
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
        int order;
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* Print header */
        seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6d ", order);
        seq_putc(m, '\n');

        walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

        return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int mtype;
        unsigned long pfn;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = start_pfn + zone->spanned_pages;
        unsigned long count[MIGRATE_TYPES] = { 0, };

        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                mtype = get_pageblock_migratetype(page);

                count[mtype]++;
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12lu ", count[mtype]);
        seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
        int mtype;
        pg_data_t *pgdat = (pg_data_t *)arg;

        seq_printf(m, "\n%-23s", "Number of blocks type ");
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12s ", migratetype_names[mtype]);
        seq_putc(m, '\n');
        walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

        return 0;
}
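
/*
 * Together the two helpers above produce output like (illustrative
 * values, spacing approximate):
 *
 *      Free pages count per migrate type at order       0      1      2 ...
 *      Node    0, zone      DMA, type    Unmovable      1      2      2 ...
 *      ...
 *      Number of blocks type    Unmovable  Reclaimable      Movable ...
 *      Node 0, zone      DMA            1            0            6 ...
 */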

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* check memoryless node */
        if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
                return 0;

        seq_printf(m, "Page block order: %d\n", pageblock_order);
        seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
        seq_putc(m, '\n');
        pagetypeinfo_showfree(m, pgdat);
        pagetypeinfo_showblockcount(m, pgdat);

        return 0;
}

const struct seq_operations fragmentation_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = frag_show,
};

const struct seq_operations pagetypeinfo_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = pagetypeinfo_show,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
                                        TEXT_FOR_HIGHMEM(xx) xx "_movable",
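
/*
 * With CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM all set,
 * TEXTS_FOR_ZONES("pgalloc") expands to
 *
 *      "pgalloc_dma", "pgalloc_dma32", "pgalloc_normal",
 *      "pgalloc_high", "pgalloc_movable",
 *
 * i.e. one counter name per configured zone type.
 */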

static const char * const vmstat_text[] = {
        /* Zoned VM counters */
        "nr_free_pages",
        "nr_inactive",
        "nr_active",
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
        "nr_dirty",
        "nr_writeback",
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",
        "nr_writeback_temp",

#ifdef CONFIG_NUMA
        "numa_hit",
        "numa_miss",
        "numa_foreign",
        "interleave_hit",
        "numa_local",
        "numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
        "pgpgin",
        "pgpgout",
        "pswpin",
        "pswpout",

        TEXTS_FOR_ZONES("pgalloc")

        "pgfree",
        "pgactivate",
        "pgdeactivate",

        "pgfault",
        "pgmajfault",

        TEXTS_FOR_ZONES("pgrefill")
        TEXTS_FOR_ZONES("pgsteal")
        TEXTS_FOR_ZONES("pgscan_kswapd")
        TEXTS_FOR_ZONES("pgscan_direct")

        "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
        "pageoutrun",
        "allocstall",

        "pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
        "htlb_buddy_alloc_success",
        "htlb_buddy_alloc_fail",
#endif
#endif
};
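
/*
 * The strings above must stay in the same order as enum zone_stat_item
 * followed by the vm event counters: vmstat_show() pairs vmstat_text[off]
 * with the off'th slot of the combined counter buffer that
 * vmstat_start() builds.
 */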

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                        struct zone *zone)
{
        int i;
        seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
        seq_printf(m,
                   "\n  pages free     %lu"
                   "\n        min      %lu"
                   "\n        low      %lu"
                   "\n        high     %lu"
                   "\n        scanned  %lu (a: %lu i: %lu)"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
                   zone_page_state(zone, NR_FREE_PAGES),
                   zone->pages_min,
                   zone->pages_low,
                   zone->pages_high,
                   zone->pages_scanned,
                   zone->nr_scan_active, zone->nr_scan_inactive,
                   zone->spanned_pages,
                   zone->present_pages);

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
                                zone_page_state(zone, i));

        seq_printf(m,
                   "\n        protection: (%lu",
                   zone->lowmem_reserve[0]);
        for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
                seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
        seq_printf(m,
                   ")"
                   "\n  pagesets");
        for_each_online_cpu(i) {
                struct per_cpu_pageset *pageset;

                pageset = zone_pcp(zone, i);
                seq_printf(m,
                           "\n    cpu: %i"
                           "\n              count: %i"
                           "\n              high:  %i"
                           "\n              batch: %i",
                           i,
                           pageset->pcp.count,
                           pageset->pcp.high,
                           pageset->pcp.batch);
#ifdef CONFIG_SMP
                seq_printf(m, "\n  vm stats threshold: %d",
                                pageset->stat_threshold);
#endif
        }
        seq_printf(m,
                   "\n  all_unreclaimable: %u"
                   "\n  prev_priority:     %i"
                   "\n  start_pfn:         %lu",
                   zone_is_all_unreclaimable(zone),
                   zone->prev_priority,
                   zone->zone_start_pfn);
        seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, zoneinfo_show_print);
        return 0;
}

const struct seq_operations zoneinfo_op = {
        .start  = frag_start, /* iterate over all zones. The same as in
                               * fragmentation. */
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = zoneinfo_show,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
        unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
        unsigned long *e;
#endif
        int i;

        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
                        + sizeof(struct vm_event_state), GFP_KERNEL);
#else
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
                        GFP_KERNEL);
#endif
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
        e = v + NR_VM_ZONE_STAT_ITEMS;
        all_vm_events(e);
        e[PGPGIN] /= 2;         /* sectors -> kbytes */
        e[PGPGOUT] /= 2;
#endif
        return v + *pos;
}
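
/*
 * The buffer built above holds the NR_VM_ZONE_STAT_ITEMS global zone
 * counters first, followed (when CONFIG_VM_EVENT_COUNTERS is set) by
 * the summed vm event counters, so *pos can index it in step with
 * vmstat_text[].
 */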

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
        (*pos)++;
        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
        return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;

        seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
        return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
        kfree(m->private);
        m->private = NULL;
}

const struct seq_operations vmstat_op = {
        .start  = vmstat_start,
        .next   = vmstat_next,
        .stop   = vmstat_stop,
        .show   = vmstat_show,
};

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
        refresh_cpu_vm_stats(smp_processor_id());
        schedule_delayed_work(&__get_cpu_var(vmstat_work),
                sysctl_stat_interval);
}

static void __cpuinit start_cpu_timer(int cpu)
{
        struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

        INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
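        /*
         * Stagger the first expiry by one jiffy per cpu so that the
         * per-cpu updates do not all fire at the same time.
         */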
        schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
                unsigned long action,
                void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
                per_cpu(vmstat_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                refresh_zone_stat_thresholds();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
        { &vmstat_cpuup_callback, NULL, 0 };

static int __init setup_vmstat(void)
{
        int cpu;

        refresh_zone_stat_thresholds();
        register_cpu_notifier(&vmstat_notifier);

        for_each_online_cpu(cpu)
                start_cpu_timer(cpu);
        return 0;
}
module_init(setup_vmstat)
#endif