#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

/*
 * Global page accounting. One instance per CPU. Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 *   any time safely (this protects the instance from modification by
 *   interrupts).
 * - The __xxx_page_state variants can be used safely when interrupts are
 *   disabled.
 * - The __xxx_page_state variants can also be used if the field is only
 *   modified from process context and protected from preemption, or only
 *   modified from interrupt context. In this case, the field should be
 *   commented here.
 */
struct page_state {
	unsigned long pgpgin;		/* Disk reads */
	unsigned long pgpgout;		/* Disk writes */
	unsigned long pswpin;		/* swap reads */
	unsigned long pswpout;		/* swap writes */

	unsigned long pgalloc_high;	/* page allocations */
	unsigned long pgalloc_normal;
	unsigned long pgalloc_dma32;
	unsigned long pgalloc_dma;

	unsigned long pgfree;		/* page freeings */
	unsigned long pgactivate;	/* pages moved inactive->active */
	unsigned long pgdeactivate;	/* pages moved active->inactive */

	unsigned long pgfault;		/* faults (major+minor) */
	unsigned long pgmajfault;	/* faults (major only) */

	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
	unsigned long pgrefill_normal;
	unsigned long pgrefill_dma32;
	unsigned long pgrefill_dma;

	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
	unsigned long pgsteal_normal;
	unsigned long pgsteal_dma32;
	unsigned long pgsteal_dma;

	unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
	unsigned long pgscan_kswapd_normal;
	unsigned long pgscan_kswapd_dma32;
	unsigned long pgscan_kswapd_dma;

	unsigned long pgscan_direct_high;/* total highmem pages scanned */
	unsigned long pgscan_direct_normal;
	unsigned long pgscan_direct_dma32;
	unsigned long pgscan_direct_dma;

	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
	unsigned long slabs_scanned;	/* slab objects scanned */
	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
	unsigned long allocstall;	/* direct reclaim calls */

	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
};

extern void get_full_page_state(struct page_state *ret);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

#define mod_page_state(member, delta)	\
	mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta)	\
	__mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define inc_page_state(member)		mod_page_state(member, 1UL)
#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta)	mod_page_state(member, (delta))
#define sub_page_state(member,delta)	mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member)	__mod_page_state(member, 1UL)
#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta)	__mod_page_state(member, (delta))
#define __sub_page_state(member,delta)	__mod_page_state(member, 0UL - (delta))
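
/*
 * Usage sketch (illustrative only, not part of this interface): the
 * interrupt-safe forms can be called from any context, while the __
 * variants are reserved for callers that already satisfy the rules in
 * the comment above. "do_major" is a hypothetical local flag:
 *
 *	inc_page_state(pgfault);
 *	if (do_major)
 *		inc_page_state(pgmajfault);
 */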

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

#define state_zone_offset(zone, member)					\
({									\
	unsigned offset;						\
	if (is_highmem(zone))						\
		offset = offsetof(struct page_state, member##_high);	\
	else if (is_normal(zone))					\
		offset = offsetof(struct page_state, member##_normal);	\
	else if (is_dma32(zone))					\
		offset = offsetof(struct page_state, member##_dma32);	\
	else								\
		offset = offsetof(struct page_state, member##_dma);	\
	offset;								\
})

#define __mod_page_state_zone(zone, member, delta)			\
do {									\
	__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

#define mod_page_state_zone(zone, member, delta)			\
do {									\
	mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)
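
/*
 * Usage sketch (illustrative): the ## pasting in state_zone_offset()
 * selects the _high/_normal/_dma32/_dma field matching the zone, so a
 * reclaim path names only the stem and lets the macro pick the field.
 * "nr_freed" is a hypothetical local count:
 *
 *	local_irq_save(flags);
 *	__mod_page_state_zone(zone, pgsteal, nr_freed);
 *	local_irq_restore(flags);
 */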

DECLARE_PER_CPU(struct page_state, page_states);

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	/* Update the zone counter and the global total together. */
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
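
/*
 * Example (illustrative): after zone_page_state_add(3, zone, item),
 * both zone_page_state(zone, item) and global_page_state(item) have
 * grown by 3; keeping the global vm_stat[] array in sync spares
 * readers a loop over every zone in the system.
 */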

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	/*
	 * Pending per cpu differentials may make the sum look transiently
	 * negative; never report a negative count.
	 */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
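
/*
 * Usage sketch (NR_FILE_MAPPED is a stand-in for whatever item names
 * enum zone_stat_item actually provides): readers never walk the per
 * cpu differentials themselves, they just read the atomic totals:
 *
 *	unsigned long total = global_page_state(NR_FILE_MAPPED);
 *	unsigned long here  = zone_page_state(zone, NR_FILE_MAPPED);
 */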

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
#else
#define node_page_state(node, item) global_page_state(item)
#endif
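
/*
 * Usage sketch (illustrative; NR_FILE_MAPPED is again a stand-in): the
 * NUMA version sums the zones of one node, the !NUMA fallback reads the
 * global counter, so callers can be written uniformly:
 *
 *	unsigned long mapped = node_page_state(numa_node_id(),
 *					       NR_FILE_MAPPED);
 */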

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

#ifdef CONFIG_SMP

void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_inc(&page_zone(page)->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * Counters are only updated with atomic operations, so there is no need
 * to disable interrupts: the aliases below are valid in any context.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
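
/*
 * Usage sketch (illustrative): on UP the interrupt-safe and __ forms
 * are the same code, so either spelling may be used from process or
 * interrupt context:
 *
 *	inc_zone_page_state(page, item);
 *	...
 *	dec_zone_page_state(page, item);
 */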

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif

#endif /* _LINUX_VMSTAT_H */