#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>
#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
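
/*
 * With every zone type configured in, FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * so each FOR_ALL_ZONES() user in the enum below gets one counter per
 * compiled-in zone, declared in zone order.
 */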

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();	/* re-enable the preemption disabled by get_cpu_var() */
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
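
/*
 * Usage sketch (illustrative, not part of this header):
 *
 *	count_vm_event(PGPGIN);		adds 1 to this cpu's PGPGIN counter
 *	count_vm_events(PGPGOUT, nr);	adds nr in one go
 *
 * The __ prefixed variants skip the get_cpu_var()/put_cpu() pair and are
 * only safe where preemption is already disabled, or where a rare lost
 * increment is acceptable since these counters are purely statistical.
 */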

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
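
/*
 * Example (illustrative): __count_zone_vm_events(PGSCAN_KSWAPD, z, n)
 * expands to __count_vm_events(PGSCAN_KSWAPD_NORMAL - ZONE_NORMAL +
 * zone_idx(z), n). Because FOR_ALL_ZONES() declares the per-zone items
 * in zone order, this offset arithmetic selects the item for zone z.
 */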

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);	/* keep the global total in sync */
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	/* Unfolded per cpu deltas can make the total transiently negative. */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
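
/*
 * Example (illustrative): global_page_state(NR_FILE_PAGES) yields the
 * system-wide page cache size and zone_page_state(zone, NR_FREE_PAGES)
 * the free pages of one zone; both read only the atomic totals and
 * ignore any deltas still sitting in the per cpu differentials.
 */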

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += zone_pcp(zone, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
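
/*
 * Worked example (illustrative): if two online cpus hold unfolded
 * vm_stat_diff[item] values of +3 and -1, zone_page_state() returns the
 * stale atomic total while zone_page_state_snapshot() returns total + 2.
 */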

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
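
/*
 * Example (illustrative): node_page_state(numa_node_id(), NR_FREE_PAGES)
 * sums the free page count over every populated zone of the local node.
 */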

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl,_z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }

#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */