#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
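
/*
 * Usage sketch (illustrative, not part of the original header): the
 * count_* variants use this_cpu ops and are safe from any context, while
 * the __count_* variants use raw_cpu ops and assume the caller already
 * prevents cpu migration (e.g. interrupts disabled). PGFAULT is one of
 * the enum vm_event_item values from <linux/vm_event_item.h>:
 *
 *	count_vm_event(PGFAULT);		// safe anywhere
 *	count_vm_events(PGFAULT, 4);		// batched add
 *
 *	local_irq_disable();
 *	__count_vm_event(PGFAULT);		// cheaper raw_cpu increment
 *	local_irq_enable();
 */
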
extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
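
/*
 * Expansion sketch (illustrative): the per-zone event items are declared
 * contiguously in zone order, so offsetting from the _NORMAL item selects
 * the entry for any zone id. With the PGALLOC events, for example:
 *
 *	__count_zid_vm_events(PGALLOC, zid, 1)
 * expands to
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zid, 1);
 */
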
/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}
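
/*
 * Both helpers above deliberately update two counters: the per-zone (or
 * per-node) array consulted by the watermark and reclaim code, and the
 * global vm_zone_stat/vm_node_stat array behind global_page_state() and
 * friends, so either view stays consistent up to the pending per-cpu
 * deltas.
 */
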
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
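
/*
 * The SMP-only clamp in the three readers above exists because unfolded
 * per-cpu deltas can leave a counter transiently negative; clamping to 0
 * avoids handing callers a huge wrapped-around unsigned value.
 */
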
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
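
/*
 * Usage sketch (illustrative): the _snapshot variants walk the online
 * cpus to fold in pending per-cpu deltas, so they are slower but closer
 * to the true value. Prefer them on slow paths where a stale counter
 * could mislead, e.g. with the NR_FREE_PAGES zone item:
 *
 *	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long free_now = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */
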
#ifdef CONFIG_NUMA
extern unsigned long sum_zone_node_page_state(int node,
						enum zone_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
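
/*
 * Behaviour sketch (illustrative): CMA pages are accounted in both
 * counters, so NR_FREE_CMA_PAGES remains a subset of NR_FREE_PAGES:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, MIGRATE_CMA);
 *	// NR_FREE_PAGES     += 1 << order
 *	// NR_FREE_CMA_PAGES += 1 << order	(MIGRATE_CMA only)
 */
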
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */