/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>
extern int sysctl_stat_interval;
#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT   0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};
/* Stat data for system wide items */
enum vm_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_MEMMAP_PAGES,	/* page metadata allocated through buddy allocator */
	NR_MEMMAP_BOOT_PAGES,	/* page metadata allocated through boot allocator */
	NR_VM_STAT_ITEMS,
};
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */
struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
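
/*
 * Usage sketch (illustrative; such calls live in callers like the fault
 * and page-free paths, not in this header). Code that already runs with
 * preemption or interrupts disabled can take the cheaper raw variant;
 * everything else should use the this_cpu form:
 *
 *	__count_vm_event(PGFAULT);
 *	count_vm_events(PGFREE, 1 << order);
 */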
extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);
#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
#ifdef CONFIG_PER_VMA_LOCK_STATS
#define count_vm_vma_lock_event(x) count_vm_event(x)
#else
#define count_vm_vma_lock_event(x) do {} while (0)
#endif
#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
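
/*
 * Illustrative expansion (not part of the upstream header): item##_NORMAL
 * anchors a per-zone family of events, so a hypothetical allocation from
 * ZONE_DMA counted as
 *
 *	__count_zid_vm_events(PGALLOC, ZONE_DMA, 1 << order);
 *
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA,
 * 1 << order), i.e. it bumps PGALLOC_DMA.
 */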
/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
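
/*
 * Illustrative comparison (not part of the upstream header): the plain
 * reader may lag by up to the per-cpu threshold on every CPU, while the
 * snapshot folds the pending per-cpu deltas in at read time:
 *
 *	unsigned long fast   = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long closer = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */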
#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}
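
/*
 * Usage sketch (illustrative): the page allocator's zone statistics
 * record whether an allocation was satisfied on the preferred node, e.g.
 *
 *	__count_numa_event(zone, NUMA_HIT);
 */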
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(const struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */
/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}
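
/*
 * Worked example (illustrative): with 4 KiB pages, updating a byte-based
 * item such as NR_SLAB_RECLAIMABLE_B by 8192 bytes passes the
 * PAGE_SIZE-multiple check and stores a delta of 2 pages:
 *
 *	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, 8192);
 */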
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}
static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif		/* CONFIG_SMP */
static inline void __zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}
static inline void __node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}
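
/*
 * Illustrative (not in the upstream header): the add/sub variants are
 * simply mod with +/-folio_nr_pages(), so mlocking a folio could be
 * accounted as
 *
 *	zone_stat_mod_folio(folio, NR_MLOCK, folio_nr_pages(folio));
 */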
extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}
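
/*
 * Layout note (illustrative): vmstat_text packs the names in enum order:
 * zone items first, then NUMA event items, node items, the vm_stat items
 * above, and finally the vm events. Each *_name() helper below therefore
 * offsets by the sizes of the preceding blocks.
 */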
#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */
static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}
static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
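
/*
 * Illustrative: node_stat_name(NR_LRU_BASE + LRU_ACTIVE_ANON) yields
 * "nr_active_anon", so lru_list_name(LRU_ACTIVE_ANON) returns
 * "active_anon".
 */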
#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __lruvec_stat_mod_folio(struct folio *folio,
			     enum node_stat_item idx, int val);

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__lruvec_stat_mod_folio(folio, idx, val);
	local_irq_restore(flags);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	lruvec_stat_mod_folio(page_folio(page), idx, val);
}
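
/*
 * Usage sketch (illustrative): dirtying a mapped folio from a context
 * that can race with interrupts uses the irq-safe wrapper, e.g.
 *
 *	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, folio_nr_pages(folio));
 *
 * while __lruvec_stat_mod_folio() is for callers that already have
 * interrupts disabled.
 */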
#else /* CONFIG_MEMCG */

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */
static inline void __lruvec_stat_add_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
void memmap_boot_pages_add(long delta);
void memmap_pages_add(long delta);
#endif /* _LINUX_VMSTAT_H */