/*
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>
#define NUMA_STATS_THRESHOLD (U16_MAX - 2)
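/*
 * The per-cpu NUMA stat diffs are u16 wide (see __inc_numa_state() below),
 * so a diff is folded into the zone counter once it passes this threshold,
 * just before the u16 range would be exhausted; the small margin below
 * U16_MAX appears to be headroom for the increment-and-test sequence.
 */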
int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
/* zero numa counters within a zone */
static void zero_zone_numa_counters(struct zone *zone)
	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
		atomic_long_set(&zone->vm_numa_stat[item], 0);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
						= 0;
/* zero numa counters of all the populated zones */
static void zero_zones_numa_counters(void)
	for_each_populated_zone(zone)
		zero_zone_numa_counters(zone);

/* zero global numa counters */
static void zero_global_numa_counters(void)
	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
		atomic_long_set(&vm_numa_stat[item], 0);

static void invalid_numa_statistics(void)
	zero_zones_numa_counters();
	zero_global_numa_counters();

static DEFINE_MUTEX(vm_numa_stat_lock);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length, loff_t *ppos)
	mutex_lock(&vm_numa_stat_lock);
	oldval = sysctl_vm_numa_stat;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (oldval == sysctl_vm_numa_stat)
	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
		static_branch_enable(&vm_numa_stat_key);
		pr_info("enable numa statistics\n");
		static_branch_disable(&vm_numa_stat_key);
		invalid_numa_statistics();
		pr_info("disable numa statistics, and clear numa counters\n");

	mutex_unlock(&vm_numa_stat_lock);

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);
static void sum_vm_events(unsigned long *ret)
	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_numa_stat);
EXPORT_SYMBOL(vm_node_stat);
int calculate_pressure_threshold(struct zone *zone)
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark.
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);
int calculate_normal_threshold(struct zone *zone)
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */
	mem = zone->managed_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);
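/*
 * A rough worked example of calculate_normal_threshold() above (illustrative
 * only, assuming 4 KiB pages): a 1 GiB zone gives mem = 1 GiB / 128 MiB = 8,
 * so with 16 online CPUs the threshold is
 * 2 * fls(16) * (1 + fls(8)) = 2 * 5 * 5 = 50, well under the 125 cap.
 * Each per-cpu differential may then drift by up to the threshold before it
 * is folded into the global counter.
 */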
/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
	struct pglist_data *pgdat;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
	__this_cpu_write(*p, x);
EXPORT_SYMBOL(__mod_zone_page_state);

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			   long delta)
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		node_page_state_add(x, pgdat, item);
	__this_cpu_write(*p, x);
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
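/*
 * Concretely (illustrative numbers): with a stat_threshold of 32, the 33rd
 * increment on this cpu overflows, 33 + 16 (half the threshold) is added to
 * the zone and global counters and the per-cpu diff restarts at -16, which
 * pushes the next overflow roughly half a threshold further away.
 */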
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
	__inc_zone_state(page_zone(page), item);
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
	__inc_node_state(page_pgdat(page), item);
EXPORT_SYMBOL(__inc_node_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
	__dec_zone_state(page_zone(page), item);
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
	__dec_node_state(page_pgdat(page), item);
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *        1       Overstepping half of threshold
 *        -1      Overstepping minus half of threshold
 */
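/*
 * As used below: mod_zone_page_state()/mod_node_page_state() pass an
 * overstep_mode of 0, the inc_* wrappers pass 1 and the dec_* wrappers pass
 * -1, so an increment that crosses the threshold leaves the per-cpu diff
 * half a threshold below zero and a decrement leaves it half a threshold
 * above, mirroring __inc_zone_state()/__dec_zone_state() above.
 */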
static inline void mod_zone_state(struct zone *zone,
       enum zone_stat_item item, long delta, int overstep_mode)
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;

		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	zone_page_state_add(z, zone, item);
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					long delta)
	mod_zone_state(zone, item, delta, 0);
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
	mod_zone_state(page_zone(page), item, 1, 1);
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
	mod_zone_state(page_zone(page), item, -1, -1);
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
       enum node_stat_item item, int delta, int overstep_mode)
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;

		z = 0;  /* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	node_page_state_add(z, pgdat, item);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
	mod_node_state(pgdat, item, delta, 0);
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
	mod_node_state(pgdat, item, 1, 1);

void inc_node_page_state(struct page *page, enum node_stat_item item)
	mod_node_state(page_pgdat(page), item, 1, 1);
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
	mod_node_state(page_pgdat(page), item, -1, -1);
EXPORT_SYMBOL(dec_node_page_state);
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					long delta)
	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
EXPORT_SYMBOL(dec_zone_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
EXPORT_SYMBOL(inc_node_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
EXPORT_SYMBOL(dec_node_page_state);
/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		atomic_long_add(zone_diff[i], &vm_zone_stat[i]);

	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		atomic_long_add(numa_diff[i], &vm_numa_stat[i]);

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		atomic_long_add(node_diff[i], &vm_node_stat[i]);

static int fold_diff(int *zone_diff, int *node_diff)
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		atomic_long_add(zone_diff[i], &vm_zone_stat[i]);

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		atomic_long_add(node_diff[i], &vm_node_stat[i]);
#endif	/* CONFIG_NUMA */
/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
	struct pglist_data *pgdat;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			atomic_long_add(v, &zone->vm_stat[i]);
			global_zone_diff[i] += v;
			/* 3 seconds idle till flush */
			__this_cpu_write(p->expire, 3);

		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
			v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
			atomic_long_add(v, &zone->vm_numa_stat[i]);
			global_numa_diff[i] += v;
			__this_cpu_write(p->expire, 3);

		/*
		 * Deal with draining the remote pageset of this cpu.
		 *
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!__this_cpu_read(p->expire) ||
			       !__this_cpu_read(p->pcp.count))

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			__this_cpu_write(p->expire, 0);

		if (__this_cpu_dec_return(p->expire))

		if (__this_cpu_read(p->pcp.count)) {
			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			atomic_long_add(v, &pgdat->vm_stat[i]);
			global_node_diff[i] += v;

	changes += fold_diff(global_zone_diff, global_numa_diff,
			     global_node_diff);
	changes += fold_diff(global_zone_diff, global_node_diff);
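/*
 * Note on the expire logic in refresh_cpu_vm_stats() above: each time a
 * zone's counters are updated the pageset's expire count is re-armed to 3,
 * and it is decremented once per refresh; only when it reaches zero while
 * pages still sit in a remote zone's pcp list are they drained back with
 * drain_zone_pages(). That draining is the work that callers such as
 * quiet_vmstat(), which passes do_pagesets == false, appear to be skipping.
 */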
/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
	struct pglist_data *pgdat;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;

		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
			if (p->vm_numa_stat_diff[i]) {
				v = p->vm_numa_stat_diff[i];
				p->vm_numa_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_numa_stat[i]);
				global_numa_diff[i] += v;

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;

	fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
	fold_diff(global_zone_diff, global_node_diff);
/*
 * this is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (pset->vm_stat_diff[i]) {
			int v = pset->vm_stat_diff[i];
			pset->vm_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_stat[i]);
			atomic_long_add(v, &vm_zone_stat[i]);

	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		if (pset->vm_numa_stat_diff[i]) {
			int v = pset->vm_numa_stat_diff[i];

			pset->vm_numa_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_numa_stat[i]);
			atomic_long_add(v, &vm_numa_stat[i]);
void __inc_numa_state(struct zone *zone,
				 enum numa_stat_item item)
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	u16 __percpu *p = pcp->vm_numa_stat_diff + item;

	v = __this_cpu_inc_return(*p);

	if (unlikely(v > NUMA_STATS_THRESHOLD)) {
		zone_numa_state_add(v, zone, item);
		__this_cpu_write(*p, 0);

/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				 enum zone_stat_item item)
	struct zone *zones = NODE_DATA(node)->node_zones;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

/*
 * Determine the per node value of a numa stat item. To avoid deviation,
 * the per cpu stat number in vm_numa_stat_diff[] is also included.
 */
unsigned long sum_zone_numa_state(int node,
				 enum numa_stat_item item)
	struct zone *zones = NODE_DATA(node)->node_zones;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_numa_state_snapshot(zones + i, item);

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state(struct pglist_data *pgdat,
				enum node_stat_item item)
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
	unsigned long requested = 1UL << order;

	if (WARN_ON_ONCE(order >= MAX_ORDER))

	if (!info->free_blocks_total)

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
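/*
 * Worked example for __fragmentation_index() above (illustrative numbers):
 * for an order-4 request (16 pages), a zone with free_pages = 1000 spread
 * over free_blocks_total = 250 blocks and no suitable block gives
 *   1000 - (1000 + 1000 * 1000 / 16) / 250 = 1000 - 254 = 746,
 * i.e. an index of 0.746, leaning towards "fragmentation" rather than
 * "out of memory".
 */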
/* Same as __fragmentation index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",
const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_zone_inactive_anon",
	"nr_zone_active_anon",
	"nr_zone_inactive_file",
	"nr_zone_active_file",
	"nr_zone_unevictable",
	"nr_zone_write_pending",
	"nr_page_table_pages",
#if IS_ENABLED(CONFIG_ZSMALLOC)

	/* enum numa_stat_item counters */

	/* Node-based counters */
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"workingset_refault",
	"workingset_activate",
	"workingset_nodereclaim",
	"nr_writeback_temp",
	"nr_shmem_hugepages",
	"nr_shmem_pmdmapped",
	"nr_anon_transparent_hugepages",
	"nr_vmscan_immediate_reclaim",
	"",			/* nr_indirectly_reclaimable */

	/* enum writeback_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#ifdef CONFIG_VM_EVENT_COUNTERS
	/* enum vm_event_item counters */
	TEXTS_FOR_ZONES("pgalloc")
	TEXTS_FOR_ZONES("allocstall")
	TEXTS_FOR_ZONES("pgskip")
	"pgscan_direct_throttle",
	"zone_reclaim_failed",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
#ifdef CONFIG_NUMA_BALANCING
	"numa_huge_pte_updates",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_daemon_wake",
	"compact_daemon_migrate_scanned",
	"compact_daemon_free_scanned",
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_split_page_failed",
	"thp_deferred_split_page",
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
	"thp_swpout_fallback",
#ifdef CONFIG_MEMORY_BALLOON
#ifdef CONFIG_BALLOON_COMPACTION
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */
#ifdef CONFIG_DEBUG_VM_VMACACHE
	"vmacache_find_calls",
	"vmacache_find_hits",
#endif /* CONFIG_VM_EVENT_COUNTERS */
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
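/*
 * The vmstat_text[] array above is laid out in a fixed order: the
 * zone_stat_item strings come first, then the numa_stat_item,
 * node_stat_item and writeback_stat_item strings, and finally (when
 * CONFIG_VM_EVENT_COUNTERS is set) the vm_event_item strings. The offsets
 * used by zoneinfo_show_print() and vmstat_start() below rely on exactly
 * this layout.
 */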
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
	for (pgdat = first_online_pgdat();
	     pgdat = next_online_pgdat(pgdat))

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
	pg_data_t *pgdat = (pg_data_t *)arg;

	return next_online_pgdat(pgdat);

static void frag_stop(struct seq_file *m, void *arg)
/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated, bool nolock,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))

		if (!nolock)
			spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		if (!nolock)
			spin_unlock_irqrestore(&zone->lock, flags);
#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
			seq_printf(m, "%6lu ", freecount);
/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);

	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = pfn_to_online_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))

		if (page_zone(page) != zone)

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);

	walk_zones_in_node(m, pgdat, true, false,
		pagetypeinfo_showblockcount_print);

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
#ifdef CONFIG_PAGE_OWNER
	if (!static_branch_unlikely(&page_owner_inited))

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);

	walk_zones_in_node(m, pgdat, true, true,
		pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);

	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.show	= pagetypeinfo_show,

static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *compare = &pgdat->node_zones[zid];

		if (populated_zone(compare))
			return zone == compare;
static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	if (is_zone_first_populated(pgdat, zone)) {
		seq_printf(m, "\n  per-node stats");
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			/* Skip hidden vmstat items. */
			if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
					 NR_VM_NUMA_STAT_ITEMS] == '\0')

			seq_printf(m, "\n      %-12s %lu",
				   vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
					       NR_VM_NUMA_STAT_ITEMS],
				   node_page_state(pgdat, i));

		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone->managed_pages);

		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);

	/* If unpopulated, no other information is useful */
	if (!populated_zone(zone)) {

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
			   zone_page_state(zone, i));

	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu",
			   vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
			   zone_numa_state_snapshot(zone, i));

	seq_printf(m, "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
			   pageset->pcp.batch);
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);

		   "\n  node_unreclaimable:  %u"
		   "\n  start_pfn:           %lu",
		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
		   zone->zone_start_pfn);
/*
 * Output information about zones in @pgdat.  All zones are printed regardless
 * of whether they are populated or not: lowmem_reserve_ratio operates on the
 * set of all zones and userspace would not be aware of such zones if they are
 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.show	= zoneinfo_show,
enum writeback_stat_item {
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,

static void *vmstat_start(struct seq_file *m, loff_t *pos)
	int i, stat_items_size;

	if (*pos >= ARRAY_SIZE(vmstat_text))

	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

#ifdef CONFIG_VM_EVENT_COUNTERS
	stat_items_size += sizeof(struct vm_event_state);

	v = kmalloc(stat_items_size, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_zone_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		v[i] = global_numa_state(i);
	v += NR_VM_NUMA_STAT_ITEMS;

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		v[i] = global_node_page_state(i);
	v += NR_VM_NODE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v[PGPGIN] /= 2;		/* sectors -> kbytes */

	return (unsigned long *)m->private + *pos;

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
	if (*pos >= ARRAY_SIZE(vmstat_text))
	return (unsigned long *)m->private + *pos;

static int vmstat_show(struct seq_file *m, void *arg)
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	/* Skip hidden vmstat items. */
	if (*vmstat_text[off] == '\0')

	seq_puts(m, vmstat_text[off]);
	seq_put_decimal_ull(m, " ", *l);

static void vmstat_stop(struct seq_file *m, void *arg)

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
#endif /* CONFIG_PROC_FS */
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
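/*
 * sysctl_stat_interval defaults to HZ jiffies, i.e. the per-cpu
 * differentials are folded into the global counters roughly once per second
 * by vmstat_update()/vmstat_shepherd() below.
 */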
#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
	refresh_cpu_vm_stats(true);

int vmstat_refresh(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected: leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_zone_page_state() etc. are so careful to hide
	 * transiently negative values, report an error here if any of
	 * the stats is negative, so we know to go looking for imbalance.
	 */
	err = schedule_on_each_cpu(refresh_vm_stats);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		val = atomic_long_read(&vm_zone_stat[i]);
			pr_warn("%s: %s %ld\n",
				__func__, vmstat_text[i], val);

	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
		val = atomic_long_read(&vm_numa_stat[i]);
			pr_warn("%s: %s %ld\n",
				__func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
#endif /* CONFIG_PROC_FS */

static void vmstat_update(struct work_struct *w)
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));

/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 *
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);

		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
		BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);

		/*
		 * The fast way of checking if there are any vmstat diffs.
		 * This works because the diffs are byte sized items.
		 */
		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))

		if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS))

/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 */
void quiet_vmstat(void)
	if (system_state != SYSTEM_RUNNING)

	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))

	if (!need_update(smp_processor_id()))

	/*
	 * Just refresh counters and do not care about the pending delayed
	 * vmstat_update. It doesn't fire that often to matter and canceling
	 * it would be too expensive from this path.
	 * vmstat_shepherd will take care about that for us.
	 */
	refresh_cpu_vm_stats(false);
/*
 * Shepherd worker thread that checks the
 * differentials of processors that have their worker
 * threads for vm statistics updates disabled because of
 * an inactive processor.
 */
static void vmstat_shepherd(struct work_struct *w);

static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		if (!delayed_work_pending(dw) && need_update(cpu))
			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
static void __init start_shepherd_timer(void)
	for_each_possible_cpu(cpu)
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
static void __init init_cpu_node_state(void)
	for_each_online_node(node) {
		if (cpumask_weight(cpumask_of_node(node)) > 0)
			node_set_state(node, N_CPU);

static int vmstat_cpu_online(unsigned int cpu)
	refresh_zone_stat_thresholds();
	node_set_state(cpu_to_node(cpu), N_CPU);

static int vmstat_cpu_down_prep(unsigned int cpu)
	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));

static int vmstat_cpu_dead(unsigned int cpu)
	const struct cpumask *node_cpus;

	node = cpu_to_node(cpu);

	refresh_zone_stat_thresholds();
	node_cpus = cpumask_of_node(node);
	if (cpumask_weight(node_cpus) > 0)

	node_clear_state(node, N_CPU);

struct workqueue_struct *mm_percpu_wq;

void __init init_mm_internals(void)
	int ret __maybe_unused;

	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);

	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
					NULL, vmstat_cpu_dead);
		pr_err("vmstat: failed to register 'dead' hotplug state\n");

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
					vmstat_cpu_down_prep);
		pr_err("vmstat: failed to register 'online' hotplug state\n");

	init_cpu_node_state();

	start_shepherd_timer();

#ifdef CONFIG_PROC_FS
	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
	proc_create_seq("pagetypeinfo", 0444, NULL, &pagetypeinfo_op);
	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
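/*
 * Worked example for unusable_free_index() above (illustrative numbers):
 * for an order-4 request, a zone with free_pages = 1000 of which only 10
 * free blocks are order-4 or larger gives
 *   (1000 - 10 * 16) * 1000 / 1000 = 840,
 * i.e. 0.840 of the free memory is unusable for that allocation size.
 */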
static void unusable_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of free memory is
 * unusable and by implication, the worse the external fragmentation is. This
 * can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))

	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);

static const struct seq_operations unusable_op = {
	.start	= frag_start,
	.show	= unusable_show,

static int unusable_open(struct inode *inode, struct file *file)
	return seq_open(file, &unusable_op);

static const struct file_operations unusable_file_ops = {
	.open		= unusable_open,
	.llseek		= seq_lseek,
	.release	= seq_release,

static void extfrag_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);

/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);

static const struct seq_operations extfrag_op = {
	.start	= frag_start,
	.show	= extfrag_show,

static int extfrag_open(struct inode *inode, struct file *file)
	return seq_open(file, &extfrag_op);

static const struct file_operations extfrag_file_ops = {
	.open		= extfrag_open,
	.llseek		= seq_lseek,
	.release	= seq_release,

static int __init extfrag_debug_init(void)
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
	if (!extfrag_debug_root)

	if (!debugfs_create_file("unusable_index", 0444,
			extfrag_debug_root, NULL, &unusable_file_ops))

	if (!debugfs_create_file("extfrag_index", 0444,
			extfrag_debug_root, NULL, &extfrag_file_ops))

	debugfs_remove_recursive(extfrag_debug_root);

module_init(extfrag_debug_init);