 * Manages VM statistics
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 * Copyright (C) 2008-2014 Christoph Lameter
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);
static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}
/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
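/*
 * Example (hypothetical reader, not part of this file): a snapshot of all
 * event counters can be taken into a local array and indexed by the
 * vm_event_item enum, e.g.:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	pr_info("page faults so far: %lu\n", events[PGFAULT]);
 *
 * As noted above, the snapshot is only approximate because the per-cpu
 * counters keep changing while it is taken.
 */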
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_stat);
int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
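/*
 * Worked example (illustrative numbers): with a watermark distance of
 * low_wmark - min_wmark = 1024 pages and 16 online CPUs, the pressure
 * threshold is max(1, 1024 / 16) = 64, below the 125 cap. Each CPU can
 * then hold at most 64 pages of unsynced drift, so even if every CPU is
 * at its limit the total error stays within the watermark gap.
 */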
int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->managed_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
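/*
 * Worked example (illustrative numbers): for a zone with 4 GB of managed
 * memory on an 8-CPU machine, mem = 4096 MB / 128 MB = 32, so
 * threshold = 2 * fls(8) * (1 + fls(32)) = 2 * 4 * (1 + 6) = 56,
 * comfortably under the 125 cap.
 */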
/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}
/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
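/*
 * Example (hypothetical caller, not part of this file): the __ variant is
 * only safe when the counter cannot race with an interrupt-context update,
 * e.g. with interrupts already disabled:
 *
 *	local_irq_save(flags);
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 *	local_irq_restore(flags);
 *
 * Otherwise the plain mod_zone_page_state() defined below must be used.
 */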
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
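/*
 * Worked example (illustrative numbers): with stat_threshold t = 32, the
 * increment that takes the per-cpu diff to v = 33 exceeds t, so
 * overstep = 16: v + overstep = 49 pages are folded into the zone and
 * global counters and the per-cpu diff is reset to -16. The next 48
 * increments are then absorbed locally before another fold is needed.
 */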
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);
#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *        0       No overstepping
 *        1       Overstepping half of threshold
 *        -1      Overstepping minus half of threshold
 */
static inline void mod_state(struct zone *zone, enum zone_stat_item item,
			     long delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
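/*
 * Worked example (illustrative numbers): with t = 32, a stored per-cpu
 * diff o = 30 and delta = 5, n = 35 exceeds t, so with overstep_mode 1
 * os = 16, z = n + os = 51 is pushed to the zone counter and the per-cpu
 * diff becomes n = -16. The this_cpu_cmpxchg() only commits n if the diff
 * still holds o; if the task was migrated meanwhile, the loop simply
 * recomputes everything against the new CPU's values.
 */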
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	mod_state(zone, item, 1, 1);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
#endif
/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (diff[i]) {
			atomic_long_add(diff[i], &vm_stat[i]);
			changes++;
		}
	return changes;
}
/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(p->expire, 3);
#endif
			}
		}
#ifdef CONFIG_NUMA
		if (do_pagesets) {
			cond_resched();
			/*
			 * Deal with draining the remote pageset of this
			 * cpu
			 *
			 * Check if there are pages remaining in this pageset
			 * if not then there is nothing to expire.
			 */
			if (!__this_cpu_read(p->expire) ||
			    !__this_cpu_read(p->pcp.count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(p->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(p->expire))
				continue;

			if (__this_cpu_read(p->pcp.count)) {
				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
				changes++;
			}
		}
#endif
	}

	changes += fold_diff(global_diff);
	return changes;
}
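/*
 * Illustration of the expire countdown above, assuming the default
 * sysctl_stat_interval of HZ (one second): a remote pageset that saw
 * counter updates gets p->expire rearmed to 3. Once updates stop, the
 * following idle passes decrement it 3 -> 2 -> 1 -> 0, and only on the
 * pass that reaches zero are any remaining remote pages given back via
 * drain_zone_pages().
 */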
/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				int v;

				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
			}
	}

	fold_diff(global_diff);
}
/*
 * This is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (pset->vm_stat_diff[i]) {
			int v = pset->vm_stat_diff[i];

			pset->vm_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_stat[i]);
			atomic_long_add(v, &vm_stat[i]);
		}
}
/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state(int node, enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}
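/*
 * Example (hypothetical caller, not part of this file): a per-node total
 * is just the sum over that node's zones, so e.g.
 *
 *	unsigned long free = node_page_state(numa_node_id(), NR_FREE_PAGES);
 *
 * reports the free pages of the local node, subject to the usual per-cpu
 * differential drift described above.
 */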
#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};
/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}
/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
}
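/*
 * Worked example (illustrative numbers): for an order-4 request
 * (requested = 16) against a zone with free_pages = 1000 spread over
 * free_blocks_total = 250 blocks, none of them large enough, the index is
 * 1000 - (1000 + 1000 * 1000 / 16) / 250 = 1000 - 254 = 746, i.e. 0.746:
 * the failure is mostly due to external fragmentation rather than a lack
 * of memory.
 */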
/* Same as __fragmentation index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif
#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",
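/*
 * For example, on a configuration with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32
 * and CONFIG_HIGHMEM all enabled, TEXTS_FOR_ZONES("pgalloc") expands to
 * "pgalloc_dma", "pgalloc_dma32", "pgalloc_normal", "pgalloc_high",
 * "pgalloc_movable", i.e. one vmstat_text entry per zone.
 */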
const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_vmscan_immediate_reclaim",
	"workingset_refault",
	"workingset_activate",
	"workingset_nodereclaim",
	"nr_anon_transparent_hugepages",

	/* enum writeback_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#ifdef CONFIG_VM_EVENT_COUNTERS
	/* enum vm_event_item counters */
	TEXTS_FOR_ZONES("pgalloc")

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal_kswapd")
	TEXTS_FOR_ZONES("pgsteal_direct")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")
	"pgscan_direct_throttle",

	"zone_reclaim_failed",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",

#ifdef CONFIG_NUMA_BALANCING
	"numa_huge_pte_updates",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_daemon_wake",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_split_page_failed",
	"thp_deferred_split_page",
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
#endif
#ifdef CONFIG_MEMORY_BALLOON
#ifdef CONFIG_BALLOON_COMPACTION
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
#endif /* CONFIG_SMP */
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_DEBUG_VM_VMACACHE
	"vmacache_find_calls",
	"vmacache_find_hits",
	"vmacache_full_flushes",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}
/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}
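/*
 * Each /proc/buddyinfo line emitted above lists the number of free blocks
 * at each order for one zone, e.g. (illustrative values only):
 *
 *	Node 0, zone   Normal   4096   2034    725    286     93     12      3      1      0      0      0
 *
 * where the first column counts order-0 (single page) blocks and the last
 * column counts blocks of order MAX_ORDER - 1.
 */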
static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}
static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		if (page_zone(page) != zone)
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}
/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}
#ifdef CONFIG_PAGE_OWNER
static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
							pg_data_t *pgdat,
							struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);
		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_ext->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}
#endif /* CONFIG_PAGE_OWNER */
/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}
/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}
static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;

	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone_page_state(zone, NR_PAGES_SCANNED),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone->managed_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   !zone_reclaimable(zone),
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}
/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i, stat_items_size;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

#ifdef CONFIG_VM_EVENT_COUNTERS
	stat_items_size += sizeof(struct vm_event_state);
#endif

	v = kmalloc(stat_items_size, GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */
static struct workqueue_struct *vmstat_wq;
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
	refresh_cpu_vm_stats(true);
}
int vmstat_refresh(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	long val;
	int err;
	int i;

	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected: leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_page_state() etc. are so careful to hide
	 * transiently negative values, report an error here if any of
	 * the stats is negative, so we know to go looking for imbalance.
	 */
	err = schedule_on_each_cpu(refresh_vm_stats);
	if (err)
		return err;
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		val = atomic_long_read(&vm_stat[i]);
		if (val < 0) {
			switch (i) {
			case NR_ALLOC_BATCH:
			case NR_PAGES_SCANNED:
				/*
				 * These are often seen to go negative in
				 * recent kernels, but not to go permanently
				 * negative.  Whilst it would be nicer not to
				 * have exceptions, rooting them out would be
				 * another task, of rather low priority.
				 */
				break;
			default:
				err = -EINVAL;
				pr_warn("%s: %s %ld\n",
					__func__, vmstat_text[i], val);
				break;
			}
		}
	}
	if (err)
		return err;
	if (write)
		*ppos += *lenp;
	else
		*lenp = 0;
	return 0;
}
#endif /* CONFIG_PROC_FS */
static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
			this_cpu_ptr(&vmstat_work),
			round_jiffies_relative(sysctl_stat_interval));
	}
}
/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);

		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
		/*
		 * The fast way of checking if there are any vmstat diffs.
		 * This works because the diffs are byte sized items.
		 */
		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
			return true;
	}
	return false;
}
/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 */
void quiet_vmstat(void)
{
	if (system_state != SYSTEM_RUNNING)
		return;

	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
		return;

	if (!need_update(smp_processor_id()))
		return;

	/*
	 * Just refresh counters and do not care about the pending delayed
	 * vmstat_update. It doesn't fire that often to matter and canceling
	 * it would be too expensive from this path.
	 * vmstat_shepherd will take care about that for us.
	 */
	refresh_cpu_vm_stats(false);
}
/*
 * Shepherd worker thread that checks the
 * differentials of processors that have their worker
 * threads for vm statistics updates disabled because of
 * inactivity.
 */
static void vmstat_shepherd(struct work_struct *w);

static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	get_online_cpus();
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		if (!delayed_work_pending(dw) && need_update(cpu))
			queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
	}
	put_online_cpus();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}
static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}
static void vmstat_cpu_dead(int node)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (cpu_to_node(cpu) == node)
			goto end;

	node_clear_state(node, N_CPU);
end:
	put_online_cpus();
}
/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int vmstat_cpuup_callback(struct notifier_block *nfb,
				 unsigned long action,
				 void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		refresh_zone_stat_thresholds();
		node_set_state(cpu_to_node(cpu), N_CPU);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		vmstat_cpu_dead(cpu_to_node(cpu));
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
static int __init setup_vmstat(void)
{
	cpu_notifier_register_begin();
	__register_cpu_notifier(&vmstat_notifier);

	start_shepherd_timer();
	cpu_notifier_register_done();

#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
}
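/*
 * Worked example (illustrative numbers): with free_pages = 1000 of which
 * only two order-4 blocks (2 << 4 = 32 pages) could satisfy an order-4
 * request, the unusable index is (1000 - 32) * 1000 / 1000 = 968,
 * i.e. 0.968: nearly all of the free memory is unusable at that size.
 */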
static void unusable_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}
/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of free memory is
 * unusable and by implication, the worse the external fragmentation is. This
 * can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, unusable_show_print);

	return 0;
}
static const struct seq_operations unusable_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= unusable_show,
};

static int unusable_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &unusable_op);
}

static const struct file_operations unusable_file_ops = {
	.open		= unusable_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}
/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, extfrag_show_print);

	return 0;
}
static const struct seq_operations extfrag_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= extfrag_show,
};

static int extfrag_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &extfrag_op);
}

static const struct file_operations extfrag_file_ops = {
	.open		= extfrag_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
	if (!extfrag_debug_root)
		return -ENOMEM;

	if (!debugfs_create_file("unusable_index", 0444,
			extfrag_debug_root, NULL, &unusable_file_ops))
		goto fail;

	if (!debugfs_create_file("extfrag_index", 0444,
			extfrag_debug_root, NULL, &extfrag_file_ops))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(extfrag_debug_root);
	return -ENOMEM;
}

module_init(extfrag_debug_init);
#endif