// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic show_mem() implementation
 *
 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
 */

#include <linux/blkdev.h>
#include <linux/cma.h>
#include <linux/cpuset.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

#include "internal.h"
#include "swap.h"

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}
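
/*
 * si_mem_available() - estimate how much memory is available for new
 * userspace allocations without pushing the system into swap or OOM.
 * The result is a heuristic built from free pages, reclaimable page
 * cache and reclaimable kernel memory, discounted by the low watermarks.
 */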
long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long reclaimable;
	struct zone *zone;

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping or OOM.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping or thrashing. Assume at least half of the page
	 * cache, or the low watermark worth of cache, needs to stay.
	 */
	pagecache = global_node_page_state(NR_ACTIVE_FILE) +
		global_node_page_state(NR_INACTIVE_FILE);
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;
	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use, and cannot be freed. Cap this estimate at the
	 * low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	reclaimable -= min(reclaimable / 2, wmark_low);
	available += reclaimable;

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);
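
/*
 * Note: this is the heuristic behind the "MemAvailable" field in
 * /proc/meminfo. Callers should treat it as an estimate, not a guarantee
 * that this much memory can actually be allocated.
 */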

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);
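
/*
 * All struct sysinfo fields filled above are counts in units of
 * ->mem_unit (PAGE_SIZE here); a caller that wants bytes would compute,
 * e.g., val.totalram * val.mem_unit. This is how the sysinfo(2) path
 * exposes these numbers to userspace.
 */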

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#else
	/* Without CONFIG_HIGHMEM both counters are still zero here. */
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * No node mask - aka implicit memory numa policy. Do not bother with
	 * the synchronization - read_mems_allowed_begin - because we do not
	 * have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}
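
/*
 * One-letter codes used in the per-order free list dump below:
 * U = unmovable, M = movable, E = reclaimable, H = highatomic,
 * C = CMA (if configured), I = isolate (if configured).
 */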
static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}
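
/*
 * Returns true if any zone of @pgdat up to @max_zone_idx has managed
 * pages; nodes that do not are skipped in the per-node dump below.
 */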
static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
{
	int zone_idx;

	for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
		if (zone_managed_pages(pgdat->node_zones + zone_idx))
			return true;
	return false;
}
/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long free_pcp = 0;
	int cpu, nid;
	struct zone *zone;
	pg_data_t *pgdat;

	for_each_populated_zone(zone) {
		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
	}
203 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
204 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
205 " unevictable:%lu dirty:%lu writeback:%lu\n"
206 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
207 " mapped:%lu shmem:%lu pagetables:%lu\n"
208 " sec_pagetables:%lu bounce:%lu\n"
209 " kernel_misc_reclaimable:%lu\n"
210 " free:%lu free_pcp:%lu free_cma:%lu\n",
211 global_node_page_state(NR_ACTIVE_ANON
),
212 global_node_page_state(NR_INACTIVE_ANON
),
213 global_node_page_state(NR_ISOLATED_ANON
),
214 global_node_page_state(NR_ACTIVE_FILE
),
215 global_node_page_state(NR_INACTIVE_FILE
),
216 global_node_page_state(NR_ISOLATED_FILE
),
217 global_node_page_state(NR_UNEVICTABLE
),
218 global_node_page_state(NR_FILE_DIRTY
),
219 global_node_page_state(NR_WRITEBACK
),
220 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B
),
221 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B
),
222 global_node_page_state(NR_FILE_MAPPED
),
223 global_node_page_state(NR_SHMEM
),
224 global_node_page_state(NR_PAGETABLE
),
225 global_node_page_state(NR_SECONDARY_PAGETABLE
),
226 global_zone_page_state(NR_BOUNCE
),
227 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE
),
228 global_zone_page_state(NR_FREE_PAGES
),
230 global_zone_page_state(NR_FREE_CMA_PAGES
));
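
	/*
	 * The per-node and per-zone dumps below print kilobytes; the K()
	 * helper from mm/internal.h converts a page count into kB.
	 */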
	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;
		if (!node_has_managed_zones(pgdat, max_zone_idx))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp:%lukB"
			" shmem_pmdmapped:%lukB"
			" anon_thp:%lukB"
#endif
			" writeback_tmp:%lukB"
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" sec_pagetables:%lukB"
			" all_unreclaimable? %s"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS)),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES));
	}

	for_each_populated_zone(zone) {
		int i;

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" boost:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" reserved_highatomic:%luKB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->watermark_boost),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone->nr_reserved_highatomic),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
			K(zone->present_pages),
			K(zone_managed_pages(zone)),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(free_pcp),
			K(this_cpu_read(zone->per_cpu_pageset->count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
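
		/*
		 * lowmem_reserve[] holds, per higher zone type, the number of
		 * pages this zone keeps back from allocations that could have
		 * been satisfied from that higher zone.
		 */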
346 printk("lowmem_reserve[]:");
347 for (i
= 0; i
< MAX_NR_ZONES
; i
++)
348 printk(KERN_CONT
" %ld", zone
->lowmem_reserve
[i
]);
349 printk(KERN_CONT
"\n");
	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
		unsigned char types[NR_PAGE_ORDERS];

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}

	for_each_online_node(nid) {
		if (show_mem_node_skip(filter, nid, nodemask))
			continue;
		hugetlb_show_meminfo_node(nid);
	}

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}
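
/*
 * __show_mem() is the common entry point behind show_mem(); it is called
 * when the kernel needs to dump allocator state to the log, e.g. from
 * allocation-failure warnings and the OOM killer.
 */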
void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long total = 0, reserved = 0, highmem = 0;
	struct zone *zone;

	printk("Mem-Info:\n");
	show_free_areas(filter, nodemask, max_zone_idx);

	for_each_populated_zone(zone) {
		total += zone->present_pages;
		reserved += zone->present_pages - zone_managed_pages(zone);

		if (is_highmem(zone))
			highmem += zone->present_pages;
	}

	printk("%lu pages RAM\n", total);
	printk("%lu pages HighMem/MovableOnly\n", highmem);
	printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
	printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_MEMORY_FAILURE
	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
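	/*
	 * With CONFIG_MEM_ALLOC_PROFILING, also report the call sites that
	 * currently hold the most allocated memory, as tracked by the
	 * allocation tagging infrastructure.
	 */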
#ifdef CONFIG_MEM_ALLOC_PROFILING
	{
		struct codetag_bytes tags[10];
		size_t i, nr;

		nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false);
		if (nr) {
			pr_notice("Memory allocations:\n");
			for (i = 0; i < nr; i++) {
				struct codetag *ct = tags[i].ct;
				struct alloc_tag *tag = ct_to_alloc_tag(ct);
				struct alloc_tag_counters counter = alloc_tag_read(tag);
				char bytes[10];

				string_get_size(counter.bytes, 1, STRING_UNITS_2, bytes, sizeof(bytes));

				/* Same as alloc_tag_to_text() but w/o intermediate buffer */
				if (ct->modname)
					pr_notice("%12s %8llu %s:%u [%s] func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->modname, ct->function);
				else
					pr_notice("%12s %8llu %s:%u func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->function);
			}
		}
	}
#endif
}