// SPDX-License-Identifier: GPL-2.0
/*
 * mm/ specific debug routines.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
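
/*
 * Illustrative note (not part of the original file): with the EM()/EMe()
 * definitions above, an entry such as EM(MR_COMPACTION, "compaction") in the
 * MIGRATE_REASON list expands to the string literal "compaction" followed by
 * a comma, so the array ends up holding one human-readable name per MR_*
 * enum value, in enum order.
 */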

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)

static const char *page_type_names[] = {
	DEF_PAGETYPE_NAME(slab),
	DEF_PAGETYPE_NAME(hugetlb),
	DEF_PAGETYPE_NAME(offline),
	DEF_PAGETYPE_NAME(guard),
	DEF_PAGETYPE_NAME(table),
	DEF_PAGETYPE_NAME(buddy),
	DEF_PAGETYPE_NAME(unaccepted),
};

static const char *page_type_name(unsigned int page_type)
{
	unsigned i = (page_type >> 24) - 0xf0;

	if (i >= ARRAY_SIZE(page_type_names))
		return "unknown";
	return page_type_names[i];
}
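
/*
 * Worked example (added for clarity, not in the original source): the PGTY_*
 * value lives in the top byte of page_type, so for a page typed as PGTY_buddy
 * the lookup above computes
 *
 *	i = (page_type >> 24) - 0xf0 == PGTY_buddy - 0xf0
 *
 * which is exactly the slot that DEF_PAGETYPE_NAME(buddy) filled in
 * page_type_names[], yielding the string "buddy".
 */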

static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount);
	char *type = "";

	mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1;
	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
				page_type_name(folio->page.page_type));

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}

static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

again:
	memcpy(&precise, page, sizeof(*page));
	foliop = page_folio(&precise);
	if (foliop == (struct folio *)&precise) {
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
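
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * caller that hits an unexpected page state can dump it before warning:
 *
 *	if (WARN_ON(!PageUptodate(page)))
 *		dump_page(page, "expected uptodate page");
 *
 * VM_BUG_ON_PAGE() also routes through dump_page() before calling BUG().
 */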

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
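
/*
 * Illustrative usage (not in the original source): this dumper backs the
 * CONFIG_DEBUG_VM assertion macros, e.g.
 *
 *	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
 *
 * which calls dump_vma() before BUG()ing when the condition is true.
 */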

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags);
}
EXPORT_SYMBOL(dump_mm);
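
/*
 * As with dump_vma(), this is what VM_BUG_ON_MM() falls back on, e.g.
 * (illustrative only):
 *
 *	VM_BUG_ON_MM(atomic_read(&mm->mm_users) <= 0, mm);
 */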

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
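
/*
 * Illustrative caller (not part of this file): memmap setup/teardown paths
 * poison a whole range of struct pages in one call, e.g.
 *
 *	page_init_poison(memmap, sizeof(struct page) * nr_pages);
 *
 * so that any use of an uninitialized struct page is caught by the
 * PagePoisoned() check in dump_page() and friends.
 */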

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif		/* CONFIG_DEBUG_VM */