// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b
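
/*
 * For example, an entry EM(MR_COMPACTION, "compaction") in MIGRATE_REASON
 * expands to the array element "compaction", (with trailing comma), while
 * EMe() marks the final entry and therefore omits the comma.
 */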
const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
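
/*
 * These tables back the %pGp, %pGg and %pGv printk format specifiers
 * (see Documentation/core-api/printk-formats.rst); the terminating
 * {0, NULL} entry is the sentinel marking the end of each table.
 */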
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)
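
/*
 * DEF_PAGETYPE_NAME(slab), for instance, expands to
 * [PGTY_slab - 0xf0] = "slab", so each name lands at the index that
 * page_type_name() below derives from the top byte of page->page_type.
 */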
static const char *page_type_names[] = {
	DEF_PAGETYPE_NAME(slab),
	DEF_PAGETYPE_NAME(hugetlb),
	DEF_PAGETYPE_NAME(offline),
	DEF_PAGETYPE_NAME(guard),
	DEF_PAGETYPE_NAME(table),
	DEF_PAGETYPE_NAME(buddy),
	DEF_PAGETYPE_NAME(unaccepted),
};

static const char *page_type_name(unsigned int page_type)
{
	unsigned i = (page_type >> 24) - 0xf0;

	if (i >= ARRAY_SIZE(page_type_names))
		return "unknown";
	return page_type_names[i];
}
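
/*
 * Note that @page, and usually @folio too, point at snapshots taken by
 * __dump_page() rather than at the live page, so the contents may be
 * stale or internally inconsistent; dump_mapping() is used below since
 * it is careful about a mapping pointer that may no longer be valid.
 */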
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount);
	char *type = "";

	mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1;
	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
				page_type_name(folio->page.page_type));

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), folio,
				2 * sizeof(struct page), false);
}

static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long head;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

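	/*
	 * The page may be changing under us, so work on a local snapshot
	 * and retry (up to five times) if the snapshot turns out to be
	 * internally inconsistent, i.e. if the page claims an index past
	 * the end of the folio it supposedly belongs to.
	 */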
again:
	memcpy(&precise, page, sizeof(*page));
	head = precise.compound_head;
	if ((head & 1) == 0) {
		foliop = (struct folio *)&precise;
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		foliop = (struct folio *)(head - 1);
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}
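
/*
 * dump_page() is the public entry point; it is safe to call on a page in
 * any state, including one whose struct page is still poisoned from
 * initialization, and it tacks on the page_owner trace when page_owner
 * is enabled.
 */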
void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);
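
/*
 * Dump the state of a VMA merge attempt: the vma_merge_struct itself,
 * followed by the mm, the candidate vma and its prev/next neighbours,
 * and, under CONFIG_DEBUG_VM_MAPLE_TREE, the backing maple tree.
 */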
void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
{
	if (reason)
		pr_warn("vmg %px dumped because: %s\n", vmg, reason);

	if (!vmg) {
		pr_warn("vmg %px state: (NULL)\n", vmg);
		return;
	}

	pr_warn("vmg %px state: mm %px pgoff %lx\n"
		"vmi %px [%lx,%lx)\n"
		"prev %px next %px vma %px\n"
		"start %lx end %lx flags %lx\n"
		"file %px anon_vma %px policy %px\n"
		"uffd_ctx %px\n"
		"anon_name %px\n"
		"merge_flags %x state %x\n",
		vmg, vmg->mm, vmg->pgoff,
		vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0,
		vmg->vmi ? vma_iter_end(vmg->vmi) : 0,
		vmg->prev, vmg->next, vmg->vma,
		vmg->start, vmg->end, vmg->flags,
		vmg->file, vmg->anon_vma, vmg->policy,
#ifdef CONFIG_USERFAULTFD
		vmg->uffd_ctx.ctx,
#else
		(void *)0,
#endif
		vmg->anon_name,
		(int)vmg->merge_flags, (int)vmg->state);

	if (vmg->mm) {
		pr_warn("vmg %px mm:\n", vmg);
		dump_mm(vmg->mm);
	} else {
		pr_warn("vmg %px mm: (NULL)\n", vmg);
	}

	if (vmg->vma) {
		pr_warn("vmg %px vma:\n", vmg);
		dump_vma(vmg->vma);
	} else {
		pr_warn("vmg %px vma: (NULL)\n", vmg);
	}

	if (vmg->prev) {
		pr_warn("vmg %px prev:\n", vmg);
		dump_vma(vmg->prev);
	} else {
		pr_warn("vmg %px prev: (NULL)\n", vmg);
	}

	if (vmg->next) {
		pr_warn("vmg %px next:\n", vmg);
		dump_vma(vmg->next);
	} else {
		pr_warn("vmg %px next: (NULL)\n", vmg);
	}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
	if (vmg->vmi) {
		pr_warn("vmg %px vmi:\n", vmg);
		vma_iter_dump_tree(vmg->vmi);
	} else {
		pr_warn("vmg %px vmi: (NULL)\n", vmg);
	}
#endif
}
EXPORT_SYMBOL(dump_vmg);

static bool page_init_poisoning __read_mostly = true;
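
/*
 * "vm_debug" kernel command line parameter: with no argument (or an
 * empty one), every option this parser knows about is switched on;
 * "vm_debug=-" switches them all off; otherwise each option letter is
 * processed in turn, e.g. "vm_debug=p" enables struct page
 * init-poisoning, currently the only recognised option.
 */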
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
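
/*
 * Poison the given range of struct pages with PAGE_POISON_PATTERN so that
 * a later use of an uninitialized struct page (see PagePoisoned()) can be
 * detected; a no-op when poisoning has been disabled via vm_debug above.
 */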
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif /* CONFIG_DEBUG_VM */