// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"
/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)
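/*
 * Per-page allocation metadata kept in the page_ext area: the stack depot
 * handles for the allocation and free stacks, the order and gfp mask of the
 * allocation, the allocating/freeing task and timestamps.
 */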
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	char comm[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
	pid_t free_pid;
	pid_t free_tgid;
};
struct stack {
	struct stack_record *stack_record;
	struct stack *next;
};
static struct stack dummy_stack;
static struct stack failure_stack;
static struct stack *stack_list;
static DEFINE_SPINLOCK(stack_list_lock);

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);
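/*
 * Pre-registered fallback stacks: dummy_handle is handed out when
 * save_stack() is re-entered, failure_handle when stack_depot_save() itself
 * fails, and early_handle marks pages that were already allocated before the
 * page_ext machinery came up (see init_pages_in_zone()).
 */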
static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;
static void init_early_allocated_pages(void);
static inline void set_current_in_page_owner(void)
{
	/*
	 * We might need to allocate more memory from page_owner code, so make
	 * sure to signal it in order to avoid recursion.
	 */
	current->in_page_owner = 1;
}

static inline void unset_current_in_page_owner(void)
{
	current->in_page_owner = 0;
}
static int __init early_page_owner_param(char *buf)
{
	int ret = kstrtobool(buf, &page_owner_enabled);

	if (page_owner_enabled)
		stack_depot_request_early_init();

	return ret;
}
early_param("page_owner", early_page_owner_param);
static __init bool need_page_owner(void)
{
	return page_owner_enabled;
}
static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}
static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}
static __init void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	init_early_allocated_pages();
	/* Initialize dummy and failure stacks and link them to stack_list */
	dummy_stack.stack_record = __stack_depot_get_stack_record(dummy_handle);
	failure_stack.stack_record = __stack_depot_get_stack_record(failure_handle);
	if (dummy_stack.stack_record)
		refcount_set(&dummy_stack.stack_record->count, 1);
	if (failure_stack.stack_record)
		refcount_set(&failure_stack.stack_record->count, 1);
	dummy_stack.next = &failure_stack;
	stack_list = &dummy_stack;
	static_branch_enable(&page_owner_inited);
}
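/*
 * The page_ext core reserves .size bytes of per-page data when .need()
 * returns true and calls .init() once page_ext is ready; get_page_owner()
 * below resolves that per-page slot via page_ext_data().
 */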
struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
	.need_shared_flags = true,
};
static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return page_ext_data(page_ext, &page_owner_ops);
}
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	if (current->in_page_owner)
		return dummy_handle;

	set_current_in_page_owner();
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;
	unset_current_in_page_owner();

	return handle;
}
static void add_stack_record_to_list(struct stack_record *stack_record,
				     gfp_t gfp_mask)
{
	unsigned long flags;
	struct stack *stack;

	set_current_in_page_owner();
	stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
	if (!stack) {
		unset_current_in_page_owner();
		return;
	}
	unset_current_in_page_owner();

	stack->stack_record = stack_record;
	stack->next = NULL;

	spin_lock_irqsave(&stack_list_lock, flags);
	stack->next = stack_list;
	/*
	 * This pairs with smp_load_acquire() from function
	 * stack_start(). This guarantees that stack_start()
	 * will see an updated stack_list before starting to
	 * traverse the list.
	 */
	smp_store_release(&stack_list, stack);
	spin_unlock_irqrestore(&stack_list_lock, flags);
}
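/*
 * inc_stack_record_count()/dec_stack_record_count() keep each stack_record's
 * refcount at "outstanding base pages charged to this stack" plus a baseline
 * of 1; stack_print() subtracts that baseline again when reporting
 * nr_base_pages.
 */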
static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	/*
	 * New stack_record's that do not use STACK_DEPOT_FLAG_GET start
	 * with REFCOUNT_SATURATED to catch spurious increments of their
	 * refcount.
	 * Since we do not use STACK_DEPOT_FLAG_GET API, let us
	 * set a refcount of 1 ourselves.
	 */
	if (refcount_read(&stack_record->count) == REFCOUNT_SATURATED) {
		int old = REFCOUNT_SATURATED;

		if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))
			/* Add the new stack_record to our list */
			add_stack_record_to_list(stack_record, gfp_mask);
	}
	refcount_add(nr_base_pages, &stack_record->count);
}
static void dec_stack_record_count(depot_stack_handle_t handle,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	if (refcount_sub_and_test(nr_base_pages, &stack_record->count))
		pr_warn("%s: refcount went to 0 for %u handle\n", __func__,
			handle);
}
static inline void __update_page_owner_handle(struct page_ext *page_ext,
					      depot_stack_handle_t handle,
					      unsigned short order,
					      gfp_t gfp_mask,
					      short last_migrate_reason, u64 ts_nsec,
					      pid_t pid, pid_t tgid, char *comm)
{
	int i;
	struct page_owner *page_owner;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = last_migrate_reason;
		page_owner->pid = pid;
		page_owner->tgid = tgid;
		page_owner->ts_nsec = ts_nsec;
		strscpy(page_owner->comm, comm,
			sizeof(page_owner->comm));
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_ext = page_ext_next(page_ext);
	}
}
static inline void __update_page_owner_free_handle(struct page_ext *page_ext,
						   depot_stack_handle_t handle,
						   unsigned short order,
						   pid_t pid, pid_t tgid,
						   u64 free_ts_nsec)
{
	int i;
	struct page_owner *page_owner;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		/* Only __reset_page_owner() wants to clear the bit */
		if (handle) {
			__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
			page_owner->free_handle = handle;
		}
		page_owner->free_ts_nsec = free_ts_nsec;
		page_owner->free_pid = current->pid;
		page_owner->free_tgid = current->tgid;
		page_ext = page_ext_next(page_ext);
	}
}
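/*
 * __reset_page_owner() and __set_page_owner() below are the entry points
 * invoked from the page allocator's free and allocation paths (through the
 * static-key wrappers in include/linux/page_owner.h) whenever page_owner is
 * enabled, so they must stay cheap and avoid recursing into the allocator
 * (see set_current_in_page_owner()).
 */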
void __reset_page_owner(struct page *page, unsigned short order)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	depot_stack_handle_t alloc_handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	alloc_handle = page_owner->handle;

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
	__update_page_owner_free_handle(page_ext, handle, order, current->pid,
					current->tgid, free_ts_nsec);
	page_ext_put(page_ext);

	if (alloc_handle != early_handle)
		/*
		 * early_handle is being set as a handle for all those
		 * early allocated pages. See init_pages_in_zone().
		 * Since their refcount is not being incremented because
		 * the machinery is not ready yet, we cannot decrement
		 * their refcount either.
		 */
		dec_stack_record_count(alloc_handle, 1 << order);
}
noinline void __set_page_owner(struct page *page, unsigned short order,
			       gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	u64 ts_nsec = local_clock();
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;
	__update_page_owner_handle(page_ext, handle, order, gfp_mask, -1,
				   ts_nsec, current->pid, current->tgid,
				   current->comm);
	page_ext_put(page_ext);
	inc_stack_record_count(handle, gfp_mask, 1 << order);
}
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}
void __split_page_owner(struct page *page, int old_order, int new_order)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < (1 << old_order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = new_order;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
	int i;
	struct page_ext *old_ext;
	struct page_ext *new_ext;
	struct page_owner *old_page_owner;
	struct page_owner *new_page_owner;
	depot_stack_handle_t migrate_handle;

	old_ext = page_ext_get(&old->page);
	if (unlikely(!old_ext))
		return;

	new_ext = page_ext_get(&newfolio->page);
	if (unlikely(!new_ext)) {
		page_ext_put(old_ext);
		return;
	}

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	migrate_handle = new_page_owner->handle;
	__update_page_owner_handle(new_ext, old_page_owner->handle,
				   old_page_owner->order, old_page_owner->gfp_mask,
				   old_page_owner->last_migrate_reason,
				   old_page_owner->ts_nsec, old_page_owner->pid,
				   old_page_owner->tgid, old_page_owner->comm);
	/*
	 * Do not proactively clear PAGE_EXT_OWNER{_ALLOCATED} bits as the folio
	 * will be freed after migration. Keep them until then as they may be
	 * useful.
	 */
	__update_page_owner_free_handle(new_ext, 0, old_page_owner->order,
					old_page_owner->free_pid,
					old_page_owner->free_tgid,
					old_page_owner->free_ts_nsec);
	/*
	 * We linked the original stack to the new folio, we need to do the same
	 * for the new one and the old folio otherwise there will be an imbalance
	 * when subtracting those pages from the stack.
	 */
	for (i = 0; i < (1 << new_page_owner->order); i++) {
		old_page_owner->handle = migrate_handle;
		old_ext = page_ext_next(old_ext);
		old_page_owner = get_page_owner(old_ext);
	}

	page_ext_put(new_ext);
	page_ext_put(old_ext);
}
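/*
 * Reports, for the /proc/pagetypeinfo output, how many pageblocks of each
 * migratetype in this zone contain at least one page that was allocated with
 * a different migratetype ("mixed" blocks).
 */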
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order <= MAX_PAGE_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}
/*
 * Look up memcg information and print it out
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
					 struct page *page)
{
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
	struct mem_cgroup *memcg;
	bool online;
	char name[80];

	rcu_read_lock();
	memcg_data = READ_ONCE(page->memcg_data);
	if (!memcg_data)
		goto out_unlock;

	if (memcg_data & MEMCG_DATA_OBJEXTS)
		ret += scnprintf(kbuf + ret, count - ret,
				"Slab cache page\n");

	memcg = page_memcg_check(page);
	if (!memcg)
		goto out_unlock;

	online = (memcg->css.flags & CSS_ONLINE);
	cgroup_name(memcg->css.cgroup, name, sizeof(name));
	ret += scnprintf(kbuf + ret, count - ret,
			"Charged %sto %smemcg %s\n",
			PageMemcgKmem(page) ? "(via objcg) " : "",
			online ? "" : "offline ",
			name);
out_unlock:
	rcu_read_unlock();
#endif /* CONFIG_MEMCG */

	return ret;
}
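/*
 * Each record emitted through the page_owner debugfs file is a small text
 * blob built from the format strings below, roughly of the form:
 *
 *   Page allocated via order 0, mask 0x...(...), pid 1, tgid 1 (init), ts 123456789 ns
 *   PFN 0x1234 type Movable Block 9 type Movable Flags ...
 *   <allocation stack trace>
 *
 * (the values above are illustrative only), followed by optional
 * migrate-reason and memcg lines and a blank separator line.
 */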
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = scnprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->tgid, page_owner->comm,
			page_owner->ts_nsec);

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += scnprintf(kbuf + ret, count - ret,
			"PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			&page->flags);

	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += scnprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	}

	ret = print_page_owner_memcg(kbuf, count, ret, page);

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}
void __dump_page_owner(const struct page *page)
{
	struct page_ext *page_ext = page_ext_get((void *)page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is not page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->tgid, page_owner->comm,
		 page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle)
		pr_alert("page_owner allocation stack trace missing\n");
	else
		stack_depot_print(handle);

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		pr_alert("page last free pid %d tgid %d stack trace:\n",
			 page_owner->free_pid, page_owner->free_tgid);
		stack_depot_print(handle);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);

	page_ext_put(page_ext);
}
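/*
 * Reader for the page_owner debugfs file: each read() returns one record for
 * the next allocated page, with the file offset tracking the next PFN to
 * scan. The dump is typically captured with something like
 * "cat /sys/kernel/debug/page_owner > page_owner_full.txt" and then
 * post-processed, e.g. with tools/mm/page_owner_sort.
 */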
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	if (*ppos == 0)
		pfn = min_low_pfn;
	else
		pfn = *ppos;
	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * This temporary page_owner is required so
		 * that we can avoid the context switches while holding
		 * the rcu lock and copying the page owner information to
		 * user through copy_to_user() or GFP_KERNEL allocations.
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order <= MAX_PAGE_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_ext->handle isn't synchronous so we should
		 * be careful to access it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = pfn + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}
static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	return file->f_pos;
}
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);
			struct page_ext *page_ext;

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order <= MAX_PAGE_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__update_page_owner_handle(page_ext, early_handle, 0, 0,
						   -1, local_clock(), current->pid,
						   current->tgid, current->comm);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}
static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}
static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}
static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
	.llseek		= lseek_page_owner,
};
static void *stack_start(struct seq_file *m, loff_t *ppos)
{
	struct stack *stack;

	if (*ppos == -1UL)
		return NULL;

	if (!*ppos) {
		/*
		 * This pairs with smp_store_release() from function
		 * add_stack_record_to_list(), so we get a consistent
		 * value of stack_list.
		 */
		stack = smp_load_acquire(&stack_list);
		m->private = stack;
	} else {
		stack = m->private;
	}

	return stack;
}
static void *stack_next(struct seq_file *m, void *v, loff_t *ppos)
{
	struct stack *stack = v;

	stack = stack->next;
	*ppos = stack ? *ppos + 1 : -1UL;
	m->private = stack;

	return stack;
}
static unsigned long page_owner_pages_threshold;
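/*
 * Emits one entry of the page_owner_stacks/show_stacks file: the saved stack
 * trace followed by "nr_base_pages: <N>", where N is the number of base
 * pages currently charged to that stack. Stacks below the user-settable
 * count_threshold are skipped.
 */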
static int stack_print(struct seq_file *m, void *v)
{
	int i, nr_base_pages;
	struct stack *stack = v;
	unsigned long *entries;
	unsigned long nr_entries;
	struct stack_record *stack_record = stack->stack_record;

	if (!stack->stack_record)
		return 0;

	nr_entries = stack_record->size;
	entries = stack_record->entries;
	nr_base_pages = refcount_read(&stack_record->count) - 1;

	if (nr_base_pages < 1 || nr_base_pages < page_owner_pages_threshold)
		return 0;

	for (i = 0; i < nr_entries; i++)
		seq_printf(m, " %pS\n", (void *)entries[i]);
	seq_printf(m, "nr_base_pages: %d\n\n", nr_base_pages);

	return 0;
}
static void stack_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations page_owner_stack_op = {
	.start	= stack_start,
	.next	= stack_next,
	.stop	= stack_stop,
	.show	= stack_print
};
static int page_owner_stack_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &page_owner_stack_op, 0);
}
static const struct file_operations page_owner_stack_operations = {
	.open		= page_owner_stack_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int page_owner_threshold_get(void *data, u64 *val)
{
	*val = READ_ONCE(page_owner_pages_threshold);
	return 0;
}

static int page_owner_threshold_set(void *data, u64 val)
{
	WRITE_ONCE(page_owner_pages_threshold, val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(proc_page_owner_threshold, &page_owner_threshold_get,
			&page_owner_threshold_set, "%llu");
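/*
 * Everything above only becomes active when the kernel is booted with
 * "page_owner=on"; pageowner_init() then exposes the debugfs interface:
 * page_owner (full per-page dump), page_owner_stacks/show_stacks (per-stack
 * page counts) and page_owner_stacks/count_threshold (filter for the
 * latter).
 */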
static int __init pageowner_init(void)
{
	struct dentry *dir;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);
	dir = debugfs_create_dir("page_owner_stacks", NULL);
	debugfs_create_file("show_stacks", 0400, dir, NULL,
			    &page_owner_stack_operations);
	debugfs_create_file("count_threshold", 0600, dir, NULL,
			    &proc_page_owner_threshold);

	return 0;
}
late_initcall(pageowner_init)