// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"
#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
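/*
 * The counters fed to SEQ_PUT_DEC() below are page counts, so shifting
 * left by (PAGE_SHIFT - 10) converts pages straight to KiB: assuming a
 * 4 KiB page size, a counter of 3 pages prints as 3 << 2 = 12 kB.
 */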
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter(mm, MM_ANONPAGES);
	file = get_mm_counter(mm, MM_FILEPAGES);
	shmem = get_mm_counter(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	/* split executable areas between text and lib */
	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
	text = min(text, mm->exec_vm << PAGE_SHIFT);
	lib = (mm->exec_vm << PAGE_SHIFT) - text;

	swap = get_mm_counter(mm, MM_SWAPENTS);
	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
	seq_put_decimal_ull_width(m,
		    " kB\nVmExe:\t", text >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmLib:\t", lib >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
	seq_puts(m, " kB\n");
	hugetlb_report_usage(m, mm);
}
#undef SEQ_PUT_DEC
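/*
 * The block above backs the VmPeak/VmSize/VmLck/VmPin/VmHWM/VmRSS/RssAnon/
 * RssFile/RssShmem/VmData/VmStk/VmExe/VmLib/VmPTE/VmSwap lines of
 * /proc/<pid>/status (all reported in kB), after which
 * hugetlb_report_usage() appends the hugetlb usage line(s).
 */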
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES) +
			get_mm_counter(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
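/*
 * task_statm() feeds /proc/<pid>/statm; the reader prints these page
 * counts in the order "size resident shared text lib data dt", with the
 * lib and dt columns reported as 0 on modern kernels.
 */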
#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif
static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = *ppos;
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	/* See m_next(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (down_read_killable(&mm->mmap_sem)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	vma = find_vma(mm, last_addr);
	if (vma)
		return vma;

	return priv->tail_vma;
}
static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next, *vma = v;

	if (vma == priv->tail_vma)
		next = NULL;
	else if (vma->vm_next)
		next = vma->vm_next;
	else
		next = priv->tail_vma;

	*ppos = next ? next->vm_start : -1UL;

	return next;
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->mm;

	if (!priv->task)
		return;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}
static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}
static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}
static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}
/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct vm_area_struct *vma)
{
	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= vma->vm_mm->start_stack &&
		vma->vm_end >= vma->vm_mm->start_stack;
}
static void show_vma_header_prefix(struct seq_file *m,
				   unsigned long start, unsigned long end,
				   vm_flags_t flags, unsigned long long pgoff,
				   dev_t dev, unsigned long ino)
{
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_put_hex_ll(m, NULL, start, 8);
	seq_put_hex_ll(m, "-", end, 8);
	seq_putc(m, ' ');
	seq_putc(m, flags & VM_READ ? 'r' : '-');
	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
	seq_put_hex_ll(m, " ", pgoff, 8);
	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
	seq_put_hex_ll(m, ":", MINOR(dev), 2);
	seq_put_decimal_ull(m, " ", ino);
	seq_putc(m, ' ');
}
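/*
 * The prefix above is the familiar start of a /proc/<pid>/maps line, e.g.
 * (illustrative values only):
 *
 *	00400000-00452000 r-xp 00000000 08:02 173521   /usr/bin/foo
 *
 * i.e. start-end, permissions, file offset, device major:minor and inode;
 * show_map_vma() below appends the pathname or a [heap]/[stack] marker.
 */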
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	start = vma->vm_start;
	end = vma->vm_end;
	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		if (is_stack(vma))
			name = "[stack]";
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
static int show_map(struct seq_file *m, void *v)
{
	show_map_vma(m, v);
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
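/*
 * Worked example of the fixed-point scheme, assuming a 4 KiB page mapped
 * by 3 processes: each mapper accumulates (4096 << PSS_SHIFT) / 3 =
 * 5592405 units, and 5592405 >> PSS_SHIFT = 1365 bytes, very close to the
 * exact 4096 / 3 = 1365.33 byte share.
 */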
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long lazyfree;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long file_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	u64 pss;
	u64 pss_anon;
	u64 pss_file;
	u64 pss_shmem;
	u64 pss_locked;
	u64 swap_pss;
	bool check_shmem_swap;
};
static void smaps_page_accumulate(struct mem_size_stats *mss,
		struct page *page, unsigned long size, unsigned long pss,
		bool dirty, bool locked, bool private)
{
	mss->pss += pss;

	if (PageAnon(page))
		mss->pss_anon += pss;
	else if (PageSwapBacked(page))
		mss->pss_shmem += pss;
	else
		mss->pss_file += pss;

	if (locked)
		mss->pss_locked += pss;

	if (dirty || PageDirty(page)) {
		if (private)
			mss->private_dirty += size;
		else
			mss->shared_dirty += size;
	} else {
		if (private)
			mss->private_clean += size;
		else
			mss->shared_clean += size;
	}
}
static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty, bool locked)
{
	int i, nr = compound ? compound_nr(page) : 1;
	unsigned long size = nr * PAGE_SIZE;

	/*
	 * First accumulate quantities that depend only on |size| and the type
	 * of the compound page.
	 */
	if (PageAnon(page)) {
		mss->anonymous += size;
		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
			mss->lazyfree += size;
	}

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;

	/*
	 * Then accumulate quantities that may depend on sharing, or that may
	 * differ page-by-page.
	 *
	 * page_count(page) == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page mapped with PTE it would elevate
	 * page_count().
	 */
	if (page_count(page) == 1) {
		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
			locked, true);
		return;
	}
	for (i = 0; i < nr; i++, page++) {
		int mapcount = page_mapcount(page);
		unsigned long pss = PAGE_SIZE << PSS_SHIFT;

		if (mapcount >= 2)
			pss /= mapcount;
		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
				      mapcount < 2);
	}
}
#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
			  __always_unused int depth, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;

	mss->swap += shmem_partial_swap_usage(
			walk->vma->vm_file->f_mapping, addr, end);

	return 0;
}
#else
#define smaps_pte_hole		NULL
#endif /* CONFIG_SHMEM */
static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
		else if (is_device_private_entry(swpent))
			page = device_private_entry_to_page(swpent);
	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
							&& pte_none(*pte))) {
		page = find_get_entry(vma->vm_file->f_mapping,
						linear_page_index(vma, addr));
		if (!page)
			return;

		if (xa_is_value(page))
			mss->swap += PAGE_SIZE;
		else
			put_page(page);

		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	if (PageAnon(page))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (PageSwapBacked(page))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (is_zone_device_page(page))
		/* pass */;
	else
		mss->file_thp += HPAGE_PMD_SIZE;
	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (pmd_present(*pmd))
			smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd))
		goto out;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
out:
	cond_resched();
	return 0;
}
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case if we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_SYNC)]	= "sf",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_WIPEONFORK)]	= "wf",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_ARCH_HAS_PKEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
		[ilog2(VM_PKEY_BIT3)]	= "",
#if VM_PKEY_BIT4
		[ilog2(VM_PKEY_BIT4)]	= "",
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i)) {
			seq_putc(m, mnemonics[i][0]);
			seq_putc(m, mnemonics[i][1]);
			seq_putc(m, ' ');
		}
	}
	seq_putc(m, '\n');
}
#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
		else if (is_device_private_entry(swpent))
			page = device_private_entry_to_page(swpent);
	}
	if (page) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2)
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#else
#define smaps_hugetlb_range	NULL
#endif /* HUGETLB_PAGE */
static const struct mm_walk_ops smaps_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
};

static const struct mm_walk_ops smaps_shmem_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.pte_hole		= smaps_pte_hole,
};
static void smap_gather_stats(struct vm_area_struct *vma,
			     struct mem_size_stats *mss)
{
#ifdef CONFIG_SHMEM
	/* In case of smaps_rollup, reset the value from previous vma */
	mss->check_shmem_swap = false;
	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE)) {
			mss->swap += shmem_swapped;
		} else {
			mss->check_shmem_swap = true;
			walk_page_vma(vma, &smaps_shmem_walk_ops, mss);
			return;
		}
	}
#endif /* CONFIG_SHMEM */
	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk_ops, mss);
}
#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
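/*
 * Unlike the task_mem() macro at the top of this file, the mem_size_stats
 * counters printed below are byte counts (smaps_account() accumulates
 * nr * PAGE_SIZE), so a plain >> 10 bytes-to-KiB conversion is enough here.
 */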
/* Show the contents common for smaps and smaps_rollup */
static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
	bool rollup_mode)
{
	SEQ_PUT_DEC("Rss: ", mss->resident);
	SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT);
	if (rollup_mode) {
		/*
		 * These are meaningful only for smaps_rollup, otherwise two of
		 * them are zero, and the other one is the same as Pss.
		 */
		SEQ_PUT_DEC(" kB\nPss_Anon: ",
			mss->pss_anon >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_File: ",
			mss->pss_file >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_Shmem: ",
			mss->pss_shmem >> PSS_SHIFT);
	}
	SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean);
	SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty);
	SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean);
	SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty);
	SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced);
	SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous);
	SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
	SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
	SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp);
	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
				  mss->private_hugetlb >> 10, 7);
	SEQ_PUT_DEC(" kB\nSwap: ", mss->swap);
	SEQ_PUT_DEC(" kB\nSwapPss: ",
					mss->swap_pss >> PSS_SHIFT);
	SEQ_PUT_DEC(" kB\nLocked: ",
					mss->pss_locked >> PSS_SHIFT);
	seq_puts(m, " kB\n");
}
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof(mss));

	smap_gather_stats(vma, &mss);

	show_map_vma(m, vma);

	SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
	SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
	seq_puts(m, " kB\n");

	__show_smap(m, &mss, false);

	seq_printf(m, "THPeligible: %d\n",
		   transparent_hugepage_enabled(vma));

	if (arch_pkeys_enabled())
		seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
	show_smap_vma_flags(m, vma);

	return 0;
}
static int show_smaps_rollup(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mem_size_stats mss;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long last_vma_end = 0;
	int ret = 0;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return -ESRCH;

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		ret = -ESRCH;
		goto out_put_task;
	}

	memset(&mss, 0, sizeof(mss));

	ret = down_read_killable(&mm->mmap_sem);
	if (ret)
		goto out_put_mm;

	hold_task_mempolicy(priv);

	for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
		smap_gather_stats(vma, &mss);
		last_vma_end = vma->vm_end;
	}

	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
			       last_vma_end, 0, 0, 0, 0);
	seq_pad(m, ' ');
	seq_puts(m, "[rollup]\n");

	__show_smap(m, &mss, true);

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(priv->task);
	priv->task = NULL;

	return ret;
}
static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int smaps_rollup_open(struct inode *inode, struct file *file)
{
	int ret;
	struct proc_maps_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
	if (!priv)
		return -ENOMEM;

	ret = single_open(file, show_smaps_rollup, priv);
	if (ret)
		goto out_free;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		ret = PTR_ERR(priv->mm);

		single_release(inode, file);
		goto out_free;
	}

	return 0;

out_free:
	kfree(priv);
	return ret;
}

static int smaps_rollup_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	kfree(priv);
	return single_release(inode, file);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_pid_smaps_rollup_operations = {
	.open		= smaps_rollup_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= smaps_rollup_release,
};
enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};
#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		pte_t old_pte;

		old_pte = ptep_modify_prot_start(vma, addr, pte);
		ptent = pte_wrprotect(old_pte);
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}
#endif
#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t old, pmd = *pmdp;

	if (pmd_present(pmd)) {
		/* See comment in change_huge_pmd() */
		old = pmdp_invalidate(vma, addr, pmdp);
		if (pmd_dirty(old))
			pmd = pmd_mkdirty(pmd);
		if (pmd_young(old))
			pmd = pmd_mkyoung(pmd);

		pmd = pmd_wrprotect(pmd);
		pmd = pmd_clear_soft_dirty(pmd);

		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
		pmd = pmd_swp_clear_soft_dirty(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	}
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		if (!pmd_present(*pmd))
			goto out;

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}
static const struct mm_walk_ops clear_refs_walk_ops = {
	.pmd_entry		= clear_refs_pte_range,
	.test_walk		= clear_refs_test_walk,
};
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	struct mmu_gather tlb;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mmu_notifier_range range;
		struct clear_refs_private cp = {
			.type = type,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			if (down_write_killable(&mm->mmap_sem)) {
				count = -EINTR;
				goto out_mm;
			}

			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		if (down_read_killable(&mm->mmap_sem)) {
			count = -EINTR;
			goto out_mm;
		}
		tlb_gather_mmu(&tlb, mm, 0, -1);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				if (down_write_killable(&mm->mmap_sem)) {
					count = -EINTR;
					goto out_mm;
				}
				/*
				 * Avoid to modify vma->vm_flags
				 * without locked ops while the
				 * coredump reads the vm_flags.
				 */
				if (!mmget_still_valid(mm)) {
					/*
					 * Silently return "count"
					 * like if get_task_mm()
					 * failed. FIXME: should this
					 * function have returned
					 * -ESRCH if get_task_mm()
					 * failed like if
					 * get_proc_task() fails?
					 */
					up_write(&mm->mmap_sem);
					goto out_mm;
				}
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}

			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
						0, NULL, mm, 0, -1UL);
			mmu_notifier_invalidate_range_start(&range);
		}
		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
				&cp);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(&range);
		tlb_finish_mmu(&tlb, 0, -1);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}
const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
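/*
 * Typical soft-dirty round trip from userspace (illustrative shell only,
 * see Documentation/admin-guide/mm/soft-dirty.rst):
 *
 *	echo 4 > /proc/<pid>/clear_refs    - clear the soft-dirty bits
 *	... let the task run ...
 *	read /proc/<pid>/pagemap           - bit 55 set => page was written
 */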
typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER	1
static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    __always_unused int depth, struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}
static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;

		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		if (pm->show_pfn)
			frame = swp_type(entry) |
				(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);

		if (is_device_private_entry(entry))
			page = device_private_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}
static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmdp, vma);
	if (ptl) {
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;
		struct page *page = NULL;

		if (vma->vm_flags & VM_SOFTDIRTY)
			flags |= PM_SOFT_DIRTY;

		if (pmd_present(pmd)) {
			page = pmd_page(pmd);

			flags |= PM_PRESENT;
			if (pmd_soft_dirty(pmd))
				flags |= PM_SOFT_DIRTY;
			if (pm->show_pfn)
				frame = pmd_pfn(pmd) +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
		}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		else if (is_swap_pmd(pmd)) {
			swp_entry_t entry = pmd_to_swp_entry(pmd);
			unsigned long offset;

			if (pm->show_pfn) {
				offset = swp_offset(entry) +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
				frame = swp_type(entry) |
					(offset << MAX_SWAPFILES_SHIFT);
			}
			flags |= PM_SWAP;
			if (pmd_swp_soft_dirty(pmd))
				flags |= PM_SOFT_DIRTY;
			VM_BUG_ON(!is_pmd_migration_entry(pmd));
			page = migration_entry_to_page(entry);
		}
#endif

		if (page && page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		for (; addr != end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(frame, flags);

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
			if (pm->show_pfn) {
				if (flags & PM_PRESENT)
					frame++;
				else if (flags & PM_SWAP)
					frame += (1 << MAX_SWAPFILES_SHIFT);
			}
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmdp))
		return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid one and @end never
	 * goes beyond vma->vm_end.
	 */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		pagemap_entry_t pme;

		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	pte_unmap_unlock(orig_pte, ptl);

	cond_resched();

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);

		if (!PageAnon(page))
			flags |= PM_FILE;

		if (page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		flags |= PM_PRESENT;
		if (pm->show_pfn)
			frame = pte_pfn(pte) +
				((addr & ~hmask) >> PAGE_SHIFT);
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	cond_resched();

	return err;
}
#else
#define pagemap_hugetlb_range	NULL
#endif /* HUGETLB_PAGE */
static const struct mm_walk_ops pagemap_ops = {
	.pmd_entry	= pagemap_pmd_range,
	.pte_hole	= pagemap_pte_hole,
	.hugetlb_entry	= pagemap_hugetlb_range,
};
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
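/*
 * Sketch of how a reader consumes this file (illustrative only): for a
 * virtual address vaddr, seek to (vaddr / PAGE_SIZE) * 8 and read one
 * 64-bit entry "ent", then:
 *
 *	present    = ent & (1ULL << 63);
 *	swapped    = ent & (1ULL << 62);
 *	soft_dirty = ent & (1ULL << 55);
 *	pfn        = ent & ((1ULL << 55) - 1);
 *
 * where pfn is only meaningful if the page is present and the reader has
 * CAP_SYS_ADMIN (see the show_pfn check in pagemap_read() below).
 */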
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !mmget_not_zero(mm))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	if (svpfn > mm->task_size >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		ret = down_read_killable(&mm->mmap_sem);
		if (ret)
			goto out_free;
		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}
static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}
const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
					      struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pmd_present(pmd))
		return NULL;

	page = vm_normal_page_pmd(vma, addr, pmd);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
#endif
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		struct page *page;

		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
		if (page)
			gather_stats(page, md, pmd_dirty(*pmd),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	pte_t huge_pte = huge_ptep_get(pte);
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(huge_pte))
		return 0;

	page = pte_page(huge_pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(huge_pte), 1);
	return 0;
}
#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif
static const struct mm_walk_ops show_numa_ops = {
	.hugetlb_entry = gather_hugetlb_stats,
	.pmd_entry = gather_pte_stats,
};
/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_file_path(m, file, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else if (is_stack(vma)) {
		seq_puts(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_sem is held by m_start */
	walk_page_vma(vma, &show_numa_ops, md);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	return 0;
}
static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_numa_map,
};

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
				sizeof(struct numa_maps_private));
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

#endif /* CONFIG_NUMA */