#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
}
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}
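/*
 * Note the unit difference above: task_mem() reports kilobytes, hence
 * the "<< (PAGE_SHIFT-10)" conversions, while task_statm() fills in raw
 * page counts ("text" is shifted down by PAGE_SHIFT, the remaining
 * fields come from page-granular VM counters).
 */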
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}
static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
static int show_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}
static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
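/*
 * Worked example (illustrative, assuming 4K pages): a page mapped by
 * three processes contributes (4096 << PSS_SHIFT) / 3 = 5592405
 * fixed-point units to pss; shifting right by PSS_SHIFT recovers
 * ~1365 bytes, i.e. one third of the page, with under one byte of
 * rounding loss for that page.
 */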
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long swap;
	u64 pss;
};
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   void *private)
{
	struct mem_size_stats *mss = private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (is_swap_pte(ptent)) {
			mss->swap += PAGE_SIZE;
			continue;
		}

		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	int ret;

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
				&smaps_walk, &mss);

	ret = show_map(m, v);
	if (ret)
		return ret;

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Swap:           %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.swap >> 10);

	return ret;
}
static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, void *private)
{
	struct vm_area_struct *vma = private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };
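/*
 * Writing a non-zero value to /proc/<pid>/clear_refs runs the walk above
 * over every non-hugetlb VMA of the target: the pte "young" bits and the
 * PageReferenced flags are cleared, so a later read of smaps' "Referenced:"
 * field counts only pages touched since the write.
 */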
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF], *end;
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (!simple_strtol(buffer, &end, 0))
		return -EINVAL;
	if (*end == '\n')
		end++;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (!is_vm_hugetlb_page(vma))
				walk_page_range(mm, vma->vm_start, vma->vm_end,
						&clear_refs_walk, vma);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);
	if (end - buffer == 0)
		return -EIO;
	return end - buffer;
}
const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};
struct pagemapread {
	char __user *out, *end;
};
#define PM_ENTRY_BYTES sizeof(u64)
#define PM_STATUS_BITS 3
#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS 6
#define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK)

#define PM_PRESENT PM_STATUS(4LL)
#define PM_SWAP PM_STATUS(2LL)
#define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER 1
static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	/*
	 * Make sure there's room in the buffer for an
	 * entire entry.  Otherwise, only copy part of
	 * the pfn.
	 */
	if (pm->out + PM_ENTRY_BYTES >= pm->end) {
		if (copy_to_user(pm->out, &pfn, pm->end - pm->out))
			return -EFAULT;
		pm->out = pm->end;
		return PM_END_OF_BUFFER;
	}

	if (put_user(pfn, pm->out))
		return -EFAULT;
	pm->out += PM_ENTRY_BYTES;
	return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    void *private)
{
	struct pagemapread *pm = private;
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     void *private)
{
	struct pagemapread *pm = private;
	pte_t *pte;
	int err = 0;

	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;
		pte = pte_offset_map(pmd, addr);
		if (is_swap_pte(*pte))
			pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
		else if (pte_present(*pte))
			pfn = PM_PFRAME(pte_pfn(*pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
		/* unmap so we're not in atomic when we copy to userspace */
		pte_unmap(pte);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
static struct mm_walk pagemap_walk = {
	.pmd_entry = pagemap_pte_range,
	.pte_hole = pagemap_pte_hole
};
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-55  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-55  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
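/*
 * Illustrative userspace sketch (not part of this file): since the file
 * holds one PM_ENTRY_BYTES (8-byte) entry per virtual page, the entry
 * for address "vaddr" lives at file offset (vaddr / page_size) * 8, e.g.
 *
 *	uint64_t entry;
 *	pread(fd, &entry, sizeof(entry),
 *	      (vaddr / page_size) * sizeof(entry));
 *
 * where "fd" is an open /proc/<pid>/pagemap descriptor and "vaddr",
 * "page_size" are the caller's own values; bit 63 of "entry" then
 * reports presence and the low bits hold the PFN as described above.
 */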
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct page **pages, *page;
	unsigned long uaddr, uend;
	struct mm_struct *mm;
	struct pagemapread pm;
	int pagecount;
	int ret = -ESRCH;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_attach(task))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if (*ppos % PM_ENTRY_BYTES)
		goto out_task;

	ret = 0;
	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	ret = -ENOMEM;
	uaddr = (unsigned long)buf & PAGE_MASK;
	uend = (unsigned long)(buf + count);
	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
	pages = kmalloc(pagecount * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_mm;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, pagecount,
			     1, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < 0)
		goto out_free;

	if (ret != pagecount) {
		pagecount = ret;
		ret = -EFAULT;
		goto out_pages;
	}

	pm.out = buf;
	pm.end = buf + count;

	if (!ptrace_may_attach(task)) {
		ret = -EIO;
	} else {
		unsigned long src = *ppos;
		unsigned long svpfn = src / PM_ENTRY_BYTES;
		unsigned long start_vaddr = svpfn << PAGE_SHIFT;
		unsigned long end_vaddr = TASK_SIZE_OF(task);

		/* watch out for wraparound */
		if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
			start_vaddr = end_vaddr;

		/*
		 * The odds are that this will stop walking way
		 * before end_vaddr, because the length of the
		 * user buffer is tracked in "pm", and the walk
		 * will stop when we hit the end of the buffer.
		 */
		ret = walk_page_range(mm, start_vaddr, end_vaddr,
					&pagemap_walk, &pm);
		if (ret == PM_END_OF_BUFFER)
			ret = 0;
		/* don't need mmap_sem for these, but this looks cleaner */
		*ppos += pm.out - buf;
		if (!ret)
			ret = pm.out - buf;
	}

out_pages:
	for (; pagecount; pagecount--) {
		page = pages[pagecount-1];
		if (!PageReserved(page))
			SetPageDirty(page);
		page_cache_release(page);
	}
out_free:
	kfree(pages);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}
const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static int show_numa_map_checked(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	return show_numa_map(m, v);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map_checked
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */