#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	/* "<< (PAGE_SHIFT-10)" converts a page count to kB */
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}

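/*
 * Illustrative only, values invented: the text task_mem() writes into
 * "buffer" is what appears in /proc/<pid>/status, e.g.
 *
 *	VmPeak:	    6168 kB
 *	VmSize:	    6100 kB
 *	VmLck:	       0 kB
 *	VmHWM:	    1308 kB
 *	VmRSS:	    1304 kB
 *	VmData:	     484 kB
 *	VmStk:	      88 kB
 *	VmExe:	     556 kB
 *	VmLib:	    1412 kB
 *	VmPTE:	      20 kB
 */
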
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}

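/*
 * Note the unit difference: task_mem() above reports kB, while
 * task_statm() reports page counts -- the get_mm_counter() values are
 * already in pages, and the text span is converted with
 * ">> PAGE_SHIFT".  The return value, mm->total_vm, is in pages too;
 * with 4 kB pages a 6100 kB address space shows up here as 1525 pages
 * (illustrative arithmetic only).
 */
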
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct * vma;
	int result = -ENOENT;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct * mm = NULL;

	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	/* The executable is the first file-backed vma with VM_EXECUTABLE set */
	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

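/*
 * Worked example (assuming a 64-bit kernel, so sizeof(void*) == 8):
 * the target column is 25 + 8 * 6 = 73, so a maps line whose fixed
 * part consumed len = 50 characters gets 73 - 50 = 23 spaces before
 * the name.  "%*c" right-justifies the single ' ' in a field of
 * width len, i.e. it emits exactly len spaces.
 */
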
/* Per-vma counters for /proc/<pid>/smaps, accumulated in bytes below */
struct mem_size_stats
{
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
};

/* Weak default; architectures can override to name special vmas */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size:          %8lu kB\n"
			   "Rss:           %8lu kB\n"
			   "Shared_Clean:  %8lu kB\n"
			   "Shared_Dirty:  %8lu kB\n"
			   "Private_Clean: %8lu kB\n"
			   "Private_Dirty: %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   mss->shared_clean  >> 10,
			   mss->shared_dirty  >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

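/*
 * Illustrative only, addresses invented: for a file-backed vma the
 * seq_printf() format above produces a /proc/<pid>/maps line such as
 *
 *	08048000-0804c000 r-xp 00000000 03:01 12345      /bin/cat
 *
 * and "%n" stores the length of the fixed part in "len" so the name
 * column can be aligned by pad_len_spaces().
 */
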
static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}

static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		if (page_mapcount(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

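/*
 * "Shared" here means mapped into more than one address space
 * (page_mapcount(page) >= 2), not VM_SHARED: a page of a MAP_PRIVATE
 * file mapping that two processes have both faulted in read-only is
 * counted as shared, while a page mapped by only one process counts
 * as private even in a MAP_SHARED mapping.
 */
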
static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		smaps_pte_range(vma, pmd, addr, next, mss);
	} while (pmd++, addr = next, addr != end);
}

static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		smaps_pmd_range(vma, pud, addr, next, mss);
	} while (pud++, addr = next, addr != end);
}

static inline void smaps_pgd_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		smaps_pud_range(vma, pgd, addr, next, mss);
	} while (pgd++, addr = next, addr != end);
}

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
	return show_map_internal(m, v, &mss);
}

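/*
 * Illustrative only, values invented: each /proc/<pid>/smaps entry is
 * the ordinary maps line followed by the block show_map_internal()
 * prints from these counters, e.g.
 *
 *	08048000-0804c000 r-xp 00000000 03:01 12345      /bin/cat
 *	Size:                16 kB
 *	Rss:                 12 kB
 *	Shared_Clean:        12 kB
 *	Shared_Dirty:         0 kB
 *	Private_Clean:        0 kB
 *	Private_Dirty:        0 kB
 */
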
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = get_task_mm(priv->task);
	if (!mm)
		return NULL;

	priv->tail_vma = tail_vma = get_gate_vma(priv->task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

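/*
 * Summary of the m->version protocol used above: 0 asks m_start() to
 * begin with the first vma (a fresh read, or after lseek), a
 * vma->vm_start value (stored by show_map_internal()) asks it to
 * resume with the vma following that address via find_vma(), and
 * -1UL means every vma, including the gate vma, has been emitted.
 */
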
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int do_maps_open(struct inode *inode, struct file *file,
			struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};