[HTB]: Remove lock macro.
fs/proc/task_mmu.c
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
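/*
 * task_mem() formats the VmPeak/VmSize/.../VmPTE block that appears in
 * /proc/<pid>/status.  The mm counters are kept in pages; shifting by
 * (PAGE_SHIFT - 10) converts a page count to kilobytes.
 */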
char *task_mem(struct mm_struct *mm, char *buffer)
{
        unsigned long data, text, lib;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss.  Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher.  Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = get_mm_rss(mm);
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        buffer += sprintf(buffer,
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
        return buffer;
}
unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        *shared = get_mm_counter(mm, file_rss);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = *shared + get_mm_counter(mm, anon_rss);
        return mm->total_vm;
}
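/*
 * Resolve /proc/<pid>/exe: scan the vma list for the first executable
 * file-backed mapping (VM_EXECUTABLE) and take references on its mount
 * and dentry.
 */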
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
        struct vm_area_struct * vma;
        int result = -ENOENT;
        struct task_struct *task = get_proc_task(inode);
        struct mm_struct * mm = NULL;

        if (task) {
                mm = get_task_mm(task);
                put_task_struct(task);
        }
        if (!mm)
                goto out;
        down_read(&mm->mmap_sem);

        vma = mm->mmap;
        while (vma) {
                if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
                        break;
                vma = vma->vm_next;
        }

        if (vma) {
                *mnt = mntget(vma->vm_file->f_vfsmnt);
                *dentry = dget(vma->vm_file->f_dentry);
                result = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
out:
        return result;
}
static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}
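/*
 * Per-vma totals gathered for /proc/<pid>/smaps.  All fields are byte
 * counts (accumulated a page at a time) and are shifted down to
 * kilobytes when printed.
 */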
struct mem_size_stats
{
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
};
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}
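/*
 * Emit one /proc/<pid>/maps line for the vma and, when mss is non-NULL
 * (the smaps path), the per-vma size breakdown after it.  An
 * illustrative (made-up) maps line:
 *
 *   08048000-0804c000 r-xp 00000000 03:01 8312       /bin/cat
 */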
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;
        struct vm_area_struct *vma = v;
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        dev_t dev = 0;
        int len;

        if (file) {
                struct inode *inode = vma->vm_file->f_dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
                        vma->vm_start,
                        vma->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        vma->vm_pgoff << PAGE_SHIFT,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                pad_len_spaces(m, len);
                seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
        } else {
                const char *name = arch_vma_name(vma);
                if (!name) {
                        if (mm) {
                                if (vma->vm_start <= mm->start_brk &&
                                                vma->vm_end >= mm->brk) {
                                        name = "[heap]";
                                } else if (vma->vm_start <= mm->start_stack &&
                                           vma->vm_end >= mm->start_stack) {
                                        name = "[stack]";
                                }
                        } else {
                                name = "[vdso]";
                        }
                }
                if (name) {
                        pad_len_spaces(m, len);
                        seq_puts(m, name);
                }
        }
        seq_putc(m, '\n');

        if (mss)
                seq_printf(m,
                           "Size:          %8lu kB\n"
                           "Rss:           %8lu kB\n"
                           "Shared_Clean:  %8lu kB\n"
                           "Shared_Dirty:  %8lu kB\n"
                           "Private_Clean: %8lu kB\n"
                           "Private_Dirty: %8lu kB\n",
                           (vma->vm_end - vma->vm_start) >> 10,
                           mss->resident >> 10,
                           mss->shared_clean >> 10,
                           mss->shared_dirty >> 10,
                           mss->private_clean >> 10,
                           mss->private_dirty >> 10);

        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
        return 0;
}
static int show_map(struct seq_file *m, void *v)
{
        return show_map_internal(m, v, NULL);
}
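/*
 * Accumulate smaps statistics for one range of ptes: each present pte
 * adds PAGE_SIZE to resident; a page mapped by two or more ptes
 * (page_mapcount() >= 2) counts as shared, any other page as private;
 * the pte dirty bit then splits each class into clean and dirty.
 */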
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                            unsigned long addr, unsigned long end,
                            struct mem_size_stats *mss)
{
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                ptent = *pte;
                if (!pte_present(ptent))
                        continue;

                mss->resident += PAGE_SIZE;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                if (page_mapcount(page) >= 2) {
                        if (pte_dirty(ptent))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                } else {
                        if (pte_dirty(ptent))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
}
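/*
 * The walkers below descend the upper page-table levels
 * (pgd -> pud -> pmd), skipping empty entries, until smaps_pte_range()
 * can examine the leaf ptes under the page-table lock.
 */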
static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   struct mem_size_stats *mss)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                smaps_pte_range(vma, pmd, addr, next, mss);
        } while (pmd++, addr = next, addr != end);
}
static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   struct mem_size_stats *mss)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                smaps_pmd_range(vma, pud, addr, next, mss);
        } while (pud++, addr = next, addr != end);
}
static inline void smaps_pgd_range(struct vm_area_struct *vma,
                                   unsigned long addr, unsigned long end,
                                   struct mem_size_stats *mss)
{
        pgd_t *pgd;
        unsigned long next;

        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                smaps_pud_range(vma, pgd, addr, next, mss);
        } while (pgd++, addr = next, addr != end);
}
static int show_smap(struct seq_file *m, void *v)
{
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;

        memset(&mss, 0, sizeof mss);
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
        return show_map_internal(m, v, &mss);
}
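/*
 * seq_file iterator for the maps/smaps/numa_maps files.  m_start()
 * pins the task and its mm, takes mmap_sem for reading and returns the
 * vma at position *pos, with the gate vma (if any) appended after the
 * ordinary list.  m->version caches the address of the last vma shown
 * so that a later read can resume via find_vma() instead of rescanning
 * the whole list.
 */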
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma, *tail_vma = NULL;
        loff_t l = *pos;

        /* Clear the per syscall fields in priv */
        priv->task = NULL;
        priv->tail_vma = NULL;

        /*
         * We remember last_addr rather than next_addr to hit with
         * mmap_cache most of the time. We have zero last_addr at
         * the beginning and also after lseek. We will have -1 last_addr
         * after the end of the vmas.
         */

        if (last_addr == -1UL)
                return NULL;

        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return NULL;

        mm = get_task_mm(priv->task);
        if (!mm)
                return NULL;

        priv->tail_vma = tail_vma = get_gate_vma(priv->task);
        down_read(&mm->mmap_sem);

        /* Start with last addr hint */
        if (last_addr && (vma = find_vma(mm, last_addr))) {
                vma = vma->vm_next;
                goto out;
        }

        /*
         * Check the vma index is within the range and do
         * sequential scan until m_index.
         */
        vma = NULL;
        if ((unsigned long)l < mm->map_count) {
                vma = mm->mmap;
                while (l-- && vma)
                        vma = vma->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_vma = NULL; /* After gate vma */

out:
        if (vma)
                return vma;

        /* End of vmas has been reached */
        m->version = (tail_vma != NULL)? 0: -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_vma;
}
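/*
 * Release mmap_sem and the mm reference taken in m_start().  Nothing
 * needs to be done for the tail (gate) vma: by the time m_start() or
 * m_next() returns it, the lock and reference have already been
 * dropped.
 */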
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
        if (vma && vma != priv->tail_vma) {
                struct mm_struct *mm = vma->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;
        struct vm_area_struct *tail_vma = priv->tail_vma;

        (*pos)++;
        if (vma && (vma != tail_vma) && vma->vm_next)
                return vma->vm_next;
        vma_stop(priv, vma);
        return (vma != tail_vma)? tail_vma: NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;

        vma_stop(priv, vma);
        if (priv->task)
                put_task_struct(priv->task);
}
static struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};
static struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_smap
};
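/*
 * Common open path for all three files: allocate the per-open
 * proc_maps_private, record the pid, and attach it as the seq_file
 * private pointer.
 */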
static int do_maps_open(struct inode *inode, struct file *file,
                        struct seq_operations *ops)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}
static int maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_maps_op);
}
struct file_operations proc_maps_operations = {
        .open           = maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_numa_map
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

struct file_operations proc_numa_maps_operations = {
        .open           = numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};
#endif
static int smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_smaps_op);
}

struct file_operations proc_smaps_operations = {
        .open           = smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};