2 #include <linux/hugetlb.h>
3 #include <linux/mount.h>
4 #include <linux/seq_file.h>
6 #include <asm/uaccess.h>
9 char *task_mem(struct mm_struct
*mm
, char *buffer
)
11 unsigned long data
, text
, lib
;
13 data
= mm
->total_vm
- mm
->shared_vm
- mm
->stack_vm
;
14 text
= (PAGE_ALIGN(mm
->end_code
) - (mm
->start_code
& PAGE_MASK
)) >> 10;
15 lib
= (mm
->exec_vm
<< (PAGE_SHIFT
-10)) - text
;
16 buffer
+= sprintf(buffer
,
25 (mm
->total_vm
- mm
->reserved_vm
) << (PAGE_SHIFT
-10),
26 mm
->locked_vm
<< (PAGE_SHIFT
-10),
27 get_mm_counter(mm
, rss
) << (PAGE_SHIFT
-10),
28 data
<< (PAGE_SHIFT
-10),
29 mm
->stack_vm
<< (PAGE_SHIFT
-10), text
, lib
,
30 (PTRS_PER_PTE
*sizeof(pte_t
)*mm
->nr_ptes
) >> 10);
34 unsigned long task_vsize(struct mm_struct
*mm
)
36 return PAGE_SIZE
* mm
->total_vm
;
39 int task_statm(struct mm_struct
*mm
, int *shared
, int *text
,
40 int *data
, int *resident
)
42 int rss
= get_mm_counter(mm
, rss
);
44 *shared
= rss
- get_mm_counter(mm
, anon_rss
);
45 *text
= (PAGE_ALIGN(mm
->end_code
) - (mm
->start_code
& PAGE_MASK
))
47 *data
= mm
->total_vm
- mm
->shared_vm
;
52 int proc_exe_link(struct inode
*inode
, struct dentry
**dentry
, struct vfsmount
**mnt
)
54 struct vm_area_struct
* vma
;
56 struct task_struct
*task
= proc_task(inode
);
57 struct mm_struct
* mm
= get_task_mm(task
);
61 down_read(&mm
->mmap_sem
);
65 if ((vma
->vm_flags
& VM_EXECUTABLE
) && vma
->vm_file
)
71 *mnt
= mntget(vma
->vm_file
->f_vfsmnt
);
72 *dentry
= dget(vma
->vm_file
->f_dentry
);
76 up_read(&mm
->mmap_sem
);
/*
 * Pad the maps line out to a fixed column before the name field.
 * @len is the number of characters already emitted (from the %n in
 * show_map's seq_printf); always emits at least one space.
 */
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
90 static int show_map(struct seq_file
*m
, void *v
)
92 struct task_struct
*task
= m
->private;
93 struct vm_area_struct
*map
= v
;
94 struct mm_struct
*mm
= map
->vm_mm
;
95 struct file
*file
= map
->vm_file
;
96 int flags
= map
->vm_flags
;
97 unsigned long ino
= 0;
102 struct inode
*inode
= map
->vm_file
->f_dentry
->d_inode
;
103 dev
= inode
->i_sb
->s_dev
;
107 seq_printf(m
, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
110 flags
& VM_READ
? 'r' : '-',
111 flags
& VM_WRITE
? 'w' : '-',
112 flags
& VM_EXEC
? 'x' : '-',
113 flags
& VM_MAYSHARE
? 's' : 'p',
114 map
->vm_pgoff
<< PAGE_SHIFT
,
115 MAJOR(dev
), MINOR(dev
), ino
, &len
);
118 * Print the dentry name for named mappings, and a
119 * special [heap] marker for the heap:
122 pad_len_spaces(m
, len
);
123 seq_path(m
, file
->f_vfsmnt
, file
->f_dentry
, "");
126 if (map
->vm_start
<= mm
->start_brk
&&
127 map
->vm_end
>= mm
->brk
) {
128 pad_len_spaces(m
, len
);
129 seq_puts(m
, "[heap]");
131 if (map
->vm_start
<= mm
->start_stack
&&
132 map
->vm_end
>= mm
->start_stack
) {
134 pad_len_spaces(m
, len
);
135 seq_puts(m
, "[stack]");
139 pad_len_spaces(m
, len
);
140 seq_puts(m
, "[vdso]");
144 if (m
->count
< m
->size
) /* map is copied successfully */
145 m
->version
= (map
!= get_gate_vma(task
))? map
->vm_start
: 0;
149 static void *m_start(struct seq_file
*m
, loff_t
*pos
)
151 struct task_struct
*task
= m
->private;
152 unsigned long last_addr
= m
->version
;
153 struct mm_struct
*mm
;
154 struct vm_area_struct
*map
, *tail_map
;
158 * We remember last_addr rather than next_addr to hit with
159 * mmap_cache most of the time. We have zero last_addr at
160 * the begining and also after lseek. We will have -1 last_addr
161 * after the end of the maps.
164 if (last_addr
== -1UL)
167 mm
= get_task_mm(task
);
171 tail_map
= get_gate_vma(task
);
172 down_read(&mm
->mmap_sem
);
174 /* Start with last addr hint */
175 if (last_addr
&& (map
= find_vma(mm
, last_addr
))) {
181 * Check the map index is within the range and do
182 * sequential scan until m_index.
185 if ((unsigned long)l
< mm
->map_count
) {
192 if (l
!= mm
->map_count
)
193 tail_map
= NULL
; /* After gate map */
199 /* End of maps has reached */
200 m
->version
= (tail_map
!= NULL
)? 0: -1UL;
201 up_read(&mm
->mmap_sem
);
206 static void m_stop(struct seq_file
*m
, void *v
)
208 struct task_struct
*task
= m
->private;
209 struct vm_area_struct
*map
= v
;
210 if (map
&& map
!= get_gate_vma(task
)) {
211 struct mm_struct
*mm
= map
->vm_mm
;
212 up_read(&mm
->mmap_sem
);
217 static void *m_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
219 struct task_struct
*task
= m
->private;
220 struct vm_area_struct
*map
= v
;
221 struct vm_area_struct
*tail_map
= get_gate_vma(task
);
224 if (map
&& (map
!= tail_map
) && map
->vm_next
)
227 return (map
!= tail_map
)? tail_map
: NULL
;
230 struct seq_operations proc_pid_maps_op
= {