/* fs/proc/task_nommu.c */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}
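
/*
 * Sum the sizes of all the task's VMAs to give its total virtual
 * address-space footprint in bytes.
 */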
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}
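
/*
 * Produce the page-based breakdown used by /proc/<pid>/statm: text and
 * data sizes come from the mm's code/data boundaries, and the returned
 * total (also stored in *resident) additionally counts the kernel
 * objects backing the mm, its VMAs and their regions.
 */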
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}
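
/* pad the output out to the column where the file name or [stack] marker goes */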
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
			  int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct proc_maps_private *priv = m->private;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags, len;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino, &len);

	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "");
	} else if (mm) {
		pid_t tid = vm_is_stack(priv->task, vma, is_pid);

		if (tid != 0) {
			pad_len_spaces(m, len);
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, "[stack]");
			else
				seq_printf(m, "[stack:%d]", tid);
		}
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
			      is_pid);
}
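
/*
 * The PID variant backs /proc/<pid>/maps and annotates thread stacks
 * with their TID; the TID variant backs /proc/<pid>/task/<tid>/maps,
 * where each stack is simply reported as [stack].
 */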
static int show_pid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 0);
}
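
/*
 * seq_file iteration: pin the task and its mm, take mmap_sem for
 * reading, and return the *pos'th VMA's rb_node (or NULL once the
 * position runs past the end of the tree).
 */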
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ);
	if (!mm || IS_ERR(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return mm;
	}
	down_read(&mm->mmap_sem);

	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;
	return NULL;
}
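
/* drop mmap_sem and the mm and task references taken in m_start() */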
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (priv->task) {
		struct mm_struct *mm = priv->task->mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
		put_task_struct(priv->task);
	}
}
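
/* advance to the next VMA in the rbtree */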
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}
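
/* the same iterator is shared by the PID and TID flavours of the maps file */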
static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};
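
/*
 * Allocate the per-open private state, record which PID this proc
 * inode refers to and attach the requested seq_operations.
 */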
static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_tid_maps_ops);
}
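
/* file_operations for /proc/<pid>/maps and /proc/<pid>/task/<tid>/maps */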
const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};