fs/proc/task_nommu.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}

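/*
 * Sum the sizes of all VMAs in the address space, in bytes; this
 * backs the vsize figure reported for the task.
 */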
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

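/*
 * Fill in the page-granular counters for /proc/<pid>/statm.  The
 * return value is the total size in pages, which is also stored in
 * *resident, since on no-MMU everything mapped is resident.
 */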
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

static int is_stack(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= mm->start_stack &&
		vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
			  int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(vma)) {
		seq_pad(m, ' ');
		seq_printf(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
			      is_pid);
}

static int show_pid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 0);
}

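/*
 * seq_file ->start(): pin the task and mm, take the mmap semaphore
 * for reading, and return the rb_node of the VMA at index *pos, or
 * NULL once the tree is exhausted.
 */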
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return NULL;

	down_read(&mm->mmap_sem);
	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	up_read(&mm->mmap_sem);
	mmput(mm);
	return NULL;
}

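/*
 * seq_file ->stop(): drop the lock and references taken by m_start().
 */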
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(_vml)) {
		up_read(&priv->mm->mmap_sem);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

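/*
 * seq_file ->next(): advance to the next VMA in the tree.
 */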
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

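/*
 * Common open routine: allocate the per-file private state and take
 * a reference on the target mm, with access checked against the
 * given ptrace mode by proc_mem_open().
 */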
static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

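/*
 * Release routine: drop the mm reference taken at open time before
 * tearing down the seq_file private state.
 */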
static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_tid_maps_ops);
}

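/*
 * File operations backing the per-process and per-thread maps files
 * under /proc on no-MMU kernels.
 */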
const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};