// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"
16 * Logic: we've got two memory sums for each process, "shared", and
17 * "non-shared". Shared memory may get counted more than once, for
18 * each process that owns it. Non-shared memory is counted
21 void task_mem(struct seq_file
*m
, struct mm_struct
*mm
)
23 VMA_ITERATOR(vmi
, mm
, 0);
24 struct vm_area_struct
*vma
;
25 struct vm_region
*region
;
26 unsigned long bytes
= 0, sbytes
= 0, slack
= 0, size
;
29 for_each_vma(vmi
, vma
) {
30 bytes
+= kobjsize(vma
);
32 region
= vma
->vm_region
;
34 size
= kobjsize(region
);
35 size
+= region
->vm_end
- region
->vm_start
;
37 size
= vma
->vm_end
- vma
->vm_start
;
40 if (atomic_read(&mm
->mm_count
) > 1 ||
41 is_nommu_shared_mapping(vma
->vm_flags
)) {
46 slack
= region
->vm_end
- vma
->vm_end
;
50 if (atomic_read(&mm
->mm_count
) > 1)
51 sbytes
+= kobjsize(mm
);
53 bytes
+= kobjsize(mm
);
55 if (current
->fs
&& current
->fs
->users
> 1)
56 sbytes
+= kobjsize(current
->fs
);
58 bytes
+= kobjsize(current
->fs
);
60 if (current
->files
&& atomic_read(¤t
->files
->count
) > 1)
61 sbytes
+= kobjsize(current
->files
);
63 bytes
+= kobjsize(current
->files
);
65 if (current
->sighand
&& refcount_read(¤t
->sighand
->count
) > 1)
66 sbytes
+= kobjsize(current
->sighand
);
68 bytes
+= kobjsize(current
->sighand
);
70 bytes
+= kobjsize(current
); /* includes kernel stack */
76 "Slack:\t%8lu bytes\n"
77 "Shared:\t%8lu bytes\n",
78 bytes
, slack
, sbytes
);
81 unsigned long task_vsize(struct mm_struct
*mm
)
83 VMA_ITERATOR(vmi
, mm
, 0);
84 struct vm_area_struct
*vma
;
85 unsigned long vsize
= 0;
88 for_each_vma(vmi
, vma
)
89 vsize
+= vma
->vm_end
- vma
->vm_start
;
94 unsigned long task_statm(struct mm_struct
*mm
,
95 unsigned long *shared
, unsigned long *text
,
96 unsigned long *data
, unsigned long *resident
)
98 VMA_ITERATOR(vmi
, mm
, 0);
99 struct vm_area_struct
*vma
;
100 struct vm_region
*region
;
101 unsigned long size
= kobjsize(mm
);
104 for_each_vma(vmi
, vma
) {
105 size
+= kobjsize(vma
);
106 region
= vma
->vm_region
;
108 size
+= kobjsize(region
);
109 size
+= region
->vm_end
- region
->vm_start
;
113 *text
= (PAGE_ALIGN(mm
->end_code
) - (mm
->start_code
& PAGE_MASK
))
115 *data
= (PAGE_ALIGN(mm
->start_stack
) - (mm
->start_data
& PAGE_MASK
))
117 mmap_read_unlock(mm
);
119 size
+= *text
+ *data
;
125 * display a single VMA to a sequenced file
127 static int nommu_vma_show(struct seq_file
*m
, struct vm_area_struct
*vma
)
129 struct mm_struct
*mm
= vma
->vm_mm
;
130 unsigned long ino
= 0;
134 unsigned long long pgoff
= 0;
136 flags
= vma
->vm_flags
;
140 struct inode
*inode
= file_inode(vma
->vm_file
);
141 dev
= inode
->i_sb
->s_dev
;
143 pgoff
= (loff_t
)vma
->vm_pgoff
<< PAGE_SHIFT
;
146 seq_setwidth(m
, 25 + sizeof(void *) * 6 - 1);
148 "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
151 flags
& VM_READ
? 'r' : '-',
152 flags
& VM_WRITE
? 'w' : '-',
153 flags
& VM_EXEC
? 'x' : '-',
154 flags
& VM_MAYSHARE
? flags
& VM_SHARED
? 'S' : 's' : 'p',
156 MAJOR(dev
), MINOR(dev
), ino
);
160 seq_path(m
, file_user_path(file
), "");
161 } else if (mm
&& vma_is_initial_stack(vma
)) {
163 seq_puts(m
, "[stack]");
/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
	return nommu_vma_show(m, _p);
}
178 static struct vm_area_struct
*proc_get_vma(struct proc_maps_private
*priv
,
181 struct vm_area_struct
*vma
= vma_next(&priv
->iter
);
184 *ppos
= vma
->vm_start
;
192 static void *m_start(struct seq_file
*m
, loff_t
*ppos
)
194 struct proc_maps_private
*priv
= m
->private;
195 unsigned long last_addr
= *ppos
;
196 struct mm_struct
*mm
;
198 /* See proc_get_vma(). Zero at the start or after lseek. */
199 if (last_addr
== -1UL)
202 /* pin the task and mm whilst we play with them */
203 priv
->task
= get_proc_task(priv
->inode
);
205 return ERR_PTR(-ESRCH
);
208 if (!mm
|| !mmget_not_zero(mm
)) {
209 put_task_struct(priv
->task
);
214 if (mmap_read_lock_killable(mm
)) {
216 put_task_struct(priv
->task
);
218 return ERR_PTR(-EINTR
);
221 vma_iter_init(&priv
->iter
, mm
, last_addr
);
223 return proc_get_vma(priv
, ppos
);
226 static void m_stop(struct seq_file
*m
, void *v
)
228 struct proc_maps_private
*priv
= m
->private;
229 struct mm_struct
*mm
= priv
->mm
;
234 mmap_read_unlock(mm
);
236 put_task_struct(priv
->task
);
240 static void *m_next(struct seq_file
*m
, void *_p
, loff_t
*ppos
)
242 return proc_get_vma(m
->private, ppos
);
245 static const struct seq_operations proc_pid_maps_ops
= {
252 static int maps_open(struct inode
*inode
, struct file
*file
,
253 const struct seq_operations
*ops
)
255 struct proc_maps_private
*priv
;
257 priv
= __seq_open_private(file
, ops
, sizeof(*priv
));
262 priv
->mm
= proc_mem_open(inode
, PTRACE_MODE_READ
);
263 if (IS_ERR(priv
->mm
)) {
264 int err
= PTR_ERR(priv
->mm
);
266 seq_release_private(inode
, file
);
274 static int map_release(struct inode
*inode
, struct file
*file
)
276 struct seq_file
*seq
= file
->private_data
;
277 struct proc_maps_private
*priv
= seq
->private;
282 return seq_release_private(inode
, file
);
285 static int pid_maps_open(struct inode
*inode
, struct file
*file
)
287 return maps_open(inode
, file
, &proc_pid_maps_ops
);
290 const struct file_operations proc_pid_maps_operations
= {
291 .open
= pid_maps_open
,
294 .release
= map_release
,