fs/proc/task_mmu.c

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <asm/elf.h>
#include <asm/uaccess.h>
#include "internal.h"
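
/*
 * Format the VmSize/VmLck/VmRSS/... lines used by the /proc/<pid>/status
 * code.  The mm counters are kept in pages, so they are shifted by
 * (PAGE_SHIFT-10) to print kilobytes; the caller supplies a buffer large
 * enough for the eight lines emitted here.
 */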
char *task_mem(struct mm_struct *mm, char *buffer)
{
        unsigned long data, text, lib;

        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        buffer += sprintf(buffer,
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n",
                (mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                get_mm_counter(mm, rss) << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
        return buffer;
}
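
/* Total mapped address space in bytes (the vsize figure reported via /proc/<pid>/stat). */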
unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}
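
/*
 * Fill in the page counts reported by /proc/<pid>/statm: the total size is
 * returned, and the shared, text, data and resident page counts are passed
 * back through the pointer arguments.
 */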
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        int rss = get_mm_counter(mm, rss);

        *shared = rss - get_mm_counter(mm, anon_rss);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = rss;
        return mm->total_vm;
}
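
/*
 * Resolve /proc/<pid>/exe: walk the task's VMA list for the first
 * executable, file-backed mapping and hand back its dentry and vfsmount
 * with references taken.
 */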
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
        struct vm_area_struct *vma;
        int result = -ENOENT;
        struct task_struct *task = proc_task(inode);
        struct mm_struct *mm = get_task_mm(task);

        if (!mm)
                goto out;
        down_read(&mm->mmap_sem);

        vma = mm->mmap;
        while (vma) {
                if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
                        break;
                vma = vma->vm_next;
        }

        if (vma) {
                *mnt = mntget(vma->vm_file->f_vfsmnt);
                *dentry = dget(vma->vm_file->f_dentry);
                result = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
out:
        return result;
}
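
/*
 * Pad a /proc/<pid>/maps line out to a fixed column so that the pathname
 * (or [heap]/[stack]/[vdso] marker) that follows lines up regardless of
 * pointer width; at least one space is always emitted.
 */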
static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}
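
/*
 * Emit one /proc/<pid>/maps line for a single VMA: address range,
 * permissions, file offset, device, inode and, where applicable, the
 * backing file name or a [heap]/[stack]/[vdso] marker.  m->version is
 * updated so the next read can resume from this address.
 */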
static int show_map(struct seq_file *m, void *v)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *map = v;
        struct mm_struct *mm = map->vm_mm;
        struct file *file = map->vm_file;
        int flags = map->vm_flags;
        unsigned long ino = 0;
        dev_t dev = 0;
        int len;

        if (file) {
                struct inode *inode = map->vm_file->f_dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
                        map->vm_start,
                        map->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        map->vm_pgoff << PAGE_SHIFT,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (map->vm_file) {
                pad_len_spaces(m, len);
                seq_path(m, file->f_vfsmnt, file->f_dentry, "");
        } else {
                if (mm) {
                        if (map->vm_start <= mm->start_brk &&
                                        map->vm_end >= mm->brk) {
                                pad_len_spaces(m, len);
                                seq_puts(m, "[heap]");
                        } else {
                                if (map->vm_start <= mm->start_stack &&
                                        map->vm_end >= mm->start_stack) {
                                        pad_len_spaces(m, len);
                                        seq_puts(m, "[stack]");
                                }
                        }
                } else {
                        pad_len_spaces(m, len);
                        seq_puts(m, "[vdso]");
                }
        }
        seq_putc(m, '\n');

        if (m->count < m->size)  /* map is copied successfully */
                m->version = (map != get_gate_vma(task)) ? map->vm_start : 0;
        return 0;
}
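
/*
 * seq_file start callback: position the walk at the VMA for *pos, pinning
 * the mm and taking mmap_sem for reading.  The gate (vsyscall) VMA is
 * appended after the ordinary mappings so it shows up as the final entry.
 */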
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct task_struct *task = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *map, *tail_map;
        loff_t l = *pos;

        /*
         * We remember last_addr rather than next_addr to hit with
         * mmap_cache most of the time. We have zero last_addr at
         * the beginning and also after lseek. We will have -1 last_addr
         * after the end of the maps.
         */

        if (last_addr == -1UL)
                return NULL;

        mm = get_task_mm(task);
        if (!mm)
                return NULL;

        tail_map = get_gate_vma(task);
        down_read(&mm->mmap_sem);

        /* Start with the last addr hint */
        if (last_addr && (map = find_vma(mm, last_addr))) {
                map = map->vm_next;
                goto out;
        }

        /*
         * Check that the map index is within range and do a
         * sequential scan up to that index.
         */
        map = NULL;
        if ((unsigned long)l < mm->map_count) {
                map = mm->mmap;
                while (l-- && map)
                        map = map->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_map = NULL; /* After gate map */

out:
        if (map)
                return map;

        /* The end of the maps has been reached */
        m->version = (tail_map != NULL) ? 0 : -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_map;
}
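
/*
 * seq_file stop callback: drop mmap_sem and the mm reference taken in
 * m_start(), unless the iterator is at the gate VMA or the end, in which
 * case m_start() has already released them.
 */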
static void m_stop(struct seq_file *m, void *v)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *map = v;

        if (map && map != get_gate_vma(task)) {
                struct mm_struct *mm = map->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}
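
/*
 * seq_file next callback: advance to the following VMA.  Once the ordinary
 * list is exhausted, release the locks via m_stop() and return the gate VMA
 * as the final element (or NULL if it has already been shown).
 */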
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *map = v;
        struct vm_area_struct *tail_map = get_gate_vma(task);

        (*pos)++;
        if (map && (map != tail_map) && map->vm_next)
                return map->vm_next;
        m_stop(m, v);
        return (map != tail_map) ? tail_map : NULL;
}
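
/*
 * seq_operations wired up when /proc/<pid>/maps is opened; the seq_file
 * core drives the start/next/show/stop callbacks above.
 */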
struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};