/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <asm/sections.h>
#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};
static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;
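
/*
 * Register a kernel virtual address range so that it is exported as a
 * PT_LOAD segment of /proc/kcore.  Callers keep ownership of @new.
 */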
void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
        new->addr = (unsigned long)addr;
        new->size = size;
        new->type = type;

        write_lock(&kclist_lock);
        list_add_tail(&new->list, &kclist_head);
        write_unlock(&kclist_lock);
}
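
/*
 * Work out how many program headers are needed and how large the ELF
 * header buffer (ELF header + phdrs + notes, page aligned) has to be.
 */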
static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
        size_t try, size;
        struct kcore_list *m;

        *nphdr = 1; /* PT_NOTE */
        size = 0;

        list_for_each_entry(m, &kclist_head, list) {
                try = kc_vaddr_to_offset((size_t)m->addr + m->size);
                if (try > size)
                        size = try;
                *nphdr = *nphdr + 1;
        }
        *elf_buflen = sizeof(struct elfhdr) +
                        (*nphdr + 2)*sizeof(struct elf_phdr) +
                        3 * ((sizeof(struct elf_note)) +
                             roundup(sizeof(CORE_STR), 4)) +
                        roundup(sizeof(struct elf_prstatus), 4) +
                        roundup(sizeof(struct elf_prpsinfo), 4) +
                        roundup(arch_task_struct_size, 4);
        *elf_buflen = PAGE_ALIGN(*elf_buflen);
        return size + *elf_buflen;
}
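
/* Free every entry on a private list built by the kcore_update_ram() path. */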
static void free_kclist_ents(struct list_head *head)
{
        struct kcore_list *tmp, *pos;

        list_for_each_entry_safe(pos, tmp, head, list) {
                list_del(&pos->list);
                kfree(pos);
        }
}
/*
 * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list.
 */
static void __kcore_update_ram(struct list_head *list)
{
        int nphdr;
        size_t size;
        struct kcore_list *tmp, *pos;
        LIST_HEAD(garbage);

        write_lock(&kclist_lock);
        if (kcore_need_update) {
                list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
                        if (pos->type == KCORE_RAM
                                || pos->type == KCORE_VMEMMAP)
                                list_move(&pos->list, &garbage);
                }
                list_splice_tail(list, &kclist_head);
        } else
                list_splice(list, &garbage);
        kcore_need_update = 0;
        proc_root_kcore->size = get_kcore_size(&nphdr, &size);
        write_unlock(&kclist_lock);

        free_kclist_ents(&garbage);
}
#ifdef CONFIG_HIGHMEM
/*
 * With CONFIG_HIGHMEM we can treat [0...max_low_pfn) as one contiguous
 * range of memory, because the memory holes there are not as big as in
 * the !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_update_ram(void)
{
        LIST_HEAD(head);
        struct kcore_list *ent;
        int ret = 0;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va(0);
        ent->size = max_low_pfn << PAGE_SHIFT;
        ent->type = KCORE_RAM;
        list_add(&ent->list, &head);
        __kcore_update_ram(&head);
        return ret;
}
#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate the vmemmap address range for the given range of system RAM pfns and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
        unsigned long nr_pages = ent->size >> PAGE_SHIFT;
        unsigned long start, end;
        struct kcore_list *vmm, *tmp;

        start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
        end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
        end = PAGE_ALIGN(end);
        /* overlap check (because we have to align the pages) */
        list_for_each_entry(tmp, head, list) {
                if (tmp->type != KCORE_VMEMMAP)
                        continue;
                if (start < tmp->addr + tmp->size)
                        start = tmp->addr + tmp->size;
        }
        if (start < end) {
                vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
                if (!vmm)
                        return 0;
                vmm->addr = start;
                vmm->size = end - start;
                vmm->type = KCORE_VMEMMAP;
                list_add_tail(&vmm->list, head);
        }
        return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        return 1;
}
#endif
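
/*
 * walk_system_ram_range() callback: build a KCORE_RAM entry (and, with
 * SPARSEMEM_VMEMMAP, a matching KCORE_VMEMMAP entry) for one pfn range
 * and queue it on the private list passed through @arg.
 */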
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
        ent->size = nr_pages << PAGE_SHIFT;

        /* Sanity check: Can happen in 32bit arch...maybe */
        if (ent->addr < (unsigned long) __va(0))
                goto free_out;

        /* cut not-mapped area (taken from ppc-32 code) */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;

        /* cut when vmalloc() area is higher than direct-map area */
        if (VMALLOC_START > (unsigned long)__va(0)) {
                if (ent->addr > VMALLOC_START)
                        goto free_out;
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }

        ent->type = KCORE_RAM;
        list_add_tail(&ent->list, head);

        if (!get_sparsemem_vmemmap_info(ent, head)) {
                list_del(&ent->list);
                goto free_out;
        }

        return 0;
free_out:
        kfree(ent);
        return 1;
}
static int kcore_update_ram(void)
{
        int nid, ret;
        unsigned long end_pfn;
        LIST_HEAD(head);

        /* Not initialized....update now */
        /* find out "max pfn" */
        end_pfn = 0;
        for_each_node_state(nid, N_MEMORY) {
                unsigned long node_end;
                node_end = node_end_pfn(nid);
                if (end_pfn < node_end)
                        end_pfn = node_end;
        }
        /* scan 0 to max_pfn */
        ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
        if (ret) {
                free_kclist_ents(&head);
                return -ENOMEM;
        }
        __kcore_update_ram(&head);
        return ret;
}
#endif /* CONFIG_HIGHMEM */
/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup((strlen(en->name) + 1), 4);
        sz += roundup(en->datasz, 4);

        return sz;
} /* end notesize() */
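
/*
 * Worked example (illustrative): struct elf_note is three 32-bit words
 * (12 bytes) on both 32- and 64-bit ELF, and for the "CORE"/NT_PRSTATUS
 * note the name takes roundup(strlen("CORE") + 1, 4) = 8 bytes, so
 * notesize() = 12 + 8 + roundup(sizeof(struct elf_prstatus), 4).
 */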
/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
        struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en));
        DUMP_WRITE(men->name, en.n_namesz);

        /* XXX - cast from long long to long to avoid need for libgcc.a */
        bufp = (char*) roundup((unsigned long)bufp,4);
        DUMP_WRITE(men->data, men->datasz);
        bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

        return bufp;
} /* end storenote() */
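
/*
 * On-disk layout of each note record emitted above: the elf_note header
 * (n_namesz, n_descsz, n_type), then the name padded to a 4-byte boundary,
 * then the descriptor data padded to a 4-byte boundary.
 */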
/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
        struct elf_prstatus prstatus;	/* NT_PRSTATUS */
        struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
        struct elf_phdr *nhdr, *phdr;
        struct elfhdr *elf;
        struct memelfnote notes[3];
        off_t offset = 0;
        struct kcore_list *m;
        /* setup ELF header */
        elf = (struct elfhdr *) bufp;
        bufp += sizeof(struct elfhdr);
        offset += sizeof(struct elfhdr);
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
        elf->e_type = ET_CORE;
        elf->e_machine = ELF_ARCH;
        elf->e_version = EV_CURRENT;
        elf->e_phoff = sizeof(struct elfhdr);
        elf->e_flags = ELF_CORE_EFLAGS;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = nphdr;

        /* setup ELF PT_NOTE program header */
        nhdr = (struct elf_phdr *) bufp;
        bufp += sizeof(struct elf_phdr);
        offset += sizeof(struct elf_phdr);
        nhdr->p_type = PT_NOTE;

        /* setup ELF PT_LOAD program header for every area */
        list_for_each_entry(m, &kclist_head, list) {
                phdr = (struct elf_phdr *) bufp;
                bufp += sizeof(struct elf_phdr);
                offset += sizeof(struct elf_phdr);

                phdr->p_type = PT_LOAD;
                phdr->p_flags = PF_R|PF_W|PF_X;
                phdr->p_offset = kc_vaddr_to_offset(m->addr) + dataoff;
                phdr->p_vaddr = (size_t)m->addr;
                phdr->p_filesz = phdr->p_memsz = m->size;
                phdr->p_align = PAGE_SIZE;
        }
        /*
         * Set up the notes in similar form to SVR4 core dumps made
         * with info from their /proc.
         */
        nhdr->p_offset = offset;

        /* set up the process status */
        notes[0].name = CORE_STR;
        notes[0].type = NT_PRSTATUS;
        notes[0].datasz = sizeof(struct elf_prstatus);
        notes[0].data = &prstatus;

        memset(&prstatus, 0, sizeof(struct elf_prstatus));

        nhdr->p_filesz = notesize(&notes[0]);
        bufp = storenote(&notes[0], bufp);

        /* set up the process info */
        notes[1].name = CORE_STR;
        notes[1].type = NT_PRPSINFO;
        notes[1].datasz = sizeof(struct elf_prpsinfo);
        notes[1].data = &prpsinfo;

        memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
        prpsinfo.pr_state = 0;
        prpsinfo.pr_sname = 'R';
        prpsinfo.pr_zomb = 0;

        strcpy(prpsinfo.pr_fname, "vmlinux");
        strlcpy(prpsinfo.pr_psargs, saved_command_line, sizeof(prpsinfo.pr_psargs));

        nhdr->p_filesz += notesize(&notes[1]);
        bufp = storenote(&notes[1], bufp);

        /* set up the task structure */
        notes[2].name = CORE_STR;
        notes[2].type = NT_TASKSTRUCT;
        notes[2].datasz = arch_task_struct_size;
        notes[2].data = current;

        nhdr->p_filesz += notesize(&notes[2]);
        bufp = storenote(&notes[2], bufp);
} /* end elf_kcore_store_hdr() */
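
/*
 * The buffer built above therefore contains, in order: the ELF header, one
 * PT_NOTE program header, one PT_LOAD program header per kcore_list entry,
 * and the NT_PRSTATUS/NT_PRPSINFO/NT_TASKSTRUCT notes.  read_kcore() below
 * serves this header for offsets below elf_buflen and raw kernel memory,
 * located at kc_vaddr_to_offset(addr) + dataoff, beyond it.
 */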
/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0;
        size_t size, tsz;
        size_t elf_buflen;
        int nphdr;
        unsigned long start;

        read_lock(&kclist_lock);
        size = get_kcore_size(&nphdr, &elf_buflen);

        if (buflen == 0 || *fpos >= size) {
                read_unlock(&kclist_lock);
                return 0;
        }

        /* trim buflen to not go beyond EOF */
        if (buflen > size - *fpos)
                buflen = size - *fpos;

        /* construct an ELF core header if we'll need some of it */
        if (*fpos < elf_buflen) {
                char *elf_buf;

                tsz = elf_buflen - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
                if (!elf_buf) {
                        read_unlock(&kclist_lock);
                        return -ENOMEM;
                }
                elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
                read_unlock(&kclist_lock);
                if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
                        kfree(elf_buf);
                        return -EFAULT;
                }
                kfree(elf_buf);
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        } else
                read_unlock(&kclist_lock);

        /*
         * Check to see if our file offset matches with any of
         * the addresses in the elf_phdr on our list.
         */
        start = kc_offset_to_vaddr(*fpos - elf_buflen);
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

        while (buflen) {
                struct kcore_list *m;

                read_lock(&kclist_lock);
                list_for_each_entry(m, &kclist_head, list) {
                        if (start >= m->addr && start < (m->addr+m->size))
                                break;
                }
                read_unlock(&kclist_lock);

                if (&m->list == &kclist_head) {
                        if (clear_user(buffer, tsz))
                                return -EFAULT;
                } else if (is_vmalloc_or_module_addr((void *)start)) {
                        char *elf_buf;

                        elf_buf = kzalloc(tsz, GFP_KERNEL);
                        if (!elf_buf)
                                return -ENOMEM;
                        vread(elf_buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
                        if (copy_to_user(buffer, elf_buf, tsz)) {
                                kfree(elf_buf);
                                return -EFAULT;
                        }
                        kfree(elf_buf);
                } else {
                        if (kern_addr_valid(start)) {
                                unsigned long n;

                                n = copy_to_user(buffer, (char *)start, tsz);
                                /*
                                 * We cannot distinguish between fault on source
                                 * and fault on destination. When this happens
                                 * we clear too and hope it will trigger the
                                 * EFAULT again.
                                 */
                                if (n) {
                                        if (clear_user(buffer + tsz - n, n))
                                                return -EFAULT;
                                }
                        } else {
                                if (clear_user(buffer, tsz))
                                        return -EFAULT;
                        }
                }
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;
                start += tsz;
                tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
        }

        return acc;
}
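
/*
 * /proc/kcore is only readable by CAP_SYS_RAWIO holders; open also refreshes
 * the RAM list and the inode size so that a new reader sees the current layout.
 */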
static int open_kcore(struct inode *inode, struct file *filp)
{
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
                mutex_lock(&inode->i_mutex);
                i_size_write(inode, proc_root_kcore->size);
                mutex_unlock(&inode->i_mutex);
        }
        return 0;
}
static const struct file_operations proc_kcore_operations = {
        .read = read_kcore,
        .open = open_kcore,
        .llseek = default_llseek,
};
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                write_lock(&kclist_lock);
                kcore_need_update = 1;
                write_unlock(&kclist_lock);
        }
        return NOTIFY_OK;
}
static struct notifier_block kcore_callback_nb __meminitdata = {
        .notifier_call = kcore_callback,
};

static struct kcore_list kcore_vmalloc;
#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text instead
 * of the direct-map area, so we need to create a special TEXT section.
 */
static void __init proc_kcore_text_init(void)
{
        kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
        if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
                kclist_add(&kcore_modules, (void *)MODULES_VADDR,
                        MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
        }
}
#else
static void __init add_modules_range(void)
{
}
#endif
static int __init proc_kcore_init(void)
{
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
                                      &proc_kcore_operations);
        if (!proc_root_kcore) {
                pr_err("couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
        }
        /* Store text area if it's special */
        proc_kcore_text_init();
        /* Store vmalloc area */
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
        /* Store modules area */
        add_modules_range();
        /* Store direct-map area from physical memory map */
        kcore_update_ram();
        register_hotmemory_notifier(&kcore_callback_nb);

        return 0;
}
fs_initcall(proc_kcore_init);
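
/*
 * Usage sketch (user space, illustrative only): /proc/kcore presents the
 * live kernel as an ELF core image, so a debugger pointed at a matching
 * uncompressed vmlinux with debug info can inspect kernel memory, e.g.:
 *
 *	gdb /path/to/vmlinux /proc/kcore
 *	(gdb) p jiffies
 *
 * The vmlinux path is an assumption; any image matching the running build
 * works.
 */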