Linux 4.8.3
/*
 * fs/proc/vmcore.c - Interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"
/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);
int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
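
/*
 * Illustrative sketch (not part of this file): a hypervisor backend in the
 * kdump kernel can hook the RAM check via the registration above. A
 * hypothetical balloon driver might do:
 *
 *	static int my_pfn_is_ram(unsigned long pfn)
 *	{
 *		return query_hypervisor_page_type(pfn); // hypothetical helper
 *	}
 *	...
 *	register_oldmem_pfn_is_ram(&my_pfn_is_ram);
 */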
static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}
/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
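
/*
 * Worked example for the offset arithmetic above (illustrative values,
 * assuming PAGE_SIZE == 4096): *ppos == 0x100500 splits into pfn == 0x100
 * and offset == 0x500, so the first iteration copies at most
 * PAGE_SIZE - 0x500 == 0xb00 bytes; every later iteration starts at
 * offset 0 of the next pfn.
 */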
/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{
}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, from, pfn, size, prot);
}
/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}
/* Read from the ELF header and then the crash dump. Returns the number of
 * bytes read on success, or a negative value on error.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}
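
/*
 * Reading order sketch: a read at file offset fpos walks three regions in
 * turn: [0, elfcorebuf_sz) is served from the in-kernel ELF header copy,
 * [elfcorebuf_sz, elfcorebuf_sz + elfnotes_sz) from the merged note buffer,
 * and everything beyond that from old memory via the vmcore_list chunks.
 */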
static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}
static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};
/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
	return vmalloc_user(notes_sz);
#else
	return vzalloc(notes_sz);
#endif
}
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len);
	return -EAGAIN;
}
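
/*
 * Illustrative walk-through: for pfns 0x100..0x107 where only 0x103 is
 * reported as non-ram, the loop remaps 0x100..0x102 in one call, maps the
 * zero page in place of 0x103, and the trailing block after the loop remaps
 * 0x104..0x107, so the user mapping stays virtually contiguous.
 */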
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif
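
/*
 * User-space sketch (illustrative, not mandated by this file): dump tools
 * may mmap /proc/vmcore instead of read()ing it:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	...parse the ELF headers at p, then the note and load segments...
 */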
static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};
static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}
static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
				  struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}
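
/*
 * Worked example for the note-size arithmetic above (illustrative values):
 * a note with n_namesz == 5 ("CORE\0") and n_descsz == 336 occupies
 * sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4)
 * == 12 + 8 + 336 == 356 bytes of the note segment.
 */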
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type   = PT_NOTE;
	phdr.p_flags  = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr  = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align  = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
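
/*
 * Resulting file layout after the merge (sketch):
 *
 *	[ Elf64_Ehdr | merged PT_NOTE phdr | remaining phdrs ]	rounded to a page
 *	[ merged ELF note segment data ]			rounded to a page
 *	[ memory chunks described by the PT_LOAD entries ]
 */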
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type   = PT_NOTE;
	phdr.p_flags  = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr  = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align  = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
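
/*
 * Illustrative rounding example (assuming PAGE_SIZE == 4096): a PT_LOAD
 * entry with p_offset == 0x100500 and p_memsz == 0x2000 yields
 * start == 0x100000 and end == 0x103000, so a 0x3000-byte chunk is added
 * and the exported p_offset becomes vmcore_off + 0x500.
 */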
static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);
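
/*
 * Typical consumption from user space in the kdump kernel (illustrative;
 * the exact tool invocation is not mandated by this file):
 *
 *	cp /proc/vmcore /var/crash/vmcore
 *	makedumpfile -l -d 31 /proc/vmcore /var/crash/dumpfile
 */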
/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}