/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include "internal.h"
/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;
/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
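
/*
 * Illustrative sketch (not part of this file): a hypervisor balloon driver
 * would typically hook this up as below, where my_balloon_pfn_is_ram() is a
 * hypothetical callback returning 0 for ballooned-out pfns and 1 otherwise:
 *
 *	static int my_balloon_pfn_is_ram(unsigned long pfn)
 *	{
 *		return my_balloon_page_is_populated(pfn) ? 1 : 0;
 *	}
 *
 *	register_oldmem_pfn_is_ram(&my_balloon_pfn_is_ram);
 *	...
 *	unregister_oldmem_pfn_is_ram();
 */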
static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}
/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
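
/*
 * Worked example for the pfn/offset split above (assuming 4 KiB pages):
 * for *ppos == 0x12345, pfn = 0x12345 / 0x1000 = 0x12 and offset = 0x345,
 * so the first iteration copies PAGE_SIZE - 0x345 bytes from pfn 0x12 and
 * every following iteration starts at offset 0 of the next pfn.
 */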
/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{
}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}
/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}
/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}
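
/*
 * The vmcore file layout handled above, as a sketch:
 *
 *	[0, elfcorebuf_sz)                 ELF header + program headers
 *	[elfcorebuf_sz, +elfnotes_sz)      merged ELF note segment
 *	[m->offset, m->offset + m->size)   one chunk per vmcore list entry,
 *	                                   read from old memory at m->paddr
 */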
static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}
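
/*
 * Illustrative userspace consumer (not part of this file): the dump is a
 * regular ELF core image, so e.g. the ELF header can be read directly:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	Elf64_Ehdr ehdr;
 *	if (fd >= 0 && read(fd, &ehdr, sizeof(ehdr)) == sizeof(ehdr))
 *		assert(ehdr.e_type == ET_CORE);
 *
 * Tools such as makedumpfile and crash consume the file the same way.
 */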
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};
/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
	return vmalloc_user(notes_sz);
#else
	return vzalloc(notes_sz);
#endif
}
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}

	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}
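
/*
 * Worked example for remap_oldmem_pfn_checked() (hypothetical pfns): for a
 * four-page mapping where pfn_is_ram() reports {ram, ram, !ram, ram}, the
 * loop remaps pages 0 and 1 as one contiguous region, maps the zero page in
 * place of page 2, and the trailing "remap the rest" block maps page 3.
 */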
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif
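
/*
 * Illustrative userspace usage of the mmap path (not part of this file):
 * a read-only, private mapping of the first @length bytes of the dump:
 *
 *	void *p = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * Requests including PROT_WRITE or PROT_EXEC fail with -EPERM per the
 * VM_WRITE/VM_EXEC check in mmap_vmcore() above.
 */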
static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};
static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}
static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
				  struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}
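
/*
 * The note-size arithmetic above, worked through (hypothetical note): a note
 * with n_namesz = 5 ("CORE\0") and n_descsz = 0x150 occupies
 * sizeof(Elf64_Nhdr) (12 bytes) + roundup(5, 4) + roundup(0x150, 4)
 * = 12 + 8 + 336 = 356 bytes, since both the name and the descriptor are
 * padded to 4-byte boundaries.
 */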
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
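
/*
 * Example of the merge above (hypothetical header table): with e_phnum = 4
 * and program headers {PT_NOTE, PT_NOTE, PT_NOTE, PT_LOAD}, phdr_sz is the
 * sum of the three updated p_memsz values, the three PT_NOTE entries are
 * replaced by a single merged one, and e_phnum becomes 4 - 3 + 1 = 2.
 */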
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
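
/*
 * Example of the rounding above (hypothetical PT_LOAD): for a segment with
 * physical address 0x100500 and p_memsz 0x2000, start = 0x100000 and
 * end = 0x103000, so a 3-page chunk is added to the vmcore list and the
 * exported p_offset keeps the intra-page shift of 0x500.
 */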
static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);
/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}