// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/proc/vmcore.c Interface for accessing the crash
 *                  dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"
/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);
/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device dump list and mutex to synchronize access to the list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device dump size */
static size_t vmcoredd_orig_sz;
/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);
int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
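
/*
 * Illustrative sketch, not part of the original file: a paravirtual
 * balloon driver would typically register its page-type check from its
 * own init code, e.g.
 *
 *	static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		return example_pfn_is_backed(pfn) ? 1 : 0;
 *	}
 *	...
 *	register_oldmem_pfn_is_ram(&example_oldmem_pfn_is_ram);
 *
 * The "example_*" names are hypothetical. Only one callback can be
 * registered at a time; a second registration fails with -EBUSY until
 * unregister_oldmem_pfn_is_ram() is called.
 */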
void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask hypervisor if the pfn is really ram.
         * A ballooned page contains no data and reading from such a page
         * will cause high load in the hypervisor.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}
/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf,
                                bool encrypted)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        if (encrypted)
                                tmp = copy_oldmem_page_encrypted(pfn, buf,
                                                                 nr_bytes,
                                                                 offset,
                                                                 userbuf);
                        else
                                tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                       offset, userbuf);

                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}
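
/*
 * Worked example for the loop above (illustrative, assuming 4 KiB pages):
 * a request of count = 5000 bytes at *ppos = 4396 starts at pfn 1 with
 * offset 300. The first iteration copies PAGE_SIZE - 300 = 3796 bytes,
 * the second copies the remaining 1204 bytes from pfn 2 at offset 0,
 * and the function returns read = 5000.
 */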
/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        return 0;
}
/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}
/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0, sev_active());
}
/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
}
/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
                                  unsigned long from, unsigned long pfn,
                                  unsigned long size, pgprot_t prot)
{
        prot = pgprot_encrypted(prot);
        return remap_pfn_range(vma, from, pfn, size, prot);
}
/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
                           unsigned long offset, int userbuf)
{
        return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}
/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
        if (userbuf) {
                if (copy_to_user((char __user *) target, src, size))
                        return -EFAULT;
        } else {
                memcpy(target, src, size);
        }
        return 0;
}
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (copy_to(dst, buf, tsz, userbuf)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}
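
/*
 * Illustrative walk-through, not from the original source: with two
 * dumps of 8 KiB and 16 KiB on vmcoredd_list, a request with
 * start = 12 KiB skips the first node (offset advances to 8 KiB) and
 * copies from the second node's buffer beginning at byte
 * start - offset = 4096.
 */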
#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
                               u64 start, size_t size)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                             int userbuf)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
                if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        /* Read Elf note segment */
        if (*fpos < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the device dumps. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data can be
                 * avoided.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)*fpos, buflen);
                        start = *fpos - elfcorebuf_sz;
                        if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
                                return -EFAULT;

                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (!buflen)
                                return acc;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
                kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
                if (copy_to(buffer, kaddr, tsz, userbuf))
                        return -EFAULT;

                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - *fpos,
                                            buflen);
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start,
                                               userbuf, mem_encrypt_active());
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}
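
/*
 * Sketch of the /proc/vmcore layout consumed above (illustrative, not
 * normative):
 *
 *	[ ELF header + program headers ]  elfcorebuf   (elfcorebuf_sz)
 *	[ device dump notes            ]  vmcoredd_list buffers
 *	[ merged ELF notes             ]  elfnotes_buf (up to elfnotes_sz)
 *	[ PT_LOAD chunk 0, chunk 1 ... ]  vmcore_list, read from oldmem
 *
 * A single read may straddle several of these regions; each branch
 * above consumes its slice and falls through to the next.
 */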
static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
{
        return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
#ifdef CONFIG_S390
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pgoff_t index = vmf->pgoff;
        struct page *page;
        loff_t offset;
        char *buf;
        int rc;

        page = find_or_create_page(mapping, index, GFP_KERNEL);
        if (!page)
                return VM_FAULT_OOM;
        if (!PageUptodate(page)) {
                offset = (loff_t) index << PAGE_SHIFT;
                buf = __va((page_to_pfn(page) << PAGE_SHIFT));
                rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
                if (rc < 0) {
                        unlock_page(page);
                        put_page(page);
                        return vmf_error(rc);
                }
                SetPageUptodate(page);
        }
        unlock_page(page);
        vmf->page = page;
        return 0;
}
#else
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif
static const struct vm_operations_struct vmcore_mmap_ops = {
        .fault = mmap_vmcore_fault,
};
/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
        return vmalloc_user(size);
#else
        return vzalloc(size);
#endif
}
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */

#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
                                    unsigned long from, unsigned long pfn,
                                    unsigned long size, pgprot_t prot)
{
        unsigned long map_size;
        unsigned long pos_start, pos_end, pos;
        unsigned long zeropage_pfn = my_zero_pfn(0);
        size_t len = 0;

        pos_start = pfn;
        pos_end = pfn + (size >> PAGE_SHIFT);

        for (pos = pos_start; pos < pos_end; ++pos) {
                if (!pfn_is_ram(pos)) {
                        /*
                         * We hit a page which is not ram. Remap the continuous
                         * region between pos_start and pos-1 and replace
                         * the non-ram page at pos with the zero page.
                         */
                        if (pos > pos_start) {
                                /* Remap continuous region */
                                map_size = (pos - pos_start) << PAGE_SHIFT;
                                if (remap_oldmem_pfn_range(vma, from + len,
                                                           pos_start, map_size,
                                                           prot))
                                        goto fail;
                                len += map_size;
                        }
                        /* Remap the zero page */
                        if (remap_oldmem_pfn_range(vma, from + len,
                                                   zeropage_pfn,
                                                   PAGE_SIZE, prot))
                                goto fail;
                        len += PAGE_SIZE;
                        pos_start = pos + 1;
                }
        }
        if (pos > pos_start) {
                /* Remap the rest */
                map_size = (pos - pos_start) << PAGE_SHIFT;
                if (remap_oldmem_pfn_range(vma, from + len, pos_start,
                                           map_size, prot))
                        goto fail;
        }
        return 0;
fail:
        do_munmap(vma->vm_mm, from, len, NULL);
        return -EAGAIN;
}
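
/*
 * Illustrative case, not from the original source: remapping six pages
 * where only pfn + 3 is reported as non-RAM issues three calls to
 * remap_oldmem_pfn_range(): pages 0-2 as one continuous region, the
 * zero page in place of page 3, and pages 4-5 via the tail remap after
 * the loop.
 */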
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                                   unsigned long from, unsigned long pfn,
                                   unsigned long size, pgprot_t prot)
{
        /*
         * Check if oldmem_pfn_is_ram was registered to avoid
         * looping over all pages without a reason.
         */
        if (oldmem_pfn_is_ram)
                return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        u64 start, end, len, tsz;
        struct vmcore *m;

        start = (u64)vma->vm_pgoff << PAGE_SHIFT;
        end = start + size;

        if (size > vmcore_size || end > vmcore_size)
                return -EINVAL;

        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;

        vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &vmcore_mmap_ops;

        len = 0;

        if (start < elfcorebuf_sz) {
                u64 pfn;

                tsz = min(elfcorebuf_sz - (size_t)start, size);
                pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        if (start < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the device dumps. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data can be
                 * avoided. This also ensures that the device dumps and
                 * other elf notes can be properly mmaped at page aligned
                 * addresses.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
                        u64 start_off;

                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)start, size);
                        start_off = start - elfcorebuf_sz;
                        if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
                                                start_off, tsz))
                                goto fail;

                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        /* leave now if filled buffer already */
                        if (!size)
                                return 0;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
                                                kaddr, tsz))
                        goto fail;

                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (start < m->offset + m->size) {
                        u64 paddr = 0;

                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - start, size);
                        paddr = m->paddr + start - m->offset;
                        if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
                                                    paddr >> PAGE_SHIFT, tsz,
                                                    vma->vm_page_prot))
                                goto fail;
                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        if (size == 0)
                                return 0;
                }
        }

        return 0;
fail:
        do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
        return -EAGAIN;
}
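
/*
 * Userspace usage sketch (illustrative): a dump filtering tool can map
 * the whole file instead of read()ing it, e.g.
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	void *p = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * Writable or executable mappings are refused above with -EPERM, and
 * mappings that would extend past vmcore_size fail with -EINVAL.
 */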
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif
static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
        .llseek         = default_llseek,
        .mmap           = mmap_vmcore,
};
static struct vmcore * __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}
static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                           struct list_head *vc_list)
{
        u64 size;
        struct vmcore *m;

        size = elfsz + elfnotesegsz;
        list_for_each_entry(m, vc_list, list) {
                size += m->size;
        }
        return size;
}
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf64_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0)
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
        }

        return 0;
}
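
/*
 * The note-size arithmetic above, spelled out (illustrative): each note
 * is an Elf64_Nhdr followed by its name and descriptor, each padded to
 * a 4-byte boundary via ((x + 3) & ~3). A note with n_namesz = 5 and
 * n_descsz = 10 therefore occupies 12 + 8 + 12 = 32 bytes.
 */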
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note
 * segment size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf64_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}
/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note
 * segment size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        rc = update_note_header_size_elf64(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf64_Ehdr) +
                (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i,
                ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes. We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
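
/*
 * Illustrative example, not from the original source: a 4-CPU dump
 * typically arrives with one PT_NOTE header per CPU crash_notes region
 * plus one for VMCOREINFO, i.e. nr_ptnote = 5. The merge keeps a single
 * PT_NOTE entry, so the header table shrinks by
 * (nr_ptnote - 1) * sizeof(Elf64_Phdr) = 4 * 56 = 224 bytes and e_phnum
 * drops by 4.
 */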
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf32_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0)
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
        }

        return 0;
}
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note
 * segment size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf32_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}
/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note
 * segment size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf32_Ehdr) +
                (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i,
                ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes. We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                       size_t elfsz,
                                                       size_t elfnotes_sz,
                                                       struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
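
/*
 * Rounding example for the code above (illustrative, 4 KiB pages): a
 * PT_LOAD chunk at physical address 0x10000100 with p_memsz 0x2000 is
 * expanded to the page-aligned range [0x10000000, 0x10003000), so
 * size = 0x3000 and the updated p_offset points 0x100 bytes into the
 * chunk's slot in the exported file.
 */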
static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                       size_t elfsz,
                                                       size_t elfnotes_sz,
                                                       struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                    struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}
static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}
static int __init parse_crash_elf64_headers(void)
{
        int rc = 0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}
static int __init parse_crash_elf32_headers(void)
{
        int rc = 0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf32_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}
static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc = 0;

        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Determine vmcore size. */
        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);

        return 0;
}
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
                                  u32 size)
{
        struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

        vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
        vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
        vdd_hdr->n_type = NT_VMCOREDD;

        strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
                sizeof(vdd_hdr->name));
        memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine the type of Elf header (Elf64 or Elf32) and update the elf note
 * size. Also update the offsets of all the program headers after the elf
 * note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
                                            size_t vmcoreddsz)
{
        unsigned char *e_ident = (unsigned char *)elfptr;
        u64 start, end, size;
        loff_t vmcore_off;
        u32 i;

        vmcore_off = elfcorebuf_sz + elfnotesz;

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
                Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        } else {
                Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
                Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        }
}
/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf program headers.
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
        vmcoredd_orig_sz += dump_size;
        elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
        vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
                                        vmcoredd_orig_sz);

        /* Update vmcore list offsets */
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);
        proc_vmcore->size = vmcore_size;
}
/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write an Elf note at the beginning of the buffer to indicate a vmcore
 * device dump and add the dump to the global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
        struct vmcoredd_node *dump;
        void *buf = NULL;
        size_t data_size;
        int ret;

        if (vmcoredd_disabled) {
                pr_err_once("Device dump is disabled\n");
                return -EINVAL;
        }

        if (!data || !strlen(data->dump_name) ||
            !data->vmcoredd_callback || !data->size)
                return -EINVAL;

        dump = vzalloc(sizeof(*dump));
        if (!dump) {
                ret = -ENOMEM;
                goto out_err;
        }

        /* Keep size of the buffer page aligned so that it can be mmaped */
        data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
                            PAGE_SIZE);

        /* Allocate buffer for drivers to write their dumps */
        buf = vmcore_alloc_buf(data_size);
        if (!buf) {
                ret = -ENOMEM;
                goto out_err;
        }

        vmcoredd_write_header(buf, data, data_size -
                              sizeof(struct vmcoredd_header));

        /* Invoke the driver's dump collection routine */
        ret = data->vmcoredd_callback(data, buf +
                                      sizeof(struct vmcoredd_header));
        if (ret)
                goto out_err;

        dump->buf = buf;
        dump->size = data_size;

        /* Add the dump to driver sysfs list */
        mutex_lock(&vmcoredd_mutex);
        list_add_tail(&dump->list, &vmcoredd_list);
        mutex_unlock(&vmcoredd_mutex);

        vmcoredd_update_size(data_size);
        return 0;

out_err:
        vfree(buf);
        vfree(dump);

        return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
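
/*
 * Driver-side usage sketch (illustrative; the "example_*" names are
 * hypothetical): a driver that wants its hardware state preserved in
 * the dump fills a vmcoredd_data and registers it:
 *
 *	static struct vmcoredd_data dd = {
 *		.dump_name         = "EXAMPLE_NIC",
 *		.size              = EXAMPLE_DUMP_LEN,
 *		.vmcoredd_callback = example_collect_hw_state,
 *	};
 *	...
 *	vmcore_add_device_dump(&dd);
 *
 * The callback receives a buffer of .size bytes located just after the
 * vmcoredd_header written above.
 */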
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
        mutex_lock(&vmcoredd_mutex);
        while (!list_empty(&vmcoredd_list)) {
                struct vmcoredd_node *dump;

                dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
                                        list);
                list_del(&dump->list);
                vfree(dump->buf);
                vfree(dump);
        }
        mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* Allow architectures to allocate ELF header in 2nd kernel */
        rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
        if (rc)
                return rc;
        /*
         * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
         * then capture the dump.
         */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }
        elfcorehdr_free(elfcorehdr_addr);
        elfcorehdr_addr = ELFCORE_ADDR_ERR;

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
fs_initcall(vmcore_init);
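
/*
 * Using fs_initcall() means /proc/vmcore is registered during boot,
 * before userspace starts, so dump-collection tools launched from early
 * init can rely on the file already being present (a reading of the
 * initcall ordering, stated here as an assumption).
 */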
/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        while (!list_empty(&vmcore_list)) {
                struct vmcore *m;

                m = list_first_entry(&vmcore_list, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();

        /* clear vmcore device dump list */
        vmcore_free_device_dumps();
}