/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */
#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16
struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};
/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * GART, crash reserved region etc.
	 */
	unsigned int max_nr_ranges;
	unsigned long gart_start, gart_end;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};
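/*
 * Note: ehdr and bufp act as cursors while the headers are generated;
 * bufp always points at the slot where the next Elf64_Phdr will be
 * written (see prepare_elf64_headers() and its callback below).
 */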
/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};
/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
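/*
 * Zero-filled source used when loading the backup segment; the real
 * contents of the backup region are copied by purgatory at crash time
 * (see crash_load_segments() below).
 */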
unsigned long crash_zero_bytes;
static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}
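/*
 * The callback above runs in NMI context on every cpu caught by
 * nmi_shootdown_cpus(), so it must not sleep, allocate or take locks.
 */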
static void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}

#endif
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
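/*
 * Ordering above matters: secondary cpus are shot down first so their
 * register state is saved and virtualization is disabled everywhere
 * before this cpu tears down the APICs and control moves to the crash
 * kernel.
 */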
#ifdef CONFIG_KEXEC_FILE

static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}
static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;

	ced->gart_start = start;
	ced->gart_end = end;

	/* Not expecting more than 1 gart aperture */
	return 1;
}
/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/*
	 * We don't create ELF headers for GART aperture as an attempt
	 * to dump this memory in second kernel leads to hang/crash.
	 * If gart aperture is present, one needs to exclude that region
	 * and that could lead to need of extra phdr.
	 */
	walk_iomem_res("GART", IORESOURCE_MEM, 0, -1,
		       ced, get_gart_ranges_callback);

	/*
	 * If we have gart region, excluding that could potentially split
	 * a memory range, resulting in extra header. Account for that.
	 */
	if (ced->gart_end)
		ced->max_nr_ranges++;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}
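/*
 * exclude_mem_range() removes [mstart, mend] from the set of ranges in
 * @mem, truncating, deleting or splitting entries as needed. Worked
 * example (illustrative values): excluding [0x100, 0x1ff] from the
 * single range [0x0, 0xfff] leaves [0x0, 0xff] and [0x200, 0xfff],
 * growing nr_ranges from 1 to 2. Each exclusion therefore adds at most
 * one range, which is the bound the max_nr_ranges accounting above and
 * CRASH_MAX_RANGES rely on.
 */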
static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOMEM;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to split and split ranges are put in ced->mem.ranges[] array
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	/* Exclude GART region */
	if (ced->gart_end) {
		ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
		if (ret)
			return ret;
	}

	return ret;
}
static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}
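/*
 * prepare_elf64_headers() below lays everything out in one buffer: an
 * Elf64_Ehdr, then PT_NOTE phdrs (one per present cpu plus one for
 * vmcoreinfo), the kernel-text PT_LOAD phdr on x86_64, and finally one
 * PT_LOAD phdr per (possibly split) System RAM range.
 */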
static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
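	/*
	 * Sizing example (illustrative numbers): with 4 possible cpus and
	 * max_nr_ranges == 3, nr_phdr = 4 + 1 + 3 + 1 = 9, so elf_sz is
	 * 64 + 9 * 56 = 568 bytes, rounded up to 4096 by the ALIGN() above.
	 */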
	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);
	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}
	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;
#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif
	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
				  prepare_elf64_ram_headers_callback);
	if (ret < 0)
		return ret;

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}
static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820MAX)
		return 1;

	memcpy(&params->e820_map[nr_e820_entries], entry,
	       sizeof(struct e820entry));
	params->e820_entries++;
	return 0;
}
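/*
 * Note: add_e820_entry() returns 1 rather than an -errno when the e820
 * map is already full; the callers below treat a dropped entry as
 * non-fatal and ignore the return value.
 */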
static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}
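/*
 * The memory map handed to the crash kernel below ends up containing:
 * the backup (first 640K) region, ACPI tables and NVS areas, the
 * optional low crashkernel region, and the usable parts of crashk_res
 * with the backup and elf-header areas carved out.
 */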
/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_NVS;
	walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}
int crash_load_segments(struct kimage *image)
{
	unsigned long src_start, src_sz, elf_sz;
	void *elf_addr;
	int ret;

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	src_start = image->arch.backup_src_start;
	src_sz = image->arch.backup_src_sz;

	/* Add backup segment. */
	if (src_sz) {
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
				       sizeof(crash_zero_bytes), src_sz,
				       PAGE_SIZE, 0, -1, 0,
				       &image->arch.backup_load_addr);
		if (ret)
			return ret;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr, src_start, src_sz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
	if (ret)
		return ret;

	image->arch.elf_headers = elf_addr;
	image->arch.elf_headers_sz = elf_sz;

	ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
			       ELF_CORE_HEADER_ALIGN, 0, -1, 0,
			       &image->arch.elf_load_addr);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, elf_sz, elf_sz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */