/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16
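
/*
 * Every user in this file starts from a single seed range and applies at
 * most two exclusions to it, and each call to exclude_mem_range() below
 * grows the array by at most one entry (one split), so 16 slots is a
 * comfortable upper bound.
 */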

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
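
/*
 * Note: VMCLEAR forces any processor-cached VMCS state back to memory, so
 * the dump captures up-to-date VMCS contents and the CPU cannot write back
 * stale VMCS data into memory after the kdump kernel has booted.
 */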

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

static void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE

static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}
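
/*
 * max_nr_ranges is only an upper bound; prepare_elf64_headers() uses it to
 * size the program header array before the final set of post-exclusion
 * ranges is known.
 */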

static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOSPC;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
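
/*
 * Example: with ranges[] = { [0x0 - 0x7fff] }, excluding 0x2000 - 0x2fff
 * leaves [0x0 - 0x1fff] and [0x3000 - 0x7fff] and bumps nr_ranges to 2.
 * Range ends are inclusive throughout this file.
 */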

/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to split and split ranges are put in ced->mem.ranges[] array
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}
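
/*
 * Note that p_offset normally holds the physical address of each chunk
 * (except for the backup region above); the second kernel's /proc/vmcore
 * reads old memory at the physical address recorded in the program header.
 */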

static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}
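
	/*
	 * crash_notes is a percpu buffer filled by crash_save_cpu() at crash
	 * time; per_cpu_ptr_to_phys() yields the physical address the dump
	 * kernel will read each CPU's notes from.
	 */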

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
			prepare_elf64_ram_headers_callback);
	if (ret < 0)
		return ret;

	*addr = buf;
	*sz = elf_sz;
	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820MAX)
		return 1;

	memcpy(&params->e820_map[nr_e820_entries], entry,
	       sizeof(struct e820entry));
	params->e820_entries++;
	return 0;
}
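
/*
 * Note that a full table means the entry is silently dropped:
 * memmap_entry_callback() below does not check the return value.
 */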

static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
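
/*
 * The map built above is handed to the second kernel via its boot_params,
 * in effect restricting it to the crashkernel region(s), the backup area
 * and the ACPI ranges.
 */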

static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}
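
/*
 * determine_backup_region()'s non-zero return stops walk_system_ram_res()
 * after the first matching range, so only a single backup range is ever
 * recorded.
 */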

int crash_load_segments(struct kimage *image)
{
	unsigned long src_start, src_sz, elf_sz;
	void *elf_addr;
	int ret;

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	src_start = image->arch.backup_src_start;
	src_sz = image->arch.backup_src_sz;

	/* Add backup segment. */
	if (src_sz) {
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
				       sizeof(crash_zero_bytes), src_sz,
				       PAGE_SIZE, 0, -1, 0,
				       &image->arch.backup_load_addr);
		if (ret)
			return ret;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr, src_start, src_sz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
	if (ret)
		return ret;

	image->arch.elf_headers = elf_addr;
	image->arch.elf_headers_sz = elf_sz;

	ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
			       ELF_CORE_HEADER_ALIGN, 0, -1, 0,
			       &image->arch.elf_load_addr);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, elf_sz, elf_sz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */