/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Vivek Goyal <vgoyal@redhat.com>
 */
#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16
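/*
 * Worked example: excluding both crashk_res and crashk_low_res from a
 * single RAM chunk splits it into at most three pieces, since each
 * exclusion removes at most one region and adds at most one split
 * remainder. 16 slots therefore leave ample headroom for typical
 * memory layouts.
 */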
struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};
/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};
/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};
/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. The callback function pointer is assigned
 * when the kvm_intel module is loaded.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;
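/*
 * Illustrative registration sequence, roughly what kvm_intel is
 * expected to do on module load and unload (the exact call sites live
 * in the KVM code, and "vmclear_callback" is a placeholder name):
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss, vmclear_callback);
 *	...
 *	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *	synchronize_rcu();
 */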
static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging.
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}
void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}
/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}
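/*
 * The static cpus_stopped flag makes the stop idempotent: both panic()
 * and the crash shutdown path (native_machine_crash_shutdown() below)
 * may call crash_smp_send_stop(), and the shootdown of the other CPUs
 * must only be performed once.
 */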
#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}

#endif
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging.
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}
/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}
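/*
 * Example with hypothetical numbers: if walk_system_ram_res() reports
 * four RAM chunks, max_nr_ranges becomes 4 + 1 (possible crashk_res
 * split) + 1 (possible crashk_low_res split) = 6.
 */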
static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split range to the array */
	if (!temp_range.end)
		return 0;

	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOMEM;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
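/*
 * Worked example (hypothetical addresses): excluding [0x1000, 0x1fff]
 * from the single range [0x0, 0x3fff] truncates it to [0x0, 0xfff] and
 * appends the split remainder [0x2000, 0x3fff], leaving nr_ranges == 2.
 */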
/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to splits, and the split ranges are put in the
 * ced->mem.ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}
static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}
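/*
 * Note that p_offset normally equals p_paddr, so /proc/vmcore reads a
 * PT_LOAD segment straight out of physical memory. The backup region is
 * the exception: the first 640K is reused by the kdump kernel, so only
 * the copy preserved at backup_load_addr still holds the crashed
 * kernel's data.
 */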
static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area on x86_64 (ffffffff80000000 - ffffffffa0000000),
	 * apparently required by tools like gdb. The same physical memory is
	 * therefore mapped by two elf headers: one carries the kernel text
	 * virtual addresses, the other the __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
				  prepare_elf64_ram_headers_callback);
	if (ret < 0) {
		vfree(buf);
		return ret;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
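/*
 * Resulting header layout (sketch; exact counts depend on the config
 * and memory map):
 *
 *	Elf64_Ehdr
 *	Elf64_Phdr  PT_NOTE  - one per present CPU (crash_notes)
 *	Elf64_Phdr  PT_NOTE  - vmcoreinfo
 *	Elf64_Phdr  PT_LOAD  - kernel text mapping (x86_64 only)
 *	Elf64_Phdr  PT_LOAD  - one per RAM chunk left after exclusions
 */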
/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}
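/*
 * The zero page's e820_table is a fixed array of
 * E820_MAX_ENTRIES_ZEROPAGE entries, hence the bound check above:
 * overflowing entries are dropped rather than corrupting boot_params.
 */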
static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}
/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
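/*
 * Sketch of the memmap handed to the kdump kernel (illustrative
 * addresses only):
 *
 *	0x00000000 - 0x0009ffff : RAM  (backed-up first 640K)
 *	ACPI table regions      : ACPI
 *	ACPI NVS regions        : NVS
 *	crashk_low_res          : RAM
 *	crashk_res minus backup and elf header segments : RAM
 */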
static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}
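/*
 * KEXEC_BACKUP_SRC_START/KEXEC_BACKUP_SRC_END (defined in asm/kexec.h)
 * bound the walk below to the first 640K of RAM, which the kdump kernel
 * reuses and must therefore be backed up.
 */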
int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source.
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */