/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */
#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16
struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};
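/*
 * Note: range endpoints in this file are inclusive on both sides,
 * which is why sizes are computed as (end - start + 1).
 */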
/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};
/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};
/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;
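/*
 * VMCLEAR the VMCSs loaded on this CPU via the callback registered by
 * kvm_intel (if any), so that their contents are flushed to memory
 * before the crash dump is captured.
 */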
static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}
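/*
 * Shoot down the other CPUs with NMIs so that kdump_nmi_callback()
 * runs on each of them even when they are looping with interrupts
 * disabled.
 */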
void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}
/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
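/*
 * Everything below implements the kexec_file_load() side of kdump:
 * building the ELF core headers and the e820 memory map that get
 * passed to the crash kernel.
 */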
#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}
/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}
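/*
 * Remove [mstart, mend] from every range in @mem. Illustrative example
 * (not from the original source): excluding [1G, 1G+256M-1] from a
 * single range [0, 4G-1] leaves the two ranges [0, 1G-1] and
 * [1G+256M, 4G-1]; this interior split is why callers budget one
 * extra slot in max_nr_ranges.
 */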
static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOSPC;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to split and split ranges are put in ced->mem.ranges[] array
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}
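/*
 * walk_system_ram_res() callback: carve the crashkernel regions out of
 * [start, end] and emit one PT_LOAD program header per surviving range.
 */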
static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}
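/*
 * Resulting buffer layout: one Elf64_Ehdr, a PT_NOTE phdr per present
 * cpu, one PT_NOTE phdr for vmcoreinfo, one PT_LOAD phdr for the
 * kernel text mapping, then one PT_LOAD phdr per System RAM chunk.
 */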
static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
			prepare_elf64_ram_headers_callback);
	if (ret < 0)
		return ret;

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}
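/*
 * Append one entry to the e820 map in the zero page, refusing new
 * entries once the E820MAX-sized table is full.
 */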
static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820MAX)
		return 1;

	memcpy(&params->e820_map[nr_e820_entries], entry,
	       sizeof(struct e820entry));
	params->e820_entries++;
	return 0;
}
static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}
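/*
 * Compute the pieces of [mstart, mend] that the crash kernel may use
 * as RAM: everything except the backup region and the loaded ELF
 * header segment, both of which must survive for /proc/vmcore.
 */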
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}
/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
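/*
 * walk_system_ram_res() callback: record the first RAM range found as
 * the backup source; the non-zero return value stops the walk after
 * one range.
 */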
static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}
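/*
 * Load the two crash-specific segments: a zero-filled placeholder for
 * the backup of the first 640K (purgatory performs the actual copy at
 * crash time) and the ELF core headers later exposed through
 * /proc/vmcore.
 */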
int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */