/*
 * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
 * dump with assistance from firmware. This approach does not use kexec,
 * instead firmware assists in booting the kdump kernel while preserving
 * memory contents. Most of the code implementation has been adapted
 * from the phyp assisted dump implementation written by Linas Vepstas
 * and Manish Ahuja.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2011 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt) "fadump: " fmt

#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

#include <asm/debugfs.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/fadump.h>
#include <asm/setup.h>
static struct fw_dump fw_dump;
static struct fadump_mem_struct fdm;
static const struct fadump_mem_struct *fdm_active;

static DEFINE_MUTEX(fadump_mutex);
struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
int crash_mem_ranges;
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node,
                        const char *uname, int depth, void *data)
{
        const __be32 *sections;
        int i, num_sections;
        int size;
        const __be32 *token;

        if (depth != 1 || strcmp(uname, "rtas") != 0)
                return 0;

        /*
         * Check if Firmware Assisted dump is supported. If yes, check
         * if dump has been initiated on last reboot.
         */
        token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL);
        if (!token)
                return 1;

        fw_dump.fadump_supported = 1;
        fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token);

        /*
         * The 'ibm,kernel-dump' rtas node is present only if there is
         * dump data waiting for us.
         */
        fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL);
        if (fdm_active)
                fw_dump.dump_active = 1;

        /* Get the sizes required to store dump data for the firmware provided
         * dump sections.
         * For each dump section type supported, a 32bit cell which defines
         * the ID of a supported section followed by two 32 bit cells which
         * gives the size of the section in bytes.
         */
        sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes",
                                        &size);
        if (!sections)
                return 1;

        num_sections = size / (3 * sizeof(u32));

        for (i = 0; i < num_sections; i++, sections += 3) {
                u32 type = (u32)of_read_number(sections, 1);

                switch (type) {
                case FADUMP_CPU_STATE_DATA:
                        fw_dump.cpu_state_data_size =
                                        of_read_ulong(&sections[1], 2);
                        break;
                case FADUMP_HPTE_REGION:
                        fw_dump.hpte_region_size =
                                        of_read_ulong(&sections[1], 2);
                        break;
                }
        }

        return 1;
}
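/*
 * Illustrative layout of "ibm,configure-kernel-dump-sizes" as parsed above
 * (the size values below are made up for the example): each supported
 * section is described by three 32-bit cells -- a section ID followed by a
 * 64-bit size split across two cells, e.g.
 *
 *      <FADUMP_CPU_STATE_DATA  0x00000000 0x00010000>
 *      <FADUMP_HPTE_REGION     0x00000000 0x01000000>
 */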
/*
 * If fadump is registered, check if the memory provided
 * falls within boot memory area.
 */
int is_fadump_boot_memory_area(u64 addr, ulong size)
{
        if (!fw_dump.dump_registered)
                return 0;

        return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
}
int is_fadump_active(void)
{
        return fw_dump.dump_active;
}
/*
 * Returns 1, if there are no holes in boot memory area,
 * 0 otherwise.
 */
static int is_boot_memory_area_contiguous(void)
{
        struct memblock_region *reg;
        unsigned long tstart, tend;
        unsigned long start_pfn = PHYS_PFN(RMA_START);
        unsigned long end_pfn = PHYS_PFN(RMA_START + fw_dump.boot_memory_size);
        unsigned int ret = 0;

        for_each_memblock(memory, reg) {
                tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
                tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
                if (tstart < tend) {
                        /* Memory hole from start_pfn to tstart */
                        if (tstart > start_pfn)
                                break;

                        if (tend == end_pfn) {
                                ret = 1;
                                break;
                        }

                        start_pfn = tend + 1;
                }
        }

        return ret;
}
/* Print firmware assisted dump configurations for debugging purpose. */
static void fadump_show_config(void)
{
        pr_debug("Support for firmware-assisted dump (fadump): %s\n",
                        (fw_dump.fadump_supported ? "present" : "no support"));

        if (!fw_dump.fadump_supported)
                return;

        pr_debug("Fadump enabled    : %s\n",
                                (fw_dump.fadump_enabled ? "yes" : "no"));
        pr_debug("Dump Active       : %s\n",
                                (fw_dump.dump_active ? "yes" : "no"));
        pr_debug("Dump section sizes:\n");
        pr_debug("    CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
        pr_debug("    HPTE region size   : %lx\n", fw_dump.hpte_region_size);
        pr_debug("Boot memory size  : %lx\n", fw_dump.boot_memory_size);
}
static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
                                unsigned long addr)
{
        memset(fdm, 0, sizeof(struct fadump_mem_struct));
        addr = addr & PAGE_MASK;

        fdm->header.dump_format_version = cpu_to_be32(0x00000001);
        fdm->header.dump_num_sections = cpu_to_be16(3);
        fdm->header.dump_status_flag = 0;
        fdm->header.offset_first_dump_section =
                cpu_to_be32((u32)offsetof(struct fadump_mem_struct, cpu_state_data));

        /*
         * Fields for disk dump option.
         * We are not using disk dump option, hence set these fields to 0.
         */
        fdm->header.dd_block_size = 0;
        fdm->header.dd_block_offset = 0;
        fdm->header.dd_num_blocks = 0;
        fdm->header.dd_offset_disk_path = 0;

        /* set 0 to disable an automatic dump-reboot. */
        fdm->header.max_time_auto = 0;

        /* Kernel dump sections */
        /* cpu state data section. */
        fdm->cpu_state_data.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
        fdm->cpu_state_data.source_data_type = cpu_to_be16(FADUMP_CPU_STATE_DATA);
        fdm->cpu_state_data.source_address = 0;
        fdm->cpu_state_data.source_len = cpu_to_be64(fw_dump.cpu_state_data_size);
        fdm->cpu_state_data.destination_address = cpu_to_be64(addr);
        addr += fw_dump.cpu_state_data_size;

        /* hpte region section */
        fdm->hpte_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
        fdm->hpte_region.source_data_type = cpu_to_be16(FADUMP_HPTE_REGION);
        fdm->hpte_region.source_address = 0;
        fdm->hpte_region.source_len = cpu_to_be64(fw_dump.hpte_region_size);
        fdm->hpte_region.destination_address = cpu_to_be64(addr);
        addr += fw_dump.hpte_region_size;

        /* RMA region section */
        fdm->rmr_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
        fdm->rmr_region.source_data_type = cpu_to_be16(FADUMP_REAL_MODE_REGION);
        fdm->rmr_region.source_address = cpu_to_be64(RMA_START);
        fdm->rmr_region.source_len = cpu_to_be64(fw_dump.boot_memory_size);
        fdm->rmr_region.destination_address = cpu_to_be64(addr);
        addr += fw_dump.boot_memory_size;

        return addr;
}
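/*
 * Sketch of the reserved dump area laid out by init_fadump_mem_struct()
 * (derived from the destination addresses assigned above):
 *
 *      addr (page aligned)
 *      +-------------------+ <- cpu_state_data.destination_address
 *      | CPU state data    |
 *      +-------------------+ <- hpte_region.destination_address
 *      | HPTE region       |
 *      +-------------------+ <- rmr_region.destination_address
 *      | copy of boot mem  |    (RMA_START..boot_memory_size is moved here
 *      +-------------------+     by firmware at crash time)
 */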
/**
 * fadump_calculate_reserve_size(): reserve variable boot area 5% of System RAM
 *
 * Function to find the largest memory size we need to reserve during early
 * boot process. This will be the size of the memory that is required for a
 * kernel to boot successfully.
 *
 * This function has been taken from phyp-assisted dump feature implementation.
 *
 * returns larger of 256MB or 5% rounded down to multiples of 256MB.
 *
 * TODO: Come up with better approach to find out more accurate memory size
 * that is required for a kernel to boot successfully.
 *
 */
static inline unsigned long fadump_calculate_reserve_size(void)
{
        int ret;
        unsigned long long base, size;

        if (fw_dump.reserve_bootvar)
                pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");

        /*
         * Check if the size is specified through crashkernel= cmdline
         * option. If yes, then use that but ignore base as fadump reserves
         * memory at a predefined offset.
         */
        ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                                &size, &base);
        if (ret == 0 && size > 0) {
                unsigned long max_size;

                if (fw_dump.reserve_bootvar)
                        pr_info("Using 'crashkernel=' parameter for memory reservation.\n");

                fw_dump.reserve_bootvar = (unsigned long)size;

                /*
                 * Adjust if the boot memory size specified is above
                 * the upper limit.
                 */
                max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO;
                if (fw_dump.reserve_bootvar > max_size) {
                        fw_dump.reserve_bootvar = max_size;
                        pr_info("Adjusted boot memory size to %luMB\n",
                                (fw_dump.reserve_bootvar >> 20));
                }

                return fw_dump.reserve_bootvar;
        } else if (fw_dump.reserve_bootvar) {
                /*
                 * 'fadump_reserve_mem=' is being used to reserve memory
                 * for firmware-assisted dump.
                 */
                return fw_dump.reserve_bootvar;
        }

        /* divide by 20 to get 5% of value */
        size = memblock_phys_mem_size() / 20;

        /* round it down in multiples of 256MB */
        size = size & ~0x0FFFFFFFUL;

        /* Truncate to memory_limit. We don't want to over reserve the memory.*/
        if (memory_limit && size > memory_limit)
                size = memory_limit;

        return (size > MIN_BOOT_MEM ? size : MIN_BOOT_MEM);
}
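/*
 * Worked example for the default path above: on a 64GB system, 5% is
 * 64GB / 20 = 0xCCCCCCCC bytes; masking with ~0x0FFFFFFF rounds that down
 * to 0xC0000000 (3GB), which is then clamped to at least MIN_BOOT_MEM.
 */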
/*
 * Calculate the total memory size required to be reserved for
 * firmware-assisted dump registration.
 */
static unsigned long get_fadump_area_size(void)
{
        unsigned long size = 0;

        size += fw_dump.cpu_state_data_size;
        size += fw_dump.hpte_region_size;
        size += fw_dump.boot_memory_size;
        size += sizeof(struct fadump_crash_info_header);
        size += sizeof(struct elfhdr); /* ELF core header.*/
        size += sizeof(struct elf_phdr); /* place holder for cpu notes */
        /* Program headers for crash memory regions. */
        size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);

        size = PAGE_ALIGN(size);
        return size;
}
int __init fadump_reserve_mem(void)
{
        unsigned long base, size, memory_boundary;

        if (!fw_dump.fadump_enabled)
                return 0;

        if (!fw_dump.fadump_supported) {
                printk(KERN_INFO "Firmware-assisted dump is not supported on"
                                " this hardware\n");
                fw_dump.fadump_enabled = 0;
                return 0;
        }
        /*
         * Initialize boot memory size
         * If dump is active then we have already calculated the size during
         * first kernel.
         */
        if (fdm_active)
                fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len);
        else
                fw_dump.boot_memory_size = fadump_calculate_reserve_size();

        /*
         * Calculate the memory boundary.
         * If memory_limit is less than actual memory boundary then reserve
         * the memory for fadump beyond the memory_limit and adjust the
         * memory_limit accordingly, so that the running kernel can run with
         * specified memory_limit.
         */
        if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
                size = get_fadump_area_size();
                if ((memory_limit + size) < memblock_end_of_DRAM())
                        memory_limit += size;
                else
                        memory_limit = memblock_end_of_DRAM();
                printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
                                " dump, now %#016llx\n", memory_limit);
        }
        if (memory_limit)
                memory_boundary = memory_limit;
        else
                memory_boundary = memblock_end_of_DRAM();

        if (fw_dump.dump_active) {
                printk(KERN_INFO "Firmware-assisted dump is active.\n");
                /*
                 * If last boot has crashed then reserve all the memory
                 * above boot_memory_size so that we don't touch it until
                 * dump is written to disk by userspace tool. This memory
                 * will be released for general use once the dump is saved.
                 */
                base = fw_dump.boot_memory_size;
                size = memory_boundary - base;
                memblock_reserve(base, size);
                printk(KERN_INFO "Reserved %ldMB of memory at %ldMB "
                                "for saving crash dump\n",
                                (unsigned long)(size >> 20),
                                (unsigned long)(base >> 20));

                fw_dump.fadumphdr_addr =
                                be64_to_cpu(fdm_active->rmr_region.destination_address) +
                                be64_to_cpu(fdm_active->rmr_region.source_len);
                pr_debug("fadumphdr_addr = %p\n",
                                (void *) fw_dump.fadumphdr_addr);
        } else {
                size = get_fadump_area_size();

                /*
                 * Reserve memory at an offset closer to bottom of the RAM to
                 * minimize the impact of memory hot-remove operation. We can't
                 * use memblock_find_in_range() here since it doesn't allocate
                 * from bottom to top.
                 */
                for (base = fw_dump.boot_memory_size;
                     base <= (memory_boundary - size);
                     base += size) {
                        if (memblock_is_region_memory(base, size) &&
                            !memblock_is_region_reserved(base, size))
                                break;
                }
                if ((base > (memory_boundary - size)) ||
                    memblock_reserve(base, size)) {
                        pr_err("Failed to reserve memory\n");
                        return 0;
                }

                pr_info("Reserved %ldMB of memory at %ldMB for firmware-"
                        "assisted dump (System RAM: %ldMB)\n",
                        (unsigned long)(size >> 20),
                        (unsigned long)(base >> 20),
                        (unsigned long)(memblock_phys_mem_size() >> 20));
        }

        fw_dump.reserve_dump_area_start = base;
        fw_dump.reserve_dump_area_size = size;
        return 1;
}
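/*
 * Summary of the two reservation paths above: when a dump is active, all of
 * memory above boot_memory_size (up to memory_boundary) stays reserved until
 * userspace saves the dump; otherwise only a get_fadump_area_size() sized
 * window is reserved, searched bottom-up to limit the impact on hot-remove.
 */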
unsigned long __init arch_reserved_kernel_pages(void)
{
        return memblock_reserved_size() / PAGE_SIZE;
}
/* Look for fadump= cmdline option. */
static int __init early_fadump_param(char *p)
{
        if (!p)
                return 1;

        if (strncmp(p, "on", 2) == 0)
                fw_dump.fadump_enabled = 1;
        else if (strncmp(p, "off", 3) == 0)
                fw_dump.fadump_enabled = 0;

        return 0;
}
early_param("fadump", early_fadump_param);
/*
 * Look for fadump_reserve_mem= cmdline option.
 * TODO: Remove references to the 'fadump_reserve_mem=' parameter once the
 *       'crashkernel=' parameter has been fully adopted in its place.
 */
static int __init early_fadump_reserve_mem(char *p)
{
        if (p)
                fw_dump.reserve_bootvar = memparse(p, &p);
        return 0;
}
early_param("fadump_reserve_mem", early_fadump_reserve_mem);
static int register_fw_dump(struct fadump_mem_struct *fdm)
{
        int rc, err;
        unsigned int wait_time;

        pr_debug("Registering for firmware-assisted kernel dump...\n");

        /* TODO: Add upper time limit for the delay */
        do {
                rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
                        FADUMP_REGISTER, fdm,
                        sizeof(struct fadump_mem_struct));

                wait_time = rtas_busy_delay_time(rc);
                if (wait_time)
                        mdelay(wait_time);

        } while (wait_time);

        err = -EIO;
        switch (rc) {
        default:
                pr_err("Failed to register. Unknown Error(%d).\n", rc);
                break;
        case -1:
                printk(KERN_ERR "Failed to register firmware-assisted kernel"
                        " dump. Hardware Error(%d).\n", rc);
                break;
        case -3:
                if (!is_boot_memory_area_contiguous())
                        pr_err("Can't have holes in boot memory area while "
                               "registering fadump\n");

                printk(KERN_ERR "Failed to register firmware-assisted kernel"
                        " dump. Parameter Error(%d).\n", rc);
                err = -EINVAL;
                break;
        case -9:
                printk(KERN_ERR "firmware-assisted kernel dump is already "
                        "registered.");
                fw_dump.dump_registered = 1;
                err = -EEXIST;
                break;
        case 0:
                printk(KERN_INFO "firmware-assisted kernel dump registration"
                        " is successful!\n");
                fw_dump.dump_registered = 1;
                err = 0;
                break;
        }

        return err;
}
void crash_fadump(struct pt_regs *regs, const char *str)
{
        struct fadump_crash_info_header *fdh = NULL;
        int old_cpu, this_cpu;

        if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
                return;

        /*
         * old_cpu == -1 means this is the first CPU which has come here,
         * go ahead and trigger fadump.
         *
         * old_cpu != -1 means some other CPU is already on its way
         * to trigger fadump, just keep looping here.
         */
        this_cpu = smp_processor_id();
        old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);

        if (old_cpu != -1) {
                /*
                 * We can't loop here indefinitely. Wait as long as fadump
                 * is in force. If we race with fadump un-registration this
                 * loop will break and then we go down to normal panic path
                 * and reboot. If fadump is in force the first crashing
                 * cpu will definitely trigger fadump.
                 */
                while (fw_dump.dump_registered)
                        cpu_relax();
                return;
        }

        fdh = __va(fw_dump.fadumphdr_addr);
        fdh->crashing_cpu = crashing_cpu;
        crash_save_vmcoreinfo();

        if (regs)
                fdh->regs = *regs;
        else
                ppc_save_regs(&fdh->regs);

        fdh->online_mask = *cpu_online_mask;

        /* Call ibm,os-term rtas call to trigger firmware assisted dump */
        rtas_os_term((char *)str);
}
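/*
 * In the register save area each entry carries an 8-byte ASCII identifier
 * (e.g. "GPR00".."GPR31", "NIA", "MSR") followed by an 8-byte value.
 * GPR_MASK below keeps only the "GPR" prefix bytes of such an identifier so
 * fadump_gpr_index() can recognise GPR entries and parse the trailing two
 * ASCII digits into a register index.
 */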
#define GPR_MASK        0xffffff0000000000
static inline int fadump_gpr_index(u64 id)
{
        int i = -1;
        char str[3];

        if ((id & GPR_MASK) == REG_ID("GPR")) {
                /* get the digits at the end */
                id &= ~GPR_MASK;
                id >>= 24;
                str[2] = '\0';
                str[1] = id & 0xff;
                str[0] = (id >> 8) & 0xff;
                sscanf(str, "%d", &i);
                if (i > 31)
                        i = -1;
        }
        return i;
}
static inline void fadump_set_regval(struct pt_regs *regs, u64 reg_id,
                                                                u64 reg_val)
{
        int i;

        i = fadump_gpr_index(reg_id);
        if (i >= 0)
                regs->gpr[i] = (unsigned long)reg_val;
        else if (reg_id == REG_ID("NIA"))
                regs->nip = (unsigned long)reg_val;
        else if (reg_id == REG_ID("MSR"))
                regs->msr = (unsigned long)reg_val;
        else if (reg_id == REG_ID("CTR"))
                regs->ctr = (unsigned long)reg_val;
        else if (reg_id == REG_ID("LR"))
                regs->link = (unsigned long)reg_val;
        else if (reg_id == REG_ID("XER"))
                regs->xer = (unsigned long)reg_val;
        else if (reg_id == REG_ID("CR"))
                regs->ccr = (unsigned long)reg_val;
        else if (reg_id == REG_ID("DAR"))
                regs->dar = (unsigned long)reg_val;
        else if (reg_id == REG_ID("DSISR"))
                regs->dsisr = (unsigned long)reg_val;
}
static struct fadump_reg_entry*
fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));

        while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) {
                fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id),
                                        be64_to_cpu(reg_entry->reg_value));
                reg_entry++;
        }
        reg_entry++;
        return reg_entry;
}
static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
        struct elf_prstatus prstatus;

        memset(&prstatus, 0, sizeof(prstatus));
        /*
         * FIXME: How do I get PID? Do I really need it?
         * prstatus.pr_pid = ????
         */
        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        return buf;
}
static void fadump_update_elfcore_header(char *bufp)
{
        struct elfhdr *elf;
        struct elf_phdr *phdr;

        elf = (struct elfhdr *)bufp;
        bufp += sizeof(struct elfhdr);

        /* First note is a place holder for cpu notes info. */
        phdr = (struct elf_phdr *)bufp;

        if (phdr->p_type == PT_NOTE) {
                phdr->p_paddr = fw_dump.cpu_notes_buf;
                phdr->p_offset = phdr->p_paddr;
                phdr->p_filesz = fw_dump.cpu_notes_buf_size;
                phdr->p_memsz = fw_dump.cpu_notes_buf_size;
        }
        return;
}
static void *fadump_cpu_notes_buf_alloc(unsigned long size)
{
        void *vaddr;
        struct page *page;
        unsigned long order, count, i;

        order = get_order(size);
        vaddr = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if (!vaddr)
                return NULL;

        count = 1 << order;
        page = virt_to_page(vaddr);
        for (i = 0; i < count; i++)
                SetPageReserved(page + i);
        return vaddr;
}
static void fadump_cpu_notes_buf_free(unsigned long vaddr, unsigned long size)
{
        struct page *page;
        unsigned long order, count, i;

        order = get_order(size);
        count = 1 << order;
        page = virt_to_page(vaddr);
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}
/*
 * Read CPU state dump data and convert it into ELF notes.
 * The CPU dump starts with magic number "REGSAVE". NumCpusOffset should be
 * used to access the data to allow for additional fields to be added without
 * affecting compatibility. Each list of registers for a CPU starts with
 * "CPUSTRT" and ends with "CPUEND". Each register entry is of 16 bytes,
 * 8 Byte ASCII identifier and 8 Byte register value. The register entry
 * with identifier "CPUSTRT" and "CPUEND" contains 4 byte cpu id as part
 * of register value. For more details refer to PAPR document.
 *
 * Only for the crashing cpu we ignore the CPU dump data and get exact
 * state from fadump crash info structure populated by first kernel at the
 * time of crash.
 */
static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
{
        struct fadump_reg_save_area_header *reg_header;
        struct fadump_reg_entry *reg_entry;
        struct fadump_crash_info_header *fdh = NULL;
        void *vaddr;
        unsigned long addr;
        u32 num_cpus, *note_buf;
        struct pt_regs regs;
        int i, rc = 0, cpu = 0;

        if (!fdm->cpu_state_data.bytes_dumped)
                return -EINVAL;

        addr = be64_to_cpu(fdm->cpu_state_data.destination_address);
        vaddr = __va(addr);

        reg_header = vaddr;
        if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) {
                printk(KERN_ERR "Unable to read register save area.\n");
                return -ENOENT;
        }
        pr_debug("--------CPU State Data------------\n");
        pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number));
        pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset));

        vaddr += be32_to_cpu(reg_header->num_cpu_offset);
        num_cpus = be32_to_cpu(*((__be32 *)(vaddr)));
        pr_debug("NumCpus     : %u\n", num_cpus);
        vaddr += sizeof(u32);
        reg_entry = (struct fadump_reg_entry *)vaddr;

        /* Allocate buffer to hold cpu crash notes. */
        fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
        fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size);
        note_buf = fadump_cpu_notes_buf_alloc(fw_dump.cpu_notes_buf_size);
        if (!note_buf) {
                printk(KERN_ERR "Failed to allocate 0x%lx bytes for "
                        "cpu notes buffer\n", fw_dump.cpu_notes_buf_size);
                return -ENOMEM;
        }
        fw_dump.cpu_notes_buf = __pa(note_buf);

        pr_debug("Allocated buffer for cpu notes of size %ld at %p\n",
                        (num_cpus * sizeof(note_buf_t)), note_buf);

        if (fw_dump.fadumphdr_addr)
                fdh = __va(fw_dump.fadumphdr_addr);

        for (i = 0; i < num_cpus; i++) {
                if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) {
                        printk(KERN_ERR "Unable to read CPU state data\n");
                        rc = -ENOENT;
                        goto error_out;
                }
                /* Lower 4 bytes of reg_value contains logical cpu id */
                cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
                if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) {
                        SKIP_TO_NEXT_CPU(reg_entry);
                        continue;
                }
                pr_debug("Reading register data for cpu %d...\n", cpu);
                if (fdh && fdh->crashing_cpu == cpu) {
                        regs = fdh->regs;
                        note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
                        SKIP_TO_NEXT_CPU(reg_entry);
                } else {
                        reg_entry++;
                        reg_entry = fadump_read_registers(reg_entry, &regs);
                        note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
                }
        }
        final_note(note_buf);

        if (fdh) {
                pr_debug("Updating elfcore header (%llx) with cpu notes\n",
                                                        fdh->elfcorehdr_addr);
                fadump_update_elfcore_header((char *)__va(fdh->elfcorehdr_addr));
        }
        return 0;

error_out:
        fadump_cpu_notes_buf_free((unsigned long)__va(fw_dump.cpu_notes_buf),
                                        fw_dump.cpu_notes_buf_size);
        fw_dump.cpu_notes_buf = 0;
        fw_dump.cpu_notes_buf_size = 0;
        return rc;
}
/*
 * Validate and process the dump data stored by firmware before exporting
 * it through '/proc/vmcore'.
 */
static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
{
        struct fadump_crash_info_header *fdh;
        int rc = 0;

        if (!fdm_active || !fw_dump.fadumphdr_addr)
                return -EINVAL;

        /* Check if the dump data is valid. */
        if ((be16_to_cpu(fdm_active->header.dump_status_flag) == FADUMP_ERROR_FLAG) ||
                        (fdm_active->cpu_state_data.error_flags != 0) ||
                        (fdm_active->rmr_region.error_flags != 0)) {
                printk(KERN_ERR "Dump taken by platform is not valid\n");
                return -EINVAL;
        }
        if ((fdm_active->rmr_region.bytes_dumped !=
                        fdm_active->rmr_region.source_len) ||
                        !fdm_active->cpu_state_data.bytes_dumped) {
                printk(KERN_ERR "Dump taken by platform is incomplete\n");
                return -EINVAL;
        }

        /* Validate the fadump crash info header */
        fdh = __va(fw_dump.fadumphdr_addr);
        if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
                printk(KERN_ERR "Crash info header is not valid.\n");
                return -EINVAL;
        }

        rc = fadump_build_cpu_notes(fdm_active);
        if (rc)
                return rc;

        /*
         * We are done validating dump info and elfcore header is now ready
         * to be exported. set elfcorehdr_addr so that vmcore module will
         * export the elfcore header through '/proc/vmcore'.
         */
        elfcorehdr_addr = fdh->elfcorehdr_addr;

        return 0;
}
static inline void fadump_add_crash_memory(unsigned long long base,
                                        unsigned long long end)
{
        if (base == end)
                return;

        pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
                crash_mem_ranges, base, end - 1, (end - base));
        crash_memory_ranges[crash_mem_ranges].base = base;
        crash_memory_ranges[crash_mem_ranges].size = end - base;
        crash_mem_ranges++;
}
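/*
 * fadump_exclude_reserved_area() below splits the [start, end) range around
 * the reserved dump area: if the reserved area sits wholly inside the range,
 * both the leading and trailing pieces are added; if it only clips one end,
 * just the non-overlapping piece is added; with no overlap the whole range
 * is added as-is.
 */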
static void fadump_exclude_reserved_area(unsigned long long start,
                                        unsigned long long end)
{
        unsigned long long ra_start, ra_end;

        ra_start = fw_dump.reserve_dump_area_start;
        ra_end = ra_start + fw_dump.reserve_dump_area_size;

        if ((ra_start < end) && (ra_end > start)) {
                if ((start < ra_start) && (end > ra_end)) {
                        fadump_add_crash_memory(start, ra_start);
                        fadump_add_crash_memory(ra_end, end);
                } else if (start < ra_start) {
                        fadump_add_crash_memory(start, ra_start);
                } else if (ra_end < end) {
                        fadump_add_crash_memory(ra_end, end);
                }
        } else
                fadump_add_crash_memory(start, end);
}
static int fadump_init_elfcore_header(char *bufp)
{
        struct elfhdr *elf;

        elf = (struct elfhdr *) bufp;
        bufp += sizeof(struct elfhdr);
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
        elf->e_type = ET_CORE;
        elf->e_machine = ELF_ARCH;
        elf->e_version = EV_CURRENT;
        elf->e_entry = 0;
        elf->e_phoff = sizeof(struct elfhdr);
        elf->e_shoff = 0;
#if defined(_CALL_ELF)
        elf->e_flags = _CALL_ELF;
#else
        elf->e_flags = 0;
#endif
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = 0;
        elf->e_shentsize = 0;
        elf->e_shnum = 0;
        elf->e_shstrndx = 0;

        return 0;
}
/*
 * Traverse through memblock structure and setup crash memory ranges. These
 * ranges will be used to create PT_LOAD program headers in elfcore header.
 */
static void fadump_setup_crash_memory_ranges(void)
{
        struct memblock_region *reg;
        unsigned long long start, end;

        pr_debug("Setup crash memory ranges.\n");
        crash_mem_ranges = 0;
        /*
         * add the first memory chunk (RMA_START through boot_memory_size) as
         * a separate memory chunk. The reason is, at the time crash firmware
         * will move the content of this memory chunk to different location
         * specified during fadump registration. We need to create a separate
         * program header for this chunk with the correct offset.
         */
        fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);

        for_each_memblock(memory, reg) {
                start = (unsigned long long)reg->base;
                end = start + (unsigned long long)reg->size;

                /*
                 * skip the first memory chunk that is already added (RMA_START
                 * through boot_memory_size). This logic needs a relook if and
                 * when RMA_START changes to a non-zero value.
                 */
                BUILD_BUG_ON(RMA_START != 0);
                if (start < fw_dump.boot_memory_size) {
                        if (end > fw_dump.boot_memory_size)
                                start = fw_dump.boot_memory_size;
                        else
                                continue;
                }

                /* add this range excluding the reserved dump area. */
                fadump_exclude_reserved_area(start, end);
        }
}
/*
 * If the given physical address falls within the boot memory region then
 * return the relocated address that points to the dump region reserved
 * for saving initial boot memory contents.
 */
static inline unsigned long fadump_relocate(unsigned long paddr)
{
        if (paddr > RMA_START && paddr < fw_dump.boot_memory_size)
                return be64_to_cpu(fdm.rmr_region.destination_address) + paddr;
        else
                return paddr;
}
static int fadump_create_elfcore_headers(char *bufp)
{
        struct elfhdr *elf;
        struct elf_phdr *phdr;
        int i;

        fadump_init_elfcore_header(bufp);
        elf = (struct elfhdr *)bufp;
        bufp += sizeof(struct elfhdr);

        /*
         * setup ELF PT_NOTE, place holder for cpu notes info. The notes info
         * will be populated during second kernel boot after crash. Hence
         * this PT_NOTE will always be the first elf note.
         *
         * NOTE: Any new ELF note addition should be placed after this note.
         */
        phdr = (struct elf_phdr *)bufp;
        bufp += sizeof(struct elf_phdr);
        phdr->p_type = PT_NOTE;
        phdr->p_flags = 0;
        phdr->p_vaddr = 0;
        phdr->p_align = 0;

        phdr->p_offset = 0;
        phdr->p_paddr = 0;
        phdr->p_filesz = 0;
        phdr->p_memsz = 0;

        (elf->e_phnum)++;

        /* setup ELF PT_NOTE for vmcoreinfo */
        phdr = (struct elf_phdr *)bufp;
        bufp += sizeof(struct elf_phdr);
        phdr->p_type = PT_NOTE;
        phdr->p_flags = 0;
        phdr->p_vaddr = 0;
        phdr->p_align = 0;

        phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
        phdr->p_offset = phdr->p_paddr;
        phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;

        /* Increment number of program headers. */
        (elf->e_phnum)++;

        /* setup PT_LOAD sections. */

        for (i = 0; i < crash_mem_ranges; i++) {
                unsigned long long mbase, msize;
                mbase = crash_memory_ranges[i].base;
                msize = crash_memory_ranges[i].size;

                if (!msize)
                        continue;

                phdr = (struct elf_phdr *)bufp;
                bufp += sizeof(struct elf_phdr);
                phdr->p_type = PT_LOAD;
                phdr->p_flags = PF_R|PF_W|PF_X;
                phdr->p_offset = mbase;

                if (mbase == RMA_START) {
                        /*
                         * The entire RMA region will be moved by firmware
                         * to the specified destination_address. Hence set
                         * the correct offset.
                         */
                        phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address);
                }

                phdr->p_paddr = mbase;
                phdr->p_vaddr = (unsigned long)__va(mbase);
                phdr->p_filesz = msize;
                phdr->p_memsz = msize;

                /* Increment number of program headers. */
                (elf->e_phnum)++;
        }
        return 0;
}
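/*
 * Resulting elfcore header layout written above: the ELF header, a PT_NOTE
 * placeholder later filled with the CPU notes buffer, a PT_NOTE for
 * vmcoreinfo, and one PT_LOAD per crash memory range, with the boot memory
 * range's p_offset redirected to the relocated copy kept by firmware.
 */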
static unsigned long init_fadump_header(unsigned long addr)
{
        struct fadump_crash_info_header *fdh;

        if (!addr)
                return 0;

        fw_dump.fadumphdr_addr = addr;
        fdh = __va(addr);
        addr += sizeof(struct fadump_crash_info_header);

        memset(fdh, 0, sizeof(struct fadump_crash_info_header));
        fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
        fdh->elfcorehdr_addr = addr;
        /* We will set the crashing cpu id in crash_fadump() during crash. */
        fdh->crashing_cpu = CPU_UNKNOWN;

        return addr;
}
static int register_fadump(void)
{
        unsigned long addr;
        void *vaddr;

        /*
         * If no memory is reserved then we can not register for firmware-
         * assisted dump.
         */
        if (!fw_dump.reserve_dump_area_size)
                return -ENODEV;

        fadump_setup_crash_memory_ranges();

        addr = be64_to_cpu(fdm.rmr_region.destination_address) +
                be64_to_cpu(fdm.rmr_region.source_len);
        /* Initialize fadump crash info header. */
        addr = init_fadump_header(addr);
        vaddr = __va(addr);

        pr_debug("Creating ELF core headers at %#016lx\n", addr);
        fadump_create_elfcore_headers(vaddr);

        /* register the future kernel dump with firmware. */
        return register_fw_dump(&fdm);
}
static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
{
        int rc = 0;
        unsigned int wait_time;

        pr_debug("Un-register firmware-assisted dump\n");

        /* TODO: Add upper time limit for the delay */
        do {
                rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
                        FADUMP_UNREGISTER, fdm,
                        sizeof(struct fadump_mem_struct));

                wait_time = rtas_busy_delay_time(rc);
                if (wait_time)
                        mdelay(wait_time);
        } while (wait_time);

        if (rc) {
                printk(KERN_ERR "Failed to un-register firmware-assisted dump."
                        " unexpected error(%d).\n", rc);
                return rc;
        }
        fw_dump.dump_registered = 0;

        return 0;
}
static int fadump_invalidate_dump(struct fadump_mem_struct *fdm)
{
        int rc = 0;
        unsigned int wait_time;

        pr_debug("Invalidating firmware-assisted dump registration\n");

        /* TODO: Add upper time limit for the delay */
        do {
                rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
                        FADUMP_INVALIDATE, fdm,
                        sizeof(struct fadump_mem_struct));

                wait_time = rtas_busy_delay_time(rc);
                if (wait_time)
                        mdelay(wait_time);
        } while (wait_time);

        if (rc) {
                pr_err("Failed to invalidate firmware-assisted dump registration. Unexpected error (%d).\n", rc);
                return rc;
        }
        fw_dump.dump_active = 0;
        fdm_active = NULL;
        return 0;
}
void fadump_cleanup(void)
{
        /* Invalidate the registration only if dump is active. */
        if (fw_dump.dump_active) {
                init_fadump_mem_struct(&fdm,
                        be64_to_cpu(fdm_active->cpu_state_data.destination_address));
                fadump_invalidate_dump(&fdm);
        }
}
static void fadump_free_reserved_memory(unsigned long start_pfn,
                                        unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long time_limit = jiffies + HZ;

        pr_info("freeing reserved memory (0x%llx - 0x%llx)\n",
                PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));

        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                free_reserved_page(pfn_to_page(pfn));

                if (time_after(jiffies, time_limit)) {
                        cond_resched();
                        time_limit = jiffies + HZ;
                }
        }
}
/*
 * Skip memory holes and free memory that was actually reserved.
 */
static void fadump_release_reserved_area(unsigned long start, unsigned long end)
{
        struct memblock_region *reg;
        unsigned long tstart, tend;
        unsigned long start_pfn = PHYS_PFN(start);
        unsigned long end_pfn = PHYS_PFN(end);

        for_each_memblock(memory, reg) {
                tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
                tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
                if (tstart < tend) {
                        fadump_free_reserved_memory(tstart, tend);

                        if (tend == end_pfn)
                                break;

                        start_pfn = tend + 1;
                }
        }
}
/*
 * Release the memory that was reserved in early boot to preserve the memory
 * contents. The released memory will be available for general use.
 */
static void fadump_release_memory(unsigned long begin, unsigned long end)
{
        unsigned long ra_start, ra_end;

        ra_start = fw_dump.reserve_dump_area_start;
        ra_end = ra_start + fw_dump.reserve_dump_area_size;

        /*
         * exclude the dump reserve area. Will reuse it for next
         * fadump registration.
         */
        if (begin < ra_end && end > ra_start) {
                if (begin < ra_start)
                        fadump_release_reserved_area(begin, ra_start);
                if (end > ra_end)
                        fadump_release_reserved_area(ra_end, end);
        } else
                fadump_release_reserved_area(begin, end);
}
static void fadump_invalidate_release_mem(void)
{
        unsigned long reserved_area_start, reserved_area_end;
        unsigned long destination_address;

        mutex_lock(&fadump_mutex);
        if (!fw_dump.dump_active) {
                mutex_unlock(&fadump_mutex);
                return;
        }

        destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address);
        fadump_cleanup();
        mutex_unlock(&fadump_mutex);

        /*
         * Save the current reserved memory bounds; we will require them
         * later for releasing the memory for general use.
         */
        reserved_area_start = fw_dump.reserve_dump_area_start;
        reserved_area_end = reserved_area_start +
                        fw_dump.reserve_dump_area_size;
        /*
         * Setup reserve_dump_area_start and its size so that we can
         * reuse this reserved memory for Re-registration.
         */
        fw_dump.reserve_dump_area_start = destination_address;
        fw_dump.reserve_dump_area_size = get_fadump_area_size();

        fadump_release_memory(reserved_area_start, reserved_area_end);
        if (fw_dump.cpu_notes_buf) {
                fadump_cpu_notes_buf_free(
                                (unsigned long)__va(fw_dump.cpu_notes_buf),
                                fw_dump.cpu_notes_buf_size);
                fw_dump.cpu_notes_buf = 0;
                fw_dump.cpu_notes_buf_size = 0;
        }
        /* Initialize the kernel dump memory structure for FAD registration. */
        init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
}
static ssize_t fadump_release_memory_store(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        const char *buf, size_t count)
{
        if (!fw_dump.dump_active)
                return -EPERM;

        if (buf[0] == '1') {
                /*
                 * Take away the '/proc/vmcore'. We are releasing the dump
                 * memory, hence it will not be valid anymore.
                 */
#ifdef CONFIG_PROC_VMCORE
                vmcore_cleanup();
#endif
                fadump_invalidate_release_mem();

        } else
                return -EINVAL;
        return count;
}
static ssize_t fadump_enabled_show(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        char *buf)
{
        return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
}

static ssize_t fadump_register_show(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        char *buf)
{
        return sprintf(buf, "%d\n", fw_dump.dump_registered);
}
static ssize_t fadump_register_store(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        const char *buf, size_t count)
{
        int ret = 0;

        if (!fw_dump.fadump_enabled || fdm_active)
                return -EPERM;

        mutex_lock(&fadump_mutex);

        switch (buf[0]) {
        case '0':
                if (fw_dump.dump_registered == 0) {
                        goto unlock_out;
                }
                /* Un-register Firmware-assisted dump */
                fadump_unregister_dump(&fdm);
                break;
        case '1':
                if (fw_dump.dump_registered == 1) {
                        /* Un-register Firmware-assisted dump */
                        fadump_unregister_dump(&fdm);
                }
                /* Register Firmware-assisted dump */
                ret = register_fadump();
                break;
        default:
                ret = -EINVAL;
                break;
        }

unlock_out:
        mutex_unlock(&fadump_mutex);
        return ret < 0 ? ret : count;
}
static int fadump_region_show(struct seq_file *m, void *private)
{
        const struct fadump_mem_struct *fdm_ptr;

        if (!fw_dump.fadump_enabled)
                return 0;

        mutex_lock(&fadump_mutex);
        if (fdm_active)
                fdm_ptr = fdm_active;
        else {
                mutex_unlock(&fadump_mutex);
                fdm_ptr = &fdm;
        }

        seq_printf(m,
                        "CPU : [%#016llx-%#016llx] %#llx bytes, "
                        "Dumped: %#llx\n",
                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address),
                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) +
                        be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1,
                        be64_to_cpu(fdm_ptr->cpu_state_data.source_len),
                        be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped));
        seq_printf(m,
                        "HPTE: [%#016llx-%#016llx] %#llx bytes, "
                        "Dumped: %#llx\n",
                        be64_to_cpu(fdm_ptr->hpte_region.destination_address),
                        be64_to_cpu(fdm_ptr->hpte_region.destination_address) +
                        be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1,
                        be64_to_cpu(fdm_ptr->hpte_region.source_len),
                        be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped));
        seq_printf(m,
                        "DUMP: [%#016llx-%#016llx] %#llx bytes, "
                        "Dumped: %#llx\n",
                        be64_to_cpu(fdm_ptr->rmr_region.destination_address),
                        be64_to_cpu(fdm_ptr->rmr_region.destination_address) +
                        be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1,
                        be64_to_cpu(fdm_ptr->rmr_region.source_len),
                        be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));

        if (!fdm_active ||
                (fw_dump.reserve_dump_area_start ==
                be64_to_cpu(fdm_ptr->cpu_state_data.destination_address)))
                goto out;

        /* Dump is active. Show reserved memory region. */
        seq_printf(m,
                        "    : [%#016llx-%#016llx] %#llx bytes, "
                        "Dumped: %#llx\n",
                        (unsigned long long)fw_dump.reserve_dump_area_start,
                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1,
                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
                        fw_dump.reserve_dump_area_start,
                        be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
                        fw_dump.reserve_dump_area_start);
out:
        if (fdm_active)
                mutex_unlock(&fadump_mutex);
        return 0;
}
static struct kobj_attribute fadump_release_attr = __ATTR(fadump_release_mem,
                                                0200, NULL,
                                                fadump_release_memory_store);
static struct kobj_attribute fadump_attr = __ATTR(fadump_enabled,
                                                0444, fadump_enabled_show,
                                                NULL);
static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered,
                                                0644, fadump_register_show,
                                                fadump_register_store);
static int fadump_region_open(struct inode *inode, struct file *file)
{
        return single_open(file, fadump_region_show, inode->i_private);
}

static const struct file_operations fadump_region_fops = {
        .open    = fadump_region_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};
static void fadump_init_files(void)
{
        struct dentry *debugfs_file;
        int rc = 0;

        rc = sysfs_create_file(kernel_kobj, &fadump_attr.attr);
        if (rc)
                printk(KERN_ERR "fadump: unable to create sysfs file"
                        " fadump_enabled (%d)\n", rc);

        rc = sysfs_create_file(kernel_kobj, &fadump_register_attr.attr);
        if (rc)
                printk(KERN_ERR "fadump: unable to create sysfs file"
                        " fadump_registered (%d)\n", rc);

        debugfs_file = debugfs_create_file("fadump_region", 0444,
                                        powerpc_debugfs_root, NULL,
                                        &fadump_region_fops);
        if (!debugfs_file)
                printk(KERN_ERR "fadump: unable to create debugfs file"
                                " fadump_region\n");

        if (fw_dump.dump_active) {
                rc = sysfs_create_file(kernel_kobj, &fadump_release_attr.attr);
                if (rc)
                        printk(KERN_ERR "fadump: unable to create sysfs file"
                                " fadump_release_mem (%d)\n", rc);
        }
        return;
}
/*
 * Prepare for firmware-assisted dump.
 */
int __init setup_fadump(void)
{
        if (!fw_dump.fadump_enabled)
                return 0;

        if (!fw_dump.fadump_supported) {
                printk(KERN_ERR "Firmware-assisted dump is not supported on"
                        " this hardware\n");
                return 0;
        }

        fadump_show_config();
        /*
         * If dump data is available then see if it is valid and prepare for
         * saving it to the disk.
         */
        if (fw_dump.dump_active) {
                /*
                 * if dump process fails then invalidate the registration
                 * and release memory before proceeding for re-registration.
                 */
                if (process_fadump(fdm_active) < 0)
                        fadump_invalidate_release_mem();
        }
        /* Initialize the kernel dump memory structure for FAD registration. */
        else if (fw_dump.reserve_dump_area_size)
                init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
        fadump_init_files();

        return 1;
}
subsys_initcall(setup_fadump);