arch/powerpc/kernel/fadump.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
4 * dump with assistance from firmware. This approach does not use kexec,
5 * instead firmware assists in booting the kdump kernel while preserving
6 * memory contents. Most of the code implementation has been adapted
7 * from phyp assisted dump implementation written by Linas Vepstas and
8 * Manish Ahuja
10 * Copyright 2011 IBM Corporation
11 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
14 #undef DEBUG
15 #define pr_fmt(fmt) "fadump: " fmt
17 #include <linux/string.h>
18 #include <linux/memblock.h>
19 #include <linux/delay.h>
20 #include <linux/seq_file.h>
21 #include <linux/crash_dump.h>
22 #include <linux/kobject.h>
23 #include <linux/sysfs.h>
24 #include <linux/slab.h>
25 #include <linux/cma.h>
26 #include <linux/hugetlb.h>
28 #include <asm/debugfs.h>
29 #include <asm/page.h>
30 #include <asm/prom.h>
31 #include <asm/fadump.h>
32 #include <asm/fadump-internal.h>
33 #include <asm/setup.h>
36 * The CPU that acquired the lock to trigger the fadump crash should
37 * wait for other CPUs to enter.
39 * The timeout is in milliseconds.
41 #define CRASH_TIMEOUT 500
43 static struct fw_dump fw_dump;
45 static void __init fadump_reserve_crash_area(u64 base);
47 struct kobject *fadump_kobj;
49 #ifndef CONFIG_PRESERVE_FA_DUMP
51 static atomic_t cpus_in_fadump;
52 static DEFINE_MUTEX(fadump_mutex);
54 struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
56 #define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */
57 #define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \
58 sizeof(struct fadump_memory_range))
59 static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
60 struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
61 RESERVED_RNGS_SZ, 0,
62 RESERVED_RNGS_CNT, true };
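/*
 * Note: crash_mrange_info above starts empty and is grown dynamically in
 * PAGE_SIZE increments by fadump_alloc_mem_ranges(), whereas
 * reserved_mrange_info is backed by the static rngs[] array (is_static set),
 * so fadump_add_mem_range() returns -ENOSPC for it rather than reallocating;
 * presumably because it is populated from the device tree during early boot,
 * before dynamic allocation is available.
 */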
64 static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
66 #ifdef CONFIG_CMA
67 static struct cma *fadump_cma;
70 * fadump_cma_init() - Initialize CMA area from fadump reserved memory
72 * This function initializes CMA area from fadump reserved memory.
73 * The total size of fadump reserved memory covers boot memory size
74 * + cpu data size + hpte size and metadata.
75 * Initialize only the area equivalent to boot memory size for CMA use.
76 * The remaining portion of fadump reserved memory will not be given
77 * to CMA and pages for those will stay reserved. Boot memory size is
78 * aligned per CMA requirement to satisfy the cma_init_reserved_mem() call.
79 * Even if that call fails, we still have the memory reservation with
80 * us and we can still continue doing fadump.
82 int __init fadump_cma_init(void)
84 unsigned long long base, size;
85 int rc;
87 if (!fw_dump.fadump_enabled)
88 return 0;
91 * Do not use CMA if user has provided fadump=nocma kernel parameter.
92 * Return 1 to continue with the old fadump behaviour.
94 if (fw_dump.nocma)
95 return 1;
97 base = fw_dump.reserve_dump_area_start;
98 size = fw_dump.boot_memory_size;
100 if (!size)
101 return 0;
103 rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
104 if (rc) {
105 pr_err("Failed to init cma area for firmware-assisted dump,%d\n", rc);
107 * Though the CMA init has failed we still have memory
108 * reservation with us. The reserved memory will be
109 * blocked from production system usage. Hence return 1,
110 * so that we can continue with fadump.
112 return 1;
116 * We have now successfully initialized the CMA area for fadump.
118 pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx "
119 "bytes of memory reserved for firmware-assisted dump\n",
120 cma_get_size(fadump_cma),
121 (unsigned long)cma_get_base(fadump_cma) >> 20,
122 fw_dump.reserve_dump_area_size);
123 return 1;
125 #else
126 static int __init fadump_cma_init(void) { return 1; }
127 #endif /* CONFIG_CMA */
129 /* Scan the Firmware Assisted dump configuration details. */
130 int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
131 int depth, void *data)
133 if (depth == 0) {
134 early_init_dt_scan_reserved_ranges(node);
135 return 0;
138 if (depth != 1)
139 return 0;
141 if (strcmp(uname, "rtas") == 0) {
142 rtas_fadump_dt_scan(&fw_dump, node);
143 return 1;
146 if (strcmp(uname, "ibm,opal") == 0) {
147 opal_fadump_dt_scan(&fw_dump, node);
148 return 1;
151 return 0;
155 * If fadump is registered, check if the memory provided
156 * falls within boot memory area and reserved memory area.
158 int is_fadump_memory_area(u64 addr, unsigned long size)
160 u64 d_start, d_end;
162 if (!fw_dump.dump_registered)
163 return 0;
165 if (!size)
166 return 0;
168 d_start = fw_dump.reserve_dump_area_start;
169 d_end = d_start + fw_dump.reserve_dump_area_size;
170 if (((addr + size) > d_start) && (addr <= d_end))
171 return 1;
173 return (addr <= fw_dump.boot_mem_top);
176 int should_fadump_crash(void)
178 if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
179 return 0;
180 return 1;
183 int is_fadump_active(void)
185 return fw_dump.dump_active;
189 * Returns true if there are no holes in the memory area between d_start and d_end,
190 * false otherwise.
192 static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
194 phys_addr_t reg_start, reg_end;
195 bool ret = false;
196 u64 i, start, end;
198 for_each_mem_range(i, &reg_start, &reg_end) {
199 start = max_t(u64, d_start, reg_start);
200 end = min_t(u64, d_end, reg_end);
201 if (d_start < end) {
202 /* Memory hole from d_start to start */
203 if (start > d_start)
204 break;
206 if (end == d_end) {
207 ret = true;
208 break;
211 d_start = end + 1;
215 return ret;
219 * Returns true if there are no holes in the boot memory area,
220 * false otherwise.
222 bool is_fadump_boot_mem_contiguous(void)
224 unsigned long d_start, d_end;
225 bool ret = false;
226 int i;
228 for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
229 d_start = fw_dump.boot_mem_addr[i];
230 d_end = d_start + fw_dump.boot_mem_sz[i];
232 ret = is_fadump_mem_area_contiguous(d_start, d_end);
233 if (!ret)
234 break;
237 return ret;
241 * Returns true if there are no holes in the reserved memory area,
242 * false otherwise.
244 bool is_fadump_reserved_mem_contiguous(void)
246 u64 d_start, d_end;
248 d_start = fw_dump.reserve_dump_area_start;
249 d_end = d_start + fw_dump.reserve_dump_area_size;
250 return is_fadump_mem_area_contiguous(d_start, d_end);
253 /* Print firmware assisted dump configurations for debugging purpose. */
254 static void fadump_show_config(void)
256 int i;
258 pr_debug("Support for firmware-assisted dump (fadump): %s\n",
259 (fw_dump.fadump_supported ? "present" : "no support"));
261 if (!fw_dump.fadump_supported)
262 return;
264 pr_debug("Fadump enabled : %s\n",
265 (fw_dump.fadump_enabled ? "yes" : "no"));
266 pr_debug("Dump Active : %s\n",
267 (fw_dump.dump_active ? "yes" : "no"));
268 pr_debug("Dump section sizes:\n");
269 pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
270 pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size);
271 pr_debug(" Boot memory size : %lx\n", fw_dump.boot_memory_size);
272 pr_debug(" Boot memory top : %llx\n", fw_dump.boot_mem_top);
273 pr_debug("Boot memory regions cnt: %llx\n", fw_dump.boot_mem_regs_cnt);
274 for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
275 pr_debug("[%03d] base = %llx, size = %llx\n", i,
276 fw_dump.boot_mem_addr[i], fw_dump.boot_mem_sz[i]);
281 * fadump_calculate_reserve_size(): reserve a variable boot area (5% of System RAM)
283 * Function to find the largest memory size we need to reserve during the early
284 * boot process. This will be the size of the memory that is required for a
285 * kernel to boot successfully.
287 * This function has been taken from the phyp-assisted dump feature implementation.
289 * Returns the larger of 256MB or 5% of system RAM, rounded down to a multiple of 256MB.
291 * TODO: Come up with a better approach to find out a more accurate memory size
292 * that is required for a kernel to boot successfully.
295 static inline u64 fadump_calculate_reserve_size(void)
297 u64 base, size, bootmem_min;
298 int ret;
300 if (fw_dump.reserve_bootvar)
301 pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");
304 * Check if the size is specified through crashkernel= cmdline
305 * option. If yes, then use that but ignore base as fadump reserves
306 * memory at a predefined offset.
308 ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
309 &size, &base);
310 if (ret == 0 && size > 0) {
311 unsigned long max_size;
313 if (fw_dump.reserve_bootvar)
314 pr_info("Using 'crashkernel=' parameter for memory reservation.\n");
316 fw_dump.reserve_bootvar = (unsigned long)size;
319 * Adjust if the boot memory size specified is above
320 * the upper limit.
322 max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO;
323 if (fw_dump.reserve_bootvar > max_size) {
324 fw_dump.reserve_bootvar = max_size;
325 pr_info("Adjusted boot memory size to %luMB\n",
326 (fw_dump.reserve_bootvar >> 20));
329 return fw_dump.reserve_bootvar;
330 } else if (fw_dump.reserve_bootvar) {
332 * 'fadump_reserve_mem=' is being used to reserve memory
333 * for firmware-assisted dump.
335 return fw_dump.reserve_bootvar;
338 /* divide by 20 to get 5% of value */
339 size = memblock_phys_mem_size() / 20;
341 /* round it down to a multiple of 256MB */
342 size = size & ~0x0FFFFFFFUL;
344 /* Truncate to memory_limit. We don't want to over-reserve the memory. */
345 if (memory_limit && size > memory_limit)
346 size = memory_limit;
348 bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
349 return (size > bootmem_min ? size : bootmem_min);
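/*
 * Illustrative example of the default sizing above (assuming no
 * 'crashkernel=' or 'fadump_reserve_mem=' override): on a system with
 * 64 GiB of RAM, 5% is roughly 3.2 GiB, which the ~0x0FFFFFFFUL mask
 * rounds down to 3 GiB (0xC0000000); the final size is then the larger
 * of that and the platform's fadump_get_bootmem_min() value.
 */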
353 * Calculate the total memory size required to be reserved for
354 * firmware-assisted dump registration.
356 static unsigned long get_fadump_area_size(void)
358 unsigned long size = 0;
360 size += fw_dump.cpu_state_data_size;
361 size += fw_dump.hpte_region_size;
362 size += fw_dump.boot_memory_size;
363 size += sizeof(struct fadump_crash_info_header);
364 size += sizeof(struct elfhdr); /* ELF core header.*/
365 size += sizeof(struct elf_phdr); /* place holder for cpu notes */
366 /* Program headers for crash memory regions. */
367 size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);
369 size = PAGE_ALIGN(size);
371 /* This is to hold kernel metadata on platforms that support it */
372 size += (fw_dump.ops->fadump_get_metadata_size ?
373 fw_dump.ops->fadump_get_metadata_size() : 0);
374 return size;
377 static int __init add_boot_mem_region(unsigned long rstart,
378 unsigned long rsize)
380 int i = fw_dump.boot_mem_regs_cnt++;
382 if (fw_dump.boot_mem_regs_cnt > FADUMP_MAX_MEM_REGS) {
383 fw_dump.boot_mem_regs_cnt = FADUMP_MAX_MEM_REGS;
384 return 0;
387 pr_debug("Added boot memory range[%d] [%#016lx-%#016lx)\n",
388 i, rstart, (rstart + rsize));
389 fw_dump.boot_mem_addr[i] = rstart;
390 fw_dump.boot_mem_sz[i] = rsize;
391 return 1;
395 * Firmware usually has a hard limit on the data it can copy per region.
396 * Honour that by splitting a memory range into multiple regions.
398 static int __init add_boot_mem_regions(unsigned long mstart,
399 unsigned long msize)
401 unsigned long rstart, rsize, max_size;
402 int ret = 1;
404 rstart = mstart;
405 max_size = fw_dump.max_copy_size ? fw_dump.max_copy_size : msize;
406 while (msize) {
407 if (msize > max_size)
408 rsize = max_size;
409 else
410 rsize = msize;
412 ret = add_boot_mem_region(rstart, rsize);
413 if (!ret)
414 break;
416 msize -= rsize;
417 rstart += rsize;
420 return ret;
423 static int __init fadump_get_boot_mem_regions(void)
425 unsigned long size, cur_size, hole_size, last_end;
426 unsigned long mem_size = fw_dump.boot_memory_size;
427 phys_addr_t reg_start, reg_end;
428 int ret = 1;
429 u64 i;
431 fw_dump.boot_mem_regs_cnt = 0;
433 last_end = 0;
434 hole_size = 0;
435 cur_size = 0;
436 for_each_mem_range(i, &reg_start, &reg_end) {
437 size = reg_end - reg_start;
438 hole_size += (reg_start - last_end);
440 if ((cur_size + size) >= mem_size) {
441 size = (mem_size - cur_size);
442 ret = add_boot_mem_regions(reg_start, size);
443 break;
446 mem_size -= size;
447 cur_size += size;
448 ret = add_boot_mem_regions(reg_start, size);
449 if (!ret)
450 break;
452 last_end = reg_end;
454 fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size);
456 return ret;
460 * Returns true if the given range overlaps with the reserved memory ranges
461 * starting at idx, and updates idx to the index of the memory range that
462 * overlaps with the given memory range.
463 * Returns false otherwise.
465 static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
467 bool ret = false;
468 int i;
470 for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
471 u64 rbase = reserved_mrange_info.mem_ranges[i].base;
472 u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
474 if (end <= rbase)
475 break;
477 if ((end > rbase) && (base < rend)) {
478 *idx = i;
479 ret = true;
480 break;
484 return ret;
488 * Locate a suitable memory area to reserve memory for FADump. While at it,
489 * look up reserved-ranges & avoid overlap with them, as they are used by F/W.
491 static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
493 struct fadump_memory_range *mrngs;
494 phys_addr_t mstart, mend;
495 int idx = 0;
496 u64 i, ret = 0;
498 mrngs = reserved_mrange_info.mem_ranges;
499 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
500 &mstart, &mend, NULL) {
501 pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
502 i, mstart, mend, base);
504 if (mstart > base)
505 base = PAGE_ALIGN(mstart);
507 while ((mend > base) && ((mend - base) >= size)) {
508 if (!overlaps_reserved_ranges(base, base+size, &idx)) {
509 ret = base;
510 goto out;
513 base = mrngs[idx].base + mrngs[idx].size;
514 base = PAGE_ALIGN(base);
518 out:
519 return ret;
522 int __init fadump_reserve_mem(void)
524 u64 base, size, mem_boundary, bootmem_min;
525 int ret = 1;
527 if (!fw_dump.fadump_enabled)
528 return 0;
530 if (!fw_dump.fadump_supported) {
531 pr_info("Firmware-Assisted Dump is not supported on this hardware\n");
532 goto error_out;
536 * Initialize boot memory size
537 * If dump is active then we have already calculated the size during
538 * the first kernel's boot.
540 if (!fw_dump.dump_active) {
541 fw_dump.boot_memory_size =
542 PAGE_ALIGN(fadump_calculate_reserve_size());
543 #ifdef CONFIG_CMA
544 if (!fw_dump.nocma) {
545 fw_dump.boot_memory_size =
546 ALIGN(fw_dump.boot_memory_size,
547 FADUMP_CMA_ALIGNMENT);
549 #endif
551 bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
552 if (fw_dump.boot_memory_size < bootmem_min) {
553 pr_err("Can't enable fadump with boot memory size (0x%lx) less than 0x%llx\n",
554 fw_dump.boot_memory_size, bootmem_min);
555 goto error_out;
558 if (!fadump_get_boot_mem_regions()) {
559 pr_err("Too many holes in boot memory area to enable fadump\n");
560 goto error_out;
565 * Calculate the memory boundary.
566 * If memory_limit is less than actual memory boundary then reserve
567 * the memory for fadump beyond the memory_limit and adjust the
568 * memory_limit accordingly, so that the running kernel can run with
569 * specified memory_limit.
571 if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
572 size = get_fadump_area_size();
573 if ((memory_limit + size) < memblock_end_of_DRAM())
574 memory_limit += size;
575 else
576 memory_limit = memblock_end_of_DRAM();
577 printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
578 " dump, now %#016llx\n", memory_limit);
580 if (memory_limit)
581 mem_boundary = memory_limit;
582 else
583 mem_boundary = memblock_end_of_DRAM();
585 base = fw_dump.boot_mem_top;
586 size = get_fadump_area_size();
587 fw_dump.reserve_dump_area_size = size;
588 if (fw_dump.dump_active) {
589 pr_info("Firmware-assisted dump is active.\n");
591 #ifdef CONFIG_HUGETLB_PAGE
593 * FADump capture kernel doesn't care much about hugepages.
594 * In fact, handling hugepages in capture kernel is asking for
595 * trouble. So, disable HugeTLB support when fadump is active.
597 hugetlb_disabled = true;
598 #endif
600 * If last boot has crashed then reserve all the memory
601 * above boot memory size so that we don't touch it until
602 * dump is written to disk by userspace tool. This memory
603 * can be released for general use by invalidating fadump.
605 fadump_reserve_crash_area(base);
607 pr_debug("fadumphdr_addr = %#016lx\n", fw_dump.fadumphdr_addr);
608 pr_debug("Reserve dump area start address: 0x%lx\n",
609 fw_dump.reserve_dump_area_start);
610 } else {
612 * Reserve memory at an offset closer to bottom of the RAM to
613 * minimize the impact of memory hot-remove operation.
615 base = fadump_locate_reserve_mem(base, size);
617 if (!base || (base + size > mem_boundary)) {
618 pr_err("Failed to find memory chunk for reservation!\n");
619 goto error_out;
621 fw_dump.reserve_dump_area_start = base;
624 * Calculate the kernel metadata address and register it with
625 * f/w if the platform supports it.
627 if (fw_dump.ops->fadump_setup_metadata &&
628 (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
629 goto error_out;
631 if (memblock_reserve(base, size)) {
632 pr_err("Failed to reserve memory!\n");
633 goto error_out;
636 pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
637 (size >> 20), base, (memblock_phys_mem_size() >> 20));
639 ret = fadump_cma_init();
642 return ret;
643 error_out:
644 fw_dump.fadump_enabled = 0;
645 return 0;
648 /* Look for fadump= cmdline option. */
649 static int __init early_fadump_param(char *p)
651 if (!p)
652 return 1;
654 if (strncmp(p, "on", 2) == 0)
655 fw_dump.fadump_enabled = 1;
656 else if (strncmp(p, "off", 3) == 0)
657 fw_dump.fadump_enabled = 0;
658 else if (strncmp(p, "nocma", 5) == 0) {
659 fw_dump.fadump_enabled = 1;
660 fw_dump.nocma = 1;
663 return 0;
665 early_param("fadump", early_fadump_param);
668 * Look for fadump_reserve_mem= cmdline option
669 * TODO: Remove references to the 'fadump_reserve_mem=' parameter once
670 * the 'crashkernel=' parameter is fully adopted.
672 static int __init early_fadump_reserve_mem(char *p)
674 if (p)
675 fw_dump.reserve_bootvar = memparse(p, &p);
676 return 0;
678 early_param("fadump_reserve_mem", early_fadump_reserve_mem);
680 void crash_fadump(struct pt_regs *regs, const char *str)
682 unsigned int msecs;
683 struct fadump_crash_info_header *fdh = NULL;
684 int old_cpu, this_cpu;
685 /* Do not include first CPU */
686 unsigned int ncpus = num_online_cpus() - 1;
688 if (!should_fadump_crash())
689 return;
692 * old_cpu == -1 means this is the first CPU which has come here,
693 * go ahead and trigger fadump.
695 * old_cpu != -1 means some other CPU is already on its way
696 * to trigger fadump, just keep looping here.
698 this_cpu = smp_processor_id();
699 old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);
701 if (old_cpu != -1) {
702 atomic_inc(&cpus_in_fadump);
705 * We can't loop here indefinitely. Wait as long as fadump
706 * is in force. If we race with fadump un-registration this
707 * loop will break and then we go down the normal panic path
708 * and reboot. If fadump is in force the first crashing
709 * cpu will definitely trigger fadump.
711 while (fw_dump.dump_registered)
712 cpu_relax();
713 return;
716 fdh = __va(fw_dump.fadumphdr_addr);
717 fdh->crashing_cpu = crashing_cpu;
718 crash_save_vmcoreinfo();
720 if (regs)
721 fdh->regs = *regs;
722 else
723 ppc_save_regs(&fdh->regs);
725 fdh->online_mask = *cpu_online_mask;
728 * If we came in via system reset, wait a while for the secondary
729 * CPUs to enter.
731 if (TRAP(&(fdh->regs)) == 0x100) {
732 msecs = CRASH_TIMEOUT;
733 while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0))
734 mdelay(1);
737 fw_dump.ops->fadump_trigger(fdh, str);
740 u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
742 struct elf_prstatus prstatus;
744 memset(&prstatus, 0, sizeof(prstatus));
746 * FIXME: How do I get the PID? Do I really need it?
747 * prstatus.pr_pid = ????
749 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
750 buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
751 &prstatus, sizeof(prstatus));
752 return buf;
755 void fadump_update_elfcore_header(char *bufp)
757 struct elf_phdr *phdr;
759 bufp += sizeof(struct elfhdr);
761 /* First note is a placeholder for cpu notes info. */
762 phdr = (struct elf_phdr *)bufp;
764 if (phdr->p_type == PT_NOTE) {
765 phdr->p_paddr = __pa(fw_dump.cpu_notes_buf_vaddr);
766 phdr->p_offset = phdr->p_paddr;
767 phdr->p_filesz = fw_dump.cpu_notes_buf_size;
768 phdr->p_memsz = fw_dump.cpu_notes_buf_size;
770 return;
773 static void *fadump_alloc_buffer(unsigned long size)
775 unsigned long count, i;
776 struct page *page;
777 void *vaddr;
779 vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
780 if (!vaddr)
781 return NULL;
783 count = PAGE_ALIGN(size) / PAGE_SIZE;
784 page = virt_to_page(vaddr);
785 for (i = 0; i < count; i++)
786 mark_page_reserved(page + i);
787 return vaddr;
790 static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
792 free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
795 s32 fadump_setup_cpu_notes_buf(u32 num_cpus)
797 /* Allocate buffer to hold cpu crash notes. */
798 fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
799 fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size);
800 fw_dump.cpu_notes_buf_vaddr =
801 (unsigned long)fadump_alloc_buffer(fw_dump.cpu_notes_buf_size);
802 if (!fw_dump.cpu_notes_buf_vaddr) {
803 pr_err("Failed to allocate %ld bytes for CPU notes buffer\n",
804 fw_dump.cpu_notes_buf_size);
805 return -ENOMEM;
808 pr_debug("Allocated buffer for cpu notes of size %ld at 0x%lx\n",
809 fw_dump.cpu_notes_buf_size,
810 fw_dump.cpu_notes_buf_vaddr);
811 return 0;
814 void fadump_free_cpu_notes_buf(void)
816 if (!fw_dump.cpu_notes_buf_vaddr)
817 return;
819 fadump_free_buffer(fw_dump.cpu_notes_buf_vaddr,
820 fw_dump.cpu_notes_buf_size);
821 fw_dump.cpu_notes_buf_vaddr = 0;
822 fw_dump.cpu_notes_buf_size = 0;
825 static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
827 if (mrange_info->is_static) {
828 mrange_info->mem_range_cnt = 0;
829 return;
832 kfree(mrange_info->mem_ranges);
833 memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
834 (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
838 * Allocate or reallocate mem_ranges array in incremental units
839 * of PAGE_SIZE.
841 static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
843 struct fadump_memory_range *new_array;
844 u64 new_size;
846 new_size = mrange_info->mem_ranges_sz + PAGE_SIZE;
847 pr_debug("Allocating %llu bytes of memory for %s memory ranges\n",
848 new_size, mrange_info->name);
850 new_array = krealloc(mrange_info->mem_ranges, new_size, GFP_KERNEL);
851 if (new_array == NULL) {
852 pr_err("Insufficient memory for setting up %s memory ranges\n",
853 mrange_info->name);
854 fadump_free_mem_ranges(mrange_info);
855 return -ENOMEM;
858 mrange_info->mem_ranges = new_array;
859 mrange_info->mem_ranges_sz = new_size;
860 mrange_info->max_mem_ranges = (new_size /
861 sizeof(struct fadump_memory_range));
862 return 0;
865 static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
866 u64 base, u64 end)
868 struct fadump_memory_range *mem_ranges = mrange_info->mem_ranges;
869 bool is_adjacent = false;
870 u64 start, size;
872 if (base == end)
873 return 0;
876 * Fold adjacent memory ranges to bring down the memory ranges/
877 * PT_LOAD segments count.
879 if (mrange_info->mem_range_cnt) {
880 start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
881 size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
883 if ((start + size) == base)
884 is_adjacent = true;
886 if (!is_adjacent) {
887 /* resize the array on reaching the limit */
888 if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
889 int ret;
891 if (mrange_info->is_static) {
892 pr_err("Reached array size limit for %s memory ranges\n",
893 mrange_info->name);
894 return -ENOSPC;
897 ret = fadump_alloc_mem_ranges(mrange_info);
898 if (ret)
899 return ret;
901 /* Update to the new resized array */
902 mem_ranges = mrange_info->mem_ranges;
905 start = base;
906 mem_ranges[mrange_info->mem_range_cnt].base = start;
907 mrange_info->mem_range_cnt++;
910 mem_ranges[mrange_info->mem_range_cnt - 1].size = (end - start);
911 pr_debug("%s_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
912 mrange_info->name, (mrange_info->mem_range_cnt - 1),
913 start, end - 1, (end - start));
914 return 0;
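/*
 * For example, adding [0x1000-0x2000) and then [0x2000-0x3000) to the same
 * mrange_info above results in a single folded entry [0x1000-0x3000), which
 * keeps the eventual PT_LOAD segment count down.
 */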
917 static int fadump_exclude_reserved_area(u64 start, u64 end)
919 u64 ra_start, ra_end;
920 int ret = 0;
922 ra_start = fw_dump.reserve_dump_area_start;
923 ra_end = ra_start + fw_dump.reserve_dump_area_size;
925 if ((ra_start < end) && (ra_end > start)) {
926 if ((start < ra_start) && (end > ra_end)) {
927 ret = fadump_add_mem_range(&crash_mrange_info,
928 start, ra_start);
929 if (ret)
930 return ret;
932 ret = fadump_add_mem_range(&crash_mrange_info,
933 ra_end, end);
934 } else if (start < ra_start) {
935 ret = fadump_add_mem_range(&crash_mrange_info,
936 start, ra_start);
937 } else if (ra_end < end) {
938 ret = fadump_add_mem_range(&crash_mrange_info,
939 ra_end, end);
941 } else
942 ret = fadump_add_mem_range(&crash_mrange_info, start, end);
944 return ret;
947 static int fadump_init_elfcore_header(char *bufp)
949 struct elfhdr *elf;
951 elf = (struct elfhdr *) bufp;
952 bufp += sizeof(struct elfhdr);
953 memcpy(elf->e_ident, ELFMAG, SELFMAG);
954 elf->e_ident[EI_CLASS] = ELF_CLASS;
955 elf->e_ident[EI_DATA] = ELF_DATA;
956 elf->e_ident[EI_VERSION] = EV_CURRENT;
957 elf->e_ident[EI_OSABI] = ELF_OSABI;
958 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
959 elf->e_type = ET_CORE;
960 elf->e_machine = ELF_ARCH;
961 elf->e_version = EV_CURRENT;
962 elf->e_entry = 0;
963 elf->e_phoff = sizeof(struct elfhdr);
964 elf->e_shoff = 0;
965 #if defined(_CALL_ELF)
966 elf->e_flags = _CALL_ELF;
967 #else
968 elf->e_flags = 0;
969 #endif
970 elf->e_ehsize = sizeof(struct elfhdr);
971 elf->e_phentsize = sizeof(struct elf_phdr);
972 elf->e_phnum = 0;
973 elf->e_shentsize = 0;
974 elf->e_shnum = 0;
975 elf->e_shstrndx = 0;
977 return 0;
981 * Traverse through the memblock structure and set up crash memory ranges. These
982 * ranges will be used to create PT_LOAD program headers in the elfcore header.
984 static int fadump_setup_crash_memory_ranges(void)
986 u64 i, start, end;
987 int ret;
989 pr_debug("Setup crash memory ranges.\n");
990 crash_mrange_info.mem_range_cnt = 0;
993 * Boot memory region(s) registered with firmware are moved to a
994 * different location at the time of crash. Create separate program
995 * header(s) for these memory chunk(s) with the correct offset.
997 for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
998 start = fw_dump.boot_mem_addr[i];
999 end = start + fw_dump.boot_mem_sz[i];
1000 ret = fadump_add_mem_range(&crash_mrange_info, start, end);
1001 if (ret)
1002 return ret;
1005 for_each_mem_range(i, &start, &end) {
1007 * skip the memory chunk that is already added
1008 * (0 through boot_memory_top).
1010 if (start < fw_dump.boot_mem_top) {
1011 if (end > fw_dump.boot_mem_top)
1012 start = fw_dump.boot_mem_top;
1013 else
1014 continue;
1017 /* add this range excluding the reserved dump area. */
1018 ret = fadump_exclude_reserved_area(start, end);
1019 if (ret)
1020 return ret;
1023 return 0;
1027 * If the given physical address falls within the boot memory region then
1028 * return the relocated address that points to the dump region reserved
1029 * for saving initial boot memory contents.
1031 static inline unsigned long fadump_relocate(unsigned long paddr)
1033 unsigned long raddr, rstart, rend, rlast, hole_size;
1034 int i;
1036 hole_size = 0;
1037 rlast = 0;
1038 raddr = paddr;
1039 for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
1040 rstart = fw_dump.boot_mem_addr[i];
1041 rend = rstart + fw_dump.boot_mem_sz[i];
1042 hole_size += (rstart - rlast);
1044 if (paddr >= rstart && paddr < rend) {
1045 raddr += fw_dump.boot_mem_dest_addr - hole_size;
1046 break;
1049 rlast = rend;
1052 pr_debug("vmcoreinfo: paddr = 0x%lx, raddr = 0x%lx\n", paddr, raddr);
1053 return raddr;
1056 static int fadump_create_elfcore_headers(char *bufp)
1058 unsigned long long raddr, offset;
1059 struct elf_phdr *phdr;
1060 struct elfhdr *elf;
1061 int i, j;
1063 fadump_init_elfcore_header(bufp);
1064 elf = (struct elfhdr *)bufp;
1065 bufp += sizeof(struct elfhdr);
1068 * Set up ELF PT_NOTE, a placeholder for cpu notes info. The notes info
1069 * will be populated during the second kernel's boot after crash. Hence
1070 * this PT_NOTE will always be the first elf note.
1072 * NOTE: Any new ELF note addition should be placed after this note.
1074 phdr = (struct elf_phdr *)bufp;
1075 bufp += sizeof(struct elf_phdr);
1076 phdr->p_type = PT_NOTE;
1077 phdr->p_flags = 0;
1078 phdr->p_vaddr = 0;
1079 phdr->p_align = 0;
1081 phdr->p_offset = 0;
1082 phdr->p_paddr = 0;
1083 phdr->p_filesz = 0;
1084 phdr->p_memsz = 0;
1086 (elf->e_phnum)++;
1088 /* setup ELF PT_NOTE for vmcoreinfo */
1089 phdr = (struct elf_phdr *)bufp;
1090 bufp += sizeof(struct elf_phdr);
1091 phdr->p_type = PT_NOTE;
1092 phdr->p_flags = 0;
1093 phdr->p_vaddr = 0;
1094 phdr->p_align = 0;
1096 phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
1097 phdr->p_offset = phdr->p_paddr;
1098 phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;
1100 /* Increment number of program headers. */
1101 (elf->e_phnum)++;
1103 /* setup PT_LOAD sections. */
1104 j = 0;
1105 offset = 0;
1106 raddr = fw_dump.boot_mem_addr[0];
1107 for (i = 0; i < crash_mrange_info.mem_range_cnt; i++) {
1108 u64 mbase, msize;
1110 mbase = crash_mrange_info.mem_ranges[i].base;
1111 msize = crash_mrange_info.mem_ranges[i].size;
1112 if (!msize)
1113 continue;
1115 phdr = (struct elf_phdr *)bufp;
1116 bufp += sizeof(struct elf_phdr);
1117 phdr->p_type = PT_LOAD;
1118 phdr->p_flags = PF_R|PF_W|PF_X;
1119 phdr->p_offset = mbase;
1121 if (mbase == raddr) {
1123 * The entire real memory region will be moved by
1124 * firmware to the specified destination_address.
1125 * Hence set the correct offset.
1127 phdr->p_offset = fw_dump.boot_mem_dest_addr + offset;
1128 if (j < (fw_dump.boot_mem_regs_cnt - 1)) {
1129 offset += fw_dump.boot_mem_sz[j];
1130 raddr = fw_dump.boot_mem_addr[++j];
1134 phdr->p_paddr = mbase;
1135 phdr->p_vaddr = (unsigned long)__va(mbase);
1136 phdr->p_filesz = msize;
1137 phdr->p_memsz = msize;
1138 phdr->p_align = 0;
1140 /* Increment number of program headers. */
1141 (elf->e_phnum)++;
1143 return 0;
1146 static unsigned long init_fadump_header(unsigned long addr)
1148 struct fadump_crash_info_header *fdh;
1150 if (!addr)
1151 return 0;
1153 fdh = __va(addr);
1154 addr += sizeof(struct fadump_crash_info_header);
1156 memset(fdh, 0, sizeof(struct fadump_crash_info_header));
1157 fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
1158 fdh->elfcorehdr_addr = addr;
1159 /* We will set the crashing cpu id in crash_fadump() during crash. */
1160 fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
1162 return addr;
1165 static int register_fadump(void)
1167 unsigned long addr;
1168 void *vaddr;
1169 int ret;
1172 * If no memory is reserved then we cannot register for firmware-
1173 * assisted dump.
1175 if (!fw_dump.reserve_dump_area_size)
1176 return -ENODEV;
1178 ret = fadump_setup_crash_memory_ranges();
1179 if (ret)
1180 return ret;
1182 addr = fw_dump.fadumphdr_addr;
1184 /* Initialize fadump crash info header. */
1185 addr = init_fadump_header(addr);
1186 vaddr = __va(addr);
1188 pr_debug("Creating ELF core headers at %#016lx\n", addr);
1189 fadump_create_elfcore_headers(vaddr);
1191 /* register the future kernel dump with firmware. */
1192 pr_debug("Registering for firmware-assisted kernel dump...\n");
1193 return fw_dump.ops->fadump_register(&fw_dump);
1196 void fadump_cleanup(void)
1198 if (!fw_dump.fadump_supported)
1199 return;
1201 /* Invalidate the registration only if dump is active. */
1202 if (fw_dump.dump_active) {
1203 pr_debug("Invalidating firmware-assisted dump registration\n");
1204 fw_dump.ops->fadump_invalidate(&fw_dump);
1205 } else if (fw_dump.dump_registered) {
1206 /* Un-register Firmware-assisted dump if it was registered. */
1207 fw_dump.ops->fadump_unregister(&fw_dump);
1208 fadump_free_mem_ranges(&crash_mrange_info);
1211 if (fw_dump.ops->fadump_cleanup)
1212 fw_dump.ops->fadump_cleanup(&fw_dump);
1215 static void fadump_free_reserved_memory(unsigned long start_pfn,
1216 unsigned long end_pfn)
1218 unsigned long pfn;
1219 unsigned long time_limit = jiffies + HZ;
1221 pr_info("freeing reserved memory (0x%llx - 0x%llx)\n",
1222 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
1224 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1225 free_reserved_page(pfn_to_page(pfn));
1227 if (time_after(jiffies, time_limit)) {
1228 cond_resched();
1229 time_limit = jiffies + HZ;
1235 * Skip memory holes and free memory that was actually reserved.
1237 static void fadump_release_reserved_area(u64 start, u64 end)
1239 unsigned long reg_spfn, reg_epfn;
1240 u64 tstart, tend, spfn, epfn;
1241 int i;
1243 spfn = PHYS_PFN(start);
1244 epfn = PHYS_PFN(end);
1246 for_each_mem_pfn_range(i, MAX_NUMNODES, &reg_spfn, &reg_epfn, NULL) {
1247 tstart = max_t(u64, spfn, reg_spfn);
1248 tend = min_t(u64, epfn, reg_epfn);
1250 if (tstart < tend) {
1251 fadump_free_reserved_memory(tstart, tend);
1253 if (tend == epfn)
1254 break;
1256 spfn = tend;
1262 * Sort the mem ranges in-place and merge adjacent ranges
1263 * to minimize the memory ranges count.
1265 static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
1267 struct fadump_memory_range *mem_ranges;
1268 struct fadump_memory_range tmp_range;
1269 u64 base, size;
1270 int i, j, idx;
1272 if (!reserved_mrange_info.mem_range_cnt)
1273 return;
1275 /* Sort the memory ranges */
1276 mem_ranges = mrange_info->mem_ranges;
1277 for (i = 0; i < mrange_info->mem_range_cnt; i++) {
1278 idx = i;
1279 for (j = (i + 1); j < mrange_info->mem_range_cnt; j++) {
1280 if (mem_ranges[idx].base > mem_ranges[j].base)
1281 idx = j;
1283 if (idx != i) {
1284 tmp_range = mem_ranges[idx];
1285 mem_ranges[idx] = mem_ranges[i];
1286 mem_ranges[i] = tmp_range;
1290 /* Merge adjacent reserved ranges */
1291 idx = 0;
1292 for (i = 1; i < mrange_info->mem_range_cnt; i++) {
1293 base = mem_ranges[i-1].base;
1294 size = mem_ranges[i-1].size;
1295 if (mem_ranges[i].base == (base + size))
1296 mem_ranges[idx].size += mem_ranges[i].size;
1297 else {
1298 idx++;
1299 if (i == idx)
1300 continue;
1302 mem_ranges[idx] = mem_ranges[i];
1305 mrange_info->mem_range_cnt = idx + 1;
1309 * Scan reserved-ranges to consider them while reserving/releasing
1310 * memory for FADump.
1312 static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
1314 const __be32 *prop;
1315 int len, ret = -1;
1316 unsigned long i;
1318 /* reserved-ranges already scanned */
1319 if (reserved_mrange_info.mem_range_cnt != 0)
1320 return;
1322 prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
1323 if (!prop)
1324 return;
1327 * Each reserved range is an (address,size) pair, 2 cells each,
1328 * totalling 4 cells per range.
1330 for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
1331 u64 base, size;
1333 base = of_read_number(prop + (i * 4) + 0, 2);
1334 size = of_read_number(prop + (i * 4) + 2, 2);
1336 if (size) {
1337 ret = fadump_add_mem_range(&reserved_mrange_info,
1338 base, base + size);
1339 if (ret < 0) {
1340 pr_warn("some reserved ranges are ignored!\n");
1341 break;
1346 /* Compact reserved ranges */
1347 sort_and_merge_mem_ranges(&reserved_mrange_info);
1351 * Release the memory that was reserved during early boot to preserve the
1352 * crashed kernel's memory contents except reserved dump area (permanent
1353 * reservation) and reserved ranges used by F/W. The released memory will
1354 * be available for general use.
1356 static void fadump_release_memory(u64 begin, u64 end)
1358 u64 ra_start, ra_end, tstart;
1359 int i, ret;
1361 ra_start = fw_dump.reserve_dump_area_start;
1362 ra_end = ra_start + fw_dump.reserve_dump_area_size;
1365 * If reserved ranges array limit is hit, overwrite the last reserved
1366 * memory range with reserved dump area to ensure it is excluded from
1367 * the memory being released (reused for next FADump registration).
1369 if (reserved_mrange_info.mem_range_cnt ==
1370 reserved_mrange_info.max_mem_ranges)
1371 reserved_mrange_info.mem_range_cnt--;
1373 ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
1374 if (ret != 0)
1375 return;
1377 /* Get the reserved ranges list in order first. */
1378 sort_and_merge_mem_ranges(&reserved_mrange_info);
1380 /* Exclude reserved ranges and release remaining memory */
1381 tstart = begin;
1382 for (i = 0; i < reserved_mrange_info.mem_range_cnt; i++) {
1383 ra_start = reserved_mrange_info.mem_ranges[i].base;
1384 ra_end = ra_start + reserved_mrange_info.mem_ranges[i].size;
1386 if (tstart >= ra_end)
1387 continue;
1389 if (tstart < ra_start)
1390 fadump_release_reserved_area(tstart, ra_start);
1391 tstart = ra_end;
1394 if (tstart < end)
1395 fadump_release_reserved_area(tstart, end);
1398 static void fadump_invalidate_release_mem(void)
1400 mutex_lock(&fadump_mutex);
1401 if (!fw_dump.dump_active) {
1402 mutex_unlock(&fadump_mutex);
1403 return;
1406 fadump_cleanup();
1407 mutex_unlock(&fadump_mutex);
1409 fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM());
1410 fadump_free_cpu_notes_buf();
1413 * Setup kernel metadata and initialize the kernel dump
1414 * memory structure for FADump re-registration.
1416 if (fw_dump.ops->fadump_setup_metadata &&
1417 (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
1418 pr_warn("Failed to setup kernel metadata!\n");
1419 fw_dump.ops->fadump_init_mem_struct(&fw_dump);
1422 static ssize_t release_mem_store(struct kobject *kobj,
1423 struct kobj_attribute *attr,
1424 const char *buf, size_t count)
1426 int input = -1;
1428 if (!fw_dump.dump_active)
1429 return -EPERM;
1431 if (kstrtoint(buf, 0, &input))
1432 return -EINVAL;
1434 if (input == 1) {
1436 * Take away the '/proc/vmcore'. We are releasing the dump
1437 * memory, hence it will not be valid anymore.
1439 #ifdef CONFIG_PROC_VMCORE
1440 vmcore_cleanup();
1441 #endif
1442 fadump_invalidate_release_mem();
1444 } else
1445 return -EINVAL;
1446 return count;
1449 /* Release the reserved memory and disable the FADump */
1450 static void unregister_fadump(void)
1452 fadump_cleanup();
1453 fadump_release_memory(fw_dump.reserve_dump_area_start,
1454 fw_dump.reserve_dump_area_size);
1455 fw_dump.fadump_enabled = 0;
1456 kobject_put(fadump_kobj);
1459 static ssize_t enabled_show(struct kobject *kobj,
1460 struct kobj_attribute *attr,
1461 char *buf)
1463 return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
1466 static ssize_t mem_reserved_show(struct kobject *kobj,
1467 struct kobj_attribute *attr,
1468 char *buf)
1470 return sprintf(buf, "%ld\n", fw_dump.reserve_dump_area_size);
1473 static ssize_t registered_show(struct kobject *kobj,
1474 struct kobj_attribute *attr,
1475 char *buf)
1477 return sprintf(buf, "%d\n", fw_dump.dump_registered);
1480 static ssize_t registered_store(struct kobject *kobj,
1481 struct kobj_attribute *attr,
1482 const char *buf, size_t count)
1484 int ret = 0;
1485 int input = -1;
1487 if (!fw_dump.fadump_enabled || fw_dump.dump_active)
1488 return -EPERM;
1490 if (kstrtoint(buf, 0, &input))
1491 return -EINVAL;
1493 mutex_lock(&fadump_mutex);
1495 switch (input) {
1496 case 0:
1497 if (fw_dump.dump_registered == 0) {
1498 goto unlock_out;
1501 /* Un-register Firmware-assisted dump */
1502 pr_debug("Un-register firmware-assisted dump\n");
1503 fw_dump.ops->fadump_unregister(&fw_dump);
1504 break;
1505 case 1:
1506 if (fw_dump.dump_registered == 1) {
1507 /* Un-register Firmware-assisted dump */
1508 fw_dump.ops->fadump_unregister(&fw_dump);
1510 /* Register Firmware-assisted dump */
1511 ret = register_fadump();
1512 break;
1513 default:
1514 ret = -EINVAL;
1515 break;
1518 unlock_out:
1519 mutex_unlock(&fadump_mutex);
1520 return ret < 0 ? ret : count;
1523 static int fadump_region_show(struct seq_file *m, void *private)
1525 if (!fw_dump.fadump_enabled)
1526 return 0;
1528 mutex_lock(&fadump_mutex);
1529 fw_dump.ops->fadump_region_show(&fw_dump, m);
1530 mutex_unlock(&fadump_mutex);
1531 return 0;
1534 static struct kobj_attribute release_attr = __ATTR_WO(release_mem);
1535 static struct kobj_attribute enable_attr = __ATTR_RO(enabled);
1536 static struct kobj_attribute register_attr = __ATTR_RW(registered);
1537 static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved);
1539 static struct attribute *fadump_attrs[] = {
1540 &enable_attr.attr,
1541 &register_attr.attr,
1542 &mem_reserved_attr.attr,
1543 NULL,
1546 ATTRIBUTE_GROUPS(fadump);
1548 DEFINE_SHOW_ATTRIBUTE(fadump_region);
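/*
 * The attributes above are surfaced under /sys/kernel/fadump/ (created in
 * fadump_init_files() below), with fadump_region exposed via debugfs.
 * Typical userspace usage, for illustration:
 *
 *   cat /sys/kernel/fadump/enabled          # 1 if fadump is enabled
 *   echo 1 > /sys/kernel/fadump/registered  # register (or re-register) fadump
 *   echo 1 > /sys/kernel/fadump/release_mem # after the dump in /proc/vmcore is
 *                                           # saved, invalidate it and release
 *                                           # the reserved memory
 */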
1550 static void fadump_init_files(void)
1552 int rc = 0;
1554 fadump_kobj = kobject_create_and_add("fadump", kernel_kobj);
1555 if (!fadump_kobj) {
1556 pr_err("failed to create fadump kobject\n");
1557 return;
1560 debugfs_create_file("fadump_region", 0444, powerpc_debugfs_root, NULL,
1561 &fadump_region_fops);
1563 if (fw_dump.dump_active) {
1564 rc = sysfs_create_file(fadump_kobj, &release_attr.attr);
1565 if (rc)
1566 pr_err("unable to create release_mem sysfs file (%d)\n",
1567 rc);
1570 rc = sysfs_create_groups(fadump_kobj, fadump_groups);
1571 if (rc) {
1572 pr_err("sysfs group creation failed (%d), unregistering FADump",
1573 rc);
1574 unregister_fadump();
1575 return;
1579 * The FADump sysfs files were moved from kernel_kobj to fadump_kobj; we need
1580 * to create symlinks at the old locations to maintain backward compatibility.
1582 * - fadump_enabled -> fadump/enabled
1583 * - fadump_registered -> fadump/registered
1584 * - fadump_release_mem -> fadump/release_mem
1586 rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
1587 "enabled", "fadump_enabled");
1588 if (rc) {
1589 pr_err("unable to create fadump_enabled symlink (%d)", rc);
1590 return;
1593 rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
1594 "registered",
1595 "fadump_registered");
1596 if (rc) {
1597 pr_err("unable to create fadump_registered symlink (%d)", rc);
1598 sysfs_remove_link(kernel_kobj, "fadump_enabled");
1599 return;
1602 if (fw_dump.dump_active) {
1603 rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj,
1604 fadump_kobj,
1605 "release_mem",
1606 "fadump_release_mem");
1607 if (rc)
1608 pr_err("unable to create fadump_release_mem symlink (%d)",
1609 rc);
1611 return;
1615 * Prepare for firmware-assisted dump.
1617 int __init setup_fadump(void)
1619 if (!fw_dump.fadump_supported)
1620 return 0;
1622 fadump_init_files();
1623 fadump_show_config();
1625 if (!fw_dump.fadump_enabled)
1626 return 1;
1629 * If dump data is available then see if it is valid and prepare for
1630 * saving it to the disk.
1632 if (fw_dump.dump_active) {
1634 * if dump process fails then invalidate the registration
1635 * and release memory before proceeding for re-registration.
1637 if (fw_dump.ops->fadump_process(&fw_dump) < 0)
1638 fadump_invalidate_release_mem();
1640 /* Initialize the kernel dump memory structure for FAD registration. */
1641 else if (fw_dump.reserve_dump_area_size)
1642 fw_dump.ops->fadump_init_mem_struct(&fw_dump);
1644 return 1;
1646 subsys_initcall(setup_fadump);
1647 #else /* !CONFIG_PRESERVE_FA_DUMP */
1649 /* Scan the Firmware Assisted dump configuration details. */
1650 int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
1651 int depth, void *data)
1653 if ((depth != 1) || (strcmp(uname, "ibm,opal") != 0))
1654 return 0;
1656 opal_fadump_dt_scan(&fw_dump, node);
1657 return 1;
1661 * When dump is active but PRESERVE_FA_DUMP is enabled on the kernel,
1662 * preserve crash data. The subsequent memory preserving kernel boot
1663 * is likely to process this crash data.
1665 int __init fadump_reserve_mem(void)
1667 if (fw_dump.dump_active) {
1669 * If last boot has crashed then reserve all the memory
1670 * above boot memory to preserve crash data.
1672 pr_info("Preserving crash data for processing in next boot.\n");
1673 fadump_reserve_crash_area(fw_dump.boot_mem_top);
1674 } else
1675 pr_debug("FADump-aware kernel..\n");
1677 return 1;
1679 #endif /* CONFIG_PRESERVE_FA_DUMP */
1681 /* Preserve everything above the base address */
1682 static void __init fadump_reserve_crash_area(u64 base)
1684 u64 i, mstart, mend, msize;
1686 for_each_mem_range(i, &mstart, &mend) {
1687 msize = mend - mstart;
1689 if ((mstart + msize) < base)
1690 continue;
1692 if (mstart < base) {
1693 msize -= (base - mstart);
1694 mstart = base;
1697 pr_info("Reserving %lluMB of memory at %#016llx for preserving crash data",
1698 (msize >> 20), mstart);
1699 memblock_reserve(mstart, msize);
1703 unsigned long __init arch_reserved_kernel_pages(void)
1705 return memblock_reserved_size() / PAGE_SIZE;