// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppc64 code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004 Adam Litke (agl@us.ibm.com)
 * Copyright (C) 2004 IBM Corp.
 * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
 * Copyright (C) 2005 R Sharada (sharada@in.ibm.com)
 * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com)
 * Copyright (C) 2020 IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
#include <asm/mmzone.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/plpks.h>
#include <asm/cputhreads.h>

struct umem_info {
	__be64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct range *ranges;
};

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL
};

/**
 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
 *                              in the memory regions between buf_min & buf_max
 *                              for the buffer. If found, sets kbuf->mem.
 * @kbuf:                       Buffer contents and memory parameters.
 * @buf_min:                    Minimum address for the buffer.
 * @buf_max:                    Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
				      u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range_rev(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (start > buf_max)
			continue;

		/* Memory hole not found */
		if (end < buf_min)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
					       kbuf->buf_align);
			ret = 0;
			break;
		}
	}

	return ret;
}
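
/*
 * Worked example with hypothetical numbers: for buf_align = 0x100000 (1MB)
 * and memsz = 0x400000 (4MB), a candidate range clamped to
 * [0x10100000, 0x1fffffff] gives end - memsz + 1 = 0x1fc00000, and
 * ALIGN_DOWN(0x1fc00000, 0x100000) = 0x1fc00000, so the buffer is placed
 * as high in the range as possible while keeping the requested alignment.
 */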

/**
 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
 *                                  suitable buffer with top down approach.
 * @kbuf:                           Buffer contents and memory parameters.
 * @buf_min:                        Minimum address for the buffer.
 * @buf_max:                        Maximum address for the buffer.
 * @emem:                           Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
					  u64 buf_min, u64 buf_max,
					  const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmax = buf_max;
	for (i = (emem->nr_ranges - 1); i >= 0; i--) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (start > tmax)
			continue;

		if (end < tmax) {
			tmin = (end < buf_min ? buf_min : end + 1);
			ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmax = start - 1;

		if (tmax < buf_min) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmin = buf_min;
		ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
	}
	return ret;
}
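
/*
 * Illustration with hypothetical values: for buf_min = 0x1000000,
 * buf_max = 0x8000000 and one exclude range [0x4000000, 0x4ffffff], the
 * first iteration searches the window [0x5000000, 0x8000000] (tmin is
 * end + 1). If that fails, tmax drops to 0x3ffffff and the final call
 * searches the remaining window [0x1000000, 0x3ffffff].
 */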

/**
 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
 *                               in the memory regions between buf_min & buf_max
 *                               for the buffer. If found, sets kbuf->mem.
 * @kbuf:                        Buffer contents and memory parameters.
 * @buf_min:                     Minimum address for the buffer.
 * @buf_max:                     Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
				       u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (end < buf_min)
			continue;

		/* Memory hole not found */
		if (start > buf_max)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = start;
			ret = 0;
			break;
		}
	}

	return ret;
}

/**
 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
 *                                   suitable buffer with bottom up approach.
 * @kbuf:                            Buffer contents and memory parameters.
 * @buf_min:                         Minimum address for the buffer.
 * @buf_max:                         Maximum address for the buffer.
 * @emem:                            Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
					   u64 buf_min, u64 buf_max,
					   const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmin = buf_min;
	for (i = 0; i < emem->nr_ranges; i++) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (end < tmin)
			continue;

		if (start > tmin) {
			tmax = (start > buf_max ? buf_max : start - 1);
			ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmin = end + 1;

		if (tmin > buf_max) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmax = buf_max;
		ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
	}
	return ret;
}
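
/*
 * Mirror image of the top-down illustration, again with hypothetical
 * values: for buf_min = 0x1000000, buf_max = 0x8000000 and one exclude
 * range [0x4000000, 0x4ffffff], the loop first searches
 * [0x1000000, 0x3ffffff] (tmax is start - 1); on failure, tmin advances
 * to 0x5000000 and the final call searches [0x5000000, 0x8000000].
 */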

#ifdef CONFIG_CRASH_DUMP
/**
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 * @um_info:                  Usable memory buffer and ranges info.
 * @cnt:                      No. of entries to accommodate.
 *
 * Frees up the old buffer if memory reallocation fails.
 *
 * Returns buffer on success, NULL on error.
 */
static __be64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
	u32 new_size;
	__be64 *tbuf;

	if ((um_info->idx + cnt) <= um_info->max_entries)
		return um_info->buf;

	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
	if (tbuf) {
		um_info->buf = tbuf;
		um_info->size = new_size;
		um_info->max_entries = (um_info->size / sizeof(u64));
	}

	return tbuf;
}
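
/*
 * For example, starting from an empty buffer (size = 0, max_entries = 0),
 * the first request grows the buffer by MEM_RANGE_CHUNK_SZ bytes and sets
 * max_entries to size / sizeof(u64); each later overflow grows it by one
 * more chunk, so the buffer only ever grows during a single kexec load.
 */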

/**
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer
 * @um_info:        Usable memory buffer and ranges info.
 * @base:           Base address of memory range to look for.
 * @end:            End address of memory range to look for.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
{
	u64 loc_base, loc_end;
	bool add;
	int i;

	for (i = 0; i < um_info->nr_ranges; i++) {
		add = false;
		loc_base = um_info->ranges[i].start;
		loc_end = um_info->ranges[i].end;
		if (loc_base >= base && loc_end <= end)
			add = true;
		else if (base < loc_end && end > loc_base) {
			if (loc_base < base)
				loc_base = base;
			if (loc_end > end)
				loc_end = end;
			add = true;
		}

		if (add) {
			if (!check_realloc_usable_mem(um_info, 2))
				return -ENOMEM;

			um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
			um_info->buf[um_info->idx++] =
					cpu_to_be64(loc_end - loc_base + 1);
		}
	}

	return 0;
}
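
/*
 * Clipping example with hypothetical ranges: for a lookup window
 * [0x1000, 0x8fff] and a usable range [0x0, 0x2fff], only the overlap
 * [0x1000, 0x2fff] is recorded, emitted as the big-endian (base, size)
 * pair (0x1000, 0x2000).
 */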

/**
 * kdump_setup_usable_lmb - This is a callback function that gets called by
 *                          walk_drmem_lmbs for every LMB to set its
 *                          usable memory ranges.
 * @lmb:                    LMB info.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
				  void *data)
{
	struct umem_info *um_info;
	int tmp_idx, ret;
	u64 base, end;

	/*
	 * kdump load isn't supported on kernels already booted with
	 * linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!");
		return -EINVAL;
	}

	um_info = data;
	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;
	ret = add_usable_mem(um_info, base, end);
	if (!ret) {
		/*
		 * Update the no. of ranges added. Two entries (base & size)
		 * for every range added.
		 */
		um_info->buf[tmp_idx] =
				cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
	}

	return ret;
}
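
/*
 * Encoding example: if an LMB contributes two usable ranges, idx moves
 * from tmp_idx + 1 to tmp_idx + 5 (two base/size pairs), so the count
 * slot written above becomes (5 - 1) / 2 = 2, matching the
 * linux,drconf-usable-memory layout of a count followed by that many
 * (base, size) tuples.
 */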

#define NODE_PATH_LEN		256
/**
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.
 * @fdt:                     Flattened device tree for the kdump kernel.
 * @dn:                      Memory node.
 * @um_info:                 Usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem_property(void *fdt, struct device_node *dn,
				   struct umem_info *um_info)
{
	int node;
	char path[NODE_PATH_LEN];
	int i, ret;
	u64 base, size;

	of_node_get(dn);

	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
		pr_err("Buffer (%d) too small for memory node: %pOF\n",
		       NODE_PATH_LEN, dn);
		return -EOVERFLOW;
	}
	kexec_dprintk("Memory node path: %s\n", path);

	/* Now that we know the path, find its offset in kdump kernel's fdt */
	node = fdt_path_offset(fdt, path);
	if (node < 0) {
		pr_err("Malformed device tree: error reading %s\n", path);
		ret = -EINVAL;
		goto out;
	}

	um_info->idx = 0;
	if (!check_realloc_usable_mem(um_info, 2)) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * "reg" property represents sequence of (addr,size) tuples
	 * each representing a memory range.
	 */
	for (i = 0; ; i++) {
		ret = of_property_read_reg(dn, i, &base, &size);
		if (ret)
			break;

		ret = add_usable_mem(um_info, base, base + size - 1);
		if (ret)
			goto out;
	}

	// No reg or empty reg? Skip this node.
	ret = 0;
	if (i == 0)
		goto out;

	/*
	 * No kdump kernel usable memory found in this memory node.
	 * Write (0,0) tuple in linux,usable-memory property for
	 * this region to be ignored.
	 */
	if (um_info->idx == 0) {
		um_info->buf[0] = 0;
		um_info->buf[1] = 0;
		um_info->idx = 2;
	}

	ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
			  (um_info->idx * sizeof(u64)));

out:
	of_node_put(dn);
	return ret;
}

/**
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate to restrict its memory usage.
 * @fdt:                   Flattened device tree for the kdump kernel.
 * @usable_mem:            Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
	struct umem_info um_info;
	struct device_node *dn;
	int node, ret = 0;

	if (!usable_mem) {
		pr_err("Usable memory ranges for kdump kernel not found\n");
		return -ENOENT;
	}

	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)
		kexec_dprintk("No dynamic reconfiguration memory found\n");
	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	um_info.buf = NULL;
	um_info.size = 0;
	um_info.max_entries = 0;
	um_info.idx = 0;
	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
		of_node_put(dn);

		if (ret) {
			pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
			goto out;
		}

		ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
				  um_info.buf, (um_info.idx * sizeof(u64)));
		if (ret) {
			pr_err("Failed to update fdt with linux,drconf-usable-memory property: %s",
			       fdt_strerror(ret));
			goto out;
		}
	}

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */
	for_each_node_by_type(dn, "memory") {
		ret = add_usable_mem_property(fdt, dn, &um_info);
		if (ret) {
			pr_err("Failed to set linux,usable-memory property for %s node",
			       dn->full_name);
			of_node_put(dn);
			goto out;
		}
	}

out:
	kfree(um_info.buf);
	return ret;
}

/**
 * load_backup_segment - Locate a memory hole to place the backup region.
 * @image:               Kexec image.
 * @kbuf:                Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	void *buf;
	int ret;

	/*
	 * Setup a source buffer for backup segment.
	 *
	 * A source buffer has no meaning for backup region as data will
	 * be copied from backup source, after crash, in the purgatory.
	 * But as load segment code doesn't recognize such segments,
	 * setup a dummy source buffer to keep it happy for now.
	 */
	buf = vzalloc(BACKUP_SRC_SIZE);
	if (!buf)
		return -ENOMEM;

	kbuf->buffer = buf;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(buf);
		return ret;
	}

	image->arch.backup_buf = buf;
	image->arch.backup_start = kbuf->mem;
	return 0;
}

/**
 * update_backup_region_phdr - Update backup region's offset for the core to
 *                             export the region appropriately.
 * @image:                     Kexec image.
 * @ehdr:                      ELF core header.
 *
 * Assumes an exclusive program header is setup for the backup region
 * in the ELF headers
 */
static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
{
	Elf64_Phdr *phdr;
	unsigned int i;

	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_paddr == BACKUP_SRC_START) {
			phdr->p_offset = image->arch.backup_start;
			kexec_dprintk("Backup region offset updated to 0x%lx\n",
				      image->arch.backup_start);
			return;
		}
	}
}
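
/*
 * With p_offset patched, a vmcore reader pulls the first 64K of the
 * crashed kernel's memory from the backup region preserved by purgatory
 * (see load_backup_segment()) instead of from the original low-memory
 * location, which gets clobbered when the kdump kernel boots.
 */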

static unsigned int kdump_extra_elfcorehdr_size(struct crash_mem *cmem)
{
#if defined(CONFIG_CRASH_HOTPLUG) && defined(CONFIG_MEMORY_HOTPLUG)
	unsigned int extra_sz = 0;

	if (CONFIG_CRASH_MAX_MEMORY_RANGES > (unsigned int)PN_XNUM)
		pr_warn("Number of Phdrs %u exceeds max\n", CONFIG_CRASH_MAX_MEMORY_RANGES);
	else if (cmem->nr_ranges >= CONFIG_CRASH_MAX_MEMORY_RANGES)
		pr_warn("Configured crash mem ranges may not be enough\n");
	else
		extra_sz = (CONFIG_CRASH_MAX_MEMORY_RANGES - cmem->nr_ranges) * sizeof(Elf64_Phdr);

	return extra_sz;
#endif
	return 0;
}

/**
 * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
 *                           segment needed to load kdump kernel.
 * @image:                   Kexec image.
 * @kbuf:                    Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	struct crash_mem *cmem = NULL;
	unsigned long headers_sz;
	void *headers = NULL;
	int ret;

	ret = get_crash_memory_ranges(&cmem);
	if (ret)
		goto out;

	/* Setup elfcorehdr segment */
	ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
	if (ret) {
		pr_err("Failed to prepare elf headers for the core\n");
		goto out;
	}

	/* Fix the offset for backup region in the ELF header */
	update_backup_region_phdr(image, headers);

	kbuf->buffer = headers;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = headers_sz;
	kbuf->memsz = headers_sz + kdump_extra_elfcorehdr_size(cmem);
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(headers);
		goto out;
	}

	image->elf_load_addr = kbuf->mem;
	image->elf_headers_sz = headers_sz;
	image->elf_headers = headers;
out:
	kfree(cmem);
	return ret;
}

/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf)
{
	int ret;

	/* Load backup segment - first 64K bytes of the crashing kernel */
	ret = load_backup_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load backup segment\n");
		return ret;
	}
	kexec_dprintk("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */
	ret = load_elfcorehdr_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load elfcorehdr segment\n");
		return ret;
	}
	kexec_dprintk("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
		      image->elf_load_addr, kbuf->bufsz, kbuf->memsz);

	return 0;
}
#endif

/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
 *                         common global variable.
 * @image:                 kexec image.
 * @slave_code:            Slave code for the purgatory.
 * @fdt:                   Flattened device tree for the next kernel.
 * @kernel_load_addr:      Address where the kernel is loaded.
 * @fdt_load_addr:         Address where the flattened device tree is loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr)
{
	struct device_node *dn = NULL;
	int ret;

	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
			      fdt_load_addr);
	if (ret)
		goto out;

	if (image->type == KEXEC_TYPE_CRASH) {
		u32 my_run_at_load = 1;

		/*
		 * Tell relocatable kernel to run at load address
		 * via the word meant for that at 0x5c.
		 */
		ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
						     &my_run_at_load,
						     sizeof(my_run_at_load),
						     false);
		if (ret)
			goto out;
	}

	/* Tell purgatory where to look for backup region */
	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
					     &image->arch.backup_start,
					     sizeof(image->arch.backup_start),
					     false);
	if (ret)
		goto out;

	/* Setup OPAL base & entry values */
	dn = of_find_node_by_path("/ibm,opal");
	if (dn) {
		u64 val;

		ret = of_property_read_u64(dn, "opal-base-address", &val);
		if (ret)
			goto out;

		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
						     sizeof(val), false);
		if (ret)
			goto out;

		ret = of_property_read_u64(dn, "opal-entry-address", &val);
		if (ret)
			goto out;
		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
						     sizeof(val), false);
	}
out:
	of_node_put(dn);
	if (ret)
		pr_err("Failed to setup purgatory symbols");
	return ret;
}

/**
 * cpu_node_size - Compute the size of a CPU node in the FDT.
 *                 This should be done only once and the value is stored in
 *                 static variable.
 * Returns the max size of a CPU node in the FDT.
 */
static unsigned int cpu_node_size(void)
{
	static unsigned int size;
	struct device_node *dn;
	struct property *pp;

	/*
	 * Don't compute it twice, we are assuming that the per CPU node size
	 * doesn't change during the system's life.
	 */
	if (size)
		return size;

	dn = of_find_node_by_type(NULL, "cpu");
	if (WARN_ON_ONCE(!dn)) {
		// Unlikely to happen
		return 0;
	}

	/*
	 * We compute the sub node size for a CPU node, assuming it
	 * will be the same for all.
	 */
	size += strlen(dn->name) + 5;
	for_each_property_of_node(dn, pp) {
		size += strlen(pp->name);
		size += pp->length;
	}

	of_node_put(dn);
	return size;
}

static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image, unsigned int cpu_nodes)
{
	unsigned int extra_size = 0;
	u64 usm_entries;
#ifdef CONFIG_CRASH_HOTPLUG
	unsigned int possible_cpu_nodes;
#endif

	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump kernel, account for linux,usable-memory and
	 * linux,drconf-usable-memory properties. Get an approximate on the
	 * number of usable memory entries and use for FDT size estimation.
	 */
	if (drmem_lmb_size()) {
		usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
			       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
		extra_size += (unsigned int)(usm_entries * sizeof(u64));
	}

#ifdef CONFIG_CRASH_HOTPLUG
	/*
	 * Make sure enough space is reserved to accommodate possible CPU nodes
	 * in the crash FDT. This allows packing possible CPU nodes which are
	 * not yet present in the system without regenerating the entire FDT.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		possible_cpu_nodes = num_possible_cpus() / threads_per_core;
		if (possible_cpu_nodes > cpu_nodes)
			extra_size += (possible_cpu_nodes - cpu_nodes) * cpu_node_size();
	}
#endif

	return extra_size;
}
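
/*
 * Estimation example with hypothetical numbers: with a 256MB LMB size,
 * memory_hotplug_max() = 64GB and a 1GB crashkernel region,
 * usm_entries = 64GB/256MB + 2 * (1GB/256MB) = 256 + 8 = 264, i.e.
 * 264 * sizeof(u64) = 2112 bytes budgeted for usable-memory entries.
 */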

/**
 * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
 *                              setup FDT for kexec/kdump kernel.
 * @image:                      kexec image being loaded.
 * @rmem:                       Reserved memory ranges.
 *
 * Returns the estimated extra size needed for kexec/kdump kernel FDT.
 */
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image, struct crash_mem *rmem)
{
	struct device_node *dn;
	unsigned int cpu_nodes = 0, extra_size = 0;

	// Budget some space for the password blob. There's already extra space
	// for the key name
	if (plpks_is_available())
		extra_size += (unsigned int)plpks_get_passwordlen();

	/* Get the number of CPU nodes in the current device tree */
	for_each_node_by_type(dn, "cpu") {
		cpu_nodes++;
	}

	/* Consider extra space for CPU nodes added since the boot time */
	if (cpu_nodes > boot_cpu_node_count)
		extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();

	/* Consider extra space for reserved memory ranges if any */
	if (rmem->nr_ranges > 0)
		extra_size += sizeof(struct fdt_reserve_entry) * rmem->nr_ranges;

	return extra_size + kdump_extra_fdt_size_ppc64(image, cpu_nodes);
}

static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
			 const char *propname)
{
	const void *prop, *fdtprop;
	int len = 0, fdtlen = 0;

	prop = of_get_property(dn, propname, &len);
	fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);

	if (fdtprop && !prop)
		return fdt_delprop(fdt, node_offset, propname);
	else if (prop)
		return fdt_setprop(fdt, node_offset, propname, prop, len);
	else
		return -FDT_ERR_NOTFOUND;
}

static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
{
	struct device_node *dn;
	int pci_offset, root_offset, ret = 0;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	root_offset = fdt_path_offset(fdt, "/");
	for_each_node_with_property(dn, dmapropname) {
		pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
		if (pci_offset < 0)
			continue;

		ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
		if (ret < 0) {
			of_node_put(dn);
			break;
		}
		ret = copy_property(fdt, pci_offset, dn, dmapropname);
		if (ret < 0) {
			of_node_put(dn);
			break;
		}
	}

	return ret;
}
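
/*
 * Called from setup_new_fdt_ppc64() below, once for each of the two
 * dynamic DMA window property names (DIRECT64_PROPNAME and DMA64_PROPNAME),
 * so that DMA windows negotiated with the hypervisor by the running
 * kernel carry over to the next kernel's device tree.
 */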

/**
 * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
 *                       being loaded.
 * @image:               kexec image being loaded.
 * @fdt:                 Flattened device tree for the next kernel.
 * @rmem:                Reserved memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem *rmem)
{
	struct crash_mem *umem = NULL;
	int i, nr_ranges, ret;

#ifdef CONFIG_CRASH_DUMP
	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = get_usable_memory_ranges(&umem);
		if (ret)
			goto out;

		ret = update_usable_mem_fdt(fdt, umem);
		if (ret) {
			pr_err("Error setting up usable-memory property for kdump kernel\n");
			goto out;
		}

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
				      crashk_res.start - BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving crash memory: %s\n",
			       fdt_strerror(ret));
			goto out;
		}

		/* Ensure backup region is not used by kdump/capture kernel */
		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving memory for backup: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}
#endif

	/* Update cpus nodes information to account for hotplug CPUs. */
	ret = update_cpus_node(fdt);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
	if (ret < 0)
		goto out;

	/* Update memory reserve map */
	nr_ranges = rmem ? rmem->nr_ranges : 0;
	for (i = 0; i < nr_ranges; i++) {
		u64 base, size;

		base = rmem->ranges[i].start;
		size = rmem->ranges[i].end - base + 1;
		ret = fdt_add_mem_rsv(fdt, base, size);
		if (ret) {
			pr_err("Error updating memory reserve map: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	// If we have PLPKS active, we need to provide the password to the new kernel
	if (plpks_is_available())
		ret = plpks_populate_fdt(fdt);

out:
	kfree(umem);
	return ret;
}

/**
 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
 *                              tce-table, reserved-ranges & such (exclude
 *                              memory ranges) as they can't be used for kexec
 *                              segment buffer. Sets kbuf->mem when a suitable
 *                              memory hole is found.
 * @kbuf:                       Buffer contents and memory parameters.
 *
 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	struct crash_mem **emem;
	u64 buf_min, buf_max;
	int ret;

	/* Look up the exclude ranges list while locating the memory hole */
	emem = &(kbuf->image->arch.exclude_ranges);
	if (!(*emem) || ((*emem)->nr_ranges == 0)) {
		pr_warn("No exclude range list. Using the default locate mem hole method\n");
		return kexec_locate_mem_hole(kbuf);
	}

	buf_min = kbuf->buf_min;
	buf_max = kbuf->buf_max;
	/* Segments for kdump kernel should be within crashkernel region */
	if (IS_ENABLED(CONFIG_CRASH_DUMP) && kbuf->image->type == KEXEC_TYPE_CRASH) {
		buf_min = (buf_min < crashk_res.start ?
			   crashk_res.start : buf_min);
		buf_max = (buf_max > crashk_res.end ?
			   crashk_res.end : buf_max);
	}

	if (buf_min > buf_max) {
		pr_err("Invalid buffer min and/or max values\n");
		return -EINVAL;
	}

	if (kbuf->top_down)
		ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
						     *emem);
	else
		ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
						      *emem);

	/* Add the buffer allocated to the exclude list for the next lookup */
	if (!ret) {
		add_mem_range(emem, kbuf->mem, kbuf->memsz);
		sort_memory_ranges(*emem, true);
	} else {
		pr_err("Failed to locate memory buffer of size %lu\n",
		       kbuf->memsz);
	}
	return ret;
}

/**
 * arch_kexec_kernel_image_probe - Does additional handling needed to setup
 *                                 kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int ret;

	/* Get exclude memory ranges needed for setting up kexec segments */
	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
	if (ret) {
		pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
		return ret;
	}

	return kexec_image_probe_default(image, buf, buf_len);
}

/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	kvfree(image->arch.fdt);
	image->arch.fdt = NULL;

	return kexec_image_post_load_cleanup_default(image);
}