// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppc64 code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004 Adam Litke (agl@us.ibm.com)
 * Copyright (C) 2004 IBM Corp.
 * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
 * Copyright (C) 2005 R Sharada (sharada@in.ibm.com)
 * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com)
 * Copyright (C) 2020 IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of_device.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/drmem.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
struct umem_info {
	u64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct crash_mem_range *ranges;
};
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL
};
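
/*
 * Note: kexec_image_probe_default() walks this NULL-terminated table,
 * calling each loader's probe() until one accepts the supplied image;
 * on ppc64 that is expected to be the ELF64 loader above.
 */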
/**
 * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
 *                             regions like opal/rtas, tce-table, initrd,
 *                             kernel, htab which should be avoided while
 *                             setting up kexec load segments.
 * @mem_ranges:                Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	ret = add_tce_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	ret = add_initrd_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_htab_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_kernel_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_reserved_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	/* exclude memory ranges should be sorted for easy lookup */
	sort_memory_ranges(*mem_ranges, true);
out:
	if (ret)
		pr_err("Failed to setup exclude memory ranges\n");
	return ret;
}
/**
 * get_usable_memory_ranges - Get usable memory ranges. This list includes
 *                            regions like crashkernel, opal/rtas & tce-table,
 *                            that the kdump kernel could use.
 * @mem_ranges:               Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	/*
	 * Early boot failure observed on guests when low memory (first memory
	 * block?) is not added to usable memory. So, add [0, crashk_res.end]
	 * instead of [crashk_res.start, crashk_res.end] to work around it.
	 * Also, crashed kernel's memory must be added to the reserve map to
	 * keep the kdump kernel from using it.
	 */
	ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
	if (ret)
		goto out;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_tce_mem_ranges(mem_ranges);
out:
	if (ret)
		pr_err("Failed to setup usable memory ranges\n");
	return ret;
}
/**
 * get_crash_memory_ranges - Get crash memory ranges. This list includes
 *                           first/crashing kernel's memory regions that
 *                           would be exported via an elfcore.
 * @mem_ranges:              Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
{
	phys_addr_t base, end;
	struct crash_mem *tmem;
	int ret = 0;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		u64 size = end - base;

		/* Skip backup memory region, which needs a separate entry */
		if (base == BACKUP_SRC_START) {
			if (size > BACKUP_SRC_SIZE) {
				base = BACKUP_SRC_END + 1;
				size -= BACKUP_SRC_SIZE;
			} else
				continue;
		}

		ret = add_mem_range(mem_ranges, base, size);
		if (ret)
			goto out;

		/* Try merging adjacent ranges before reallocation attempt */
		if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
			sort_memory_ranges(*mem_ranges, true);
	}

	/* Reallocate memory ranges if there is no space to split ranges */
	tmem = *mem_ranges;
	if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
		tmem = realloc_mem_ranges(mem_ranges);
		if (!tmem) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	/*
	 * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
	 *        regions are exported to save their context at the time of
	 *        crash, they should actually be backed up just like the
	 *        first 64K bytes of memory.
	 */
	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	/* create a separate program header for the backup region */
	ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
	if (ret)
		goto out;

	sort_memory_ranges(*mem_ranges, false);
out:
	if (ret)
		pr_err("Failed to setup crash memory ranges\n");
	return ret;
}
/**
 * get_reserved_memory_ranges - Get reserved memory ranges. This list includes
 *                              memory regions that should be added to the
 *                              memory reserve map to ensure the region is
 *                              protected from any mischief.
 * @mem_ranges:                 Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_tce_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	ret = add_reserved_mem_ranges(mem_ranges);
out:
	if (ret)
		pr_err("Failed to setup reserved memory ranges\n");
	return ret;
}
/**
 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
 *                              in the memory regions between buf_min & buf_max
 *                              for the buffer. If found, sets kbuf->mem.
 * @kbuf:                       Buffer contents and memory parameters.
 * @buf_min:                    Minimum address for the buffer.
 * @buf_max:                    Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
				      u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range_rev(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (start > buf_max)
			continue;

		/* Memory hole not found */
		if (end < buf_min)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
					       kbuf->buf_align);
			ret = 0;
			break;
		}
	}

	return ret;
}
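
/*
 * Worked example (hypothetical values): for a memblock region [0x0, 0x20000)
 * with buf_min = 0x1000, buf_max = 0xffff, memsz = 0x4000 and
 * buf_align = 0x1000, end is clamped to 0xffff and kbuf->mem becomes
 * ALIGN_DOWN(0xffff - 0x4000 + 1, 0x1000) = 0xc000, i.e. the highest
 * aligned address whose segment [0xc000, 0xffff] still fits the window.
 */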
/**
 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
 *                                  suitable buffer with top down approach.
 * @kbuf:                           Buffer contents and memory parameters.
 * @buf_min:                        Minimum address for the buffer.
 * @buf_max:                        Maximum address for the buffer.
 * @emem:                           Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
					  u64 buf_min, u64 buf_max,
					  const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmax = buf_max;
	for (i = (emem->nr_ranges - 1); i >= 0; i--) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (start > tmax)
			continue;

		if (end < tmax) {
			tmin = (end < buf_min ? buf_min : end + 1);
			ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmax = start - 1;

		if (tmax < buf_min) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmin = buf_min;
		ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
	}
	return ret;
}
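
/*
 * For illustration: with a single exclude range [0x5000, 0x5fff] and a
 * request window [buf_min, buf_max] = [0x0, 0xffff], the loop first tries
 * the sub-window above the exclusion, [0x6000, 0xffff], and only falls
 * back to [0x0, 0x4fff] below it if that attempt fails.
 */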
/**
 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
 *                               in the memory regions between buf_min & buf_max
 *                               for the buffer. If found, sets kbuf->mem.
 * @kbuf:                        Buffer contents and memory parameters.
 * @buf_min:                     Minimum address for the buffer.
 * @buf_max:                     Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
				       u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (end < buf_min)
			continue;

		/* Memory hole not found */
		if (start > buf_max)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = start;
			ret = 0;
			break;
		}
	}

	return ret;
}
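
/*
 * Mirror of the top-down walk: with the same hypothetical values as above
 * (region [0x0, 0x20000), window [0x1000, 0xffff], memsz = 0x4000,
 * buf_align = 0x1000), kbuf->mem becomes 0x1000 - the lowest aligned
 * address that fits the segment.
 */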
/**
 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
 *                                   suitable buffer with bottom up approach.
 * @kbuf:                            Buffer contents and memory parameters.
 * @buf_min:                         Minimum address for the buffer.
 * @buf_max:                         Maximum address for the buffer.
 * @emem:                            Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
					   u64 buf_min, u64 buf_max,
					   const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmin = buf_min;
	for (i = 0; i < emem->nr_ranges; i++) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (end < tmin)
			continue;

		if (start > tmin) {
			tmax = (start > buf_max ? buf_max : start - 1);
			ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmin = end + 1;

		if (tmin > buf_max) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmax = buf_max;
		ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
	}
	return ret;
}
/**
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 * @um_info:                  Usable memory buffer and ranges info.
 * @cnt:                      No. of entries to accommodate.
 *
 * Leaves the old buffer intact if memory reallocation fails; the caller's
 * cleanup path remains responsible for freeing it.
 *
 * Returns buffer on success, NULL on error.
 */
static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
	u32 new_size;
	u64 *tbuf;

	if ((um_info->idx + cnt) <= um_info->max_entries)
		return um_info->buf;

	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
	if (tbuf) {
		um_info->buf = tbuf;
		um_info->size = new_size;
		um_info->max_entries = (um_info->size / sizeof(u64));
	}

	return tbuf;
}
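
/*
 * The buffer grows one MEM_RANGE_CHUNK_SZ-sized chunk at a time and
 * max_entries is rederived from the byte size; callers in this file only
 * ever reserve one or two u64 entries per call, so a single chunk covers
 * many calls before the next krealloc().
 */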
/**
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer
 * @um_info:        Usable memory buffer and ranges info.
 * @base:           Base address of memory range to look for.
 * @end:            End address of memory range to look for.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
{
	u64 loc_base, loc_end;
	bool add;
	int i;

	for (i = 0; i < um_info->nr_ranges; i++) {
		add = false;
		loc_base = um_info->ranges[i].start;
		loc_end = um_info->ranges[i].end;
		if (loc_base >= base && loc_end <= end)
			add = true;
		else if (base < loc_end && end > loc_base) {
			if (loc_base < base)
				loc_base = base;
			if (loc_end > end)
				loc_end = end;
			add = true;
		}

		if (add) {
			if (!check_realloc_usable_mem(um_info, 2))
				return -ENOMEM;

			um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
			um_info->buf[um_info->idx++] =
					cpu_to_be64(loc_end - loc_base + 1);
		}
	}

	return 0;
}
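
/*
 * For illustration (hypothetical values): a usable range [0x1000, 0x4fff]
 * intersected with [base, end] = [0x2000, 0xffff] appends the pair
 * (0x2000, 0x3000) - big-endian base followed by size - to the buffer.
 */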
/**
 * kdump_setup_usable_lmb - This is a callback function that gets called by
 *                          walk_drmem_lmbs for every LMB to set its
 *                          usable memory ranges.
 * @lmb:                    LMB info.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
				  void *data)
{
	struct umem_info *um_info;
	int tmp_idx, ret;
	u64 base, end;

	/*
	 * kdump load isn't supported on kernels already booted with
	 * linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!");
		return -EINVAL;
	}

	um_info = data;
	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;
	ret = add_usable_mem(um_info, base, end);
	if (!ret) {
		/*
		 * Update the no. of ranges added. Two entries (base & size)
		 * for every range added.
		 */
		um_info->buf[tmp_idx] =
				cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
	}

	return ret;
}
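
/*
 * Resulting linux,drconf-usable-memory layout per LMB: a u64 range count
 * (written at tmp_idx above) followed by that many big-endian
 * (base, size) u64 pairs produced by add_usable_mem().
 */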
#define NODE_PATH_LEN		256
/**
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.
 * @fdt:                     Flattened device tree for the kdump kernel.
 * @dn:                      Memory node.
 * @um_info:                 Usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem_property(void *fdt, struct device_node *dn,
				   struct umem_info *um_info)
{
	int n_mem_addr_cells, n_mem_size_cells, node;
	char path[NODE_PATH_LEN];
	int i, len, ranges, ret;
	const __be32 *prop;
	u64 base, end;

	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
		pr_err("Buffer (%d) too small for memory node: %pOF\n",
		       NODE_PATH_LEN, dn);
		return -EOVERFLOW;
	}
	pr_debug("Memory node path: %s\n", path);

	/* Now that we know the path, find its offset in kdump kernel's fdt */
	node = fdt_path_offset(fdt, path);
	if (node < 0) {
		pr_err("Malformed device tree: error reading %s\n", path);
		return -EINVAL;
	}

	/* Get the address & size cells */
	n_mem_addr_cells = of_n_addr_cells(dn);
	n_mem_size_cells = of_n_size_cells(dn);
	pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells,
		 n_mem_size_cells);

	um_info->idx = 0;
	if (!check_realloc_usable_mem(um_info, 2))
		return -ENOMEM;

	prop = of_get_property(dn, "reg", &len);
	if (!prop || len <= 0)
		return 0;

	/*
	 * "reg" property represents sequence of (addr,size) tuples
	 * each representing a memory range.
	 */
	ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

	for (i = 0; i < ranges; i++) {
		base = of_read_number(prop, n_mem_addr_cells);
		prop += n_mem_addr_cells;
		end = base + of_read_number(prop, n_mem_size_cells) - 1;
		prop += n_mem_size_cells;

		ret = add_usable_mem(um_info, base, end);
		if (ret)
			return ret;
	}

	/*
	 * No kdump kernel usable memory found in this memory node.
	 * Write (0,0) tuple in linux,usable-memory property for
	 * this region to be ignored.
	 */
	if (um_info->idx == 0) {
		um_info->buf[0] = 0;
		um_info->buf[1] = 0;
		um_info->idx = 2;
	}

	return fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
			   (um_info->idx * sizeof(u64)));
}
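
/*
 * Example (hypothetical): a memory node with
 * reg = <0x0 0x20000000 0x0 0x10000000> and a single usable range
 * [0x28000000, 0x2fffffff] ends up with
 * linux,usable-memory = <0x0 0x28000000 0x0 0x8000000>.
 */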
/**
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate to restrict its memory usage.
 * @fdt:                   Flattened device tree for the kdump kernel.
 * @usable_mem:            Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
	struct umem_info um_info;
	struct device_node *dn;
	int node, ret = 0;

	if (!usable_mem) {
		pr_err("Usable memory ranges for kdump kernel not found\n");
		return -ENOENT;
	}

	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)
		pr_debug("No dynamic reconfiguration memory found\n");
	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	um_info.buf = NULL;
	um_info.size = 0;
	um_info.max_entries = 0;
	um_info.idx = 0;
	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
		of_node_put(dn);

		if (ret) {
			pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
			goto out;
		}

		ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
				  um_info.buf, (um_info.idx * sizeof(u64)));
		if (ret) {
			pr_err("Failed to update fdt with linux,drconf-usable-memory property");
			goto out;
		}
	}

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */
	for_each_node_by_type(dn, "memory") {
		ret = add_usable_mem_property(fdt, dn, &um_info);
		if (ret) {
			pr_err("Failed to set linux,usable-memory property for %s node",
			       dn->full_name);
			of_node_put(dn);
			goto out;
		}
	}

out:
	kfree(um_info.buf);
	return ret;
}
/**
 * load_backup_segment - Locate a memory hole to place the backup region.
 * @image:               Kexec image.
 * @kbuf:                Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	void *buf;
	int ret;

	/*
	 * Setup a source buffer for backup segment.
	 *
	 * A source buffer has no meaning for backup region as data will
	 * be copied from backup source, after crash, in the purgatory.
	 * But as load segment code doesn't recognize such segments,
	 * setup a dummy source buffer to keep it happy for now.
	 */
	buf = vzalloc(BACKUP_SRC_SIZE);
	if (!buf)
		return -ENOMEM;

	kbuf->buffer = buf;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(buf);
		return ret;
	}

	image->arch.backup_buf = buf;
	image->arch.backup_start = kbuf->mem;
	return 0;
}
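
/*
 * BACKUP_SRC_START/BACKUP_SRC_SIZE (asm/crashdump-ppc64.h) describe the
 * first 64KB of RAM; purgatory copies that window into this segment at
 * crash time, which is why the dummy source buffer's contents never
 * matter.
 */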
/**
 * update_backup_region_phdr - Update backup region's offset for the core to
 *                             export the region appropriately.
 * @image:                     Kexec image.
 * @ehdr:                      ELF core header.
 *
 * Assumes an exclusive program header is setup for the backup region
 * in the ELF headers.
 *
 * Returns nothing.
 */
static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
{
	Elf64_Phdr *phdr;
	unsigned int i;

	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_paddr == BACKUP_SRC_START) {
			phdr->p_offset = image->arch.backup_start;
			pr_debug("Backup region offset updated to 0x%lx\n",
				 image->arch.backup_start);
			return;
		}
	}
}
/**
 * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
 *                           segment needed to load kdump kernel.
 * @image:                   Kexec image.
 * @kbuf:                    Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	struct crash_mem *cmem = NULL;
	unsigned long headers_sz;
	void *headers = NULL;
	int ret;

	ret = get_crash_memory_ranges(&cmem);
	if (ret)
		goto out;

	/* Setup elfcorehdr segment */
	ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
	if (ret) {
		pr_err("Failed to prepare elf headers for the core\n");
		goto out;
	}

	/* Fix the offset for backup region in the ELF header */
	update_backup_region_phdr(image, headers);

	kbuf->buffer = headers;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = headers_sz;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(headers);
		goto out;
	}

	image->arch.elfcorehdr_addr = kbuf->mem;
	image->arch.elf_headers_sz = headers_sz;
	image->arch.elf_headers = headers;
out:
	kfree(cmem);
	return ret;
}
/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf)
{
	int ret;

	/* Load backup segment - first 64K bytes of the crashing kernel */
	ret = load_backup_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load backup segment\n");
		return ret;
	}
	pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */
	ret = load_elfcorehdr_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load elfcorehdr segment\n");
		return ret;
	}
	pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elfcorehdr_addr, kbuf->bufsz, kbuf->memsz);

	return 0;
}
/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
 *                         common global variables.
 * @image:                 kexec image.
 * @slave_code:            Slave code for the purgatory.
 * @fdt:                   Flattened device tree for the next kernel.
 * @kernel_load_addr:      Address where the kernel is loaded.
 * @fdt_load_addr:         Address where the flattened device tree is loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr)
{
	struct device_node *dn = NULL;
	int ret;

	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
			      fdt_load_addr);
	if (ret)
		goto out;

	if (image->type == KEXEC_TYPE_CRASH) {
		u32 my_run_at_load = 1;

		/*
		 * Tell relocatable kernel to run at load address
		 * via the word meant for that at 0x5c.
		 */
		ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
						     &my_run_at_load,
						     sizeof(my_run_at_load),
						     false);
		if (ret)
			goto out;
	}

	/* Tell purgatory where to look for backup region */
	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
					     &image->arch.backup_start,
					     sizeof(image->arch.backup_start),
					     false);
	if (ret)
		goto out;

	/* Setup OPAL base & entry values */
	dn = of_find_node_by_path("/ibm,opal");
	if (dn) {
		u64 val;

		of_property_read_u64(dn, "opal-base-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
						     sizeof(val), false);
		if (ret)
			goto out;

		of_property_read_u64(dn, "opal-entry-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
						     sizeof(val), false);
	}
out:
	if (ret)
		pr_err("Failed to setup purgatory symbols");
	of_node_put(dn);
	return ret;
}
/**
 * setup_new_fdt_ppc64 - Update the flattened device tree of the kernel
 *                       being loaded.
 * @image:               kexec image being loaded.
 * @fdt:                 Flattened device tree for the next kernel.
 * @initrd_load_addr:    Address where the next initrd will be loaded.
 * @initrd_len:          Size of the next initrd, or 0 if there will be none.
 * @cmdline:             Command line for the next kernel, or NULL if there
 *                       will be none.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
			unsigned long initrd_load_addr,
			unsigned long initrd_len, const char *cmdline)
{
	struct crash_mem *umem = NULL, *rmem = NULL;
	int i, nr_ranges, ret;

	ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline);
	if (ret)
		goto out;

	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = get_usable_memory_ranges(&umem);
		if (ret)
			goto out;

		ret = update_usable_mem_fdt(fdt, umem);
		if (ret) {
			pr_err("Error setting up usable-memory property for kdump kernel\n");
			goto out;
		}

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
				      crashk_res.start - BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving crash memory: %s\n",
			       fdt_strerror(ret));
			goto out;
		}

		/* Ensure backup region is not used by kdump/capture kernel */
		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving memory for backup: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	/* Update memory reserve map */
	ret = get_reserved_memory_ranges(&rmem);
	if (ret)
		goto out;

	nr_ranges = rmem ? rmem->nr_ranges : 0;
	for (i = 0; i < nr_ranges; i++) {
		u64 base, size;

		base = rmem->ranges[i].start;
		size = rmem->ranges[i].end - base + 1;
		ret = fdt_add_mem_rsv(fdt, base, size);
		if (ret) {
			pr_err("Error updating memory reserve map: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

out:
	kfree(rmem);
	kfree(umem);
	return ret;
}
/**
 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
 *                              tce-table, reserved-ranges & such (exclude
 *                              memory ranges) as they can't be used for kexec
 *                              segment buffer. Sets kbuf->mem when a suitable
 *                              memory hole is found.
 * @kbuf:                       Buffer contents and memory parameters.
 *
 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	struct crash_mem **emem;
	u64 buf_min, buf_max;
	int ret;

	/* Look up the exclude ranges list while locating the memory hole */
	emem = &(kbuf->image->arch.exclude_ranges);
	if (!(*emem) || ((*emem)->nr_ranges == 0)) {
		pr_warn("No exclude range list. Using the default locate mem hole method\n");
		return kexec_locate_mem_hole(kbuf);
	}

	buf_min = kbuf->buf_min;
	buf_max = kbuf->buf_max;
	/* Segments for kdump kernel should be within crashkernel region */
	if (kbuf->image->type == KEXEC_TYPE_CRASH) {
		buf_min = (buf_min < crashk_res.start ?
			   crashk_res.start : buf_min);
		buf_max = (buf_max > crashk_res.end ?
			   crashk_res.end : buf_max);
	}

	if (buf_min > buf_max) {
		pr_err("Invalid buffer min and/or max values\n");
		return -EINVAL;
	}

	if (kbuf->top_down)
		ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
						     *emem);
	else
		ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
						      *emem);

	/* Add the buffer allocated to the exclude list for the next lookup */
	if (!ret) {
		add_mem_range(emem, kbuf->mem, kbuf->memsz);
		sort_memory_ranges(*emem, true);
	} else {
		pr_err("Failed to locate memory buffer of size %lu\n",
		       kbuf->memsz);
	}
	return ret;
}
/**
 * arch_kexec_kernel_image_probe - Does the additional handling needed to
 *                                 setup kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int ret;

	/* Get exclude memory ranges needed for setting up kexec segments */
	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
	if (ret) {
		pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
		return ret;
	}

	return kexec_image_probe_default(image, buf, buf_len);
}
/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	vfree(image->arch.elf_headers);
	image->arch.elf_headers = NULL;
	image->arch.elf_headers_sz = 0;

	return kexec_image_post_load_cleanup_default(image);
}