/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Author: Vivek Goyal <vgoyal@redhat.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"

static int kexec_calculate_store_digests(struct kimage *image);

/*
 * Currently this is the only default function that is exported as some
 * architectures need it to do additional handling.
 * In the future, other default functions may be exported too if required.
 */
int kexec_image_probe_default(struct kimage *image, void *buf,
			      unsigned long buf_len)
{
	const struct kexec_file_ops * const *fops;
	int ret = -ENOEXEC;

	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
		ret = (*fops)->probe(buf, buf_len);
		if (!ret) {
			image->fops = *fops;
			return ret;
		}
	}

	return ret;
}
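
/*
 * Probing walks the NULL-terminated kexec_file_loaders table that each
 * architecture provides. A hedged sketch of what a loader registration
 * looks like (names are illustrative, loosely modeled on the x86 bzImage
 * loader; these are not definitions from this file):
 *
 *	const struct kexec_file_ops my_image_ops = {
 *		.probe = my_probe,	(return 0 if buf is loadable)
 *		.load = my_load,	(build segments, return loader data)
 *		.cleanup = my_cleanup,	(free loader data)
 *	};
 *
 *	const struct kexec_file_ops * const kexec_file_loaders[] = {
 *		&my_image_ops,
 *		NULL
 *	};
 */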

/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return kexec_image_probe_default(image, buf, buf_len);
}

static void *kexec_image_load_default(struct kimage *image)
{
	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return kexec_image_load_default(image);
}

static int kexec_image_post_load_cleanup_default(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image->image_loader_data);
}

int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return kexec_image_post_load_cleanup_default(image);
}

#ifdef CONFIG_KEXEC_VERIFY_SIG
static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
					  unsigned long buf_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(buf, buf_len);
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif

/**
 * arch_kexec_apply_relocations_add - apply relocations of type RELA
 * @pi:		Purgatory to be relocated.
 * @section:	Section relocations applying to.
 * @relsec:	Section containing RELAs.
 * @symtab:	Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/**
 * arch_kexec_apply_relocations - apply relocations of type REL
 * @pi:		Purgatory to be relocated.
 * @section:	Section relocations applying to.
 * @relsec:	Section containing RELs.
 * @symtab:	Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * Free up memory used by kernel, initrd, and command line. This is temporary
 * memory allocation which is not needed any more after these buffers have
 * been loaded into separate segments and have been copied elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if architecture has anything to cleanup post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * Above call should have called into bootloader to free up
	 * any data stored in kimage->image_loader_data. It should
	 * be ok now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}

/*
 * In file mode the list of segments is prepared by the kernel. Copy relevant
 * data from user space, do error checking, and prepare the segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;
	loff_t size;

	ret = kernel_read_file_from_fd(kernel_fd, &image->kernel_buf,
				       &size, INT_MAX, READING_KEXEC_IMAGE);
	if (ret)
		return ret;
	image->kernel_buf_len = size;

	/* IMA needs to pass the measurement list to the next kernel. */
	ima_add_kexec_buffer(image);

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		pr_debug("kernel signature verification failed.\n");
		goto out;
	}
	pr_debug("kernel signature verification successful.\n");
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = kernel_read_file_from_fd(initrd_fd, &image->initrd_buf,
					       &size, INT_MAX,
					       READING_KEXEC_INITRAMFS);
		if (ret)
			goto out;
		image->initrd_buf_len = size;
	}

	if (cmdline_len) {
		image->cmdline_buf = memdup_user(cmdline_ptr, cmdline_len);
		if (IS_ERR(image->cmdline_buf)) {
			ret = PTR_ERR(image->cmdline_buf);
			image->cmdline_buf = NULL;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* command line should be a string with last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);
	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}

static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}

SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	}

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, new kernel gets loaded in reserved region. It is
	 * same memory where old crash kernel might be loaded. Free any
	 * current crash dump kernel before we corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(), so we must copy vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after image has been loaded
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}
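
/*
 * For reference, user space reaches this entry point via the raw syscall
 * (glibc provides no wrapper). A hedged sketch of a minimal caller; the
 * paths and command line are illustrative:
 *
 *	kernel_fd = open("/boot/vmlinuz", O_RDONLY);
 *	initrd_fd = open("/boot/initrd.img", O_RDONLY);
 *	cmdline = "console=ttyS0 root=/dev/sda1";
 *	syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
 *		strlen(cmdline) + 1, cmdline, 0UL);
 *
 * Note that cmdline_len must count the trailing NUL, which the segment
 * preparation above checks for explicitly.
 */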

static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any of existing
		 * segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;

		/*
		 * Make sure this does not conflict with any of existing
		 * segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_callback(struct resource *res, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	u64 start = res->start, end = res->end;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take to next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the RAM range. Otherwise allocate
	 * bottom up.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}

/**
 * arch_kexec_walk_mem - call func(data) on free memory regions
 * @kbuf:	Context info for the search. Also passed to @func.
 * @func:	Function to call for each memory region.
 *
 * Return: The memory walk will stop when func returns a non-zero value
 * and that value will be returned. If all free regions are visited without
 * func returning non-zero, then zero will be returned.
 */
int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *))
{
	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return walk_iomem_res_desc(crashk_res.desc,
					   IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					   crashk_res.start, crashk_res.end,
					   kbuf, func);
	else
		return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
}

/**
 * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
 * @kbuf:	Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	int ret;

	ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}

/**
 * kexec_add_buffer - place a buffer in a kexec segment
 * @kbuf:	Buffer contents and memory parameters.
 *
 * This function assumes that kexec_mutex is held.
 * On successful return, @kbuf->mem will have the physical address of
 * the buffer in memory.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_add_buffer(struct kexec_buf *kbuf)
{
	struct kexec_segment *ksegment;
	int ret;

	/* Currently adding segment this way is allowed only in file mode */
	if (!kbuf->image->file_mode)
		return -EINVAL;

	if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add buffer after allocating
	 * control pages. All segments need to be placed first before
	 * any control pages are allocated, as control page allocation
	 * logic goes through the list of segments to make sure there are
	 * no destination overlaps.
	 */
	if (!list_empty(&kbuf->image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	/* Ensure minimum alignment needed for segments. */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	ret = kexec_locate_mem_hole(kbuf);
	if (ret)
		return ret;

	/* Found a suitable memory range */
	ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
	ksegment->kbuf = kbuf->buffer;
	ksegment->bufsz = kbuf->bufsz;
	ksegment->mem = kbuf->mem;
	ksegment->memsz = kbuf->memsz;
	kbuf->image->nr_segments++;
	return 0;
}
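
/*
 * Typical use from an architecture loader (a hedged sketch; the constraint
 * values are illustrative): fill a kexec_buf describing the data and the
 * allowed placement window, then read the chosen address back from
 * kbuf.mem once kexec_add_buffer() returns 0:
 *
 *	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
 *				  .buf_max = ULONG_MAX, .top_down = true };
 *
 *	kbuf.buffer = kernel_buf;
 *	kbuf.bufsz = kernel_len;
 *	kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
 *	kbuf.buf_align = PAGE_SIZE;
 *	ret = kexec_add_buffer(&kbuf);
 *	load_addr = kbuf.mem;
 */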

/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
		return 0;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions) {
		ret = -ENOMEM;
		goto out_free_desc;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume rest of the buffer is filled with zero and
		 * update digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (ret)
		goto out_free_digest;

	ret = crypto_shash_final(desc, digest);
	if (ret)
		goto out_free_digest;

	ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
					     sha_regions, sha_region_sz, 0);
	if (ret)
		goto out_free_digest;

	ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
					     digest, SHA256_DIGEST_SIZE, 0);

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);	/* free via the shash API, not a bare kfree() */
out:
	return ret;
}

#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
/*
 * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer to setup.
 *
 * Allocates the memory needed for the buffer. Caller is responsible to free
 * the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
				      struct kexec_buf *kbuf)
{
	const Elf_Shdr *sechdrs;
	unsigned long bss_align;
	unsigned long bss_sz;
	unsigned long align;
	int i, ret;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
	kbuf->buf_align = bss_align = 1;
	kbuf->bufsz = bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (kbuf->buf_align < align)
				kbuf->buf_align = align;
			kbuf->bufsz = ALIGN(kbuf->bufsz, align);
			kbuf->bufsz += sechdrs[i].sh_size;
		} else {
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}
	kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
	kbuf->memsz = kbuf->bufsz + bss_sz;
	if (kbuf->buf_align < bss_align)
		kbuf->buf_align = bss_align;

	kbuf->buffer = vzalloc(kbuf->bufsz);
	if (!kbuf->buffer)
		return -ENOMEM;
	pi->purgatory_buf = kbuf->buffer;

	ret = kexec_add_buffer(kbuf);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}

/*
 * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer prepared to store purgatory.
 *
 * Allocates the memory needed for the buffer. Caller is responsible to free
 * the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
					 struct kexec_buf *kbuf)
{
	unsigned long bss_addr;
	unsigned long offset;
	Elf_Shdr *sechdrs;
	int i;

	/*
	 * The section headers in kexec_purgatory are read-only. In order to
	 * have them modifiable make a temporary copy.
	 */
	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	if (!sechdrs)
		return -ENOMEM;
	memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
	       pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	pi->sechdrs = sechdrs;

	offset = 0;
	bss_addr = kbuf->mem + kbuf->bufsz;
	kbuf->image->start = pi->ehdr->e_entry;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		unsigned long align;
		void *src, *dst;

		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type == SHT_NOBITS) {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
			continue;
		}

		offset = ALIGN(offset, align);
		if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
		    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
		    pi->ehdr->e_entry < (sechdrs[i].sh_addr
					 + sechdrs[i].sh_size)) {
			kbuf->image->start -= sechdrs[i].sh_addr;
			kbuf->image->start += kbuf->mem + offset;
		}

		src = (void *)pi->ehdr + sechdrs[i].sh_offset;
		dst = pi->purgatory_buf + offset;
		memcpy(dst, src, sechdrs[i].sh_size);

		sechdrs[i].sh_addr = kbuf->mem + offset;
		sechdrs[i].sh_offset = offset;
		offset += sechdrs[i].sh_size;
	}

	return 0;
}

static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Shdr *sechdrs;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		const Elf_Shdr *relsec;
		const Elf_Shdr *symtab;
		Elf_Shdr *section;

		relsec = sechdrs + i;

		if (relsec->sh_type != SHT_RELA &&
		    relsec->sh_type != SHT_REL)
			continue;

		/*
		 * For sections of type SHT_RELA/SHT_REL,
		 * ->sh_link contains section header index of the associated
		 * symbol table. And ->sh_info contains section header
		 * index of the section to which relocations apply.
		 */
		if (relsec->sh_info >= pi->ehdr->e_shnum ||
		    relsec->sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = pi->sechdrs + relsec->sh_info;
		symtab = sechdrs + relsec->sh_link;

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains section header index of the
		 * associated string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * Respective architecture needs to provide support for applying
		 * relocations of type SHT_RELA/SHT_REL.
		 */
		if (relsec->sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi, section,
							       relsec, symtab);
		else if (relsec->sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi, section,
							   relsec, symtab);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * kexec_load_purgatory - Load and relocate the purgatory object.
 * @image:	Image to add the purgatory to.
 * @kbuf:	Memory parameters to use.
 *
 * Allocates the memory needed for image->purgatory_info.sechdrs and
 * image->purgatory_info.purgatory_buf/kbuf->buffer. Caller is responsible
 * to free the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;

	ret = kexec_purgatory_setup_kbuf(pi, kbuf);
	if (ret)
		return ret;

	ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
	if (ret)
		goto out_free_kbuf;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;
out_free_kbuf:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
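
/*
 * A hedged sketch of how an architecture pulls the purgatory in (loosely
 * modeled on the x86_64 crash path; MIN_PURGATORY_ADDR is illustrative and
 * arch-specific): the arch picks the placement window, this function does
 * the rest:
 *
 *	struct kexec_buf pbuf = { .image = image, .buf_align = PAGE_SIZE,
 *				  .buf_min = MIN_PURGATORY_ADDR,
 *				  .buf_max = ULONG_MAX, .top_down = true };
 *
 *	ret = kexec_load_purgatory(image, &pbuf);
 *	(on success, pbuf.mem holds the purgatory load address)
 */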

/*
 * kexec_purgatory_find_symbol - find a symbol in the purgatory
 * @pi:		Purgatory to search in.
 * @name:	Name of the symbol.
 *
 * Return: pointer to symbol in read-only symtab on success, NULL on error.
 */
static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
						  const char *name)
{
	const Elf_Shdr *sechdrs;
	const Elf_Ehdr *ehdr;
	const Elf_Sym *syms;
	const char *strtab;
	int i, k;

	if (!pi->ehdr)
		return NULL;

	ehdr = pi->ehdr;
	sechdrs = (void *)ehdr + ehdr->e_shoff;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (void *)ehdr + sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
					 name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}

void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where symbol will finally be loaded after
	 * kexec_load_segment()
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}

/*
 * Get or set value of a symbol. If "get_value" is true, symbol value is
 * returned in buf, otherwise symbol value is set based on value in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sec;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sec = pi->sechdrs + sym->st_shndx;

	if (sec->sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
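
/*
 * Example (hedged, mirroring the calls made from
 * kexec_calculate_store_digests() above): pushing a value into a purgatory
 * global so it is visible to the purgatory code after kexec:
 *
 *	ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
 *					     digest, SHA256_DIGEST_SIZE, 0);
 */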
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */

int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == mem->max_nr_ranges - 1)
		return -ENOMEM;

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
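
/*
 * Worked example (hedged, values illustrative): with a single range
 * {0x1000, 0x8fff}, excluding 0x3000-0x3fff truncates it to {0x1000, 0x2fff}
 * and inserts the split remainder {0x4000, 0x8fff}, consuming one extra
 * slot. Callers must therefore size mem->max_nr_ranges to allow one extra
 * range per exclusion they intend to make.
 */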

int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
				void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (Elf64_Addr)_text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		ehdr->e_phnum++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		ehdr->e_phnum++;

		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
		phdr++;	/* advance after logging so the debug line shows this phdr */
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}