/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"
/*
 * Declare these symbols weak so that if architecture provides a purgatory,
 * these will be overridden.
 */
char __weak kexec_purgatory[0];
size_t __weak kexec_purgatory_size = 0;
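/*
 * A minimal sketch of how an architecture provides the strong overrides:
 * its build typically wraps the linked purgatory blob in a generated file
 * along the lines of
 *
 *	char kexec_purgatory[] = { 0x7f, 0x45, 0x4c, 0x46, ... };
 *	size_t kexec_purgatory_size = sizeof(kexec_purgatory);
 *
 * (illustrative only; the generation mechanism is arch-specific).
 */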
static int kexec_calculate_store_digests(struct kimage *image);
static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
{
	struct fd f = fdget(fd);
	int ret;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -EBADF;

	ret = vfs_getattr(&f.file->f_path, &stat);
	if (ret)
		goto out;

	if (stat.size > INT_MAX) {
		ret = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		ret = -EINVAL;
		goto out;
	}

	*buf = vmalloc(stat.size);
	if (!*buf) {
		ret = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(*buf) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(*buf);
			ret = bytes;
			goto out;
		}

		if (bytes == 0)
			break;
		pos += bytes;
	}

	if (pos != stat.size) {
		vfree(*buf);
		ret = -EBADF;
		goto out;
	}

	*buf_len = pos;
out:
	fdput(f);
	return ret;
}
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return -ENOEXEC;
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return ERR_PTR(-ENOEXEC);
}

int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return -EINVAL;
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return -EKEYREJECTED;
}
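/*
 * An architecture opts in by supplying strong versions of the hooks above.
 * On x86, for example, arch_kexec_kernel_image_probe() walks a table of
 * struct kexec_file_ops loaders (probe/load/cleanup callbacks) and picks
 * the first one that recognizes the image format. (Illustrative summary,
 * not code in this file.)
 */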
/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
				 unsigned int relsec)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			     unsigned int relsec)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}
/*
 * Free up memory used by kernel, initrd, and command line. This is temporary
 * memory allocation which is not needed any more after these buffers have
 * been loaded into separate segments and have been copied elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if architecture has anything to cleanup post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * The above call should have called into the bootloader to free up
	 * any data stored in kimage->image_loader_data. It should
	 * be ok now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}
/*
 * In file mode the list of segments is prepared by the kernel. Copy relevant
 * data from user space, do error checking, prepare the segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;

	ret = copy_file_from_fd(kernel_fd, &image->kernel_buf,
				&image->kernel_buf_len);
	if (ret)
		return ret;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		pr_debug("kernel signature verification failed.\n");
		goto out;
	}
	pr_debug("kernel signature verification successful.\n");
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = copy_file_from_fd(initrd_fd, &image->initrd_buf,
					&image->initrd_buf_len);
		if (ret)
			goto out;
	}

	if (cmdline_len) {
		image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL);
		if (!image->cmdline_buf) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(image->cmdline_buf, cmdline_ptr,
				     cmdline_len);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* command line should be a string with last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}
static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH)
		dest_image = &kexec_crash_image;

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, the new kernel gets loaded in the reserved
	 * region. It is the same memory where the old crash kernel might be
	 * loaded. Free any current crash dump kernel before we corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after the image has been loaded.
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}
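/*
 * Userspace invocation sketch (illustrative, not part of this file):
 *
 *	#include <unistd.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *
 *	const char *cmdline = "root=/dev/sda1 console=ttyS0";
 *	long ret = syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
 *			   strlen(cmdline) + 1, cmdline, 0UL);
 *
 * Note that cmdline_len counts the trailing NUL; the check in
 * kimage_file_prepare_segments() rejects a command line whose last byte
 * is not '\0'.
 */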
static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any of existing
		 * ranges.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
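/*
 * Worked example of the align-down step above: with kbuf->buf_align ==
 * 0x1000, temp_start == 0x12345 becomes 0x12345 & ~0xfff == 0x12000.
 * This relies on buf_align being a power of two, which callers pass and
 * kexec_add_buffer() only clamps to at least PAGE_SIZE.
 */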
static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;
		/*
		 * Make sure this does not conflict with any of existing
		 * ranges.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	unsigned long sz = end - start + 1;

	/* Returning 0 will move on to the next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the ram range, otherwise bottom
	 * up.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}
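/*
 * Return-value convention of this walker callback: 0 means "no fit here,
 * try the next System RAM range", 1 means "hole found, stop walking".
 * Consequently a walk that ends with anything other than 1 in
 * kexec_add_buffer() is treated as "no suitable range anywhere".
 */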
/*
 * Helper function for placing a buffer in a kexec segment. This assumes
 * that kexec_mutex is held.
 */
int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
		     unsigned long memsz, unsigned long buf_align,
		     unsigned long buf_min, unsigned long buf_max,
		     bool top_down, unsigned long *load_addr)
{

	struct kexec_segment *ksegment;
	struct kexec_buf buf, *kbuf;
	int ret;

	/* Currently adding segment this way is allowed only in file mode */
	if (!image->file_mode)
		return -EINVAL;

	if (image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add a buffer after allocating
	 * control pages. All segments need to be placed first before
	 * any control pages are allocated, because the control page
	 * allocation logic goes through the list of segments to make sure
	 * there are no destination overlaps.
	 */
	if (!list_empty(&image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	memset(&buf, 0, sizeof(struct kexec_buf));
	kbuf = &buf;
	kbuf->image = image;
	kbuf->buffer = buffer;
	kbuf->bufsz = bufsz;

	kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
	kbuf->buf_align = max(buf_align, PAGE_SIZE);
	kbuf->buf_min = buf_min;
	kbuf->buf_max = buf_max;
	kbuf->top_down = top_down;

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	if (image->type == KEXEC_TYPE_CRASH)
		ret = walk_iomem_res("Crash kernel",
				     IORESOURCE_MEM | IORESOURCE_BUSY,
				     crashk_res.start, crashk_res.end, kbuf,
				     locate_mem_hole_callback);
	else
		ret = walk_system_ram_res(0, -1, kbuf,
					  locate_mem_hole_callback);
	if (ret != 1) {
		/* A suitable memory range could not be found for buffer */
		return -EADDRNOTAVAIL;
	}

	/* Found a suitable memory range */
	ksegment = &image->segment[image->nr_segments];
	ksegment->kbuf = kbuf->buffer;
	ksegment->bufsz = kbuf->bufsz;
	ksegment->mem = kbuf->mem;
	ksegment->memsz = kbuf->memsz;
	image->nr_segments++;
	*load_addr = ksegment->mem;
	return 0;
}
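/*
 * Typical use from an arch image loader (a hypothetical sketch; real
 * callers pass arch-specific bounds):
 *
 *	unsigned long kernel_load_addr;
 *	ret = kexec_add_buffer(image, kernel_buf, kernel_len,
 *			       ALIGN(kernel_len, PAGE_SIZE), PAGE_SIZE,
 *			       MIN_KERNEL_LOAD_ADDR, ULONG_MAX, false,
 *			       &kernel_load_addr);
 *
 * MIN_KERNEL_LOAD_ADDR is a made-up name here; on success the chosen
 * physical address comes back through the last argument.
 */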
/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions) {
		ret = -ENOMEM;
		goto out_free_desc;
	}

	desc->tfm   = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume rest of the buffer is filled with zero and
		 * update digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
						sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
						digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
out:
	return ret;
}
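/*
 * The two symbols set above are consumed inside purgatory: before jumping
 * to the new kernel, the purgatory code re-hashes every region recorded in
 * "sha_regions" and compares the result with "sha256_digest", refusing to
 * proceed if the loaded segments were corrupted (e.g. by the crashing
 * kernel). This is a summary of the purgatory contract; the details live
 * in the arch purgatory sources.
 */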
/* Actually load purgatory. A lot of this code is taken from kexec-tools */
static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
				  unsigned long max, int top_down)
{
	struct purgatory_info *pi = &image->purgatory_info;
	unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad;
	unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset;
	unsigned char *buf_addr, *src;
	int i, ret = 0, entry_sidx = -1;
	const Elf_Shdr *sechdrs_c;
	Elf_Shdr *sechdrs = NULL;
	void *purgatory_buf = NULL;

	/*
	 * sechdrs_c points to section headers in purgatory and is read
	 * only. No modifications allowed.
	 */
	sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;

	/*
	 * We cannot modify sechdrs_c[] and its fields. It is read only.
	 * Copy it over to a local copy where one can store some temporary
	 * data and free it at the end. We need to modify ->sh_addr and
	 * ->sh_offset fields to keep track of permanent and temporary
	 * locations of sections.
	 */
	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	if (!sechdrs)
		return -ENOMEM;

	memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));

	/*
	 * We have multiple copies of sections. The first copy is the one
	 * embedded in the kernel's read-only section. Some of these sections
	 * will be copied to a temporary buffer and relocated. And these
	 * sections will finally be copied to their final destination at
	 * segment load time.
	 *
	 * Use ->sh_offset to reflect section address in memory. It will
	 * point to original read only copy if section is not allocatable.
	 * Otherwise it will point to temporary copy which will be relocated.
	 *
	 * Use ->sh_addr to contain final address of the section where it
	 * will go during execution time.
	 */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_NOBITS)
			continue;

		sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
						sechdrs[i].sh_offset;
	}

	/*
	 * Identify entry point section and make entry relative to section
	 * start.
	 */
	entry = pi->ehdr->e_entry;
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		/* Make entry section relative */
		if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
		    ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
		     pi->ehdr->e_entry)) {
			entry_sidx = i;
			entry -= sechdrs[i].sh_addr;
			break;
		}
	}

	/* Determine how much memory is needed to load relocatable object. */
	buf_align = 1;
	bss_align = 1;
	buf_sz = 0;
	bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (buf_align < align)
				buf_align = align;
			buf_sz = ALIGN(buf_sz, align);
			buf_sz += sechdrs[i].sh_size;
		} else {
			/* bss section */
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}

	/* Determine the bss padding required to align bss properly */
	bss_pad = 0;
	if (buf_sz & (bss_align - 1))
		bss_pad = bss_align - (buf_sz & (bss_align - 1));

	memsz = buf_sz + bss_pad + bss_sz;

	/* Allocate buffer for purgatory */
	purgatory_buf = vzalloc(buf_sz);
	if (!purgatory_buf) {
		ret = -ENOMEM;
		goto out;
	}

	if (buf_align < bss_align)
		buf_align = bss_align;

	/* Add buffer to segment list */
	ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz,
				buf_align, min, max, top_down,
				&pi->purgatory_load_addr);
	if (ret)
		goto out;

	/* Load SHF_ALLOC sections */
	buf_addr = purgatory_buf;
	load_addr = curr_load_addr = pi->purgatory_load_addr;
	bss_addr = load_addr + buf_sz + bss_pad;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			curr_load_addr = ALIGN(curr_load_addr, align);
			offset = curr_load_addr - load_addr;
			/* We already modified ->sh_offset to keep src addr */
			src = (char *) sechdrs[i].sh_offset;
			memcpy(buf_addr + offset, src, sechdrs[i].sh_size);

			/* Store load address and source address of section */
			sechdrs[i].sh_addr = curr_load_addr;

			/*
			 * This section got copied to temporary buffer. Update
			 * ->sh_offset accordingly.
			 */
			sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);

			/* Advance to the next address */
			curr_load_addr += sechdrs[i].sh_size;
		} else {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
		}
	}

	/* Update entry point based on load address of text section */
	if (entry_sidx >= 0)
		entry += sechdrs[entry_sidx].sh_addr;

	/* Make kernel jump to purgatory after shutdown */
	image->start = entry;

	/* Used later to get/set symbol values */
	pi->sechdrs = sechdrs;

	/*
	 * Used later to identify which section is purgatory and skip it
	 * from checksumming.
	 */
	pi->purgatory_buf = purgatory_buf;
	return ret;
out:
	vfree(sechdrs);
	vfree(purgatory_buf);
	return ret;
}
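/*
 * Resulting purgatory memory layout (sketch):
 *
 *	load_addr                                bss_addr
 *	|<---------- buf_sz ---------->|bss_pad|<--- bss_sz --->|
 *	  SHF_ALLOC progbits sections            SHT_NOBITS (bss)
 *
 * bss_pad is non-zero only when buf_sz is not already bss_align-aligned:
 * e.g. buf_sz = 0x1844 with bss_align = 0x10 gives bss_pad = 0xc.
 */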
static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Shdr *sechdrs = pi->sechdrs;

	/* Apply relocations */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		Elf_Shdr *section, *symtab;

		if (sechdrs[i].sh_type != SHT_RELA &&
		    sechdrs[i].sh_type != SHT_REL)
			continue;

		/*
		 * For a section of type SHT_RELA/SHT_REL,
		 * ->sh_link contains section header index of associated
		 * symbol table. And ->sh_info contains section header
		 * index of section to which relocations apply.
		 */
		if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
		    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = &sechdrs[sechdrs[i].sh_info];
		symtab = &sechdrs[sechdrs[i].sh_link];

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains section header index of associated
		 * string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * Respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (sechdrs[i].sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi->ehdr,
							       sechdrs, i);
		else if (sechdrs[i].sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi->ehdr,
							   sechdrs, i);
		if (ret)
			return ret;
	}

	return 0;
}
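/*
 * The arch hooks above do the actual patching. x86_64, for example,
 * implements arch_kexec_apply_relocations_add() and resolves relocation
 * types such as R_X86_64_64, R_X86_64_32 and R_X86_64_PC32 against the
 * section load addresses recorded in ->sh_addr. (Illustrative; the arch
 * implementation is authoritative.)
 */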
/* Load relocatable purgatory object and relocate it appropriately */
int kexec_load_purgatory(struct kimage *image, unsigned long min,
			 unsigned long max, int top_down,
			 unsigned long *load_addr)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	if (kexec_purgatory_size < sizeof(Elf_Ehdr))
		return -ENOEXEC;

	pi->ehdr = (Elf_Ehdr *)kexec_purgatory;

	if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
	    || pi->ehdr->e_type != ET_REL
	    || !elf_check_arch(pi->ehdr)
	    || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (pi->ehdr->e_shoff >= kexec_purgatory_size
	    || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
	    kexec_purgatory_size - pi->ehdr->e_shoff))
		return -ENOEXEC;

	ret = __kexec_load_purgatory(image, min, max, top_down);
	if (ret)
		return ret;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	*load_addr = pi->purgatory_load_addr;

	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
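/*
 * Typical call site (hypothetical sketch modelled on the x86 bzImage
 * loader):
 *
 *	unsigned long purgatory_load_addr;
 *	ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR, ULONG_MAX, 1,
 *				   &purgatory_load_addr);
 *
 * followed by kexec_purgatory_get_set_symbol() calls to pass parameters
 * in. MIN_PURGATORY_ADDR is the loader's own lower bound, not something
 * defined in this file.
 */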
static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
					    const char *name)
{
	Elf_Shdr *sechdrs;
	Elf_Ehdr *ehdr;
	Elf_Sym *syms;
	const char *strtab;
	int i, k;

	if (!pi->sechdrs || !pi->ehdr)
		return NULL;

	sechdrs = pi->sechdrs;
	ehdr = pi->ehdr;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (Elf_Sym *)sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
					 name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where symbol will finally be loaded after
	 * kexec_load_segment()
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}
/*
 * Get or set value of a symbol. If "get_value" is true, the symbol value is
 * returned in buf, otherwise the symbol value is set based on the value in
 * buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	Elf_Sym *sym;
	Elf_Shdr *sechdrs;
	struct purgatory_info *pi = &image->purgatory_info;
	unsigned char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sechdrs = pi->sechdrs;

	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset +
					sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
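/*
 * For this to work the purgatory side must export matching global
 * definitions, e.g. (sketched with the names used by this file; the
 * actual purgatory sources use equivalent definitions):
 *
 *	struct kexec_sha_region sha_regions[KEXEC_SEGMENT_MAX];
 *	u8 sha256_digest[SHA256_DIGEST_SIZE];
 *
 * The st_size check above enforces that the kernel- and purgatory-side
 * object sizes agree exactly.
 */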