/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Vivek Goyal <vgoyal@redhat.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"
/*
 * Declare these symbols weak so that if architecture provides a purgatory,
 * these will be overridden.
 */
char __weak kexec_purgatory[0];
size_t __weak kexec_purgatory_size = 0;
static int kexec_calculate_store_digests(struct kimage *image);
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return -ENOEXEC;
}
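
/*
 * Illustrative sketch (not part of this file) of how an architecture can
 * override the probe hook above: x86, for example, keeps a table of
 * struct kexec_file_ops loaders and asks each one whether it recognizes
 * the image. The table name below is hypothetical.
 *
 *	static struct kexec_file_ops *my_file_loaders[] = {
 *		&kexec_bzImage64_ops,
 *	};
 *
 *	int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
 *					  unsigned long buf_len)
 *	{
 *		int i, ret = -ENOEXEC;
 *		struct kexec_file_ops *fops;
 *
 *		for (i = 0; i < ARRAY_SIZE(my_file_loaders); i++) {
 *			fops = my_file_loaders[i];
 *			if (!fops || !fops->probe)
 *				continue;
 *
 *			ret = fops->probe(buf, buf_len);
 *			if (!ret) {
 *				// Remember which loader claimed the image.
 *				image->fops = fops;
 *				return ret;
 *			}
 *		}
 *		return ret;
 *	}
 */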
void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return ERR_PTR(-ENOEXEC);
}
int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return -EINVAL;
}
#ifdef CONFIG_KEXEC_VERIFY_SIG
int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return -EKEYREJECTED;
}
#endif
/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
				 unsigned int relsec)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}
/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			     unsigned int relsec)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}
/*
 * Free up memory used by kernel, initrd, and command line. These are
 * temporary allocations which are not needed any more after these buffers
 * have been loaded into separate segments and copied elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if architecture has anything to cleanup post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * The call above should have given the image loader a chance to free
	 * any data it stored in kimage->image_loader_data. It is safe to
	 * free it now.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}
/*
 * In file mode, the list of segments is prepared by the kernel. Copy the
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;
	loff_t size;

	ret = kernel_read_file_from_fd(kernel_fd, &image->kernel_buf,
				       &size, INT_MAX, READING_KEXEC_IMAGE);
	if (ret)
		return ret;
	image->kernel_buf_len = size;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		pr_debug("kernel signature verification failed.\n");
		goto out;
	}
	pr_debug("kernel signature verification successful.\n");
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = kernel_read_file_from_fd(initrd_fd, &image->initrd_buf,
					       &size, INT_MAX,
					       READING_KEXEC_INITRAMFS);
		if (ret)
			goto out;
		image->initrd_buf_len = size;
	}

	if (cmdline_len) {
		image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL);
		if (!image->cmdline_buf) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(image->cmdline_buf, cmdline_ptr,
				     cmdline_len);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* command line should be a string with last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);
	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}
static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;
	ret = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	}

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, the new kernel gets loaded in the reserved
	 * region. This is the same memory where the old crash kernel might
	 * be loaded. Free any current crash dump kernel before we corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after the image has been loaded.
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return ret;
}
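
/*
 * Example (illustrative, user space): invoking this syscall directly.
 * glibc has no wrapper, so syscall(2) is used; error handling is trimmed
 * and the paths are placeholders.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/kexec.h>
 *
 *	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
 *	int initrd_fd = open("/boot/initrd.img", O_RDONLY);
 *	const char *cmdline = "root=/dev/sda1 ro";
 *
 *	// cmdline_len must count the trailing NUL; see the check in
 *	// kimage_file_prepare_segments().
 *	long ret = syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
 *			   strlen(cmdline) + 1, cmdline, 0UL);
 */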
static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any of existing
		 * ranges.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;

		/*
		 * Make sure this does not conflict with any of existing
		 * ranges.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take to next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the RAM range. Otherwise do a
	 * bottom up allocation.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}
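
/*
 * Note: walk_system_ram_res()/walk_iomem_res_desc() call this once per
 * matching System RAM range and abort the walk as soon as the callback
 * returns non-zero. So returning 0 above means "try the next range", while
 * the 1 returned by the helpers doubles as "found it, stop walking"; the
 * chosen address is left in kbuf->mem.
 */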
/*
 * Helper function for placing a buffer in a kexec segment. This assumes
 * that kexec_mutex is held.
 */
int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
		     unsigned long memsz, unsigned long buf_align,
		     unsigned long buf_min, unsigned long buf_max,
		     bool top_down, unsigned long *load_addr)
{
	struct kexec_segment *ksegment;
	struct kexec_buf buf, *kbuf;
	int ret;

	/* Currently adding segment this way is allowed only in file mode */
	if (!image->file_mode)
		return -EINVAL;

	if (image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add buffer after allocating
	 * control pages. All segments need to be placed first before
	 * any control pages are allocated, since the control page
	 * allocation logic goes through the list of segments to make
	 * sure there are no destination overlaps.
	 */
	if (!list_empty(&image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	memset(&buf, 0, sizeof(struct kexec_buf));
	kbuf = &buf;
	kbuf->image = image;
	kbuf->buffer = buffer;
	kbuf->bufsz = bufsz;

	kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
	kbuf->buf_align = max(buf_align, PAGE_SIZE);
	kbuf->buf_min = buf_min;
	kbuf->buf_max = buf_max;
	kbuf->top_down = top_down;

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	if (image->type == KEXEC_TYPE_CRASH)
		ret = walk_iomem_res_desc(crashk_res.desc,
				IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
				crashk_res.start, crashk_res.end, kbuf,
				locate_mem_hole_callback);
	else
		ret = walk_system_ram_res(0, -1, kbuf,
					  locate_mem_hole_callback);
	if (ret != 1) {
		/* A suitable memory range could not be found for buffer */
		return -EADDRNOTAVAIL;
	}

	/* Found a suitable memory range */
	ksegment = &image->segment[image->nr_segments];
	ksegment->kbuf = kbuf->buffer;
	ksegment->bufsz = kbuf->bufsz;
	ksegment->mem = kbuf->mem;
	ksegment->memsz = kbuf->memsz;
	image->nr_segments++;
	*load_addr = ksegment->mem;
	return 0;
}
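
/*
 * Example (illustrative): how an architecture's image loader might place
 * the kernel buffer. kernel_buf/kernel_len and the bounds are hypothetical;
 * real loaders derive them from the boot protocol.
 *
 *	unsigned long kernel_load_addr;
 *	int ret;
 *
 *	ret = kexec_add_buffer(image, kernel_buf, kernel_len,
 *			       ALIGN(kernel_len, PAGE_SIZE),
 *			       PAGE_SIZE,	// buf_align
 *			       0x100000,	// buf_min (hypothetical)
 *			       ULONG_MAX,	// buf_max
 *			       true,		// search top-down
 *			       &kernel_load_addr);
 *	if (ret)
 *		return ret;
 *	// kernel_load_addr now holds where the segment will be placed.
 */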
/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions) {
		ret = -ENOMEM;
		goto out_free_desc;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume rest of the buffer is filled with zero and
		 * update digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
						sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
						digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	kfree(tfm);
out:
	return ret;
}
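
/*
 * Counterpart sketch (illustrative): the purgatory binary carries global
 * "sha_regions"/"sha256_digest" symbols which the code above fills in, and
 * re-hashes the loaded segments before jumping to the new kernel. Roughly,
 * using purgatory's private sha256 helpers:
 *
 *	static int verify_sha256_digest(void)
 *	{
 *		struct kexec_sha_region *ptr, *end;
 *		u8 digest[SHA256_DIGEST_SIZE];
 *		struct sha256_state sctx;
 *
 *		sha256_init(&sctx);
 *		end = sha_regions + ARRAY_SIZE(sha_regions);
 *		for (ptr = sha_regions; ptr < end; ptr++)
 *			sha256_update(&sctx, (u8 *)(ptr->start), ptr->len);
 *		sha256_final(&sctx, digest);
 *
 *		// Non-zero means the segments were corrupted in between.
 *		return memcmp(digest, sha256_digest, sizeof(digest)) ? 1 : 0;
 *	}
 */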
/* Actually load purgatory. A lot of this code is taken from kexec-tools. */
static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
				  unsigned long max, int top_down)
{
	struct purgatory_info *pi = &image->purgatory_info;
	unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad;
	unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset;
	unsigned char *buf_addr, *src;
	int i, ret = 0, entry_sidx = -1;
	const Elf_Shdr *sechdrs_c;
	Elf_Shdr *sechdrs = NULL;
	void *purgatory_buf = NULL;

	/*
	 * sechdrs_c points to section headers in purgatory and is read
	 * only. No modifications allowed.
	 */
	sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;

	/*
	 * We can not modify sechdrs_c[] and its fields. It is read only.
	 * Copy it over to a local copy where one can store some temporary
	 * data and free it at the end. We need to modify ->sh_addr and
	 * ->sh_offset fields to keep track of permanent and temporary
	 * locations of sections.
	 */
	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	if (!sechdrs)
		return -ENOMEM;

	memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));

	/*
	 * We have multiple copies of sections. The first copy is the one
	 * embedded in the kernel in a read only section. Some of these
	 * sections will be copied to a temporary buffer and relocated. And
	 * these sections will finally be copied to their final destination
	 * at segment load time.
	 *
	 * Use ->sh_offset to reflect section address in memory. It will
	 * point to original read only copy if section is not allocatable.
	 * Otherwise it will point to temporary copy which will be relocated.
	 *
	 * Use ->sh_addr to contain final address of the section where it
	 * will go during execution time.
	 */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_NOBITS)
			continue;

		sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
						sechdrs[i].sh_offset;
	}

	/*
	 * Identify entry point section and make entry relative to section
	 * start.
	 */
	entry = pi->ehdr->e_entry;
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		/* Make entry section relative */
		if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
		    ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
		     pi->ehdr->e_entry)) {
			entry_sidx = i;
			entry -= sechdrs[i].sh_addr;
			break;
		}
	}

	/* Determine how much memory is needed to load relocatable object. */
	buf_align = 1;
	bss_align = 1;
	buf_sz = 0;
	bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (buf_align < align)
				buf_align = align;
			buf_sz = ALIGN(buf_sz, align);
			buf_sz += sechdrs[i].sh_size;
		} else {
			/* bss section */
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}

	/* Determine the bss padding required to align bss properly */
	bss_pad = 0;
	if (buf_sz & (bss_align - 1))
		bss_pad = bss_align - (buf_sz & (bss_align - 1));

	memsz = buf_sz + bss_pad + bss_sz;

	/* Allocate buffer for purgatory */
	purgatory_buf = vzalloc(buf_sz);
	if (!purgatory_buf) {
		ret = -ENOMEM;
		goto out;
	}

	if (buf_align < bss_align)
		buf_align = bss_align;

	/* Add buffer to segment list */
	ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz,
				buf_align, min, max, top_down,
				&pi->purgatory_load_addr);
	if (ret)
		goto out;

	/* Load SHF_ALLOC sections */
	buf_addr = purgatory_buf;
	load_addr = curr_load_addr = pi->purgatory_load_addr;
	bss_addr = load_addr + buf_sz + bss_pad;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			curr_load_addr = ALIGN(curr_load_addr, align);
			offset = curr_load_addr - load_addr;
			/* We already modified ->sh_offset to keep src addr */
			src = (char *) sechdrs[i].sh_offset;
			memcpy(buf_addr + offset, src, sechdrs[i].sh_size);

			/* Store load address and source address of section */
			sechdrs[i].sh_addr = curr_load_addr;

			/*
			 * This section got copied to temporary buffer. Update
			 * ->sh_offset accordingly.
			 */
			sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);

			/* Advance to the next address */
			curr_load_addr += sechdrs[i].sh_size;
		} else {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
		}
	}

	/* Update entry point based on load address of text section */
	if (entry_sidx >= 0)
		entry += sechdrs[entry_sidx].sh_addr;

	/* Make kernel jump to purgatory after shutdown */
	image->start = entry;

	/* Used later to get/set symbol values */
	pi->sechdrs = sechdrs;

	/*
	 * Used later to identify which section is purgatory and skip it
	 * from checksumming.
	 */
	pi->purgatory_buf = purgatory_buf;
	return ret;
out:
	vfree(sechdrs);
	vfree(purgatory_buf);
	return ret;
}
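
/*
 * Resulting layout of the purgatory segment in memory (illustrative):
 *
 *	load_addr                            bss_addr
 *	|                                    |
 *	v                                    v
 *	+--------------------+-------------+------------------+
 *	| progbits sections  |   bss_pad   | SHT_NOBITS (bss) |
 *	|      (buf_sz)      |             |     (bss_sz)     |
 *	+--------------------+-------------+------------------+
 *	|<---------------------- memsz --------------------->|
 *
 * Only the first buf_sz bytes (purgatory_buf) are copied at segment load
 * time; the bss range is expected to be zero because memsz is rounded up
 * and the destination pages are cleared during segment loading.
 */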
static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Shdr *sechdrs = pi->sechdrs;

	/* Apply relocations */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		Elf_Shdr *section, *symtab;

		if (sechdrs[i].sh_type != SHT_RELA &&
		    sechdrs[i].sh_type != SHT_REL)
			continue;

		/*
		 * For a section of type SHT_RELA/SHT_REL,
		 * ->sh_link contains the section header index of the
		 * associated symbol table. And ->sh_info contains the
		 * section header index of the section to which the
		 * relocations apply.
		 */
		if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
		    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = &sechdrs[sechdrs[i].sh_info];
		symtab = &sechdrs[sechdrs[i].sh_link];

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains the section header index of the
		 * associated string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * Respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (sechdrs[i].sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi->ehdr,
							       sechdrs, i);
		else if (sechdrs[i].sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi->ehdr,
							   sechdrs, i);
		if (ret)
			return ret;
	}

	return 0;
}
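
/*
 * Illustrative sketch (not part of this file) of an architecture's RELA
 * hook, trimmed to a single x86-64 relocation type. The local section
 * copies make this easy: ->sh_offset points at the kernel-accessible bytes
 * to patch, ->sh_addr at the final runtime address.
 *
 *	int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
 *					     Elf64_Shdr *sechdrs,
 *					     unsigned int relsec)
 *	{
 *		Elf64_Rela *relas = (void *)sechdrs[relsec].sh_offset;
 *		Elf64_Shdr *section = &sechdrs[sechdrs[relsec].sh_info];
 *		Elf64_Shdr *symtab = &sechdrs[sechdrs[relsec].sh_link];
 *		unsigned int i;
 *
 *		for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*relas); i++) {
 *			Elf64_Sym *sym = (Elf64_Sym *)symtab->sh_offset +
 *					 ELF64_R_SYM(relas[i].r_info);
 *			// Final value of the symbol (SHN_ABS etc. trimmed).
 *			unsigned long value = sechdrs[sym->st_shndx].sh_addr +
 *					      sym->st_value + relas[i].r_addend;
 *			// Where to patch: the temporary copy of the section.
 *			char *loc = (char *)section->sh_offset +
 *				    relas[i].r_offset;
 *
 *			switch (ELF64_R_TYPE(relas[i].r_info)) {
 *			case R_X86_64_64:
 *				*(u64 *)loc = value;
 *				break;
 *			default:
 *				return -ENOEXEC;	// unsupported (trimmed)
 *			}
 *		}
 *		return 0;
 *	}
 */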
/* Load relocatable purgatory object and relocate it appropriately */
int kexec_load_purgatory(struct kimage *image, unsigned long min,
			 unsigned long max, int top_down,
			 unsigned long *load_addr)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	if (kexec_purgatory_size < sizeof(Elf_Ehdr))
		return -ENOEXEC;

	pi->ehdr = (Elf_Ehdr *)kexec_purgatory;

	if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
	    || pi->ehdr->e_type != ET_REL
	    || !elf_check_arch(pi->ehdr)
	    || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (pi->ehdr->e_shoff >= kexec_purgatory_size
	    || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
	    kexec_purgatory_size - pi->ehdr->e_shoff))
		return -ENOEXEC;

	ret = __kexec_load_purgatory(image, min, max, top_down);
	if (ret)
		return ret;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	*load_addr = pi->purgatory_load_addr;
	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
					    const char *name)
{
	char *strtab;
	Elf_Shdr *sechdrs;
	Elf_Ehdr *ehdr;
	Elf_Sym *syms;
	int i, k;

	if (!pi->sechdrs || !pi->ehdr)
		return NULL;

	sechdrs = pi->sechdrs;
	ehdr = pi->ehdr;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (Elf_Sym *)sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
						name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where the symbol will finally be loaded after
	 * kexec_load_segment().
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}
/*
 * Get or set the value of a symbol. If "get_value" is true, the symbol
 * value is returned in buf; otherwise the symbol value is set based on the
 * value in buf.
 */
*image
, const char *name
,
966 void *buf
, unsigned int size
, bool get_value
)
970 struct purgatory_info
*pi
= &image
->purgatory_info
;
973 sym
= kexec_purgatory_find_symbol(pi
, name
);
977 if (sym
->st_size
!= size
) {
978 pr_err("symbol %s size mismatch: expected %lu actual %u\n",
979 name
, (unsigned long)sym
->st_size
, size
);
983 sechdrs
= pi
->sechdrs
;
985 if (sechdrs
[sym
->st_shndx
].sh_type
== SHT_NOBITS
) {
986 pr_err("symbol %s is in a bss section. Cannot %s\n", name
,
987 get_value
? "get" : "set");
991 sym_buf
= (unsigned char *)sechdrs
[sym
->st_shndx
].sh_offset
+
995 memcpy((void *)buf
, sym_buf
, size
);
997 memcpy((void *)sym_buf
, buf
, size
);