// SPDX-License-Identifier: GPL-2.0
/*
 * Kexec image loader
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#define pr_fmt(fmt)	"kexec_file(Image): " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/pe.h>
#include <linux/string.h>
#include <linux/verification.h>
#include <asm/byteorder.h>
#include <asm/cpufeature.h>
#include <asm/image.h>
#include <asm/memory.h>
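
/*
 * image_probe() only sanity-checks the candidate buffer: it must be at
 * least as large as an arm64 Image header and carry the ARM64_IMAGE_MAGIC
 * value ("ARM\x64") in the header's magic field.
 */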
static int image_probe(const char *kernel_buf, unsigned long kernel_len)
{
	const struct arm64_image_header *h =
		(const struct arm64_image_header *)(kernel_buf);

	if (!h || (kernel_len < sizeof(*h)))
		return -EINVAL;

	if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic)))
		return -EINVAL;

	return 0;
}
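
/*
 * image_load() places the Image in a kexec segment and then asks
 * load_other_segments() to fit the initrd, device tree and command line
 * around it. It returns an ERR_PTR() on failure and NULL on success, as
 * this loader keeps no private image data.
 */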
static void *image_load(struct kimage *image,
			char *kernel, unsigned long kernel_len,
			char *initrd, unsigned long initrd_len,
			char *cmdline, unsigned long cmdline_len)
{
	struct arm64_image_header *h;
	u64 flags, value;
	bool be_image, be_kernel;
	struct kexec_buf kbuf;
	unsigned long text_offset, kernel_segment_number;
	struct kexec_segment *kernel_segment;
	int ret;

	/*
	 * We require a kernel with an unambiguous Image header. Per
	 * Documentation/arm64/booting.rst, this is the case when image_size
	 * is non-zero (practically speaking, since v3.17).
	 */
	h = (struct arm64_image_header *)kernel;
	if (!h->image_size)
		return ERR_PTR(-EINVAL);

	/* Check cpu features */
	flags = le64_to_cpu(h->flags);
	be_image = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_BE);
	be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
	if ((be_image != be_kernel) && !system_supports_mixed_endian())
		return ERR_PTR(-EINVAL);
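
	/*
	 * The header also records the page size the new kernel was built
	 * for; refuse to load it if this CPU cannot provide the matching
	 * translation granule.
	 */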
	value = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE);
	if (((value == ARM64_IMAGE_FLAG_PAGE_SIZE_4K) &&
			!system_supports_4kb_granule()) ||
	    ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_64K) &&
			!system_supports_64kb_granule()) ||
	    ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_16K) &&
			!system_supports_16kb_granule()))
		return ERR_PTR(-EINVAL);
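
	/*
	 * Place the kernel bottom-up anywhere in physical memory:
	 * buf_min/buf_max span the whole range and top_down is false, so
	 * kexec_add_buffer() picks the lowest suitable hole. bufsz is the
	 * size of the Image file, while memsz is the run-time footprint
	 * taken from image_size in the header (which also covers BSS).
	 */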
	kbuf.image = image;
	kbuf.buf_min = 0;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = false;

	kbuf.buffer = kernel;
	kbuf.bufsz = kernel_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = le64_to_cpu(h->image_size);
	text_offset = le64_to_cpu(h->text_offset);
	kbuf.buf_align = MIN_KIMG_ALIGN;
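
	/*
	 * text_offset is the offset of the Image within its
	 * MIN_KIMG_ALIGN-aligned load segment (see
	 * Documentation/arm64/booting.rst); grow the segment by that amount
	 * here and trim the segment address/size back after placement.
	 */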
	/* Adjust kernel segment with TEXT_OFFSET */
	kbuf.memsz += text_offset;

	kernel_segment_number = image->nr_segments;

	/*
	 * The location of the kernel segment may make it impossible to satisfy
	 * the other segment requirements, so we try repeatedly to find a
	 * location that will work.
	 */
	while ((ret = kexec_add_buffer(&kbuf)) == 0) {
		/* Try to load additional data */
		kernel_segment = &image->segment[kernel_segment_number];
		ret = load_other_segments(image, kernel_segment->mem,
					  kernel_segment->memsz, initrd,
					  initrd_len, cmdline);
		if (!ret)
			break;

		/*
		 * We couldn't find space for the other segments; erase the
		 * kernel segment and try the next available hole.
		 */
		image->nr_segments -= 1;
		kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	}
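
	/*
	 * Reaching this point with a non-zero ret means kexec_add_buffer()
	 * could not place the kernel segment in any remaining hole.
	 */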
	if (ret) {
		pr_err("Could not find any suitable kernel location!");
		return ERR_PTR(ret);
	}

	kernel_segment = &image->segment[kernel_segment_number];
	kernel_segment->mem += text_offset;
	kernel_segment->memsz -= text_offset;
	image->start = kernel_segment->mem;

	pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 kernel_segment->mem, kbuf.bufsz,
		 kernel_segment->memsz);

	return NULL;
}
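
/*
 * The arm64 Image doubles as an EFI PE/COFF binary when built with the EFI
 * stub, so signature verification can reuse the generic PE signature check.
 */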
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
static int image_verify_sig(const char *kernel, unsigned long kernel_len)
{
	return verify_pefile_signature(kernel, kernel_len, NULL,
				       VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif
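
/*
 * These ops are picked up through the arch's kexec_file_loaders[] table
 * (see arch/arm64/kernel/machine_kexec_file.c) when kexec_file_load(2)
 * is given an arm64 Image.
 */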
const struct kexec_file_ops kexec_image_ops = {
	.probe = image_probe,
	.load = image_load,
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
	.verify_sig = image_verify_sig,
#endif
};