1 // SPDX-License-Identifier: GPL-2.0
/*
 * Kexec image loader
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
9 #define pr_fmt(fmt) "kexec_file(Image): " fmt
11 #include <linux/err.h>
12 #include <linux/errno.h>
13 #include <linux/kernel.h>
14 #include <linux/kexec.h>
16 #include <linux/string.h>
17 #include <linux/verification.h>
18 #include <asm/byteorder.h>
19 #include <asm/cpufeature.h>
20 #include <asm/image.h>
21 #include <asm/memory.h>
23 static int image_probe(const char *kernel_buf
, unsigned long kernel_len
)
25 const struct arm64_image_header
*h
=
26 (const struct arm64_image_header
*)(kernel_buf
);
28 if (!h
|| (kernel_len
< sizeof(*h
)))
31 if (memcmp(&h
->magic
, ARM64_IMAGE_MAGIC
, sizeof(h
->magic
)))
37 static void *image_load(struct kimage
*image
,
38 char *kernel
, unsigned long kernel_len
,
39 char *initrd
, unsigned long initrd_len
,
40 char *cmdline
, unsigned long cmdline_len
)
42 struct arm64_image_header
*h
;
44 bool be_image
, be_kernel
;
45 struct kexec_buf kbuf
;
46 unsigned long text_offset
;
47 struct kexec_segment
*kernel_segment
;
51 * We require a kernel with an unambiguous Image header. Per
52 * Documentation/arm64/booting.rst, this is the case when image_size
53 * is non-zero (practically speaking, since v3.17).
55 h
= (struct arm64_image_header
*)kernel
;
57 return ERR_PTR(-EINVAL
);
59 /* Check cpu features */
60 flags
= le64_to_cpu(h
->flags
);
61 be_image
= arm64_image_flag_field(flags
, ARM64_IMAGE_FLAG_BE
);
62 be_kernel
= IS_ENABLED(CONFIG_CPU_BIG_ENDIAN
);
63 if ((be_image
!= be_kernel
) && !system_supports_mixed_endian())
64 return ERR_PTR(-EINVAL
);
66 value
= arm64_image_flag_field(flags
, ARM64_IMAGE_FLAG_PAGE_SIZE
);
67 if (((value
== ARM64_IMAGE_FLAG_PAGE_SIZE_4K
) &&
68 !system_supports_4kb_granule()) ||
69 ((value
== ARM64_IMAGE_FLAG_PAGE_SIZE_64K
) &&
70 !system_supports_64kb_granule()) ||
71 ((value
== ARM64_IMAGE_FLAG_PAGE_SIZE_16K
) &&
72 !system_supports_16kb_granule()))
73 return ERR_PTR(-EINVAL
);
78 kbuf
.buf_max
= ULONG_MAX
;
79 kbuf
.top_down
= false;
82 kbuf
.bufsz
= kernel_len
;
83 kbuf
.mem
= KEXEC_BUF_MEM_UNKNOWN
;
84 kbuf
.memsz
= le64_to_cpu(h
->image_size
);
85 text_offset
= le64_to_cpu(h
->text_offset
);
86 kbuf
.buf_align
= MIN_KIMG_ALIGN
;
88 /* Adjust kernel segment with TEXT_OFFSET */
89 kbuf
.memsz
+= text_offset
;
91 ret
= kexec_add_buffer(&kbuf
);
95 kernel_segment
= &image
->segment
[image
->nr_segments
- 1];
96 kernel_segment
->mem
+= text_offset
;
97 kernel_segment
->memsz
-= text_offset
;
98 image
->start
= kernel_segment
->mem
;
100 pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
101 kernel_segment
->mem
, kbuf
.bufsz
,
102 kernel_segment
->memsz
);
104 /* Load additional data */
105 ret
= load_other_segments(image
,
106 kernel_segment
->mem
, kernel_segment
->memsz
,
107 initrd
, initrd_len
, cmdline
);
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
/*
 * image_verify_sig - verify the PE (EFI-stub) signature on @kernel.
 *
 * @kernel:     kernel image buffer
 * @kernel_len: length of @kernel in bytes
 *
 * Returns 0 on a valid signature, or a negative error code from
 * verify_pefile_signature() otherwise.
 */
static int image_verify_sig(const char *kernel, unsigned long kernel_len)
{
	return verify_pefile_signature(kernel, kernel_len, NULL,
				       VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif
120 const struct kexec_file_ops kexec_image_ops
= {
121 .probe
= image_probe
,
123 #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
124 .verify_sig
= image_verify_sig
,