// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/page-flags.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

#include "cpu-reset.h"

/* Global variables for the arm64_relocate_new_kernel routine. */
extern const unsigned char arm64_relocate_new_kernel[];
extern const unsigned long arm64_relocate_new_kernel_size;

/*
 * kexec_image_info - For debugging output.
 */
#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
static void _kexec_image_info(const char *func, int line,
	const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("%s:%d:\n", func, line);
	pr_debug("  kexec kimage info:\n");
	pr_debug("    type:        %d\n", kimage->type);
	pr_debug("    start:       %lx\n", kimage->start);
	pr_debug("    head:        %lx\n", kimage->head);
	pr_debug("    nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("      segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			kimage->segment[i].memsz,
			kimage->segment[i].memsz / PAGE_SIZE);
	}
}

void machine_kexec_cleanup(struct kimage *kimage)
{
	/* Empty routine needed to avoid build errors. */
}

/*
 * machine_kexec_prepare - Prepare for a kexec reboot.
 *
 * Called from the core kexec code when a kernel image is loaded.
 * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
 * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
 */
int machine_kexec_prepare(struct kimage *kimage)
{
	kexec_image_info(kimage);

	if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
		return -EBUSY;
	}

	return 0;
}

/*
 * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
 */
static void kexec_list_flush(struct kimage *kimage)
{
	kimage_entry_t *entry;

	for (entry = &kimage->head; ; entry++) {
		unsigned int flag;
		void *addr;

		/* flush the list entries. */
		__flush_dcache_area(entry, sizeof(kimage_entry_t));

		flag = *entry & IND_FLAGS;
		if (flag == IND_DONE)
			break;

		addr = phys_to_virt(*entry & PAGE_MASK);

		switch (flag) {
		case IND_INDIRECTION:
			/* Set entry point just before the new list page. */
			entry = (kimage_entry_t *)addr - 1;
			break;
		case IND_SOURCE:
			/* flush the source pages. */
			__flush_dcache_area(addr, PAGE_SIZE);
			break;
		case IND_DESTINATION:
			break;
		default:
			BUG();
		}
	}
}

/*
 * kexec_segment_flush - Helper to flush the kimage segments to PoC.
 */
static void kexec_segment_flush(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("%s:\n", __func__);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			kimage->segment[i].memsz,
			kimage->segment[i].memsz / PAGE_SIZE);

		__flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
			kimage->segment[i].memsz);
	}
}

/*
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 */
void machine_kexec(struct kimage *kimage)
{
	phys_addr_t reboot_code_buffer_phys;
	void *reboot_code_buffer;
	bool in_kexec_crash = (kimage == kexec_crash_image);
	bool stuck_cpus = cpus_are_stuck_in_kernel();

	/*
	 * New cpus may have become stuck_in_kernel after we loaded the image.
	 */
	BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
	WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
		"Some CPUs may be stale, kdump will be unreliable.\n");

	reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);

	kexec_image_info(kimage);

	pr_debug("%s:%d: control_code_page: %p\n", __func__, __LINE__,
		kimage->control_code_page);
	pr_debug("%s:%d: reboot_code_buffer_phys: %pa\n", __func__, __LINE__,
		&reboot_code_buffer_phys);
	pr_debug("%s:%d: reboot_code_buffer: %p\n", __func__, __LINE__,
		reboot_code_buffer);
	pr_debug("%s:%d: relocate_new_kernel: %p\n", __func__, __LINE__,
		arm64_relocate_new_kernel);
	pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
		__func__, __LINE__, arm64_relocate_new_kernel_size,
		arm64_relocate_new_kernel_size);

	/*
	 * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
	 * after the kernel is shut down.
	 */
	memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
		arm64_relocate_new_kernel_size);

	/* Flush the reboot_code_buffer in preparation for its execution. */
	__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);

	/*
	 * Although we've killed off the secondary CPUs, we don't update
	 * the online mask if we're handling a crash kernel and consequently
	 * need to avoid flush_icache_range(), which will attempt to IPI
	 * the offline CPUs. Therefore, we must use the __* variant here.
	 */
	__flush_icache_range((uintptr_t)reboot_code_buffer,
			     (uintptr_t)reboot_code_buffer +
			     arm64_relocate_new_kernel_size);

	/* Flush the kimage list and its buffers. */
	kexec_list_flush(kimage);

	/* Flush the new image if already in place. */
	if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
		kexec_segment_flush(kimage);

	pr_info("Bye!\n");

	local_daif_mask();

	/*
	 * cpu_soft_restart will shutdown the MMU, disable data caches, then
	 * transfer control to the reboot_code_buffer which contains a copy of
	 * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
	 * uses physical addressing to relocate the new image to its final
	 * position and transfers control to the image entry point when the
	 * relocation is complete.
	 * In kexec case, kimage->start points to purgatory assuming that
	 * kernel entry and dtb address are embedded in purgatory by
	 * userspace (kexec-tools).
	 * In kexec_file case, the kernel starts directly without purgatory.
	 */
	cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
#ifdef CONFIG_KEXEC_FILE
			 kimage->arch.dtb_mem);
#else
			 0);
#endif

	BUG(); /* Should never get here. */
}
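
/*
 * Mask all interrupt lines and, where the irqchip supports it, clear any
 * active state and disable the line, so no stale interrupt is left pending
 * when the crash kernel takes over.
 */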
static void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;
		int ret;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		/*
		 * First try to remove the active state. If this
		 * fails, try to EOI the interrupt.
		 */
		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);

		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
		    chip->irq_eoi)
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

/*
 * machine_crash_shutdown - shutdown non-crashing cpus and save registers
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	local_irq_disable();

	/* shutdown non-crashing cpus */
	crash_smp_send_stop();

	/* for crashing cpu */
	crash_save_cpu(regs, smp_processor_id());
	machine_kexec_mask_interrupts();

	pr_info("Starting crashdump kernel...\n");
}
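
/*
 * arch_kexec_protect_crashkres - Protect the loaded crash kernel.
 *
 * Flush the crash kernel segments to PoC, then mark their linear-map pages
 * invalid so the running kernel cannot accidentally corrupt the image.
 */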
void arch_kexec_protect_crashkres(void)
{
	int i;

	kexec_segment_flush(kexec_crash_image);

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
}
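
/*
 * arch_kexec_unprotect_crashkres - Make the crash kernel region accessible
 * again by marking its linear-map pages valid, undoing
 * arch_kexec_protect_crashkres().
 */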
void arch_kexec_unprotect_crashkres(void)
{
	int i;

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
}

#ifdef CONFIG_HIBERNATION
/*
 * To preserve the crash dump kernel image, the relevant memory segments
 * should be mapped again around the hibernation.
 */
void crash_prepare_suspend(void)
{
	if (kexec_crash_image)
		arch_kexec_unprotect_crashkres();
}

void crash_post_resume(void)
{
	if (kexec_crash_image)
		arch_kexec_protect_crashkres();
}

/*
 * crash_is_nosave
 *
 * Return true only if a page is part of reserved memory for crash dump kernel,
 * but does not hold any data of loaded kernel image.
 *
 * Note that all the pages in crash dump kernel memory have been initially
 * marked as Reserved as memory was allocated via memblock_reserve().
 *
 * In hibernation, the pages which are Reserved and yet "nosave" are excluded
 * from the hibernation image. crash_is_nosave() does this check for crash
 * dump kernel and will reduce the total size of hibernation image.
 */
bool crash_is_nosave(unsigned long pfn)
{
	int i;
	phys_addr_t addr;

	if (!crashk_res.end)
		return false;

	/* in reserved memory? */
	addr = __pfn_to_phys(pfn);
	if ((addr < crashk_res.start) || (crashk_res.end < addr))
		return false;

	if (!kexec_crash_image)
		return true;

	/* not part of loaded kernel image? */
	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		if (addr >= kexec_crash_image->segment[i].mem &&
		    addr < (kexec_crash_image->segment[i].mem +
			    kexec_crash_image->segment[i].memsz))
			return false;

	return true;
}
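
/*
 * Free a range of reserved crash kernel memory back to the page allocator,
 * one page at a time.
 */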
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	struct page *page;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		free_reserved_page(page);
	}
}
#endif /* CONFIG_HIBERNATION */