arch/arm/include/asm/kexec.h (from the drm/drm-misc.git tree)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_KEXEC_H
#define _ARM_KEXEC_H

/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)

#define KEXEC_CONTROL_PAGE_SIZE 4096

#define KEXEC_ARCH KEXEC_ARCH_ARM

#define KEXEC_ARM_ATAGS_OFFSET  0x1000
#define KEXEC_ARM_ZIMAGE_OFFSET 0x8000

#ifndef __ASSEMBLY__

#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
        u32 kernel_r2;  /* value handed to the new kernel in r2 (ATAGs/DTB physical address) */
};

/**
 * crash_setup_regs() - save registers for the panic kernel
 * @newregs: registers are saved here
 * @oldregs: registers to be saved (may be %NULL)
 *
 * Copies the machine registers from @oldregs to @newregs. If @oldregs is
 * %NULL, the current CPU registers are captured into @newregs instead.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
                                    struct pt_regs *oldregs)
{
        if (oldregs) {
                memcpy(newregs, oldregs, sizeof(*newregs));
        } else {
                __asm__ __volatile__ (
                        "stmia  %[regs_base], {r0-r12}\n\t"
                        "mov    %[_ARM_sp], sp\n\t"
                        "str    lr, %[_ARM_lr]\n\t"
                        "adr    %[_ARM_pc], 1f\n\t"
                        "mrs    %[_ARM_cpsr], cpsr\n\t"
                        "1:"
                        : [_ARM_pc] "=r" (newregs->ARM_pc),
                          [_ARM_cpsr] "=r" (newregs->ARM_cpsr),
                          [_ARM_sp] "=r" (newregs->ARM_sp),
                          [_ARM_lr] "=o" (newregs->ARM_lr)
                        : [regs_base] "r" (&newregs->ARM_r0)
                        : "memory"
                );
        }
}

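/*
 * Illustrative sketch, not part of the original header: a crash/panic path
 * is expected to call crash_setup_regs() either with the trapping pt_regs or
 * with NULL to snapshot the current CPU state.  The helper name below is
 * hypothetical and only shows the calling pattern.
 */
#if 0   /* example only */
static inline void example_crash_snapshot(struct pt_regs *trap_regs)
{
        struct pt_regs saved;

        /* When trap_regs is NULL, the current registers are captured. */
        crash_setup_regs(&saved, trap_regs);
}
#endif
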
static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
{
        return phys_to_idmap(phys);
}
#define phys_to_boot_phys phys_to_boot_phys

static inline phys_addr_t boot_phys_to_phys(unsigned long entry)
{
        return idmap_to_phys(entry);
}
#define boot_phys_to_phys boot_phys_to_phys

static inline unsigned long page_to_boot_pfn(struct page *page)
{
        return page_to_pfn(page) + (arch_phys_to_idmap_offset >> PAGE_SHIFT);
}
#define page_to_boot_pfn page_to_boot_pfn

static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
{
        return pfn_to_page(boot_pfn - (arch_phys_to_idmap_offset >> PAGE_SHIFT));
}
#define boot_pfn_to_page boot_pfn_to_page

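/*
 * Illustrative sketch, not part of the original header: the hooks above keep
 * generic kexec code in the identity-mapped ("boot") view of physical memory
 * that the new kernel is entered with.  When arch_phys_to_idmap_offset is
 * non-zero the conversion is a fixed pfn shift, so the round trip is the
 * identity.  The helper name below is hypothetical.
 */
#if 0   /* example only */
static inline bool example_boot_pfn_round_trip(struct page *page)
{
        unsigned long boot_pfn = page_to_boot_pfn(page);

        /* Undoing the pfn shift recovers the original page. */
        return boot_pfn_to_page(boot_pfn) == page;
}
#endif
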
#endif /* __ASSEMBLY__ */

#endif /* _ARM_KEXEC_H */