#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>
/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP		EFI_ARCH_1
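/*
 * Illustrative only (not part of the mainline header): the chicken bit is
 * flipped from the kernel command line, e.g.
 *
 *	linux /boot/vmlinuz root=/dev/sda2 ro efi=old_map
 *
 * which makes the kernel fall back to the old runtime services mapping
 * method instead of the stable -4G-down VA scheme described above.
 */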
#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#define MAX_CMDLINE_ADDRESS	UINT_MAX
#ifdef CONFIG_X86_32

#define EFI_LOADER_SIGNATURE	"EL32"

extern unsigned long asmlinkage efi_call_phys(void *, ...);
/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */

/* Use this macro if your virtual call returns a non-void value */
#define efi_call_virt(f, args...) \
({									\
	efi_status_t __s;						\
	kernel_fpu_begin();						\
	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
		efi.systab->runtime->f)(args);				\
	kernel_fpu_end();						\
	__s;								\
})
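/*
 * Illustrative use only, mirroring what the generic runtime wrappers do for
 * a status-returning service (tm/tc are the caller's efi_time_t and
 * efi_time_cap_t pointers):
 *
 *	efi_status_t status;
 *
 *	status = efi_call_virt(get_time, tm, tc);
 *	if (status != EFI_SUCCESS)
 *		return status;
 */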
/* Use this macro if your virtual call does not return any value */
#define __efi_call_virt(f, args...) \
({									\
	kernel_fpu_begin();						\
	((efi_##f##_t __attribute__((regparm(0)))*)			\
		efi.systab->runtime->f)(args);				\
	kernel_fpu_end();						\
})
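/*
 * Illustrative use only: a service that returns void, such as reset_system,
 * goes through the non-returning variant instead:
 *
 *	__efi_call_virt(reset_system, reset_type, status, data_size, data);
 */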
#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)
#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"
extern u64 asmlinkage efi_call(void *fp, ...);

#define efi_call_phys(f, args...)	efi_call((f), args)
/*
 * Scratch space used for switching the pagetable in the EFI stub
 */
struct efi_scratch {
	u64	r15;
	u64	prev_cr3;	/* CR3 to restore after a runtime call */
	pgd_t	*efi_pgt;	/* EFI's own top-level page table */
	bool	use_pgd;	/* switch to efi_pgt around runtime calls */
	u64	phys_stack;
} __packed;
#define efi_call_virt(f, ...)						\
({									\
	efi_status_t __s;						\
	efi_sync_low_kernel_mappings();					\
	__kernel_fpu_begin();						\
	if (efi_scratch.use_pgd) {					\
		efi_scratch.prev_cr3 = read_cr3();			\
		write_cr3((unsigned long)efi_scratch.efi_pgt);		\
		__flush_tlb_all();					\
	}								\
	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
	if (efi_scratch.use_pgd) {					\
		write_cr3(efi_scratch.prev_cr3);			\
		__flush_tlb_all();					\
	}								\
	__kernel_fpu_end();						\
	__s;								\
})
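/*
 * Illustrative use only: the statement expression above evaluates to __s, so
 * callers treat it exactly like a function call, e.g.
 *
 *	status = efi_call_virt(set_variable, name, vendor, attr, data_size, data);
 */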
/*
 * All X86_64 virt calls return non-void values. Thus, use non-void call for
 * virt calls that would be void on X86_32.
 */
#define __efi_call_virt(f, args...) efi_call_virt(f, args)
extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
					u32 type, u64 attribute);
/*
 * CONFIG_KASAN may redefine memset to __memset. __memset is present only in
 * the kernel binary. Since the EFI stub is linked into a separate binary, it
 * doesn't have __memset(), so we should use the standard memset from
 * arch/x86/boot/compressed/string.c. The same applies to memcpy and memmove.
 */
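/*
 * Sketch of the guard the comment above describes (assumed to match the
 * mainline header): drop any redefinitions so the stub gets the plain
 * string.c implementations.
 */
#undef memcpy
#undef memset
#undef memmove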
#endif /* CONFIG_X86_32 */
extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern pgd_t * __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_print_memmap(void);
extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
struct efi_setup_data {
	u64 fw_vendor;
	u64 runtime;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;
#ifdef CONFIG_EFI

static inline bool efi_is_native(void)
{
	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}
static inline bool efi_runtime_supported(void)
{
	if (efi_is_native())
		return true;

	if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
		return true;

	return false;
}
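/*
 * Illustrative caller pattern only (the exact message is an assumption):
 *
 *	if (!efi_runtime_supported())
 *		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
 */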
extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);
#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */
/* arch specific definitions used by the stub code */

struct efi_config {
	u64 image_handle;
	u64 table;
	u64 allocate_pool;
	u64 allocate_pages;
	u64 get_memory_map;
	u64 free_pool;
	u64 free_pages;
	u64 locate_handle;
	u64 handle_protocol;
	u64 exit_boot_services;
	u64 text_output;
	efi_status_t (*call)(unsigned long, ...);
	bool is64;
} __packed;

__pure const struct efi_config *__efi_early(void);
#define efi_call_early(f, ...)						\
	__efi_early()->call(__efi_early()->f, __VA_ARGS__);

#define __efi_call_early(f, ...)					\
	__efi_early()->call((unsigned long)f, __VA_ARGS__);

#define efi_is_64bit()		__efi_early()->is64
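/*
 * Illustrative use only: in the boot stub, boot services are reached through
 * efi_call_early(), which works in both native and mixed mode, e.g.
 *
 *	status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size,
 *				(void **)&buf);
 *	if (status != EFI_SUCCESS)
 *		...
 *
 * buf and size here are the caller's own variables.
 */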
extern bool efi_reboot_required(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */