// SPDX-License-Identifier: GPL-2.0-only
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 */

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/kmemleak.h>
#include <linux/screen_info.h>
#include <linux/vmalloc.h>

#include <asm/efi.h>
#include <asm/stacktrace.h>
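
/*
 * The UEFI memory map describes regions in units of 4 KiB EFI pages, while
 * the arm64 kernel may be configured with 4 KiB, 16 KiB or 64 KiB pages.
 * With a larger kernel page size, a region's bounds are not guaranteed to
 * fall on kernel page boundaries.
 */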
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
	if (PAGE_SIZE == EFI_PAGE_SIZE)
		return false;
	return !PAGE_ALIGNED(md->phys_addr) ||
	       !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}
/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable, everything else can be mapped with the XN bits
 * set. Also take the new (optional) RO/XP bits into account.
 */
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;
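
	/*
	 * Note: arm64_is_protected_mmio() reports whether the range is
	 * protected MMIO, e.g. when running as a confidential guest such as
	 * an Arm CCA realm; protected ranges get the encrypted (private)
	 * attribute, everything else is mapped decrypted (shared).
	 */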
	if (type == EFI_MEMORY_MAPPED_IO) {
		pgprot_t prot = __pgprot(PROT_DEVICE_nGnRE);

		if (arm64_is_protected_mmio(md->phys_addr,
					    md->num_pages << EFI_PAGE_SHIFT))
			prot = pgprot_encrypted(prot);
		else
			prot = pgprot_decrypted(prot);
		return pgprot_val(prot);
	}
	if (region_is_misaligned(md)) {
		static bool __initdata code_is_misaligned;

		/*
		 * Regions that are not aligned to the OS page size cannot be
		 * mapped with strict permissions, as those might interfere
		 * with the permissions that are needed by the adjacent
		 * region's mapping. However, if we haven't encountered any
		 * misaligned runtime code regions so far, we can safely use
		 * non-executable permissions for non-code regions.
		 */
		code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);

		return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
					  : pgprot_val(PAGE_KERNEL);
	}
	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return pgprot_val(PAGE_KERNEL_RO);

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return pgprot_val(PAGE_KERNEL_ROX);

	/* RW- */
	if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
	     EFI_MEMORY_XP) ||
	    type != EFI_RUNTIME_SERVICES_CODE)
		return pgprot_val(PAGE_KERNEL);

	/* RWX */
	return pgprot_val(PAGE_KERNEL_EXEC);
}
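
/*
 * Install one EFI memory map region into the EFI page tables. CODE and DATA
 * regions are mapped down to pages so that stricter permissions can be
 * applied later via efi_set_mapping_permissions(), which requires page
 * mappings.
 */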
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	pteval_t prot_val = create_mapping_protection(md);
	bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
				   md->type == EFI_RUNTIME_SERVICES_DATA);

	/*
	 * If this region is not aligned to the page size used by the OS, the
	 * mapping will be rounded outwards, and may end up sharing a page
	 * frame with an adjacent runtime memory region. Given that the page
	 * table descriptor covering the shared page will be rewritten when the
	 * adjacent region gets mapped, we must avoid block mappings here so we
	 * don't have to worry about splitting them when that happens.
	 */
	if (region_is_misaligned(md))
		page_mappings_only = true;

	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT,
			   __pgprot(prot_val | PTE_NG), page_mappings_only);
	return 0;
}
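
/*
 * Argument bundle for the set_permissions() callback below: the descriptor
 * whose attributes are being applied, and whether the region was built with
 * BTI annotations (in which case guarded-page protection is enabled for
 * executable mappings).
 */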
struct set_perm_data {
	const efi_memory_desc_t	*md;
	bool			has_bti;
};
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	struct set_perm_data *spd = data;
	const efi_memory_desc_t *md = spd->md;
	pte_t pte = __ptep_get(ptep);

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
	else if (system_supports_bti_kernel() && spd->has_bti)
		pte = set_pte_bit(pte, __pgprot(PTE_GP));
	__set_pte(ptep, pte);
	return 0;
}
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md,
				       bool has_bti)
{
	struct set_perm_data data = { md, has_bti };

	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	if (region_is_misaligned(md))
		return 0;

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, &data);
}
/*
 * UpdateCapsule() depends on the system being shutdown via
 * ResetSystem().
 */
bool efi_poweroff_required(void)
{
	return efi_enabled(EFI_RUNTIME_SERVICES);
}
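
/*
 * Called from the runtime service wrapper when the firmware has clobbered
 * x18, which the arm64 kernel reserves (e.g. as the shadow call stack
 * pointer). The violation is logged and the firmware's return value is
 * passed through unchanged.
 */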
asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
{
	pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
	return s;
}
static DEFINE_RAW_SPINLOCK(efi_rt_lock);
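
/*
 * UEFI runtime services are not reentrant, so efi_rt_lock serializes all
 * calls into the firmware. The EFI page tables are loaded and the FP/SIMD
 * state is preserved around each call, since the firmware may clobber the
 * FP/SIMD registers.
 */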
void arch_efi_call_virt_setup(void)
{
	efi_virtmap_load();
	__efi_fpsimd_begin();
	raw_spin_lock(&efi_rt_lock);
}

void arch_efi_call_virt_teardown(void)
{
	raw_spin_unlock(&efi_rt_lock);
	__efi_fpsimd_end();
	efi_virtmap_unload();
}
asmlinkage u64 *efi_rt_stack_top __ro_after_init;

asmlinkage efi_status_t __efi_rt_asm_recover(void);
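
/*
 * A synchronous exception taken while the firmware is running cannot be
 * handled in the usual way; instead, the interrupted call is forced to
 * return EFI_ABORTED by redirecting it to __efi_rt_asm_recover, using the
 * return address (and shadow call stack pointer) stashed at the top of the
 * dedicated EFI runtime stack.
 */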
bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
{
	/* Check whether the exception occurred while running the firmware */
	if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
		return false;

	pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	regs->regs[0] = EFI_ABORTED;
	regs->regs[30] = efi_rt_stack_top[-1];
	regs->pc = (u64)__efi_rt_asm_recover;

	if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
		regs->regs[18] = efi_rt_stack_top[-2];

	return true;
}

/* EFI requires 8 KiB of stack space for runtime services */
static_assert(THREAD_SIZE >= SZ_8K);
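
/*
 * Allocate the dedicated stack that runtime services execute on; the
 * recovery path above relies on the return address (and SCS pointer)
 * being stashed at the top of this stack.
 */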
static int __init arm64_efi_rt_init(void)
{
	void *p;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return 0;

	p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
			   NUMA_NO_NODE, &&l);
l:	if (!p) {
		pr_warn("Failed to allocate EFI runtime stack\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return -ENOMEM;
	}

	kmemleak_not_leak(p);
	efi_rt_stack_top = p + THREAD_SIZE;
	return 0;
}
core_initcall(arm64_efi_rt_init);