/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/init.h>

#include <asm/efi.h>
/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable, everything else can be mapped with the XN bits
 * set. Also take the new (optional) RO/XP bits into account.
 */
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;

	if (type == EFI_MEMORY_MAPPED_IO)
		return PROT_DEVICE_nGnRE;

	if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
		      "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
		/*
		 * If the region is not aligned to the page size of the OS, we
		 * can not use strict permissions, since that would also affect
		 * the mapping attributes of the adjacent regions.
		 */
		return pgprot_val(PAGE_KERNEL_EXEC);

	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return pgprot_val(PAGE_KERNEL_RO);

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return pgprot_val(PAGE_KERNEL_ROX);

	/* RW- */
	if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
		return pgprot_val(PAGE_KERNEL);

	/* RWX */
	return pgprot_val(PAGE_KERNEL_EXEC);
}
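/*
 * Summary (derived from the checks in create_mapping_protection() above):
 *
 *	EFI_MEMORY_RO | EFI_MEMORY_XP	-> PAGE_KERNEL_RO	(R--)
 *	EFI_MEMORY_RO only		-> PAGE_KERNEL_ROX	(R-X)
 *	EFI_MEMORY_XP or non-code type	-> PAGE_KERNEL		(RW-)
 *	otherwise (runtime code)	-> PAGE_KERNEL_EXEC	(RWX)
 */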
/* we will fill this structure from the stub, so don't put it in .bss */
struct screen_info screen_info __section(.data);
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	pteval_t prot_val = create_mapping_protection(md);
	bool allow_block_mappings = (md->type != EFI_RUNTIME_SERVICES_CODE &&
				     md->type != EFI_RUNTIME_SERVICES_DATA);

	if (!PAGE_ALIGNED(md->phys_addr) ||
	    !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
		/*
		 * If the end address of this region is not aligned to page
		 * size, the mapping is rounded up, and may end up sharing a
		 * page frame with the next UEFI memory region. If we create
		 * a block entry now, we may need to split it again when mapping
		 * the next region, and support for that is going to be removed
		 * from the MMU routines. So avoid block mappings altogether in
		 * that case.
		 */
		allow_block_mappings = false;
	}

	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT,
			   __pgprot(prot_val | PTE_NG), allow_block_mappings);
	return 0;
}
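/*
 * For context (an illustrative sketch, not part of this file): the generic
 * ARM/arm64 EFI runtime setup in drivers/firmware/efi/arm-runtime.c is
 * expected to drive this hook while populating efi_mm, roughly along the
 * lines of:
 *
 *	for_each_efi_memory_desc(md) {
 *		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 *			continue;
 *		ret = efi_create_mapping(&efi_mm, md);
 *		...
 *	}
 */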
static int __init set_permissions(pte_t *ptep, pgtable_t token,
				  unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = *ptep;

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
	set_pte(ptep, pte);
	return 0;
}
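/*
 * set_permissions() is only meaningful as the per-pte callback handed to
 * apply_to_page_range() below: apply_to_page_range() invokes it once for
 * each page table entry covering the requested virtual range, passing the
 * memory descriptor through the opaque data pointer.
 */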
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, md);
}
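/*
 * Illustrative context (an assumption about the generic EFI code, not part
 * of this file): this hook is expected to be handed to the Memory Attributes
 * table parser from the shared ARM runtime setup code, e.g.
 *
 *	efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions);
 */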
static int __init arm64_dmi_init(void)
{
	/*
	 * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to
	 * be called early because dmi_id_init(), which is an arch_initcall
	 * itself, depends on dmi_scan_machine() having been called already.
	 */
	dmi_scan_machine();
	if (dmi_available)
		dmi_set_dump_stack_arch_desc();
	return 0;
}
core_initcall(arm64_dmi_init);
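/*
 * core_initcall() runs before arch_initcall(), so registering
 * arm64_dmi_init() at this level satisfies the ordering requirement
 * described in the comment above.
 */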
/*
 * UpdateCapsule() depends on the system being shutdown via
 * ResetSystem().
 */
bool efi_poweroff_required(void)
{
	return efi_enabled(EFI_RUNTIME_SERVICES);
}