// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */
#include <linux/efi.h>

#include <asm/efi.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
11 static int __init
set_permissions(pte_t
*ptep
, unsigned long addr
, void *data
)
13 efi_memory_desc_t
*md
= data
;
16 if (md
->attribute
& EFI_MEMORY_RO
)
17 pte
= set_pte_bit(pte
, __pgprot(L_PTE_RDONLY
));
18 if (md
->attribute
& EFI_MEMORY_XP
)
19 pte
= set_pte_bit(pte
, __pgprot(L_PTE_XN
));
20 set_pte_ext(ptep
, pte
, PTE_EXT_NG
);
24 int __init
efi_set_mapping_permissions(struct mm_struct
*mm
,
25 efi_memory_desc_t
*md
)
27 unsigned long base
, size
;
30 size
= md
->num_pages
<< EFI_PAGE_SHIFT
;
33 * We can only use apply_to_page_range() if we can guarantee that the
34 * entire region was mapped using pages. This should be the case if the
35 * region does not cover any naturally aligned SECTION_SIZE sized
38 if (round_down(base
+ size
, SECTION_SIZE
) <
39 round_up(base
, SECTION_SIZE
) + SECTION_SIZE
)
40 return apply_to_page_range(mm
, base
, size
, set_permissions
, md
);
45 int __init
efi_create_mapping(struct mm_struct
*mm
, efi_memory_desc_t
*md
)
47 struct map_desc desc
= {
48 .virtual = md
->virt_addr
,
49 .pfn
= __phys_to_pfn(md
->phys_addr
),
50 .length
= md
->num_pages
* EFI_PAGE_SIZE
,
54 * Order is important here: memory regions may have all of the
55 * bits below set (and usually do), so we check them in order of
58 if (md
->attribute
& EFI_MEMORY_WB
)
59 desc
.type
= MT_MEMORY_RWX
;
60 else if (md
->attribute
& EFI_MEMORY_WT
)
61 desc
.type
= MT_MEMORY_RWX_NONCACHED
;
62 else if (md
->attribute
& EFI_MEMORY_WC
)
63 desc
.type
= MT_DEVICE_WC
;
65 desc
.type
= MT_DEVICE
;
67 create_mapping_late(mm
, &desc
, true);
70 * If stricter permissions were specified, apply them now.
72 if (md
->attribute
& (EFI_MEMORY_RO
| EFI_MEMORY_XP
))
73 return efi_set_mapping_permissions(mm
, md
);