// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2023 Google LLC
// Author: Ard Biesheuvel <ardb@google.com>

#include <linux/types.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include "pi.h"

/**
 * map_range - Map a contiguous range of physical pages into virtual memory
 *
 * @pte:		Address of physical pointer to array of pages to
 *			allocate page tables from
 * @start:		Virtual address of the start of the range
 * @end:		Virtual address of the end of the range (exclusive)
 * @pa:			Physical address of the start of the range
 * @prot:		Access permissions of the range
 * @level:		Translation level for the mapping
 * @tbl:		The level @level page table to create the mappings in
 * @may_use_cont:	Whether the use of the contiguous attribute is allowed
 * @va_offset:		Offset between a physical page and its current mapping
 *			in the live page tables
 */
void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
		      int level, pte_t *tbl, bool may_use_cont, u64 va_offset)
{
	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
	pteval_t protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
	int lshift = (3 - level) * (PAGE_SHIFT - 3);
	u64 lmask = (PAGE_SIZE << lshift) - 1;

	start	&= PAGE_MASK;
	pa	&= PAGE_MASK;
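
	/*
	 * Geometry note (assuming a 4 KiB granule, i.e. PAGE_SHIFT == 12
	 * and 512 entries per table): lshift is 9 * (3 - level), so lmask
	 * covers 4 KiB at level 3, 2 MiB at level 2, 1 GiB at level 1 and
	 * 512 GiB at level 0. cmask is only finite at level 3, where
	 * CONT_PTE_SIZE groups 16 adjacent PTEs into a 64 KiB contiguous
	 * run.
	 */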

	/* Advance tbl to the entry that covers start */
	tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE;

	/*
	 * Set the right block/page bits for this level unless we are
	 * clearing the mapping
	 */
	if (protval)
		protval |= (level < 3) ? PMD_TYPE_SECT : PTE_TYPE_PAGE;
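
	/*
	 * Walk the range one naturally aligned, at most lmask + 1 sized
	 * chunk at a time: each chunk either gets a block/page mapping at
	 * this level, or is not suitably aligned for one and is mapped by
	 * recursing into a next level table.
	 */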
	while (start < end) {
		u64 next = min((start | lmask) + 1, PAGE_ALIGN(end));

		if (level < 3 && (start | next | pa) & lmask) {
			/*
			 * This chunk needs a finer grained mapping. Create a
			 * table mapping if necessary and recurse.
			 */
			if (pte_none(*tbl)) {
				*tbl = __pte(__phys_to_pte_val(*pte) |
					     PMD_TYPE_TABLE | PMD_TABLE_UXN);
				*pte += PTRS_PER_PTE * sizeof(pte_t);
			}
			map_range(pte, start, next, pa, prot, level + 1,
				  (pte_t *)(__pte_to_phys(*tbl) + va_offset),
				  may_use_cont, va_offset);
		} else {
			/*
			 * Start a contiguous range if start and pa are
			 * suitably aligned
			 */
			if (((start | pa) & cmask) == 0 && may_use_cont)
				protval |= PTE_CONT;

			/*
			 * Clear the contiguous attribute if the remaining
			 * range does not cover a contiguous block
			 */
			if ((end & ~cmask) <= start)
				protval &= ~PTE_CONT;

			/* Put down a block or page mapping */
			*tbl = __pte(__phys_to_pte_val(pa) | protval);
		}
		pa += next - start;
		start = next;
		tbl++;
	}
}
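
/*
 * Worked example (hypothetical addresses, assuming a 4 KiB granule):
 * mapping start = 0x40200000, end = 0x40600000 at pa == start with
 * level == 2 splits into the chunks [0x40200000, 0x40400000) and
 * [0x40400000, 0x40600000); start, next and pa are 2 MiB aligned for
 * both, so each is covered by a single level 2 block entry. Had start
 * been 0x40210000 instead, (start | next | pa) & lmask would be
 * nonzero, so map_range() would allocate a level 3 table and recurse
 * to map that chunk with 4 KiB pages.
 */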

asmlinkage u64 __init create_init_idmap(pgd_t *pg_dir, pteval_t clrmask)
{
	u64 ptep = (u64)pg_dir + PAGE_SIZE;
	pgprot_t text_prot = PAGE_KERNEL_ROX;
	pgprot_t data_prot = PAGE_KERNEL;

	pgprot_val(text_prot) &= ~clrmask;
	pgprot_val(data_prot) &= ~clrmask;

	map_range(&ptep, (u64)_stext, (u64)__initdata_begin, (u64)_stext,
		  text_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);
	map_range(&ptep, (u64)__initdata_begin, (u64)_end, (u64)__initdata_begin,
		  data_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);

	return ptep;
}
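
/*
 * Usage sketch (illustrative, not part of this file): the caller passes a
 * page aligned buffer whose first page serves as the root table, and the
 * pages that follow it are handed out as table pages through the ptep
 * cursor. The return value is the address of the first unused page, so a
 * hypothetical caller could do:
 *
 *	u64 next = create_init_idmap(init_idmap_pg_dir, 0);
 *	// (next - (u64)init_idmap_pg_dir) bytes of the buffer are in use
 */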