1 // SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */
7 static void free_pte(struct x86_mapping_info
*info
, pmd_t
*pmd
)
9 pte_t
*pte
= pte_offset_kernel(pmd
, 0);
11 info
->free_pgt_page(pte
, info
->context
);
14 static void free_pmd(struct x86_mapping_info
*info
, pud_t
*pud
)
16 pmd_t
*pmd
= pmd_offset(pud
, 0);
19 for (i
= 0; i
< PTRS_PER_PMD
; i
++) {
20 if (!pmd_present(pmd
[i
]))
26 free_pte(info
, &pmd
[i
]);
29 info
->free_pgt_page(pmd
, info
->context
);
32 static void free_pud(struct x86_mapping_info
*info
, p4d_t
*p4d
)
34 pud_t
*pud
= pud_offset(p4d
, 0);
37 for (i
= 0; i
< PTRS_PER_PUD
; i
++) {
38 if (!pud_present(pud
[i
]))
44 free_pmd(info
, &pud
[i
]);
47 info
->free_pgt_page(pud
, info
->context
);
50 static void free_p4d(struct x86_mapping_info
*info
, pgd_t
*pgd
)
52 p4d_t
*p4d
= p4d_offset(pgd
, 0);
55 for (i
= 0; i
< PTRS_PER_P4D
; i
++) {
56 if (!p4d_present(p4d
[i
]))
59 free_pud(info
, &p4d
[i
]);
62 if (pgtable_l5_enabled())
63 info
->free_pgt_page(p4d
, info
->context
);
66 void kernel_ident_mapping_free(struct x86_mapping_info
*info
, pgd_t
*pgd
)
70 for (i
= 0; i
< PTRS_PER_PGD
; i
++) {
71 if (!pgd_present(pgd
[i
]))
74 free_p4d(info
, &pgd
[i
]);
77 info
->free_pgt_page(pgd
, info
->context
);
80 static void ident_pmd_init(struct x86_mapping_info
*info
, pmd_t
*pmd_page
,
81 unsigned long addr
, unsigned long end
)
84 for (; addr
< end
; addr
+= PMD_SIZE
) {
85 pmd_t
*pmd
= pmd_page
+ pmd_index(addr
);
87 if (pmd_present(*pmd
))
90 set_pmd(pmd
, __pmd((addr
- info
->offset
) | info
->page_flag
));
94 static int ident_pud_init(struct x86_mapping_info
*info
, pud_t
*pud_page
,
95 unsigned long addr
, unsigned long end
)
99 for (; addr
< end
; addr
= next
) {
100 pud_t
*pud
= pud_page
+ pud_index(addr
);
104 next
= (addr
& PUD_MASK
) + PUD_SIZE
;
108 /* if this is already a gbpage, this portion is already mapped */
112 /* Is using a gbpage allowed? */
113 use_gbpage
= info
->direct_gbpages
;
115 /* Don't use gbpage if it maps more than the requested region. */
116 /* at the begining: */
117 use_gbpage
&= ((addr
& ~PUD_MASK
) == 0);
118 /* ... or at the end: */
119 use_gbpage
&= ((next
& ~PUD_MASK
) == 0);
121 /* Never overwrite existing mappings */
122 use_gbpage
&= !pud_present(*pud
);
127 pudval
= __pud((addr
- info
->offset
) | info
->page_flag
);
128 set_pud(pud
, pudval
);
132 if (pud_present(*pud
)) {
133 pmd
= pmd_offset(pud
, 0);
134 ident_pmd_init(info
, pmd
, addr
, next
);
137 pmd
= (pmd_t
*)info
->alloc_pgt_page(info
->context
);
140 ident_pmd_init(info
, pmd
, addr
, next
);
141 set_pud(pud
, __pud(__pa(pmd
) | info
->kernpg_flag
));
147 static int ident_p4d_init(struct x86_mapping_info
*info
, p4d_t
*p4d_page
,
148 unsigned long addr
, unsigned long end
)
153 for (; addr
< end
; addr
= next
) {
154 p4d_t
*p4d
= p4d_page
+ p4d_index(addr
);
157 next
= (addr
& P4D_MASK
) + P4D_SIZE
;
161 if (p4d_present(*p4d
)) {
162 pud
= pud_offset(p4d
, 0);
163 result
= ident_pud_init(info
, pud
, addr
, next
);
169 pud
= (pud_t
*)info
->alloc_pgt_page(info
->context
);
173 result
= ident_pud_init(info
, pud
, addr
, next
);
177 set_p4d(p4d
, __p4d(__pa(pud
) | info
->kernpg_flag
| _PAGE_NOPTISHADOW
));
183 int kernel_ident_mapping_init(struct x86_mapping_info
*info
, pgd_t
*pgd_page
,
184 unsigned long pstart
, unsigned long pend
)
186 unsigned long addr
= pstart
+ info
->offset
;
187 unsigned long end
= pend
+ info
->offset
;
191 /* Set the default pagetable flags if not supplied */
192 if (!info
->kernpg_flag
)
193 info
->kernpg_flag
= _KERNPG_TABLE
;
195 /* Filter out unsupported __PAGE_KERNEL_* bits: */
196 info
->kernpg_flag
&= __default_kernel_pte_mask
;
198 for (; addr
< end
; addr
= next
) {
199 pgd_t
*pgd
= pgd_page
+ pgd_index(addr
);
202 next
= (addr
& PGDIR_MASK
) + PGDIR_SIZE
;
206 if (pgd_present(*pgd
)) {
207 p4d
= p4d_offset(pgd
, 0);
208 result
= ident_p4d_init(info
, p4d
, addr
, next
);
214 p4d
= (p4d_t
*)info
->alloc_pgt_page(info
->context
);
217 result
= ident_p4d_init(info
, p4d
, addr
, next
);
220 if (pgtable_l5_enabled()) {
221 set_pgd(pgd
, __pgd(__pa(p4d
) | info
->kernpg_flag
| _PAGE_NOPTISHADOW
));
224 * With p4d folded, pgd is equal to p4d.
225 * The pgd entry has to point to the pud page table in this case.
227 pud_t
*pud
= pud_offset(p4d
, 0);
228 set_pgd(pgd
, __pgd(__pa(pud
) | info
->kernpg_flag
| _PAGE_NOPTISHADOW
));