// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

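/*
 * Fill in PMD entries for the range [addr, end): each 2M-sized step maps
 * virtual address addr to physical address (addr - info->offset) using
 * info->page_flag. Entries that are already present are left untouched.
 */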
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}

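/*
 * Fill in the PUD level for [addr, end). With info->direct_gbpages the range
 * is mapped with 1G entries directly; otherwise a PMD page table is reused
 * (if the PUD entry is already present) or allocated through
 * info->alloc_pgt_page() and populated by ident_pmd_init().
 */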
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (info->direct_gbpages) {
			pud_t pudval;

			if (pud_present(*pud))
				continue;

			addr &= PUD_MASK;
			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
	}

	return 0;
}

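/*
 * Fill in the P4D level for [addr, end): reuse the PUD page table behind an
 * already-present P4D entry, or allocate a new one via info->alloc_pgt_page()
 * and hand it down to ident_pud_init().
 */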
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			ident_pud_init(info, pud, addr, next);
			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		ident_pud_init(info, pud, addr, next);
		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
	}

	return 0;
}

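/*
 * Build an identity mapping for the physical range [pstart, pend), shifted by
 * info->offset, underneath the page tables rooted at pgd_page. Page-table
 * pages are obtained from info->alloc_pgt_page(); returns 0 on success or
 * -ENOMEM when that allocator fails.
 */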
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
		}
	}

	return 0;
}
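
/*
 * Illustrative sketch (not part of this file): a caller wires these routines
 * up by filling in struct x86_mapping_info with a page-table allocator and
 * the desired leaf flags, then maps each physical range it needs. The names
 * alloc_pgt_page, pgt_data, pgd, mstart and mend below are placeholders for
 * whatever the caller actually uses:
 *
 *	struct x86_mapping_info info = {
 *		.alloc_pgt_page	= alloc_pgt_page,	// returns one zeroed page, or NULL
 *		.context	= pgt_data,		// opaque state for the allocator
 *		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *	};
 *
 *	// pgd points at the already-allocated top-level page table
 *	result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
 *	// result is 0 on success, or -ENOMEM if the allocator ran dry
 */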