// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 */
9 #include <linux/highmem.h>
10 #include <linux/slab.h>
13 #include <asm/pgalloc.h>
15 #include <asm/tlbflush.h>
#ifdef CONFIG_ARM_LPAE
/* LPAE: level-1 table is only PTRS_PER_PGD entries, small enough for kmalloc */
#define __pgd_alloc()	kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd)	kfree(pgd)
#else
/* classic MMU: level-1 table is 16K (4 pages), must be page-aligned */
#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
#endif
28 * need to get a 16k page for level 1
30 pgd_t
*pgd_alloc(struct mm_struct
*mm
)
32 pgd_t
*new_pgd
, *init_pgd
;
33 p4d_t
*new_p4d
, *init_p4d
;
34 pud_t
*new_pud
, *init_pud
;
35 pmd_t
*new_pmd
, *init_pmd
;
36 pte_t
*new_pte
, *init_pte
;
38 new_pgd
= __pgd_alloc();
42 memset(new_pgd
, 0, USER_PTRS_PER_PGD
* sizeof(pgd_t
));
45 * Copy over the kernel and IO PGD entries
47 init_pgd
= pgd_offset_k(0);
48 memcpy(new_pgd
+ USER_PTRS_PER_PGD
, init_pgd
+ USER_PTRS_PER_PGD
,
49 (PTRS_PER_PGD
- USER_PTRS_PER_PGD
) * sizeof(pgd_t
));
51 clean_dcache_area(new_pgd
, PTRS_PER_PGD
* sizeof(pgd_t
));
53 #ifdef CONFIG_ARM_LPAE
55 * Allocate PMD table for modules and pkmap mappings.
57 new_p4d
= p4d_alloc(mm
, new_pgd
+ pgd_index(MODULES_VADDR
),
62 new_pud
= pud_alloc(mm
, new_p4d
, MODULES_VADDR
);
66 new_pmd
= pmd_alloc(mm
, new_pud
, 0);
71 * Copy PMD table for KASAN shadow mappings.
73 init_pgd
= pgd_offset_k(TASK_SIZE
);
74 init_p4d
= p4d_offset(init_pgd
, TASK_SIZE
);
75 init_pud
= pud_offset(init_p4d
, TASK_SIZE
);
76 init_pmd
= pmd_offset(init_pud
, TASK_SIZE
);
77 new_pmd
= pmd_offset(new_pud
, TASK_SIZE
);
78 memcpy(new_pmd
, init_pmd
,
79 (pmd_index(MODULES_VADDR
) - pmd_index(TASK_SIZE
))
81 clean_dcache_area(new_pmd
, PTRS_PER_PMD
* sizeof(pmd_t
));
82 #endif /* CONFIG_KASAN */
83 #endif /* CONFIG_LPAE */
85 if (!vectors_high()) {
87 * On ARM, first page must always be allocated since it
88 * contains the machine vectors. The vectors are always high
91 new_p4d
= p4d_alloc(mm
, new_pgd
, 0);
95 new_pud
= pud_alloc(mm
, new_p4d
, 0);
99 new_pmd
= pmd_alloc(mm
, new_pud
, 0);
103 new_pte
= pte_alloc_map(mm
, new_pmd
, 0);
107 #ifndef CONFIG_ARM_LPAE
109 * Modify the PTE pointer to have the correct domain. This
110 * needs to be the vectors domain to avoid the low vectors
113 pmd_val(*new_pmd
) &= ~PMD_DOMAIN_MASK
;
114 pmd_val(*new_pmd
) |= PMD_DOMAIN(DOMAIN_VECTORS
);
117 init_p4d
= p4d_offset(init_pgd
, 0);
118 init_pud
= pud_offset(init_p4d
, 0);
119 init_pmd
= pmd_offset(init_pud
, 0);
120 init_pte
= pte_offset_map(init_pmd
, 0);
121 set_pte_ext(new_pte
+ 0, init_pte
[0], 0);
122 set_pte_ext(new_pte
+ 1, init_pte
[1], 0);
130 pmd_free(mm
, new_pmd
);
133 pud_free(mm
, new_pud
);
135 p4d_free(mm
, new_p4d
);
142 void pgd_free(struct mm_struct
*mm
, pgd_t
*pgd_base
)
153 pgd
= pgd_base
+ pgd_index(0);
154 if (pgd_none_or_clear_bad(pgd
))
157 p4d
= p4d_offset(pgd
, 0);
158 if (p4d_none_or_clear_bad(p4d
))
161 pud
= pud_offset(p4d
, 0);
162 if (pud_none_or_clear_bad(pud
))
165 pmd
= pmd_offset(pud
, 0);
166 if (pmd_none_or_clear_bad(pmd
))
169 pte
= pmd_pgtable(*pmd
);
184 #ifdef CONFIG_ARM_LPAE
186 * Free modules/pkmap or identity pmd tables.
188 for (pgd
= pgd_base
; pgd
< pgd_base
+ PTRS_PER_PGD
; pgd
++) {
189 if (pgd_none_or_clear_bad(pgd
))
191 if (pgd_val(*pgd
) & L_PGD_SWAPPER
)
193 p4d
= p4d_offset(pgd
, 0);
194 if (p4d_none_or_clear_bad(p4d
))
196 pud
= pud_offset(p4d
, 0);
197 if (pud_none_or_clear_bad(pud
))
199 pmd
= pmd_offset(pud
, 0);
210 __pgd_free(pgd_base
);