/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */
/* Allocate the top level pgd (page directory)
 *
 * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB) so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory) */
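/*
 * Layout sketch (added for illustration; derived from pgd_alloc() below,
 * assuming PGD_ALLOC_ORDER is sized so both tables fit in one block):
 *
 *	pgd (allocation base)   -> permanent first pmd
 *	pgd + PTRS_PER_PGD      -> the pgd actually handed to mm code
 *
 * so holders of the returned pgd reach the attached pmd by subtracting
 * the constant PTRS_PER_PGD, exactly as described above.
 */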
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
		actual_pgd += PTRS_PER_PGD;
		/* Populate first pmd with allocated memory.  We mark it
		 * with PxD_FLAG_ATTACHED as a signal to the system that this
		 * pmd entry may not be cleared. */
		set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
					   PxD_FLAG_VALID |
					   PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
		 * a signal that this pmd may not be freed (see note below) */
		set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
#endif
	}
	spin_lock_init(pgd_spinlock(actual_pgd));
	return actual_pgd;
}
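/*
 * Note (added): PxD_FLAG_ATTACHED set above is the contract honoured by
 * pmd_free() and pmd_populate_kernel() further down, which refuse to free
 * the permanent pmd and preserve its marker when repopulating.  pgd_free()
 * below must likewise undo the +PTRS_PER_PGD offset so that free_pages()
 * is handed the original allocation base.
 */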
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
	pgd -= PTRS_PER_PGD;
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}
#if CONFIG_PGTABLE_LEVELS == 3

/* Three Level Page Table Support for pmd's */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);

	if (pmd)
		memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
	return pmd;
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
		/*
		 * This is the permanent pmd attached to the pgd;
		 * it cannot be freed here.
		 * Increment the counter to compensate for the decrement
		 * done by generic mm code (see note below).
		 */
		mm_inc_nr_pmds(mm);
		return;
	}
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#endif
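/*
 * Background (added): the generic unmap path (free_pmd_range() in
 * mm/memory.c) calls mm_dec_nr_pmds() for every pmd table it tears down,
 * the attached one included; pmd_free() above re-increments the counter
 * instead of freeing so the accounting stays balanced.
 */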
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
	/* preserve the gateway marker if this is the beginning of
	 * the permanent pmd (see note below) */
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
				    PxD_FLAG_VALID |
				    PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
	else
#endif
		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
}
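/*
 * Note (added): without the check above, installing a pte into the first
 * slot of the permanent pmd would overwrite PxD_FLAG_ATTACHED, and a later
 * pmd_free() would then wrongly free_pages() memory that still backs the
 * pgd allocation.
 */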
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

#endif