#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>
/* Allocate the top level pgd (page directory)
 *
 * Here (for 64-bit kernels) we implement a hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB), so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory). */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
#ifdef CONFIG_64BIT
		actual_pgd += PTRS_PER_PGD;
		/* Populate the first pmd with the allocated memory.  We mark
		 * it with PxD_FLAG_ATTACHED as a signal to the system that
		 * this pmd entry may not be cleared. */
		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
					    PxD_FLAG_VALID |
					    PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
		/* The first pmd entry is also marked with _PAGE_GATEWAY as
		 * a signal that this pmd may not be freed. */
		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
	}
	return actual_pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	pgd -= PTRS_PER_PGD;	/* step back to the start of the allocation */
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}
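
/*
 * A minimal stand-alone sketch of the hybrid allocation above, assuming
 * made-up stand-ins (NENTRIES for PTRS_PER_PGD, calloc() for
 * __get_free_pages()); illustration only, not the kernel's actual code.
 */
#if 0	/* illustrative stand-alone model, disabled for compilation */
#include <stdlib.h>

#define NENTRIES 512			/* stand-in for PTRS_PER_PGD */
typedef unsigned long entry_t;		/* stand-in for pgd_t */

/* One block: the attached pmd occupies the first NENTRIES slots and
 * the pgd handed back to callers starts NENTRIES slots further on. */
static entry_t *model_pgd_alloc(void)
{
	entry_t *base = calloc(2 * NENTRIES, sizeof(*base));
	return base ? base + NENTRIES : NULL;
}

/* The constant offset lets the free path recover the allocation base,
 * exactly as the pgd -= PTRS_PER_PGD above does. */
static void model_pgd_free(entry_t *pgd)
{
	free(pgd - NENTRIES);
}
#endif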

#if PT_NLEVELS == 3

/* Three Level Page Table Support for pmd's */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
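
/*
 * A minimal stand-alone sketch of the entry packing used by
 * pgd_populate() above, assuming made-up constants (SHIFT for
 * PxD_VALUE_SHIFT, FLAGS for the PxD flag bits); illustration only.
 */
#if 0	/* illustrative stand-alone model, disabled for compilation */
#include <assert.h>

#define SHIFT	4		/* stand-in for PxD_VALUE_SHIFT */
#define FLAGS	0x3UL		/* stand-in for PxD_FLAG_PRESENT|PxD_FLAG_VALID */

static unsigned long encode(unsigned long phys)
{
	/* phys is page aligned, so the shifted value keeps its low
	 * flag bits clear and the addition cannot carry into them */
	return FLAGS + (phys >> SHIFT);
}

static unsigned long decode(unsigned long entry)
{
	return (entry & ~FLAGS) << SHIFT;	/* strip flags, restore address */
}

int main(void)
{
	unsigned long phys = 0x1000;	/* a page-aligned physical address */
	assert(decode(encode(phys)) == phys);
	return 0;
}
#endif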

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT,
					       PMD_ORDER);
	if (pmd)
		memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
#ifdef CONFIG_64BIT
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		/* This is the permanent pmd attached to the pgd;
		 * it cannot be freed. */
		return;
#endif
	free_pages((unsigned long)pmd, PMD_ORDER);
}
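
/* Note that the attached pmd refused above is the one carved out of
 * the pgd allocation in pgd_alloc(); passing it to free_pages() would
 * release the pgd's own pages while the pgd is still live. */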

#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 */

#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#endif
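
/* With only two levels the pmd is folded away: its single entry is the
 * pgd slot itself, so generic mm code should never ask this header to
 * allocate or free one -- hence the BUG()s above. */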

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifdef CONFIG_64BIT
	/* Preserve the gateway marker if this is the beginning of
	 * the permanent pmd. */
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
				     PxD_FLAG_VALID |
				     PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
	else
#endif
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
	return pte;
}
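
/* Unlike pte_alloc_one() above, the kernel variant deliberately skips
 * pgtable_page_ctor(): that constructor state (such as the split
 * page-table lock) is only needed for user page tables. */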

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	pte_free_kernel(mm, page_address(pte));
}
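
/*
 * A hypothetical caller, to show the intended pairing: pages from
 * pte_alloc_one() must go back through pte_free() so that
 * pgtable_page_ctor() and pgtable_page_dtor() stay balanced.
 *
 *	pgtable_t pt = pte_alloc_one(mm, addr);
 *	if (pt) {
 *		pmd_populate(mm, pmd, pt);
 *		...
 *		pte_free(mm, pt);
 *	}
 */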

#define check_pgt_cache()	do { } while (0)