MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / include/asm-parisc/pgalloc.h
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/pgtable.h>
#include <asm/cache.h>
/* Allocate the top level pgd (page directory)
 *
 * Here (for 64 bit kernels) we implement a hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full LP64
 * process access to 8TB), so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all of the
 * kernel for machines with under 4GB of memory).  See the layout
 * sketch after pgd_free() below. */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
                                               PGD_ALLOC_ORDER);
        pgd_t *actual_pgd = pgd;

        if (likely(pgd != NULL)) {
                memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#ifdef __LP64__
                actual_pgd += PTRS_PER_PGD;
                /* Populate first pmd with allocated memory.  We mark it
                 * with PxD_FLAG_ATTACHED as a signal to the system that this
                 * pmd entry may not be cleared. */
                __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
                                            PxD_FLAG_VALID |
                                            PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
                /* The first pmd entry also is marked with _PAGE_GATEWAY as
                 * a signal that this pmd may not be freed. */
                __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
        }
        return actual_pgd;
}
static inline void pgd_free(pgd_t *pgd)
{
#ifdef __LP64__
        pgd -= PTRS_PER_PGD;
#endif
        free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}
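
/*
 * Layout sketch of the LP64 hybrid scheme implemented above (illustrative
 * only; exact sizes depend on PGD_ALLOC_ORDER and PTRS_PER_PGD for the
 * configuration):
 *
 *      pgd (start of the allocation)  ->  permanently attached first pmd,
 *                                         marked PxD_FLAG_ATTACHED, covering
 *                                         the first 4GB
 *      pgd + PTRS_PER_PGD             ->  actual_pgd, the pgd proper that
 *                                         pgd_alloc() hands back to callers
 *
 * pgd_alloc() returns the offset pointer, so pgd_free() must subtract
 * PTRS_PER_PGD again to recover the start of the allocation before
 * passing it to free_pages().
 */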
#if PT_NLEVELS == 3

/* Three Level Page Table Support for pmd's */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
                (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
                                               PMD_ORDER);
        if (pmd)
                memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
        return pmd;
}
static inline void pmd_free(pmd_t *pmd)
{
#ifdef __LP64__
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                /* This is the permanent pmd attached to the pgd;
                 * cannot free it. */
                return;
#endif
        free_pages((unsigned long)pmd, PMD_ORDER);
}
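
/*
 * For context, a rough sketch of how generic mm code of this kernel
 * generation pairs these three-level helpers when it finds an empty pgd
 * entry (hedged reconstruction of the mm/memory.c caller, not code from
 * this header; locking and accounting are omitted):
 *
 *      pmd_t *new = pmd_alloc_one(mm, address);
 *      if (!new)
 *              return NULL;
 *      if (pgd_present(*pgd))
 *              pmd_free(new);                  raced: already populated
 *      else
 *              pgd_populate(mm, pgd, new);
 *      return pmd_offset(pgd, address);
 *
 * The PxD_FLAG_ATTACHED check in pmd_free() above ensures that the
 * permanently attached first pmd is never handed back to the page
 * allocator by such a race path.
 */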
#else

/* Two Level Page Table Support for pmd's */

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 * (See the sketch after the #endif below for why the stubs can BUG().)
 */

#define pmd_alloc_one(mm, addr)         ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)                     do { } while (0)
#define pgd_populate(mm, pmd, pte)      BUG()

#endif
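
/*
 * Why the two-level stubs can simply BUG(): with a folded one-entry pmd
 * inside the pgd, pgd_none() is always false, so the generic allocation
 * path never reaches pmd_alloc_one()/pgd_populate().  A hedged sketch of
 * the generic wrapper of this era (reproduced from memory of
 * <linux/mm.h>, possibly differing in detail):
 *
 *      static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd,
 *                                     unsigned long address)
 *      {
 *              if (pgd_none(*pgd))
 *                      return __pmd_alloc(mm, pgd, address);
 *              return pmd_offset(pgd, address);
 *      }
 */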
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifdef __LP64__
        /* Preserve the gateway marker if this is the beginning of
         * the permanent pmd. */
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
                                     PxD_FLAG_VALID |
                                     PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
        else
#endif
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}
#define pmd_populate(mm, pmd, pte_page) \
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT);

        if (likely(page != NULL))
                clear_page(page_address(page));
        return page;
}
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

        if (likely(pte != NULL))
                clear_page(pte);
        return pte;
}
static inline void pte_free_kernel(pte_t *pte)
{
        free_page((unsigned long)pte);
}

#define pte_free(page) pte_free_kernel(page_address(page))
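
/*
 * Rough usage sketch of the pte helpers above, modelled on the generic
 * pte_alloc_map()-style callers of this kernel generation (hedged and
 * illustrative; the real caller lives in core mm code, with locking and
 * accounting omitted here):
 *
 *      struct page *new = pte_alloc_one(mm, address);
 *      if (!new)
 *              return NULL;
 *      if (pmd_present(*pmd))
 *              pte_free(new);                  raced: already populated
 *      else
 *              pmd_populate(mm, pmd, new);
 *
 * pte_alloc_one_kernel()/pte_free_kernel() do the same job for kernel
 * page tables but work on the pte's kernel virtual address instead of
 * its struct page, which is why pmd_populate() is defined above in terms
 * of pmd_populate_kernel() plus page_address().
 */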
extern int do_check_pgt_cache(int, int);
#define check_pgt_cache()       do { } while (0)

#endif