arch/mips/include/asm/pgtable-64.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PAGE_SIZE_64KB
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables.  Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables.  Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes.  Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, each pte is initialized to 0.  When memory is low,
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to higher-layer code, so that
 * the failure is recognized later on.  Linux does not seem to handle
 * these failures very well though.  The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */
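
/*
 * Worked example (editor's sketch) of the 4K layout described above:
 * the two-page (8kB) pgd indexes virtual address bits 39..30, each pmd
 * page indexes bits 29..21 and each pte page bits 20..12, so a single
 * pgd entry covers 1GB and the tree as a whole spans 2^40 bytes.
 */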

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
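
/*
 * Sketch of the arithmetic (editor's note): with 4K pages, PAGE_SHIFT
 * is 12 and PTE_ORDER == PMD_ORDER == 0, so
 *	PMD_SHIFT   = 12 + (12 + 0 - 3) = 21	(PMD_SIZE   = 2MB)
 *	PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30	(PGDIR_SIZE = 1GB)
 * The "- 3" is log2(sizeof(u64)): each page of table space holds
 * PAGE_SIZE / 8 pointers.
 */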

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pud, which
 * permits us mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level but it seems
 * like at the moment there's no need for this.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#define PTE_ORDER		0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
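
/*
 * Editor's note: with CONFIG_PAGE_SIZE_4KB this works out to
 * PTRS_PER_PGD == 1024 and PTRS_PER_PMD == PTRS_PER_PTE == 512; the
 * "aieeee" orders make any accidental reference to a folded level's
 * order a compile-time error rather than a silent misallocation.
 */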

#if PGDIR_SIZE >= TASK_SIZE64
#define USER_PTRS_PER_PGD	(1)
#else
#define USER_PTRS_PER_PGD	(TASK_SIZE64 / PGDIR_SIZE)
#endif
#define FIRST_USER_ADDRESS	0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))
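
/*
 * Editor's note: VMALLOC_END is thus bounded by the smaller of the
 * range the page tables can map (PTRS_PER_PGD * PTRS_PER_PMD *
 * PTRS_PER_PTE pages) and the range the CPU's implemented virtual
 * address bits (cpu_vmbits) allow, with the top 4GB of that span
 * left unused.
 */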

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves, for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) } )

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
#ifndef __PAGETABLE_PMD_FOLDED

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
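
/*
 * Editor's sketch: pfn_pte()/pte_pfn() round-trip, i.e.
 *	pte_pfn(pfn_pte(pfn, prot)) == pfn
 * assuming all pgprot bits sit below the pfn field (_PFN_SHIFT, or
 * PAGE_SHIFT + 2 on VR41xx).
 */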

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
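
/*
 * Editor's sketch (not built): how the helpers above compose into a
 * software walk of a kernel address on a 3-level configuration, where
 * pud_offset() comes from <asm-generic/pgtable-nopud.h> and simply
 * returns the pgd slot:
 *
 *	pte_t *pte = NULL;
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	if (!pud_none(*pud)) {
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *		if (pmd_present(*pmd))
 *			pte = pte_offset_kernel(pmd, addr);
 *	}
 */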

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages: high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
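
/*
 * Editor's sketch: the encode/decode macros round-trip for any type
 * that fits in 8 bits and offset that fits in 24, e.g.
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 * gives __swp_type(e) == 3 and __swp_offset(e) == 0x1234.
 */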

/*
 * Bits 0, 4, 6, and 7 are taken.  Let's leave bits 1, 2, 3, and 5 alone to
 * make things easier, and only use the upper 56 bits for the page offset...
 */
#define PTE_FILE_MAX_BITS	56

#define pte_to_pgoff(_pte)	((_pte).pte >> 8)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 8) | _PAGE_FILE })
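
/*
 * Editor's sketch: pgoff_to_pte() keeps the file offset above the low
 * eight status bits, so pte_to_pgoff(pgoff_to_pte(off)) == off for any
 * offset of up to PTE_FILE_MAX_BITS (56) bits.
 */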

#endif /* _ASM_PGTABLE_64_H */