arch/mips/include/asm/pgtable-32.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H
#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopmd.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif
/*
 * Regarding 32-bit MIPS huge page support (and the tradeoff it entails):
 *
 * We use the same huge page sizes as 64-bit MIPS. Assuming a 4KB page size,
 * our 2-level table layout would normally have a PGD entry cover a contiguous
 * 4MB virtual address region (pointing to a 4KB PTE page of 1,024 32-bit pte_t
 * pointers, each pointing to a 4KB physical page). The problem is that 4MB,
 * spanning both halves of a TLB EntryLo0,1 pair, requires 2MB hardware page
 * support, not one of the standard supported sizes (1MB,4MB,16MB,...).
 * To correct for this, when huge pages are enabled, we halve the number of
 * pointers a PTE page holds, making its last half go to waste. Correspondingly,
 * we double the number of PGD pages. Overall, page table memory overhead
 * increases to match 64-bit MIPS, but PTE lookups remain CPU cache-friendly.
 *
 * NOTE: We don't yet support huge pages if extended-addressing is enabled
 * (i.e. EVA, XPA, 36-bit Alchemy/Netlogic).
 */
extern int temp_tlb_entry;
/*
 * - add_temporary_entry() adds a temporary TLB entry. We use TLB entries
 *   starting at the top and working down. This is for populating the
 *   TLB before trap_init() puts the TLB miss handler in place. It
 *   should be used only for entries matching the actual page tables,
 *   to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);
/*
 * Basically we have the same two-level page tables as the i386 (the logical
 * three-level Linux page table layout, folded). Some day, when we have proper
 * page coloring support, we can have a 1% quicker TLB refill handling
 * mechanism; for now it is a bit slower, but it works even with the cache
 * aliasing problem that the R4k and above have.
 */
/* PGDIR_SHIFT determines the size of the area a top-level page table entry can map */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define PGDIR_SHIFT	(2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
#else
# define PGDIR_SHIFT	(2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#endif
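/*
 * Worked example (illustrative only), assuming a 4KB base page
 * (PAGE_SHIFT == 12), PTE_ORDER == 0 and a 4-byte pte_t (PTE_T_LOG2 == 2):
 *
 *   without huge pages: PGDIR_SHIFT = 2*12 + 0 - 2     = 22 -> 4MB per PGD entry
 *   with huge pages:    PGDIR_SHIFT = 2*12 + 0 - 2 - 1 = 21 -> 2MB per PGD entry
 *
 * A 2MB region maps as a pair of 1MB EntryLo0/EntryLo1 halves, one of the
 * standard hardware page sizes, which is the correction described above.
 */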
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * Entries per page directory level: we use a two-level layout, so
 * we don't really have any PUD/PMD directory physically.
 */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define __PGD_ORDER	(32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
#else
# define __PGD_ORDER	(32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#endif
#define PGD_ORDER	(__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	aieeee_attempt_to_allocate_pmd
#define PTE_ORDER	0
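/*
 * Worked example (illustrative only), continuing the 4KB-page assumption
 * with a 4-byte pgd_t (PGD_T_LOG2 == 2):
 *
 *   without huge pages: __PGD_ORDER = 32 - 3*12 + 2 + 2     = 0 -> one PGD page
 *   with huge pages:    __PGD_ORDER = 32 - 3*12 + 2 + 2 + 1 = 1 -> two PGD pages
 *
 * This is the doubling of PGD pages mentioned in the huge page note above.
 */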
#define PTRS_PER_PGD	(USER_PTRS_PER_PGD * 2)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
#else
# define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#endif
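/*
 * Worked example (illustrative only), again for 4KB pages and 4-byte pte_t:
 *
 *   without huge pages: PTRS_PER_PTE = 4096/4     = 1024 entries -> 4MB covered
 *   with huge pages:    PTRS_PER_PTE = 4096/4 / 2 =  512 entries -> 2MB covered
 *
 * i.e. the halved PTE page described in the huge page note above.
 */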
#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define VMALLOC_START	MAP_BASE

#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
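/*
 * Note: the 2*PAGE_SIZE subtraction leaves a two-page hole above the vmalloc
 * area, presumably as a guard gap so that overruns fault rather than spill
 * into the pkmap/fixmap region.
 */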
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PTRS_PER_PTE];
/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
#if defined(CONFIG_XPA)

#define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
				(pgprot_val(prot) & ~_PFNX_MASK);
	pte.pte_high = (pfn << _PFN_SHIFT) |
				(pgprot_val(prot) & ~_PFN_MASK);
	return pte;
}
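/*
 * As the shifts above show, an XPA PFN is split across both pte words: the
 * bits below _PAGE_PRESENT_SHIFT go into pte_high (shifted up by _PFN_SHIFT)
 * and the bits above it into the _PFNX_MASK portion of pte_low, since an
 * extended physical address no longer fits in pte_high alone.
 */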
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
	pte.pte_low = pgprot_val(prot);

	return pte;
}
#else

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
#define pte_page(x)		pfn_to_page(pte_pfn(x))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
/* Find an entry in the third-level page table. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
#if defined(CONFIG_CPU_R3K_TLB)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)			(((x).val >> 10) & 0x1f)
#define __swp_offset(x)			((x).val >> 15)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
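/*
 * Layout implied by the macros above: bits 0-9 of a swap pte stay zero
 * (keeping the hardware VALID bit clear), the 5-bit swap type occupies
 * bits 10-14, and the swap offset takes the remaining bits 15-31, i.e. up
 * to 2^17 offsets per swap type.
 */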
#else
#if defined(CONFIG_XPA)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 4) & 0x1f)
#define __swp_offset(x)			((x).val >> 9)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 2) & 0x1f)
#define __swp_offset(x)			((x).val >> 7)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })
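/*
 * In both variants above, the swap entry is stored entirely in pte_high and
 * pte_low is written as zero, so a swapped-out pte can never appear present;
 * the bits of pte_high below the type field are likewise left clear for the
 * VALID and GLOBAL flags noted in the comments.
 */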
#else
/*
 * Constraints:
 *      _PAGE_PRESENT at bit 0
 *      _PAGE_MODIFIED at bit 4
 *      _PAGE_GLOBAL at bit 6
 *      _PAGE_VALID at bit 7
 */
#define __swp_type(x)			(((x).val >> 8) & 0x1f)
#define __swp_offset(x)			((x).val >> 13)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
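/*
 * Layout implied by the macros above: the 5-bit swap type sits at bits 8-12
 * and the swap offset at bits 13-31, leaving bits 0-7 zero, which covers all
 * four constrained flag positions listed in the comment (bits 0, 4, 6 and 7).
 */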
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#endif /* defined(CONFIG_CPU_R3K_TLB) */

#endif /* _ASM_PGTABLE_32_H */