/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H
#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopmd.h>
/*
 * - add_wired_entry() add a fixed TLB entry, and move wired register
 */
extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
			    unsigned long entryhi, unsigned long pagemask);

/*
 * - add_temporary_entry() add a temporary TLB entry. We use TLB entries
 *   starting at the top and working down. This is for populating the
 *   TLB before trap_init() puts the TLB miss handler in place. It
 *   should be used only for entries matching the actual page tables,
 *   to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);
/* Basically we have the same two-level (which is the logical three level
 * Linux page table layout folded) page tables as the i386.  Some day
 * when we have proper page coloring support we can have a 1% quicker
 * tlb refill handling mechanism, but for now it is a bit slower but
 * works even with the cache aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
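
/*
 * Worked example (an editorial illustration, not part of the original
 * header): with 4 KiB pages (PAGE_SHIFT == 12), single-page pte tables
 * (PTE_ORDER == 0) and 4-byte ptes (PTE_T_LOG2 == 2), PGDIR_SHIFT is
 * 2*12 + 0 - 2 = 22, so each pgd entry covers PGDIR_SIZE = 4 MiB of
 * virtual address space.
 */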
/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#define __PGD_ORDER	(32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_ORDER	(__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	1
#define PTE_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
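
/*
 * Continuing the 4 KiB page illustration above (assuming 4-byte pgds and
 * ptes, i.e. PGD_T_LOG2 == PTE_T_LOG2 == 2): __PGD_ORDER = 32 - 36 + 2 + 2
 * = 0, giving a single-page pgd with PTRS_PER_PGD = PTRS_PER_PTE = 1024
 * entries, of which USER_PTRS_PER_PGD = 0x80000000 / 4 MiB = 512 cover
 * user space.
 */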
#define VMALLOC_START	MAP_BASE

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
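
/*
 * The 2*PAGE_SIZE subtraction presumably leaves an unmapped guard gap
 * between the top of the vmalloc area and the pkmap/fixmap mappings above
 * it, so a vmalloc overrun faults rather than spilling into them (an
 * interpretation, not stated in the original header).
 */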
#ifdef CONFIG_64BIT_PHYS_ADDR
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
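
/*
 * Rationale sketch (editorial, not original text): pointing empty pmds at
 * invalid_pte_table, a page of all-invalid ptes, means a page table walk
 * can always dereference the pmd and simply pick up an invalid pte for
 * unmapped addresses instead of having to check for a NULL pointer.
 */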
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
	pte.pte_low = pgprot_val(prot);
	return pte;
}

#else

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) */
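
/*
 * Reading of the 64-bit physical address case above (an interpretation of
 * the code, not original text): with a two-word pte, pte_high holds the
 * pfn in bits 6 and up together with a copy of the low six protection
 * bits, while pte_low carries the full pgprot value; pte_pfn() therefore
 * just shifts pte_high down by six.
 */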
#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
#define pte_unmap_nested(pte) ((void)(pte))
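
/*
 * Usage sketch (illustrative only, assuming the pud/pmd folding provided
 * by <asm-generic/pgtable-nopmd.h>): walking down to the pte for a kernel
 * virtual address might look like
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */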
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)			(((x).val >> 10) & 0x1f)
#define __swp_offset(x)			((x).val >> 15)
#define __swp_entry(type,offset)	\
	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })

/*
 * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
 */
#define PTE_FILE_MAX_BITS	28

#define pte_to_pgoff(_pte)	((((_pte).pte >> 1 ) & 0x07) | \
				 (((_pte).pte >> 2 ) & 0x38) | \
				 (((_pte).pte >> 10) <<  6 ))

#define pgoff_to_pte(off)	((pte_t) { (((off) & 0x07) << 1 ) | \
					   (((off) & 0x38) << 2 ) | \
					   (((off) >>  6 ) << 10) | \
					   _PAGE_FILE })
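
/*
 * Worked layout (editorial illustration): pgoff_to_pte() places offset
 * bits 0-2 in pte bits 1-3, offset bits 3-5 in pte bits 5-7 and the
 * remaining bits in pte bits 10-31, skipping the reserved bits 0, 4, 8
 * and 9; pte_to_pgoff() is the exact inverse, so 3 + 3 + 22 = 28 offset
 * bits round-trip, matching PTE_FILE_MAX_BITS.
 */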
#else

/* Swap entries must have VALID and GLOBAL bits cleared. */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __swp_type(x)			(((x).val >> 2) & 0x1f)
#define __swp_offset(x)			((x).val >> 7)
#define __swp_entry(type,offset)	\
	((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#else
#define __swp_type(x)			(((x).val >> 8) & 0x1f)
#define __swp_offset(x)			((x).val >> 13)
#define __swp_entry(type,offset)	\
	((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
 * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
 */
#define PTE_FILE_MAX_BITS	30

#define pte_to_pgoff(_pte)	((_pte).pte_high >> 2)
#define pgoff_to_pte(off)	((pte_t) { _PAGE_FILE, (off) << 2 })

#else
/*
 * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
 */
#define PTE_FILE_MAX_BITS	28

#define pte_to_pgoff(_pte)	((((_pte).pte >> 1) & 0x7) | \
				 (((_pte).pte >> 2) & 0x8) | \
				 (((_pte).pte >> 8) << 4))

#define pgoff_to_pte(off)	((pte_t) { (((off) & 0x7) << 1) | \
					   (((off) & 0x8) << 2) | \
					   (((off) >> 4) << 8) | \
					   _PAGE_FILE })
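
/*
 * Worked layout for this branch (editorial illustration): offset bits 0-2
 * go to pte bits 1-3, offset bit 3 to pte bit 5, and the remaining bits to
 * pte bits 8-31, avoiding the reserved bits 0, 4, 6 and 7; that gives
 * 3 + 1 + 24 = 28 usable offset bits, matching PTE_FILE_MAX_BITS.
 */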
#endif

#endif
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)	((pte_t) { 0, (x).val })
#else
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#endif
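
/*
 * Illustration (not original text): together with __swp_type() and
 * __swp_offset() above, these let swap entries round-trip through a pte,
 * e.g.
 *
 *	swp_entry_t ent = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(ent);
 *	ent = __pte_to_swp_entry(pte);	/\* recovers the same type/offset *\/
 *
 * In the 64-bit physical address case the entry is stored entirely in
 * pte_high while pte_low is zeroed.
 */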

#endif /* _ASM_PGTABLE_32_H */