/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopmd.h>
/*
 * - add_wired_entry() add a fixed TLB entry, and move wired register
 */
extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
			    unsigned long entryhi, unsigned long pagemask);
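
/*
 * Usage sketch (illustrative only, not part of this header): a MIPS TLB
 * entry maps an even/odd pair of virtual pages, so entrylo0/entrylo1 carry
 * the two physical halves, entryhi holds the virtual address (and ASID),
 * and pagemask selects the page size.  Platform setup code might pin a
 * region roughly like
 *
 *	add_wired_entry(lo0, lo1, vaddr, PM_16M);
 *
 * where lo0/lo1/vaddr are caller-provided placeholders and PM_16M is one
 * of the PM_* page mask values defined elsewhere.
 */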
/*
 * - add_temporary_entry() add a temporary TLB entry. We use TLB entries
 *   starting at the top and working down. This is for populating the
 *   TLB before trap_init() puts the TLB miss handler in place. It
 *   should be used only for entries matching the actual page tables,
 *   to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);
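
/*
 * Usage sketch (illustrative only): early boot code might map a device
 * page before trap_init() installs the real TLB refill handler:
 *
 *	int ret = add_temporary_entry(lo0, lo1, vaddr, PM_4K);
 *	if (ret < 0)
 *		panic("out of temporary TLB entries");
 *
 * Treating a negative return value as failure is an assumption inferred
 * from the int return type; lo0/lo1/vaddr are caller-provided placeholders.
 */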
/* Basically we have the same two-level (which is the logical three level
 * Linux page table layout folded) page tables as the i386.  Some day
 * when we have proper page coloring support we can have a 1% quicker
 * tlb refill handling mechanism, but for now it is a bit slower but
 * works even with the cache aliasing problem the R4k and above have.
 */
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#define __PGD_ORDER	(32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_ORDER	(__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	1
#define PTE_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
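
/*
 * Worked example (assuming 4 KiB pages, 4-byte PTEs and PGD entries, and
 * PTE_ORDER == 0, i.e. PAGE_SHIFT == 12, PTE_T_LOG2 == PGD_T_LOG2 == 2):
 *
 *	PGDIR_SHIFT  = 2*12 + 0 - 2 = 22	-> each PGD entry maps 4 MB
 *	__PGD_ORDER  = 32 - 36 + 2 + 2 = 0	-> the PGD fits in one page
 *	PTRS_PER_PGD = 4096 / 4 = 1024
 *	PTRS_PER_PTE = 4096 / 4 = 1024
 *
 * 1024 PGD entries of 4 MB each cover the full 4 GB of 32-bit address
 * space; other page sizes shift these numbers accordingly.
 */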
#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define VMALLOC_START	MAP_BASE
#define PKMAP_BASE		(0xfe000000UL)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
#ifdef CONFIG_64BIT_PHYS_ADDR
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}
#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
	pte.pte_low = pgprot_val(prot);
	return pte;
}
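
/*
 * Layout sketch for the 64-bit physical address case (derived from the
 * code above, values purely illustrative): the frame number sits in
 * pte_high above bit 6, with the low six protection/cache bits copied
 * alongside it, e.g.
 *
 *	pfn_pte(0x12345, prot) -> pte_high = (0x12345 << 6) | (pgprot_val(prot) & 0x3f)
 *	                          pte_low  = pgprot_val(prot)
 */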
#else

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) */

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte)		((void)(pte))
#define pte_unmap_nested(pte)	((void)(pte))
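
/*
 * Walk sketch (illustrative only, not an API defined here): with the PUD
 * and PMD levels folded by asm-generic/pgtable-nopmd.h, resolving an
 * address to its PTE looks roughly like
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);	(folded, same page as the pgd)
 *	pmd_t *pmd = pmd_offset(pud, addr);	(folded, same page as the pud)
 *	pte_t *pte;
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *		pte = pte_offset_kernel(pmd, addr);
 *
 * The error handling shown is a sketch, not a prescription.
 */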
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)		(((x).val >> 10) & 0x1f)
#define __swp_offset(x)		((x).val >> 15)
#define __swp_entry(type,offset) \
	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
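
/*
 * Encoding sketch (values illustrative): the 5-bit swap type sits at bits
 * 10..14 and the offset starts at bit 15, so
 *
 *	__swp_entry(3, 0x1234) = (3 << 10) | (0x1234 << 15) = 0x091A0C00
 *
 * and __swp_type()/__swp_offset() recover 3 and 0x1234 from it.  Bits
 * 0..9 are left clear, which keeps the VALID bit clear as required above.
 */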
/*
 * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
 */
#define PTE_FILE_MAX_BITS	28
#define pte_to_pgoff(_pte)	((((_pte).pte >> 1 ) & 0x07) | \
				 (((_pte).pte >> 2 ) & 0x38) | \
				 (((_pte).pte >> 10) <<  6 ))

#define pgoff_to_pte(off)	((pte_t) { (((off) & 0x07) << 1 ) | \
					   (((off) & 0x38) << 2 ) | \
					   (((off) >>  6 ) << 10) | \
					   _PAGE_FILE })
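
/*
 * Split sketch: pgoff_to_pte() scatters the 28-bit file offset around the
 * four reserved PTE bits.  Offset bits 0..2 land at PTE bits 1..3, offset
 * bits 3..5 at PTE bits 5..7, and offset bits 6..27 from PTE bit 10
 * upward, skipping the taken bits 0, 4, 8 and 9; pte_to_pgoff() reads the
 * same three fields back, so the two macros are exact inverses.
 */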
#else

/* Swap entries must have VALID and GLOBAL bits cleared. */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __swp_type(x)		(((x).val >> 2) & 0x1f)
#define __swp_offset(x)		((x).val >> 7)
#define __swp_entry(type,offset) \
	((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#else
#define __swp_type(x)		(((x).val >> 8) & 0x1f)
#define __swp_offset(x)		((x).val >> 13)
#define __swp_entry(type,offset) \
	((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
 * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
 */
#define PTE_FILE_MAX_BITS	30

#define pte_to_pgoff(_pte)	((_pte).pte_high >> 2)
#define pgoff_to_pte(off)	((pte_t) { _PAGE_FILE, (off) << 2 })
#else
/*
 * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
 */
#define PTE_FILE_MAX_BITS	28

#define pte_to_pgoff(_pte)	((((_pte).pte >> 1) & 0x7) | \
				 (((_pte).pte >> 2) & 0x8) | \
				 (((_pte).pte >> 8) <<  4))

#define pgoff_to_pte(off)	((pte_t) { (((off) & 0x7) << 1) | \
					   (((off) & 0x8) << 2) | \
					   (((off) >>  4) << 8) | \
					   _PAGE_FILE })
#endif

#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)	((pte_t) { 0, (x).val })
#else
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#endif

#endif /* _ASM_PGTABLE_32_H */