#ifndef _ASM_M32R_PGTABLE_H
#define _ASM_M32R_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the M32R, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * M32R MMU expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the M32R page table tree.
 */

/* CAUTION!: If you change macro definitions in this file, you might have to
 * change arch/m32r/mmu.S manually.
 */

#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/addrspace.h>
#include <asm/bitops.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */
/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.  The M32R uses the traditional
 * 2-level layout, so only the 2-level definitions are pulled in here.
 */
#ifndef __ASSEMBLY__
#include <asm/pgtable-2level.h>
#endif

#define pgtable_cache_init()	do { } while (0)

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0
#ifndef __ASSEMBLY__

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_START	KSEG2
#define VMALLOC_END	KSEG3

/*
 * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
 * of the Pentium details, but assuming intel did the straightforward
 * thing, this bit set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
/*
 *     M32R TLB format
 *
 *     [0]    [1:19]           [20:23]       [24:31]
 *     +-----------------------+----+-------------+
 *     |          VPN          |0000|    ASID     |
 *     +-----------------------+----+-------------+
 *     +-+---------------------+----+-+---+-+-+-+-+
 *     |0         PPN          |0000|N|AC |L|G|V| |
 *     +-+---------------------+----+-+---+-+-+-+-+
 */
#define _PAGE_BIT_DIRTY		0	/* software */
#define _PAGE_BIT_FILE		0	/* when !present: nonlinear file
					   mapping */
#define _PAGE_BIT_PRESENT	1	/* Valid */
#define _PAGE_BIT_GLOBAL	2	/* Global */
#define _PAGE_BIT_LARGE		3	/* Large */
#define _PAGE_BIT_EXEC		4	/* Execute */
#define _PAGE_BIT_WRITE		5	/* Write */
#define _PAGE_BIT_READ		6	/* Read */
#define _PAGE_BIT_NONCACHABLE	7	/* Non cachable */
#define _PAGE_BIT_USER		8	/* software */
#define _PAGE_BIT_ACCESSED	9	/* software */

#define _PAGE_DIRTY	\
	(1UL << _PAGE_BIT_DIRTY)	/* software : page changed */
#define _PAGE_FILE	\
	(1UL << _PAGE_BIT_FILE)		/* when !present: nonlinear file
					   mapping */
#define _PAGE_PRESENT	\
	(1UL << _PAGE_BIT_PRESENT)	/* Valid : Page is Valid */
#define _PAGE_GLOBAL	\
	(1UL << _PAGE_BIT_GLOBAL)	/* Global */
#define _PAGE_LARGE	\
	(1UL << _PAGE_BIT_LARGE)	/* Large */
#define _PAGE_EXEC	\
	(1UL << _PAGE_BIT_EXEC)		/* Execute */
#define _PAGE_WRITE	\
	(1UL << _PAGE_BIT_WRITE)	/* Write */
#define _PAGE_READ	\
	(1UL << _PAGE_BIT_READ)		/* Read */
#define _PAGE_NONCACHABLE	\
	(1UL << _PAGE_BIT_NONCACHABLE)	/* Non cachable */
#define _PAGE_USER	\
	(1UL << _PAGE_BIT_USER)		/* software : user space access
					   allowed */
#define _PAGE_ACCESSED	\
	(1UL << _PAGE_BIT_ACCESSED)	/* software : page referenced */
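
/*
 * For quick reference, the single-bit masks that follow from the
 * _PAGE_BIT_* numbers above (purely derived values, not new definitions):
 *
 *	_PAGE_DIRTY / _PAGE_FILE  0x001		_PAGE_WRITE		0x020
 *	_PAGE_PRESENT		  0x002		_PAGE_READ		0x040
 *	_PAGE_GLOBAL		  0x004		_PAGE_NONCACHABLE	0x080
 *	_PAGE_LARGE		  0x008		_PAGE_USER		0x100
 *	_PAGE_EXEC		  0x010		_PAGE_ACCESSED		0x200
 */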
#define _PAGE_TABLE	\
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_USER \
	| _PAGE_ACCESSED | _PAGE_DIRTY )
#define _KERNPG_TABLE	\
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
	| _PAGE_DIRTY )
#define _PAGE_CHG_MASK	\
	( PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY )
#ifdef CONFIG_MMU
#define PAGE_NONE	\
	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED	\
	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_USER \
		| _PAGE_ACCESSED)
#define PAGE_SHARED_X	\
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ \
		| _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	\
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_USER \
		| _PAGE_ACCESSED)
#define PAGE_COPY_X	\
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_USER \
		| _PAGE_ACCESSED)
#define PAGE_READONLY	\
	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY_X	\
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_USER \
		| _PAGE_ACCESSED)

#define __PAGE_KERNEL	\
	( _PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ | _PAGE_DIRTY \
	| _PAGE_ACCESSED )
#define __PAGE_KERNEL_RO	( __PAGE_KERNEL & ~_PAGE_WRITE )
#define __PAGE_KERNEL_NOCACHE	( __PAGE_KERNEL | _PAGE_NONCACHABLE)

#define MAKE_GLOBAL(x)	__pgprot((x) | _PAGE_GLOBAL)

#define PAGE_KERNEL		MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
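
/*
 * Worked example (derived from the masks above, shown only for
 * illustration): __PAGE_KERNEL is PRESENT|EXEC|WRITE|READ|DIRTY|ACCESSED
 * = 0x002|0x010|0x020|0x040|0x001|0x200 = 0x273, so PAGE_KERNEL
 * (MAKE_GLOBAL() adds _PAGE_GLOBAL = 0x004) is __pgprot(0x277).
 */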
#else /* !CONFIG_MMU */
#define PAGE_NONE		__pgprot(0)
#define PAGE_SHARED		__pgprot(0)
#define PAGE_SHARED_X		__pgprot(0)
#define PAGE_COPY		__pgprot(0)
#define PAGE_COPY_X		__pgprot(0)
#define PAGE_READONLY		__pgprot(0)
#define PAGE_READONLY_X		__pgprot(0)

#define PAGE_KERNEL		__pgprot(0)
#define PAGE_KERNEL_RO		__pgprot(0)
#define PAGE_KERNEL_NOCACHE	__pgprot(0)
#endif /* CONFIG_MMU */
/*
 * The i386 can't do page protection for execute, and considers it
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY_X
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
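
/*
 * Note (illustrative, not part of the original header): the generic mm
 * code indexes these entries through protection_map[] in mm/mmap.c,
 * using the read/write/exec/shared bits of a vma's vm_flags.  For
 * example, a private PROT_READ|PROT_WRITE mapping selects
 * __P011 = PAGE_COPY_X, i.e. the write bit is withheld so that the
 * first store faults and triggers copy-on-write.
 */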
/* page table for 0-4MB for everybody */

#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(xp)	do { set_pte(xp, __pte(0)); } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) \
	!= _KERNPG_TABLE)

#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static __inline__ int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static __inline__ int pte_read(pte_t pte)
{
	return pte_val(pte) & _PAGE_READ;
}

static __inline__ int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static __inline__ int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static __inline__ int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static __inline__ int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}
/*
 * The following only works if pte_present() is not true.
 */
static __inline__ int pte_file(pte_t pte)
{
	return pte_val(pte) & _PAGE_FILE;
}
static __inline__ pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_READ;
	return pte;
}

static __inline__ pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_EXEC;
	return pte;
}

static __inline__ pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static __inline__ pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static __inline__ pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static __inline__ pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	return pte;
}

static __inline__ pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_EXEC;
	return pte;
}

static __inline__ pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static __inline__ pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static __inline__ pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}
static __inline__ int ptep_test_and_clear_dirty(pte_t *ptep)
{
	return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
}

static __inline__ int ptep_test_and_clear_young(pte_t *ptep)
{
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
}

static __inline__ void ptep_set_wrprotect(pte_t *ptep)
{
	clear_bit(_PAGE_BIT_WRITE, ptep);
}

static __inline__ void ptep_mkdirty(pte_t *ptep)
{
	set_bit(_PAGE_BIT_DIRTY, ptep);
}
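
/*
 * Note (illustrative): unlike the pte_mk*() helpers above, which operate
 * on a pte value passed by copy, these ptep_*() helpers use the atomic
 * bitops from <asm/bitops.h> directly on the pte in memory, so concurrent
 * updates of other bits are not lost.  A hypothetical caller clearing the
 * referenced bit for page aging might do:
 *
 *	if (ptep_test_and_clear_young(ptep))
 *		;	// page was referenced since the last check
 */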
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), pgprot)
static __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK)
		| pgprot_val(newprot)));

	return pte;
}
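
/*
 * Illustrative use (not from the original header): pte_modify() keeps
 * only the pfn, accessed and dirty bits (_PAGE_CHG_MASK) and replaces
 * the remaining protection bits, e.g. when a mapping is downgraded:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */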
#define page_pte(page)	page_pte_prot(page, __pgprot(0))

/*
 * Set a pmd entry: make it point at the (page-aligned) page table
 * given by ptep.
 */
static __inline__ void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}
#define pmd_page_kernel(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) >> PAGE_SHIFT) - PFN_BASE))
#endif /* !CONFIG_DISCONTIGMEM */
/* to find an entry in a page-table-directory. */
#define pgd_index(address)	\
	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pmd_index(address)	\
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pte_index(address)	\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address)	\
	((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address)	\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address)	pte_offset_map(dir, address)
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
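
/*
 * Illustrative sketch (not part of the original header): looking up the
 * pte for a kernel virtual address walks the folded two-level tree with
 * the helpers above plus pmd_offset() from <asm/pgtable-2level.h>:
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset_kernel(pmd, address);
 *
 * Because the mid level is folded away, pmd_offset() simply reinterprets
 * the pgd entry as the pmd entry.
 */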
/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 1) & 0x3f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	\
	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
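
/*
 * Layout note (derived from the macros above): in a swapped-out,
 * non-present pte the swap type lives in bits [1:6] and the swap offset
 * starts at bit 8; bit 0 (_PAGE_FILE) stays clear so the entry is not
 * mistaken for a nonlinear file pte.  For example, a hypothetical entry
 * with type 2 and offset 0x1234 encodes as
 * __swp_entry(2, 0x1234) -> val = (2 << 1) | (0x1234 << 8) = 0x123404.
 */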
#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range	remap_page_range

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _ASM_M32R_PGTABLE_H */