include/asm-m32r/pgtable.h
#ifndef _ASM_M32R_PGTABLE_H
#define _ASM_M32R_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the M32R, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * M32R mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the M32R page table tree.
 */

/* CAUTION!: If you change macro definitions in this file, you might have to
 * change arch/m32r/mmu.S manually.
 */
#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/addrspace.h>
#include <asm/bitops.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */
#ifndef __ASSEMBLY__
#include <asm/pgtable-2level.h>
#endif

#define pgtable_cache_init()	do { } while (0)

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_START		KSEG2
#define VMALLOC_END		KSEG3
/*
 *     M32R TLB format
 *
 *     [0]    [1:19]            [20:23]   [24:31]
 *     +-----------------------+----+-------------+
 *     |          VPN          |0000|    ASID     |
 *     +-----------------------+----+-------------+
 *     +-+---------------------+----+-+---+-+-+-+-+
 *     |0         PPN          |0000|N|AC |L|G|V| |
 *     +-+---------------------+----+-+---+-+-+-+-+
 *                                     RWX
 */
#define _PAGE_BIT_DIRTY		0	/* software: page changed */
#define _PAGE_BIT_FILE		0	/* when !present: nonlinear file
					   mapping */
#define _PAGE_BIT_PRESENT	1	/* Valid: page is valid */
#define _PAGE_BIT_GLOBAL	2	/* Global */
#define _PAGE_BIT_LARGE		3	/* Large */
#define _PAGE_BIT_EXEC		4	/* Execute */
#define _PAGE_BIT_WRITE		5	/* Write */
#define _PAGE_BIT_READ		6	/* Read */
#define _PAGE_BIT_NONCACHABLE	7	/* Non cachable */
#define _PAGE_BIT_ACCESSED	8	/* software: page referenced */
#define _PAGE_BIT_PROTNONE	9	/* software: if not present */

#define _PAGE_DIRTY		(1UL << _PAGE_BIT_DIRTY)
#define _PAGE_FILE		(1UL << _PAGE_BIT_FILE)
#define _PAGE_PRESENT		(1UL << _PAGE_BIT_PRESENT)
#define _PAGE_GLOBAL		(1UL << _PAGE_BIT_GLOBAL)
#define _PAGE_LARGE		(1UL << _PAGE_BIT_LARGE)
#define _PAGE_EXEC		(1UL << _PAGE_BIT_EXEC)
#define _PAGE_WRITE		(1UL << _PAGE_BIT_WRITE)
#define _PAGE_READ		(1UL << _PAGE_BIT_READ)
#define _PAGE_NONCACHABLE	(1UL << _PAGE_BIT_NONCACHABLE)
#define _PAGE_ACCESSED		(1UL << _PAGE_BIT_ACCESSED)
#define _PAGE_PROTNONE		(1UL << _PAGE_BIT_PROTNONE)
#define _PAGE_TABLE	\
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
	| _PAGE_DIRTY )
#define _KERNPG_TABLE	\
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
	| _PAGE_DIRTY )
#define _PAGE_CHG_MASK	\
	( PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY )
#ifdef CONFIG_MMU
#define PAGE_NONE	\
	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	\
	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_SHARED_EXEC	\
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ \
		| _PAGE_ACCESSED)
#define PAGE_COPY	\
	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_COPY_EXEC	\
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY	\
	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY_EXEC	\
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)

#define __PAGE_KERNEL	\
	( _PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ | _PAGE_DIRTY \
	| _PAGE_ACCESSED )
#define __PAGE_KERNEL_RO	( __PAGE_KERNEL & ~_PAGE_WRITE )
#define __PAGE_KERNEL_NOCACHE	( __PAGE_KERNEL | _PAGE_NONCACHABLE)

#define MAKE_GLOBAL(x)	__pgprot((x) | _PAGE_GLOBAL)

#define PAGE_KERNEL		MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#else
#define PAGE_NONE		__pgprot(0)
#define PAGE_SHARED		__pgprot(0)
#define PAGE_SHARED_EXEC	__pgprot(0)
#define PAGE_COPY		__pgprot(0)
#define PAGE_COPY_EXEC		__pgprot(0)
#define PAGE_READONLY		__pgprot(0)
#define PAGE_READONLY_EXEC	__pgprot(0)

#define PAGE_KERNEL		__pgprot(0)
#define PAGE_KERNEL_RO		__pgprot(0)
#define PAGE_KERNEL_NOCACHE	__pgprot(0)
#endif /* CONFIG_MMU */
	/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
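
/*
 * Illustrative sketch, not part of this header: the generic mm code of
 * this era builds its protection_map[] from the __Pxxx (private,
 * copy-on-write) and __Sxxx (shared) entries above, indexed by the
 * read/write/exec/shared bits of a mapping.  The array below mirrors
 * mm/mmap.c and is shown only to make the indexing scheme concrete:
 *
 *	pgprot_t protection_map[16] = {
 *		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
 *		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 *	};
 */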
/* page table for 0-4MB for everybody */

#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)	((pmd_val(x) & ~PAGE_MASK) != _KERNPG_TABLE)

#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & _PAGE_READ;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}
/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & _PAGE_FILE;
}
static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_READ;
	return pte;
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	return pte;
}

static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}
static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_WRITE, ptep);
}
/*
 * Macro and implementation to make a page protection as uncachable.
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot |= _PAGE_NONCACHABLE;
	return __pgprot(prot);
}

#define pgprot_writecombine(prot) pgprot_noncached(prot)
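
/*
 * Illustrative usage sketch, not part of this header (hypothetical
 * driver mmap code): a driver mapping device memory into user space
 * would typically strip caching from the vma protections before
 * handing them to remap_pfn_range():
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	if (remap_pfn_range(vma, vma->vm_start, pfn,
 *			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
 *		return -EAGAIN;
 */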
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) \
		| pgprot_val(newprot)));

	return pte;
}

#define page_pte(page)	page_pte_prot(page, __pgprot(0))
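
/*
 * Illustrative sketch, not part of this header (mirrors generic
 * fault-handling code of this era, shown only to make the mk_pte()
 * usage concrete): a newly allocated page is typically installed by
 * combining it with the vma's protections and marking it young/dirty:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	set_pte_at(mm, address, ptep, entry);
 */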
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}

#define pmd_page_kernel(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) >> PAGE_SHIFT) - PFN_BASE))
#endif /* !CONFIG_DISCONTIGMEM */
/* to find an entry in a page-table-directory. */
#define pgd_index(address)	\
	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pmd_index(address)	\
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pte_index(address)	\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address)	\
	((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address)	\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address)	pte_offset_map(dir, address)
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
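
/*
 * Illustrative walk sketch, not part of this header (hypothetical
 * helper; assumes pmd_offset() as provided by asm/pgtable-2level.h and
 * the folded mid levels described at the top of this file): kernel
 * code of this era walks the tree roughly as follows, checking
 * pte_present() before using the pte_*() accessors above:
 *
 *	static pte_t *lookup_kernel_pte(unsigned long address)
 *	{
 *		pgd_t *pgd = pgd_offset_k(address);
 *		pmd_t *pmd = pmd_offset(pgd, address);
 *
 *		if (pmd_none(*pmd) || pmd_bad(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, address);
 *	}
 */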
/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 2) & 0x3f)
#define __swp_offset(x)			((x).val >> 10)
#define __swp_entry(type, offset)	\
	((swp_entry_t) { ((type) << 2) | ((offset) << 10) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
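
/*
 * Layout implied by the macros above (worked example for illustration
 * only): a swap pte leaves _PAGE_FILE (bit 0) and _PAGE_PRESENT (bit 1)
 * clear, keeps the swap type in bits [2:7] (6 bits, hence the 0x3f
 * mask) and the swap offset from bit 10 upward, so that:
 *
 *	swp_entry_t ent = __swp_entry(3, 0x1234);
 *	// __swp_type(ent)   == 3
 *	// __swp_offset(ent) == 0x1234
 */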
#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range(vma, vaddr, paddr, size, prot)	\
	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_PGTABLE_H */