/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif

#ifndef CONFIG_MMU

#define pgd_present(pgd)	(1) /* pages are always present on non MMU */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
#define pmd_offset(a, b)	((void *) 0)

#define PAGE_NONE	__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_SHARED	__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_COPY	__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_READONLY	__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_KERNEL	__pgprot(0) /* these mean nothing to non MMU */

#define pgprot_noncached(x)	(x)
#define pgprot_writecombine	pgprot_noncached
#define pgprot_device		pgprot_noncached

#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ, off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define ZERO_PAGE(vaddr)	({ BUG(); NULL; })

#define swapper_pg_dir		((pgd_t *) NULL)

#define arch_enter_lazy_cpu_mode()	do {} while (0)

#define pgprot_noncached_wc(prot)	prot

/*
 * All 32bit addresses are effectively valid for vmalloc...
 * Sort of meaningless for non-VM targets.
 */
#define VMALLOC_START	0
#define VMALLOC_END	0xffffffff

#else /* CONFIG_MMU */

#include <asm-generic/pgtable-nopmd.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS	0UL

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */

static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area
 * of 32Mb. */
#define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END	ioremap_bot
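
/*
 * Worked example (illustrative, not from the original header): with the
 * usual CONFIG_KERNEL_START of 0xc0000000 and a CONFIG_LOWMEM_SIZE of
 * 0x02000000 (the pinned 32Mb mentioned above), VMALLOC_START evaluates
 * to 0xc2000000, and the vmalloc area then runs up to wherever
 * ioremap_bot has been lowered to by ioremap() users.
 */
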
#endif /* __ASSEMBLY__ */

/*
 * Macros to mark a page protection value as "uncacheable".
 */
#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
							_PAGE_WRITETHRU)

#define pgprot_noncached(prot) \
			(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					_PAGE_NO_CACHE | _PAGE_GUARDED))
#define pgprot_noncached_wc(prot) \
			(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
							_PAGE_NO_CACHE))
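
/*
 * Illustrative usage sketch (an assumption, not part of this header): a
 * driver exposing device registers to userspace would typically re-apply
 * the uncached attributes to the vma's protections before remapping:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn,
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);
 *
 * pgprot_noncached() first clears all three cache-control bits and then
 * sets I and G, so stacking it on an already-uncached pgprot is safe.
 */
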
/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings. We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code. Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting PTEs from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4-, or 8-entry,
 * fully-associative TLB which serves as a first level to the shared TLB.
 * These two TLBs are known as the UTLB and ITLB, respectively (see "mmu.h"
 * for definitions).
 */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 */

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
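
/*
 * Worked example (illustrative): with 4k pages PAGE_SHIFT is 12 and
 * PTE_SHIFT is 10, so PGDIR_SHIFT is 22. Each top-level entry then maps
 * PGDIR_SIZE = 1 << 22 = 4MB, and covering the 32-bit address space takes
 * 1 << (32 - 22) = 1024 PGD entries, i.e. exactly one 4k page of 4-byte
 * pointers -- the "1-page 1024-entry pgdir" described above.
 */
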
/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS	(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS	(PTRS_PER_PGD - USER_PGD_PTRS)

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
		__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a Linux-style PTE. These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */

/* There are several potential gotchas here. The hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27   28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......    W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this:
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 *   support down to 1k pages); this is done in the TLBMiss exception
 *   handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 *   of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
 *   miss handler. Bit 27 is PAGE_USER, thus selecting the correct
 *   zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 *   entries use the top 30 bits. Because 4xx doesn't support SMP
 *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
 *   is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving us only bits 20, 21, 24, 25, 26, and 30 for
 *   software PTE bits. We actually use bits 21, 24, 25, and 30
 *   respectively for the software bits ACCESSED, DIRTY, RW, and
 *   PRESENT.
 */

/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define _PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define _PAGE_USER	0x010	/* matches one of the zone permission bits */
#define _PAGE_RW	0x040	/* software: writes permitted */
#define _PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_HWWRITE	0x100	/* hardware: dirty & RW, set in exception */
#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
#define _PMD_PRESENT	PAGE_MASK
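
/*
 * Illustrative example (not from the original header): a present,
 * referenced, writable user page carries
 * _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER | _PAGE_RW
 * = 0x002 | 0x400 | 0x010 | 0x040 = 0x452 in its low flag bits, with the
 * physical page number held in the PAGE_MASK-covered high bits.
 */
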
/*
 * Some bits are unused...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose. -- paulus.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)

/*
 * We consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
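
/*
 * Illustrative note (an assumption about the generic mm's protection_map
 * indexing, not part of this header): the fault path indexes these
 * tables with the read/write/exec bits of vm_flags. For example, an
 * mmap() with PROT_READ|PROT_WRITE on a MAP_PRIVATE mapping selects
 * __P011, i.e. PAGE_COPY_X here; since PAGE_COPY_X lacks _PAGE_RW, the
 * page starts out read-only and only becomes writable through the
 * copy-on-write fault.
 */
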
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)		(mem_map + (unsigned long) \
				((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
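
/*
 * Illustrative round trip (assuming 4k pages, PAGE_SHIFT == 12): for
 * pfn 0x12345 and PAGE_KERNEL, pfn_pte() yields
 * pte_val = (0x12345 << 12) | pgprot_val(PAGE_KERNEL) = 0x123457c2
 * with the flag values above, and pte_pfn() recovers 0x12345 by shifting
 * the flag bits back out.
 */
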
#ifndef __ASSEMBLY__
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
	{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
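
/*
 * Illustrative usage (an assumption mirroring the generic mm pattern,
 * not from the original header): fault handlers derive a new PTE value
 * functionally, e.g.
 *
 *	pte_t entry = pte_mkyoung(*ptep);
 *	if (write_access)
 *		entry = pte_mkdirty(pte_mkwrite(entry));
 *
 * Note these helpers take and return a pte_t by value; they never touch
 * the page table itself, which is what set_pte_at()/pte_update() below
 * are for.
 */
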
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) \
({									\
	pte_t pte;							\
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
			pgprot_val(pgprot);				\
	pte;								\
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/*
 * Atomic PTE updates.
 *
 * pte_update() clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p + 1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
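
/*
 * Worked example (illustrative): if pte_t were a 64-bit quantity on a
 * big-endian 32-bit platform, p + 1 points just past the entry and
 * subtracting 4 bytes lands on its low word (byte offset 4). If pte_t
 * is 32 bits, (unsigned long)(p + 1) - 4 is simply (unsigned long)p,
 * so the same expression works for both layouts.
 */
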
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long flags, old, tmp;

	raw_local_irq_save(flags);

	__asm__ __volatile__(	"lw	%0, %2, r0	\n"
				"andn	%1, %0, %3	\n"
				"or	%1, %1, %4	\n"
				"sw	%1, %2, r0	\n"
			: "=&r" (old), "=&r" (tmp)
			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
			: "cc");

	raw_local_irq_restore(flags);

	return old;
}

/*
 * set_pte() stores a linux PTE into the linux page table.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep,
			(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}*/

static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

/*#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/

/* Convert pmd entry to page */
/* our pmd entry is an effective address of the pte table */
/* returns effective address of the pmd entry */
#define pmd_page_kernel(pmd)	((unsigned long) (pmd_val(pmd) & PAGE_MASK))

/* returns the struct page * of the pmd entry */
#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte)
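
/*
 * Illustrative walk (a sketch, not from the original header; the exact
 * folded-level helpers come from pgtable-nopmd/nopud and vary with
 * kernel version): looking up the kernel PTE for a virtual address va
 * would follow the standard two-level pattern:
 *
 *	pgd_t *pgd = pgd_offset_k(va);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, va), va);
 *	pte_t *pte = pmd_present(*pmd) ? pte_offset_kernel(pmd, va) : NULL;
 */
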
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used). -- paulus
 */
#define __swp_type(entry)	((entry).val & 0x3f)
#define __swp_offset(entry)	((entry).val >> 6)
#define __swp_entry(type, offset) \
		((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
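
/*
 * Worked example (illustrative): __swp_entry(3, 0x100) gives
 * val = 3 | (0x100 << 6) = 0x4003; __swp_entry_to_pte() shifts it left
 * by 2 to 0x1000c, keeping bits 0-1 clear so the entry can never be
 * mistaken for a present PTE (_PAGE_PRESENT is 0x002).
 */
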
extern unsigned long iopa(unsigned long addr);

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
 * compilation errors.
 */
#define IOMAP_FULL_CACHING	0
#define IOMAP_NOCACHE_SER	1
#define IOMAP_NOCACHE_NONSER	2
#define IOMAP_NO_COPYBACK	3

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code);

void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;

asmlinkage void __init mmu_init(void);

void __init *early_get_page(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* CONFIG_MMU */

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>

extern unsigned long ioremap_bot, ioremap_base;

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */