/*
 * linux/include/asm-arm/proc-armv/pgtable.h
 *
 * Copyright (C) 1995, 1996, 1997 Russell King
 *
 * 12-Jan-1997	RMK	Altered flushing routines to use function pointers
 *			now possible to combine ARM6, ARM7 and StrongARM versions.
 * 17-Apr-1999	RMK	Now pass an area size to clean_cache_area and
 *			flush_icache_area.
 */
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H

#include <asm/arch/memory.h>		/* For TASK_SIZE */

#define LIBRARY_TEXT_START	0x0c000000
#define flush_cache_all()						\
	cpu_flush_cache_all()

#define flush_cache_mm(_mm)						\
	do {								\
		if ((_mm) == current->mm)				\
			cpu_flush_cache_all();				\
	} while (0)

#define flush_cache_range(_mm,_start,_end)				\
	do {								\
		if ((_mm) == current->mm)				\
			cpu_flush_cache_area((_start), (_end), 1);	\
	} while (0)

#define flush_cache_page(_vma,_vmaddr)					\
	do {								\
		if ((_vma)->vm_mm == current->mm)			\
			cpu_flush_cache_area((_vmaddr),			\
				(_vmaddr) + PAGE_SIZE,			\
				((_vma)->vm_flags & VM_EXEC) ? 1 : 0);	\
	} while (0)

#define clean_cache_range(_start,_end)					\
	do {								\
		unsigned long _s, _sz;					\
		_s = (unsigned long)_start;				\
		_sz = (unsigned long)_end - _s;				\
		cpu_clean_cache_area(_s, _sz);				\
	} while (0)

#define clean_cache_area(_start,_size)					\
	do {								\
		unsigned long _s;					\
		_s = (unsigned long)_start;				\
		cpu_clean_cache_area(_s, _size);			\
	} while (0)

#define flush_icache_range(_start,_end)					\
	cpu_flush_icache_area((_start), (_end) - (_start))
/*
 * We don't have a MEMC chip...
 */
#define update_memc_all()		do { } while (0)
#define update_memc_task(tsk)		do { } while (0)
#define update_memc_mm(mm)		do { } while (0)
#define update_memc_addr(mm,addr,pte)	do { } while (0)

/*
 * This flushes back any buffered write data.  We have to clean and flush
 * the entries in the cache for this page.  Is it necessary to invalidate
 * the I-cache?
 */
#define flush_page_to_ram(_page)					\
	cpu_flush_ram_page((_page) & PAGE_MASK)
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *
 * GCC uses conditional instructions, and expects the assembler code to
 * do so as well.
 *
 * We drain the write buffer in here to ensure that the page tables in RAM
 * are really up to date.  It is more efficient to do this here...
 */
#define flush_tlb() flush_tlb_all()

#define flush_tlb_all()							\
	cpu_flush_tlb_all()

#define flush_tlb_mm(_mm)						\
	do {								\
		if ((_mm) == current->mm)				\
			cpu_flush_tlb_all();				\
	} while (0)

#define flush_tlb_range(_mm,_start,_end)				\
	do {								\
		if ((_mm) == current->mm)				\
			cpu_flush_tlb_area((_start), (_end), 1);	\
	} while (0)

#define flush_tlb_page(_vma,_vmaddr)					\
	do {								\
		if ((_vma)->vm_mm == current->mm)			\
			cpu_flush_tlb_area((_vmaddr), (_vmaddr) + PAGE_SIZE, \
				((_vma)->vm_flags & VM_EXEC) ? 1 : 0);	\
	} while (0)
/*
 * PMD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PMD_SHIFT	20
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/*
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#define PGDIR_SHIFT	20
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the sa110 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	4096
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
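
/*
 * Illustrative arithmetic (not part of the original header): with
 * PGDIR_SHIFT == 20 each pgd entry maps 1MB, so the 4096-entry pgd
 * covers the full 4GB of 32-bit address space, and a 256-entry pte
 * table covers the same 1MB assuming the usual 4kB page size.  With
 * the pmd folded into the pgd (PTRS_PER_PMD == 1), PMD_SIZE equals
 * PGDIR_SIZE.  If, say, TASK_SIZE were 0xc0000000, USER_PTRS_PER_PGD
 * would come out as 3072.
 */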
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after
 * the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be
 * caught.  The vmalloc() routines leave a hole of 4kB between each
 * vmalloced area for the same reason. ;)
 */
#define VMALLOC_OFFSET	  (8*1024*1024)
#define VMALLOC_START	  (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	  (PAGE_OFFSET + 0x10000000)
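
/*
 * Worked example (the addresses are purely illustrative): if high_memory
 * ended up at 0xc8000000, VMALLOC_START would be
 * (0xc8000000 + 0x800000) & ~0x7fffff == 0xc8800000, i.e. the vmalloc
 * area begins 8MB above the direct-mapped RAM, rounded down to an 8MB
 * boundary, and runs up to PAGE_OFFSET + 256MB.
 */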
#define DOMAIN_USER	0
#define DOMAIN_KERNEL	1
#define DOMAIN_TABLE	1

#undef TEST_VERIFY_AREA
/*
 * The sa110 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern __inline__ void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}
/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t *__bad_pagetable(void);
extern unsigned long *empty_zero_page;

#define BAD_PAGETABLE		__bad_pagetable()
#define BAD_PAGE		__bad_page()
#define ZERO_PAGE(vaddr)	((unsigned long) empty_zero_page)
/* number of bits that fit into a memory pointer */
#define BYTES_PER_PTR	(sizeof(unsigned long))
#define BITS_PER_PTR	(8*BYTES_PER_PTR)

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2	2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address) >> (PAGE_SHIFT-SIZEOF_PTR_LOG2) & PTR_MASK & ~PAGE_MASK)
/*
 * To set the page directory.
 * Note that we need to flush the cache and TLBs
 * if we are affecting the current task.
 */
#define SET_PAGE_DIR(tsk,pgdir)						\
	do {								\
		tsk->tss.memmap = __virt_to_phys((unsigned long)pgdir);	\
		if ((tsk) == current) {					\
			flush_cache_all();				\
			__asm__ __volatile__(				\
			"mcr%?	p15, 0, %0, c2, c0, 0\n"		\
			: : "r" (tsk->tss.memmap));			\
			flush_tlb_all();				\
		}							\
	} while (0)
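
/*
 * The "mcr p15, 0, %0, c2, c0, 0" above writes CP15 register 2, the
 * translation table base register, so the MMU immediately starts
 * walking the new task's page directory; tsk->tss.memmap holds that
 * directory's physical address.
 */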
extern unsigned long get_page_2k(int priority);
extern void free_page_2k(unsigned long page);
/*
 * Allocate and free page tables.  The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
#ifndef __SMP__
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)

#else
#error Pgtable caches have to be per-CPU, so that no locking is needed.
#endif
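
/*
 * The quicklists above are simple freelists of recently freed page
 * tables, kept so that pgd/pte allocation can usually avoid the page
 * allocator; pgtable_cache_size counts the cached entries.  As the
 * #error notes, an SMP build would need per-CPU lists to keep this
 * path lock-free.
 */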
/* PMD types (actually level 1 descriptor) */
#define PMD_TYPE_MASK		0x0003
#define PMD_TYPE_FAULT		0x0000
#define PMD_TYPE_TABLE		0x0001
#define PMD_TYPE_SECT		0x0002
#define PMD_UPDATABLE		0x0010
#define PMD_SECT_CACHEABLE	0x0008
#define PMD_SECT_BUFFERABLE	0x0004
#define PMD_SECT_AP_WRITE	0x0400
#define PMD_SECT_AP_READ	0x0800
#define PMD_DOMAIN(x)		((x) << 5)

#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_KERNEL))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_clear(pmdp)		set_pmd(pmdp, __pmd(0))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
#define mk_user_pmd(ptep)	__mk_pmd(ptep, _PAGE_USER_TABLE)
#define mk_kernel_pmd(ptep)	__mk_pmd(ptep, _PAGE_KERNEL_TABLE)
#define set_pmd(pmdp,pmd)	cpu_set_pmd(pmdp,pmd)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address)	((pmd_t *)(dir))
extern __inline__ int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) + 1) & 2);
}
/* We don't use a pmd cache, so this is a dummy routine */
extern __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

extern void __bad_pmd(pmd_t *pmd);
extern void __bad_pmd_kernel(pmd_t *pmd);
/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern __inline__ void pmd_free(pmd_t *pmd)
{
}

extern __inline__ pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_free_kernel		pmd_free
#define pmd_alloc_kernel	pmd_alloc
extern __inline__ pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
{
	unsigned long pte_ptr = (unsigned long)ptep;
	pmd_t pmd;

	pte_ptr -= PTRS_PER_PTE * BYTES_PER_PTR;

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table.
	 */
	pmd_val(pmd) = __virt_to_phys(pte_ptr) | prot;

	return pmd;
}
extern __inline__ unsigned long pmd_page(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * BYTES_PER_PTR - 1);

	ptr += PTRS_PER_PTE * BYTES_PER_PTR;

	return __phys_to_virt(ptr);
}
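
/*
 * A note on the +/- 1024 byte offsets above: PTRS_PER_PTE * BYTES_PER_PTR
 * is 256 * 4 = 1024.  __mk_pmd() steps back 1024 bytes from the Linux pte
 * table to reach the hardware-format table whose physical address goes
 * into the pmd, and pmd_page() steps forward again to return the Linux
 * view.  This matches the "physical PTE is at -1024 bytes" layout
 * described in the Linux pte section further down.
 */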
/* PTE types (actually level 2 descriptor) */
#define PTE_TYPE_MASK	0x0003
#define PTE_TYPE_FAULT	0x0000
#define PTE_TYPE_LARGE	0x0001
#define PTE_TYPE_SMALL	0x0002
#define PTE_AP_READ	0x0aa0
#define PTE_AP_WRITE	0x0550
#define PTE_CACHEABLE	0x0008
#define PTE_BUFFERABLE	0x0004

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(ptep)		set_pte(ptep, __pte(0))
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern __inline__ pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = __virt_to_phys(page) | pgprot_val(pgprot);

	return pte;
}

/* This takes a physical page address that is used by the remapping functions */
extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = physpage + pgprot_val(pgprot);

	return pte;
}

#define set_pte(ptep, pte)	cpu_set_pte(ptep,pte)
extern __inline__ unsigned long pte_page(pte_t pte)
{
	return __phys_to_virt(pte_val(pte) & PAGE_MASK);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		clean_cache_area(ret, 4);
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page_2k((unsigned long)(pte - PTRS_PER_PTE));
}

#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
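
/*
 * The pte quicklist is a singly linked freelist: free_pte_fast() stores
 * the current list head in the first word of the table being freed and
 * makes that table the new head; get_pte_fast() pops the head and cleans
 * that link word back to RAM with clean_cache_area(), presumably so the
 * hardware table walker never sees the stale pointer once the table is
 * reused.
 */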
/*###############################################################################
 * New PageTableEntry stuff...
 */
/* We now keep two sets of ptes - the physical and the Linux version.
 * This gives us many advantages, and allows us greater flexibility.
 *
 * The Linux ptes contain:
 *  bit   meaning
 *   0    present
 *   1    young
 *   2    bufferable	- matches physical pte
 *   3    cacheable	- matches physical pte
 *   4    user
 *   5    write
 *   6    exec
 *   7    dirty
 *  12-31 virtual page address
 *
 * These are stored at the pte pointer; the physical PTE is at -1024 bytes.
 */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_BUFFERABLE	(1 << 2)
#define L_PTE_CACHEABLE		(1 << 3)
#define L_PTE_USER		(1 << 4)
#define L_PTE_WRITE		(1 << 5)
#define L_PTE_EXEC		(1 << 6)
#define L_PTE_DIRTY		(1 << 7)
/*
 * The following macros handle the cache and bufferable bits...
 */
#define _L_PTE_DEFAULT	(L_PTE_PRESENT | L_PTE_YOUNG)
#define _L_PTE_READ	(L_PTE_USER | L_PTE_CACHEABLE)
#define _L_PTE_EXEC	(_L_PTE_READ | L_PTE_EXEC)

#define PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_BUFFERABLE)
#define PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_BUFFERABLE | L_PTE_WRITE)
#define PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL	__pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE)

#define _PAGE_CHG_MASK	(PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)
/*
 * The table below defines the page protection levels that we insert into
 * our Linux page table version.  These get translated into the best that
 * the architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
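
/*
 * Reading the table: the three digits are the requested exec/write/read
 * permissions, __P* rows cover private (copy-on-write) mappings and __S*
 * rows shared ones.  Hence a writable private request (__P010, __P011)
 * becomes PAGE_COPY while the shared equivalent becomes PAGE_SHARED, and
 * exec-only requests degrade to the matching readable protection, per
 * the hardware limits noted above.
 */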
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_read(pte)		(pte_val(pte) & L_PTE_USER)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define PTE_BIT_FUNC(fn,op)			\
extern inline pte_t fn(pte_t pte) { pte_val(pte) op; return pte; }

/*PTE_BIT_FUNC(pte_rdprotect, &= ~L_PTE_USER);*/
PTE_BIT_FUNC(pte_wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(pte_exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(pte_mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(pte_mkold,     &= ~L_PTE_YOUNG);
/*PTE_BIT_FUNC(pte_mkread,    |= L_PTE_USER);*/
PTE_BIT_FUNC(pte_mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(pte_mkexec,    |= L_PTE_EXEC);
PTE_BIT_FUNC(pte_mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(pte_mkyoung,   |= L_PTE_YOUNG);
PTE_BIT_FUNC(pte_nocache,   &= ~L_PTE_CACHEABLE);
extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
/* Find an entry in the third-level page table.. */
extern __inline__ pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
extern __inline__ pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_kernel_slow(pmd, address);
		set_pmd(pmd, mk_kernel_pmd(page));
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__bad_pmd_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
extern __inline__ pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_slow(pmd, address);
		set_pmd(pmd, mk_user_pmd(page));
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__bad_pmd(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* used for quicklists */
#define __pgd_next(pgd)		(((unsigned long *)pgd)[1])
/* to find an entry in a page-table-directory */
extern __inline__ pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}
extern pgd_t *get_pgd_slow(void);

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)__pgd_next(ret);
		clean_cache_area(ret + 1, 4);
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	__pgd_next(pgd) = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	if (pgd) { /* can pgd be NULL? */
		pmd_t *pmd;
		pte_t *pte;

		/* pgd is never none and bad - it is
		 * detected in the pmd macros.
		 */
		pmd = pmd_offset(pgd, 0);
		if (pmd_none(*pmd))
			goto free;
		if (pmd_bad(*pmd)) {
			printk("free_pgd_slow: bad directory entry %08lx\n", pmd_val(*pmd));
			pmd_clear(pmd);
			goto free;
		}

		pte = pte_offset(pmd, 0);
		pmd_clear(pmd);
		pte_free(pte);
		pmd_free(pmd);
	}
free:
	free_pages((unsigned long) pgd, 2);
}
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()
extern __inline__ void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	pgd_t *pgd;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm, address) = entry;
	}
	read_unlock(&tasklist_lock);

	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)__pgd_next(pgd))
		pgd[address >> PGDIR_SHIFT] = entry;
}
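
/*
 * set_pgdir() patches a kernel pgd slot into every live mm and also into
 * every pgd still sitting on the quicklist, so a directory recycled by
 * get_pgd_fast() already carries the new kernel mapping.
 */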
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#define SWP_TYPE(entry)		(((entry) >> 2) & 0x7f)
#define SWP_OFFSET(entry)	((entry) >> 9)
#define SWP_ENTRY(type,offset)	(((type) << 2) | ((offset) << 9))
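
/*
 * Swap-entry layout implied by the macros above: bits 0-1 stay clear
 * (so neither the hardware nor pte_present() treats the entry as a
 * mapped page), a 7-bit swap type lives in bits 2-8 and the swap offset
 * starts at bit 9.  For example, SWP_ENTRY(1, 0x20) == (1 << 2) |
 * (0x20 << 9) == 0x4004.
 */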
#endif /* __ASM_PROC_PGTABLE_H */