arch/um/include/asm/pgtable-3level.h
/*
 * Copyright 2003 PathScale Inc
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */
#ifndef __UM_PGTABLE_3LEVEL_H
#define __UM_PGTABLE_3LEVEL_H

#include <asm-generic/pgtable-nopud.h>
/* PGDIR_SHIFT determines what a third-level page table entry can map */

#ifdef CONFIG_64BIT
#define PGDIR_SHIFT 30
#else
#define PGDIR_SHIFT 31
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
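/* Each third-level (pgd) entry therefore maps 1UL << 30 = 1 GiB of virtual
 * address space on 64-bit, or 1UL << 31 = 2 GiB on 32-bit. */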
/* PMD_SHIFT determines the size of the area a second-level page table can
 * map
 */

#define PMD_SHIFT 21
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
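/* Assuming the usual 4 KiB pages (PAGE_SHIFT == 12), PMD_SIZE comes out to
 * 2 MiB per pmd entry, i.e. one full page table of PTRS_PER_PTE (512) ptes
 * each mapping a 4 KiB page. */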
/*
 * entries per page directory level
 */

#define PTRS_PER_PTE 512
#ifdef CONFIG_64BIT
#define PTRS_PER_PMD 512
#define PTRS_PER_PGD 512
#else
#define PTRS_PER_PMD 1024
#define PTRS_PER_PGD 1024
#endif
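/* On 64-bit each level is indexed by 9 bits (512 entries), so three levels
 * plus a 12-bit page offset cover a 39-bit (512 GiB) virtual address space,
 * again assuming 4 KiB pages. */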
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
               pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
               pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
               pgd_val(e))
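/* _PAGE_NEWPAGE is UML's software-only bit marking entries whose mapping
 * still has to be pushed out to the host address space, which is why
 * pud_none() below masks it off before testing for emptiness. */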
#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
#define pud_populate(mm, pud, pmd) \
        set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval))
#else
#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
#endif
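/* Using set_64bit() on 64-bit presumably keeps the 8-byte entry update
 * atomic instead of letting the compiler split the store. */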
static inline int pgd_newpage(pgd_t pgd)
{
        return(pgd_val(pgd) & _PAGE_NEWPAGE);
}

static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
#ifdef CONFIG_64BIT
#define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval))
#else
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#endif
struct mm_struct;
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
static inline void pud_clear (pud_t *pud)
{
        set_pud(pud, __pud(_PAGE_NEWPAGE));
}

#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
                        pmd_index(address))
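/* pmd_index() is expected to be the usual
 * (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1), so this takes the pmd page
 * named by the pud entry and steps to the slot for "address". */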
static inline unsigned long pte_pfn(pte_t pte)
{
        return phys_to_pfn(pte_val(pte));
}

static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
{
        pte_t pte;
        phys_t phys = pfn_to_phys(page_nr);

        pte_set_val(pte, phys, pgprot);
        return pte;
}
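/* pte_set_val() is the UML helper that encodes a physical address plus
 * protection bits into a pte, so pfn_pte() is simply "turn the pfn into a
 * physical address, then encode it". */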
static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
{
        return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
}
/*
 * Bits 0 through 3 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define PTE_FILE_MAX_BITS 32

#ifdef CONFIG_64BIT

#define pte_to_pgoff(p) ((p).pte >> 32)

#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })

#else

#define pte_to_pgoff(pte) ((pte).pte_high)

#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })

#endif
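/* Worked example of the file-pte encoding above: a non-linear mapping with
 * page offset 5 is stored as (5UL << 32) | _PAGE_FILE on 64-bit, while on
 * 32-bit the offset lives in pte_high and _PAGE_FILE in the low word. */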
#endif