/*
 * Copyright 2003 PathScale Inc
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */
#ifndef __UM_PGTABLE_3LEVEL_H
#define __UM_PGTABLE_3LEVEL_H

#include <asm-generic/pgtable-nopud.h>
/* PGDIR_SHIFT determines what a third-level page table entry can map */

#define PGDIR_SHIFT 30
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/* PMD_SHIFT determines the size of the area a second-level page table can
 * map
 */

#define PMD_SHIFT 21
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
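/*
 * Rough sketch (added, not in the original header) of what each level
 * covers, assuming the usual UML PAGE_SHIFT of 12 (4 KiB pages):
 *
 *   pgd entry: 1UL << 30 = 1 GiB
 *   pmd entry: 1UL << 21 = 2 MiB
 *   pte entry: 1UL << 12 = 4 KiB
 */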
/*
 * entries per page directory level
 */

#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 512
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define PTRS_PER_PGD 512
#define FIRST_USER_ADDRESS 0
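/*
 * Worked example (added, not in the original header): with 512 entries
 * at each level and 4 KiB pages, one pgd maps
 *
 *   512 * 512 * 512 * 4 KiB = 2^39 bytes = 512 GiB
 *
 * USER_PTRS_PER_PGD just rounds TASK_SIZE up to whole 1 GiB pgd slots.
 */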
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
               pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
               pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
               pgd_val(e))
#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
#define pud_populate(mm, pud, pmd) \
        set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))

#define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval))
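/*
 * Note (added, not in the original header): pud/pmd entries are 64 bits
 * wide here, so set_pud()/set_pmd() go through set_64bit() to store the
 * whole entry in one operation rather than as two 32-bit halves.
 */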
static inline int pgd_newpage(pgd_t pgd)
{
        return(pgd_val(pgd) & _PAGE_NEWPAGE);
}

static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
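/*
 * Note (added): a PROT_NONE mapping clears _PAGE_PRESENT but keeps
 * _PAGE_PROTNONE set, so such ptes still count as present to the generic
 * mm code; that is why both bits are tested above.
 */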
static inline pte_t pte_mknewprot(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPROT);
        return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPAGE);
        return(pte);
}
static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
         * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
         * mapped pages.
         */

        *pteptr = pte_mknewpage(*pteptr);
        if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
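/*
 * Note (added): _PAGE_NEWPAGE and _PAGE_NEWPROT look like UML-private
 * software bits.  They flag ptes whose host mappings still have to be
 * (re)built, and the tlb update path (fix_range and friends) is what
 * consumes and clears them once the host side is up to date.
 */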
#define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval))
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

        if(pmd)
                memset(pmd, 0, PAGE_SIZE);

        return pmd;
}
static inline void pmd_free(pmd_t *pmd)
{
        free_page((unsigned long) pmd);
}

#define __pmd_free_tlb(tlb,x)   do { } while (0)
static inline void pud_clear (pud_t * pud) { }

#define pud_page(pud) \
        ((struct page *) __va(pud_val(pud) & PAGE_MASK))
/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
                        pmd_index(address))
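/*
 * Sketch (added): pmd_index() is expected to be the usual
 *
 *   (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)
 *
 * so pmd_offset() simply indexes into the pmd page that the pud entry
 * points at.
 */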
#define pte_page(x) pfn_to_page(pte_pfn(x))
static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return phys_to_pfn(pte_val(pte));
}
static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
{
        pte_t pte;
        phys_t phys = pfn_to_phys(page_nr);

        pte_set_val(pte, phys, pgprot);
        return pte;
}

static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
{
        return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
}
/*
 * Bits 0 through 3 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define PTE_FILE_MAX_BITS 32

#ifdef CONFIG_64BIT

#define pte_to_pgoff(p) ((p).pte >> 32)

#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })

#else

#define pte_to_pgoff(pte) ((pte).pte_high)

#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })

#endif
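/*
 * Note (added): these encode the page offset of a nonlinear file mapping
 * directly in the pte.  With a native 64-bit pte the offset lives in the
 * upper 32 bits; with the two-word pte layout the offset goes in
 * pte_high, and _PAGE_FILE stays in the low word either way.
 */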
#endif

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */