cxgb4/l2t: Mark expected switch fall-through
[linux/fpc-iii.git] / arch / um / include / asm / pgtable-3level.h
blobc4d876dfb9acd14bc11ff6b4230bbff5bbe070fe
1 /*
2 * Copyright 2003 PathScale Inc
3 * Derived from include/asm-i386/pgtable.h
4 * Licensed under the GPL
5 */
7 #ifndef __UM_PGTABLE_3LEVEL_H
8 #define __UM_PGTABLE_3LEVEL_H
10 #define __ARCH_USE_5LEVEL_HACK
11 #include <asm-generic/pgtable-nopud.h>
/* PGDIR_SHIFT determines what a third-level page table entry can map */

#ifdef CONFIG_64BIT
#define PGDIR_SHIFT	30
#else
#define PGDIR_SHIFT	31
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/* PMD_SHIFT determines the size of the area a second-level page table can
 * map
 */
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
/*
 * entries per page directory level
 */
#define PTRS_PER_PTE 512
#ifdef CONFIG_64BIT
#define PTRS_PER_PMD 512
#define PTRS_PER_PGD 512
#else
#define PTRS_PER_PMD 1024
#define PTRS_PER_PGD 1024
#endif
/* Number of top-level slots needed to cover user space (TASK_SIZE, rounded up). */
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)

#define FIRST_USER_ADDRESS	0UL
/* Diagnostics printed when a corrupted page table entry is detected. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pgd_val(e))
/*
 * A pud carrying only _PAGE_NEWPAGE still counts as "none"; everything
 * else in the entry besides that flag must be clear.
 */
#define pud_none(x)	(!(pud_val(x) & ~_PAGE_NEWPAGE))
#define	pud_bad(x)	((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define	pud_present(x)	(pud_val(x) & _PAGE_PRESENT)
/* Install a pmd page into a pud slot: physical address plus table flags. */
#define pud_populate(mm, pud, pmd) \
	set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
/*
 * NOTE(review): on 64-bit, set_64bit is presumably used so the 64-bit
 * entry is stored as a single atomic write — confirm against the host
 * set_64bit implementation.
 */
#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
#else
#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
#endif
69 static inline int pgd_newpage(pgd_t pgd)
71 return(pgd_val(pgd) & _PAGE_NEWPAGE);
74 static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
/*
 * Store a pmd entry; mirrors set_pud above (set_64bit on 64-bit hosts,
 * plain assignment otherwise).
 */
#ifdef CONFIG_64BIT
#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
#else
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#endif
82 struct mm_struct;
83 extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
85 static inline void pud_clear (pud_t *pud)
87 set_pud(pud, __pud(_PAGE_NEWPAGE));
/* struct page / kernel virtual address of the pmd page a pud points at. */
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
			pmd_index(address))
97 static inline unsigned long pte_pfn(pte_t pte)
99 return phys_to_pfn(pte_val(pte));
102 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
104 pte_t pte;
105 phys_t phys = pfn_to_phys(page_nr);
107 pte_set_val(pte, phys, pgprot);
108 return pte;
111 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
113 return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
116 #endif