/*
 * arch/arm/include/asm/pgtable-3level.h
 *
 * Copyright (C) 2011 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef _ASM_PGTABLE_3LEVEL_H
#define _ASM_PGTABLE_3LEVEL_H

/*
 * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
 * 8 bytes each, occupying a 4K page. The first level table covers a range of
 * 512GB, each entry representing 1GB. Since we are limited to 4GB input
 * address range, only 4 entries in the PGD are used.
 *
 * There are enough spare bits in a page table entry for the kernel specific
 * state.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		512
#define PTRS_PER_PGD		4

#define PTE_HWTABLE_PTRS	(0)
#define PTE_HWTABLE_OFF		(0)
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map.
 */
#define PGDIR_SHIFT		30

/*
 * PMD_SHIFT determines the size a middle-level page table entry can map.
 */
#define PMD_SHIFT		21

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~((1 << PMD_SHIFT) - 1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1))
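
/*
 * Worked example (illustration only, not part of this header's interface):
 * with the shifts above, a 32-bit virtual address splits into bits [31:30]
 * for the PGD index, [29:21] for the PMD index and [20:12] for the PTE
 * index, so each PGD entry maps 1GB and each PMD entry 2MB. A hypothetical
 * helper making the arithmetic explicit:
 */
#if 0	/* illustrative sketch only */
static inline void example_decompose_va(unsigned long addr)
{
	unsigned long pgd_idx = addr >> PGDIR_SHIFT;			/* 0..3 */
	unsigned long pmd_idx = (addr >> PMD_SHIFT) &
					(PTRS_PER_PMD - 1);		/* 0..511 */
	unsigned long pte_idx = (addr >> PAGE_SHIFT) &
					(PTRS_PER_PTE - 1);		/* 0..511 */
}
#endif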

/*
 * Section address mask and size definitions.
 */
#define SECTION_SHIFT		21
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1))

#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)

/*
 * Hugetlb definitions.
 */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
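
/*
 * Worked numbers for illustration: with 4K pages (PAGE_SHIFT == 12) and
 * HPAGE_SHIFT == 21, HPAGE_SIZE is 1 << 21 == 0x200000 (2MB) and
 * HUGETLB_PAGE_ORDER is 9, i.e. one huge page covers 512 ordinary pages.
 */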
73 * "Linux" PTE definitions for LPAE.
75 * These bits overlap with the hardware bits but the naming is preserved for
76 * consistency with the classic page table format.
78 #define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
79 #define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
80 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
81 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
82 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
83 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
84 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
85 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
86 #define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
87 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
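
/*
 * Bits 55-58 sit in the range the hardware walker ignores, which is why
 * DIRTY/SPECIAL/NONE/RDONLY can be kept there as pure software state.
 * Hypothetical illustration of testing such a bit:
 */
#if 0	/* illustrative sketch only */
static inline int example_pte_dirty(pte_t pte)
{
	return !!(pte_val(pte) & L_PTE_DIRTY);
}
#endif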

#define L_PMD_SECT_VALID	(_AT(pmdval_t, 1) << 0)
#define L_PMD_SECT_DIRTY	(_AT(pmdval_t, 1) << 55)
#define L_PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
#define L_PMD_SECT_RDONLY	(_AT(pmdval_t, 1) << 58)

/*
 * To be used in assembly code with the upper page attributes.
 */
#define L_PTE_XN_HIGH		(1 << (54 - 32))
#define L_PTE_DIRTY_HIGH	(1 << (55 - 32))

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2)
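
/*
 * AttrIndx[2:0] selects one of the eight attribute fields programmed into
 * MAIR0/MAIR1, so a pte's memory type is switched by clearing
 * L_PTE_MT_MASK and OR-ing in a new index. Hypothetical helper:
 */
#if 0	/* illustrative sketch only */
static inline pte_t example_pte_set_mt(pte_t pte, pteval_t mt)
{
	pte_val(pte) = (pte_val(pte) & ~L_PTE_MT_MASK) | (mt & L_PTE_MT_MASK);
	return pte;
}
#endif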

/*
 * Software PGD flags.
 */
#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */

/*
 * 2nd stage PTE definitions for LPAE.
 */
#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2) /* strongly ordered */
#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2) /* device */
#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2)

#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1] */
#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */

#define L_PMD_S2_RDONLY			(_AT(pmdval_t, 1) << 6)   /* HAP[1] */
#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */

/*
 * Hyp-mode PL2 PTE definitions for LPAE.
 */
#define L_PTE_HYP		L_PTE_USER

#ifndef __ASSEMBLY__

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))
#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_SECT)
#define pmd_large(pmd)		pmd_sect(pmd)

#define pud_clear(pudp)			\
	do {				\
		*pudp = __pud(0);	\
		clean_pmd_entry(pudp);	\
	} while (0)

#define set_pud(pudp, pud)		\
	do {				\
		*pudp = pud;		\
		flush_pmd_entry(pudp);	\
	} while (0)

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		*pmdpd = *pmdps;	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		*pmdp = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

/*
 * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
 * that are written to a page table but not for ptes created with mk_pte.
 *
 * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
 * hugetlb_cow, where it is compared with an entry in a page table.
 * This comparison test fails erroneously leading ultimately to a memory leak.
 *
 * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
 * present before running the comparison.
 */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\
					: pte_val(pte_a))				\
				== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG	\
					: pte_val(pte_b)))
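
/*
 * Illustration (hypothetical pte values): a pte installed for a user
 * address carries PTE_EXT_NG while the same mapping built via mk_pte does
 * not, so a raw pte_val() comparison would spuriously differ:
 */
#if 0	/* illustrative sketch only */
static inline void example_pte_same(void)
{
	pte_t installed = __pte(0x0040000000000743ULL | PTE_EXT_NG);
	pte_t fresh	= __pte(0x0040000000000743ULL);

	/*
	 * The raw values differ only by PTE_EXT_NG, but both describe the
	 * same present mapping, so pte_same() reports them equal.
	 */
	BUG_ON(!pte_same(installed, fresh));
}
#endif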

#define set_pte_ext(ptep,pte,ext)	cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))

#define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * pmd_isset() returns the masked value directly when the flag fits in
 * 32 bits, but collapses to 0/1 with !! when the flag lives in the upper
 * word, so the result survives truncation to an int.
 */
#define pmd_isset(pmd, val)	((u32)(val) == (val) ? pmd_val(pmd) & (val) \
						: !!(pmd_val(pmd) & (val)))
#define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))

#define pmd_present(pmd)	(pmd_isset((pmd), L_PMD_SECT_VALID))
#define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
#define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= L_PTE_SPECIAL;
	return pte;
}
#define __HAVE_ARCH_PTE_SPECIAL

#define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))
#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))

#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !pmd_table(pmd))
#endif

#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }

PMD_BIT_FUNC(wrprotect,	|= L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
PMD_BIT_FUNC(mkwrite,	&= ~L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty,	|= L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkclean,	&= ~L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung,	|= PMD_SECT_AF);
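
/*
 * For reference, PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY) above
 * expands to the following accessor (expansion shown for illustration):
 */
#if 0	/* illustrative expansion only */
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) |= L_PMD_SECT_RDONLY;
	return pmd;
}
#endif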

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
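
/*
 * Round-trip illustration: pfn_pmd() places the pfn at the descriptor's
 * output-address bits and pmd_pfn() masks the attribute bits back off, so
 * the conversion is lossless for a section-aligned pfn (values
 * hypothetical):
 */
#if 0	/* illustrative sketch only */
static inline void example_pmd_pfn_roundtrip(void)
{
	pmd_t pmd = pfn_pmd(0x40200UL, __pgprot(PMD_SECT_AF)); /* 2MB-aligned pfn */

	BUG_ON(pmd_pfn(pmd) != 0x40200UL);
}
#endif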

/* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
#define pmdp_establish generic_pmdp_establish

/* Represent a not-present pmd by a faulting entry; used by pmdp_invalidate(). */
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
				L_PMD_SECT_VALID | L_PMD_SECT_NONE;

	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
	return pmd;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	BUG_ON(addr >= TASK_SIZE);

	/* create a faulting entry if PROT_NONE protected */
	if (pmd_val(pmd) & L_PMD_SECT_NONE)
		pmd_val(pmd) &= ~L_PMD_SECT_VALID;

	/*
	 * AP2 is the hardware read-only bit. Only a pmd that is both
	 * writable and (software-)dirty is made hardware-writable; anything
	 * else stays read-only so the first write faults and the kernel can
	 * mark it dirty.
	 */
	if (pmd_write(pmd) && pmd_dirty(pmd))
		pmd_val(pmd) &= ~PMD_SECT_AP2;
	else
		pmd_val(pmd) |= PMD_SECT_AP2;

	*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
	flush_pmd_entry(pmdp);
}
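
/*
 * Dirty-bit emulation flow (hypothetical illustration): a clean entry is
 * installed with AP2 set, i.e. hardware read-only; the first store faults,
 * the fault handler marks the pmd dirty, and re-installing it clears AP2
 * so subsequent stores proceed without faulting.
 */
#if 0	/* illustrative sketch only */
static inline void example_dirty_tracking(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = pmd_mkwrite(pmd_mkclean(*pmdp));

	set_pmd_at(mm, addr, pmdp, pmd);	/* installed read-only (AP2 set) */
	/* ...a store to the mapping faults; the handler then does: */
	pmd = pmd_mkdirty(pmd_mkyoung(pmd));
	set_pmd_at(mm, addr, pmdp, pmd);	/* AP2 cleared, writes allowed */
}
#endif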

#endif /* __ASSEMBLY__ */

#endif /* _ASM_PGTABLE_3LEVEL_H */