/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
16 #ifndef __ASM_PGTABLE_HWDEF_H
17 #define __ASM_PGTABLE_HWDEF_H
/*
 * Number of page-table levels required to address 'va_bits' wide
 * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
 * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence:
 *
 *  levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3))
 *
 * where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
 *
 * We cannot include linux/kernel.h which defines DIV_ROUND_UP here
 * due to build issues. So we open code DIV_ROUND_UP here:
 *
 *	((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3))
 *
 * which gets simplified as :
 */
#define ARM64_HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))
/*
 * Size mapped by an entry at level n ( 0 <= n <= 3)
 * We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
 * in the final page. The maximum number of translation levels supported by
 * the architecture is 4. Hence, starting at level n, we have further
 * ((4 - n) - 1) levels of translation excluding the offset within the page.
 * So, the total number of bits mapped by an entry at level n is :
 *
 *  ((4 - n) - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT
 *
 * Rearranging it a bit we get :
 *   (4 - n) * (PAGE_SHIFT - 3) + 3
 */
#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n)	((PAGE_SHIFT - 3) * (4 - (n)) + 3)

/* Entries per level 3 (PTE) table: one per (PAGE_SHIFT - 3) index bits. */
#define PTRS_PER_PTE		(1 << (PAGE_SHIFT - 3))
/*
 * PMD_SHIFT determines the size a level 2 page table entry can map.
 */
#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define PMD_SIZE		(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PTRS_PER_PMD		PTRS_PER_PTE
#endif
/*
 * PUD_SHIFT determines the size a level 1 page table entry can map.
 */
#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define PUD_SIZE		(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE-1))
#define PTRS_PER_PUD		PTRS_PER_PTE
#endif
/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * (depending on the configuration, this level can be 0, 1 or 2).
 */
#define PGDIR_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
#define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD		(1 << (VA_BITS - PGDIR_SHIFT))
/*
 * Section address mask and size definitions.
 */
#define SECTION_SHIFT		PMD_SHIFT
#define SECTION_SIZE		(_AC(1, UL) << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))
/*
 * Contiguous page definitions.
 */
#define CONT_PTES		(_AC(1, UL) << CONT_SHIFT)
/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
/*
 * Hardware page table definitions.
 *
 * Level 1 descriptor (PUD).
 */
/*
 * Fix: the TABLE_BIT/TYPE_MASK/TYPE_SECT definitions used _AT(pgdval_t, ...)
 * while PUD_TYPE_TABLE used pudval_t; all four describe level 1 (PUD)
 * descriptors, so they should consistently use pudval_t.
 */
#define PUD_TYPE_TABLE		(_AT(pudval_t, 3) << 0)
#define PUD_TABLE_BIT		(_AT(pudval_t, 1) << 1)
#define PUD_TYPE_MASK		(_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT		(_AT(pudval_t, 1) << 0)
/*
 * Level 2 descriptor (PMD).
 */
#define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
#define PMD_TABLE_BIT		(_AT(pmdval_t, 1) << 1)

/*
 * Section
 */
#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
#define PMD_SECT_PROT_NONE	(_AT(pmdval_t, 1) << 58)
#define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
#define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
#define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)
#define PMD_SECT_CONT		(_AT(pmdval_t, 1) << 52)
#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)
#define PMD_SECT_UXN		(_AT(pmdval_t, 1) << 54)

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PMD_ATTRINDX(t)		(_AT(pmdval_t, (t)) << 2)
#define PMD_ATTRINDX_MASK	(_AT(pmdval_t, 7) << 2)
/*
 * Level 3 descriptor (PTE).
 */
#define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT		(_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
#define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
#define PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
#define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
#define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
#define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
#define PTE_CONT		(_AT(pteval_t, 1) << 52)	/* Contiguous range */
#define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
#define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PTE_ATTRINDX(t)		(_AT(pteval_t, (t)) << 2)
#define PTE_ATTRINDX_MASK	(_AT(pteval_t, 7) << 2)
/*
 * 2nd stage PTE definitions
 */
#define PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
#define PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */

#define PMD_S2_RDONLY		(_AT(pmdval_t, 1) << 6)   /* HAP[2:1] */
#define PMD_S2_RDWR		(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */

/*
 * Memory Attribute override for Stage-2 (MemAttr[3:0])
 */
#define PTE_S2_MEMATTR(t)	(_AT(pteval_t, (t)) << 2)
#define PTE_S2_MEMATTR_MASK	(_AT(pteval_t, 0xf) << 2)

/*
 * EL2/HYP PTE/PMD definitions
 */
#define PMD_HYP			PMD_SECT_USER
#define PTE_HYP			PTE_USER
/*
 * Highest possible physical address supported.
 */
#define PHYS_MASK_SHIFT		(48)
#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
/*
 * TCR flags.
 */
#define TCR_T0SZ_OFFSET		0
#define TCR_T1SZ_OFFSET		16
#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)
#define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_T1SZ_OFFSET)
#define TCR_TxSZ(x)		(TCR_T0SZ(x) | TCR_T1SZ(x))
#define TCR_TxSZ_WIDTH		6
#define TCR_IRGN_NC		((UL(0) << 8) | (UL(0) << 24))
#define TCR_IRGN_WBWA		((UL(1) << 8) | (UL(1) << 24))
#define TCR_IRGN_WT		((UL(2) << 8) | (UL(2) << 24))
#define TCR_IRGN_WBnWA		((UL(3) << 8) | (UL(3) << 24))
#define TCR_IRGN_MASK		((UL(3) << 8) | (UL(3) << 24))
#define TCR_ORGN_NC		((UL(0) << 10) | (UL(0) << 26))
#define TCR_ORGN_WBWA		((UL(1) << 10) | (UL(1) << 26))
#define TCR_ORGN_WT		((UL(2) << 10) | (UL(2) << 26))
#define TCR_ORGN_WBnWA		((UL(3) << 10) | (UL(3) << 26))
#define TCR_ORGN_MASK		((UL(3) << 10) | (UL(3) << 26))
#define TCR_SHARED		((UL(3) << 12) | (UL(3) << 28))
#define TCR_TG0_4K		(UL(0) << 14)
#define TCR_TG0_64K		(UL(1) << 14)
#define TCR_TG0_16K		(UL(2) << 14)
#define TCR_TG1_16K		(UL(1) << 30)
#define TCR_TG1_4K		(UL(2) << 30)
#define TCR_TG1_64K		(UL(3) << 30)
#define TCR_ASID16		(UL(1) << 36)
#define TCR_TBI0		(UL(1) << 37)
#define TCR_HA			(UL(1) << 39)
#define TCR_HD			(UL(1) << 40)