/* SPDX-License-Identifier: BSD-3-Clause */

#include <assert.h>
#include <commonlib/helpers.h>
#include <stdint.h>
#include <symbols.h>

#include <console/console.h>

#include <arch/cache.h>

#if CONFIG(ARM_LPAE)
/* See B3.6.2 of ARMv7 Architecture Reference Manual */
/* TODO: Utilize the contiguous hint flag */
#define ATTR_BLOCK (\
	0ULL << 54 |	/* XN. 0:Not restricted */ \
	0ULL << 53 |	/* PXN. 0:Not restricted */ \
	1 << 10 |	/* AF. 1:Accessed. This is to prevent access \
			 * fault when accessed for the first time */ \
	0 << 6 |	/* AP[2:1]. 0b00:full access from PL1 */ \
	0 << 5 |	/* NS. 0:Output address is in Secure space */ \
	0 << 1 |	/* block/table. 0:block entry */ \
	1 << 0		/* validity. 1:valid */ \
	)
#define ATTR_PAGE	(ATTR_BLOCK | 1 << 1)
#define ATTR_NEXTLEVEL	(0x3)
#define ATTR_NC		((MAIR_INDX_NC << 2) | (1ULL << 53) | (1ULL << 54))
#define ATTR_WT		(MAIR_INDX_WT << 2)
#define ATTR_WB		(MAIR_INDX_WB << 2)

#define PAGE_MASK	0x000ffffffffff000ULL
#define BLOCK_MASK	0x000fffffffe00000ULL
#define NEXTLEVEL_MASK	PAGE_MASK
#define BLOCK_SHIFT	21

typedef uint64_t pte_t;
#else	/* CONFIG_ARM_LPAE */
/*
 * Section entry bits:
 * 31:20 - section base address
 *    18 - 0 to indicate normal section (versus supersection)
 *    17 - nG, 0 to indicate page is global
 *    16 - S, 0 for non-shareable (?)
 *    15 - APX, 0 for full access
 * 14:12 - TEX, 0b000 for outer and inner write-back
 * 11:10 - AP, 0b11 for full access
 *     9 - P, ? (FIXME: not described or possibly obsolete?)
 *  8: 5 - Domain
 *     4 - XN, 1 to set execute-never (and also avoid prefetches)
 *     3 - C, 1 for cacheable
 *     2 - B, 1 for bufferable
 *  1: 0 - 0b10 to indicate section entry
 */
#define ATTR_BLOCK	((3 << 10) | 0x2)
#define ATTR_PAGE	((3 << 4) | 0x2)
#define ATTR_NEXTLEVEL	(0x1)
#define ATTR_NC		(1 << 4)
#define ATTR_WT		(1 << 3)
#define ATTR_WB		((1 << 3) | (1 << 2))

#define PAGE_MASK	0xfffff000UL
#define BLOCK_MASK	0xfff00000UL
#define NEXTLEVEL_MASK	0xfffffc00UL
#define BLOCK_SHIFT	20

typedef uint32_t pte_t;
#endif	/* CONFIG_ARM_LPAE */

/* We set the first PTE to a sentinel value that cannot occur naturally (has
 * attributes set but bits [1:0] are 0 -> unmapped) to mark unused subtables. */
#define ATTR_UNUSED	0xBADbA6E0
#define SUBTABLE_PTES	(1 << (BLOCK_SHIFT - PAGE_SHIFT))
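/* With the constants above, a subtable holds 512 64-bit entries covering a
 * 2MB block under LPAE (1 << (21 - 12)), or 256 32-bit entries covering a
 * 1MB section otherwise (1 << (20 - 12)). */
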
/*
 * mask/shift/size for pages and blocks
 */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define BLOCK_SIZE	(1UL << BLOCK_SHIFT)

/*
 * MAIR Index
 */
#define MAIR_INDX_NC	0
#define MAIR_INDX_WT	1
#define MAIR_INDX_WB	2

static pte_t *const ttb_buff = (void *)_ttb;
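/* _ttb and _ttb_subtables are linker symbols (see <symbols.h>); the backing
 * regions are reserved in the board's memlayout. */
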
static struct {
	pte_t value;
	const char *name;
} attrs[] = {
	[DCACHE_OFF] = {.value = ATTR_NC, .name = "uncached"},
	[DCACHE_WRITEBACK] = {.value = ATTR_WB, .name = "writeback"},
	[DCACHE_WRITETHROUGH] = {.value = ATTR_WT, .name = "writethrough"},
};

/* Fills page table entries in |table| from |start_idx| to |end_idx| with |attr|
 * and performs necessary invalidations. |offset| is the start address of the
 * area described by |table|, and |shift| is the size-shift of each frame. */
static void mmu_fill_table(pte_t *table, u32 start_idx, u32 end_idx,
			   uintptr_t offset, u32 shift, pte_t attr)
{
	int i;

	/* Write out page table entries. */
	for (i = start_idx; i < end_idx; i++)
		table[i] = (offset + (i << shift)) | attr;

	/* Flush the page table entries from the dcache. */
	for (i = start_idx; i < end_idx; i++)
		dccmvac((uintptr_t)&table[i]);
	dsb();

	/* Invalidate the TLB entries. */
	for (i = start_idx; i < end_idx; i++)
		tlbimva(offset + (i << shift));
	dsb();
	isb();
}
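/* For example, mmu_fill_table(table, 0, 256, 0x20000000, PAGE_SHIFT, attr)
 * writes 256 consecutive 4KB-page entries covering [0x20000000:0x20100000),
 * cleans them to the point of coherency, and invalidates any stale TLB
 * entries for that range (0x20000000 is just an illustrative address). */
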
static pte_t *mmu_create_subtable(pte_t *pgd_entry)
{
	pte_t *table = (pte_t *)_ttb_subtables;

	/* Find unused subtable (first PTE == ATTR_UNUSED). */
	while (table[0] != ATTR_UNUSED) {
		table += SUBTABLE_PTES;
		if ((pte_t *)_ettb_subtables - table <= 0)
			die("Not enough room for another sub-pagetable!");
	}

	/* We assume that *pgd_entry must already be a valid block mapping. */
	uintptr_t start_addr = (uintptr_t)(*pgd_entry & BLOCK_MASK);
	printk(BIOS_DEBUG, "Creating new subtable @%p for [%#.8lx:%#.8lx)\n",
	       table, start_addr, start_addr + BLOCK_SIZE);

	/* Initialize the new subtable with entries of the same attributes
	 * (XN bit moves from 4 to 0, set PAGE unless block was unmapped). */
	pte_t attr = *pgd_entry & ~(BLOCK_MASK);
	if (!CONFIG(ARM_LPAE) && (attr & (1 << 4)))
		attr = ((attr & ~(1 << 4)) | (1 << 0));
	if (attr & ATTR_BLOCK)
		attr = (attr & ~ATTR_BLOCK) | ATTR_PAGE;
	mmu_fill_table(table, 0, SUBTABLE_PTES, start_addr, PAGE_SHIFT, attr);

	/* Replace old entry in upper level table to point at subtable. */
	*pgd_entry = (pte_t)(uintptr_t)table | ATTR_NEXTLEVEL;
	dccmvac((uintptr_t)pgd_entry);
	dsb();
	tlbimva(start_addr);
	dsb();
	isb();

	return table;
}
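/* The TLB may still hold the old block translation until the tlbimva above
 * completes, but since the fresh subtable maps the same addresses with the
 * same attributes, the transient aliasing is harmless. */
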
static pte_t *mmu_validate_create_sub_table(u32 start_kb, u32 size_kb)
{
	pte_t *pgd_entry = &ttb_buff[start_kb / (BLOCK_SIZE/KiB)];
	pte_t *table = (void *)(uintptr_t)(*pgd_entry & NEXTLEVEL_MASK);

	/* Make sure the range is contained within a single superpage. */
	assert(((start_kb + size_kb - 1) & (BLOCK_MASK/KiB))
	       == (start_kb & (BLOCK_MASK/KiB)) && start_kb < 4 * (GiB/KiB));

	if ((*pgd_entry & ~NEXTLEVEL_MASK) != ATTR_NEXTLEVEL)
		table = mmu_create_subtable(pgd_entry);

	return table;
}

void mmu_config_range_kb(u32 start_kb, u32 size_kb, enum dcache_policy policy)
{
	pte_t *table = mmu_validate_create_sub_table(start_kb, size_kb);

	/* Always _one_ _damn_ bit that won't fit... (XN moves from 4 to 0) */
	pte_t attr = attrs[policy].value;
	if (!CONFIG(ARM_LPAE) && (attr & (1 << 4)))
		attr = ((attr & ~(1 << 4)) | (1 << 0));

	/* Mask away high address bits that are handled by upper level table. */
	u32 mask = BLOCK_SIZE/KiB - 1;
	printk(BIOS_DEBUG, "Mapping address range [%#.8x:%#.8x) as %s\n",
	       start_kb * KiB, (start_kb + size_kb) * KiB, attrs[policy].name);

	u32 end_kb = ALIGN_UP((start_kb + size_kb), PAGE_SIZE/KiB) -
		     (start_kb & ~mask);

	assert(end_kb <= BLOCK_SIZE/KiB);

	mmu_fill_table(table, (start_kb & mask) / (PAGE_SIZE/KiB),
		       end_kb / (PAGE_SIZE/KiB),
		       (start_kb & ~mask) * KiB, PAGE_SHIFT, ATTR_PAGE | attr);
}
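/* Illustrative use (addresses hypothetical): remap the 16KB at 0x10000000 as
 * uncached without touching the rest of its superpage:
 *
 *	mmu_config_range_kb(0x10000000 / KiB, 16, DCACHE_OFF);
 *
 * The whole range must stay within a single superpage, or the assert in
 * mmu_validate_create_sub_table() fires. */
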
void mmu_disable_range_kb(u32 start_kb, u32 size_kb)
{
	pte_t *table = mmu_validate_create_sub_table(start_kb, size_kb);

	/* Mask away high address bits that are handled by upper level table. */
	u32 mask = BLOCK_SIZE/KiB - 1;
	printk(BIOS_DEBUG, "Setting address range [%#.8x:%#.8x) as unmapped\n",
	       start_kb * KiB, (start_kb + size_kb) * KiB);
	mmu_fill_table(table, (start_kb & mask) / (PAGE_SIZE/KiB),
		       DIV_ROUND_UP((start_kb + size_kb) & mask, PAGE_SIZE/KiB),
		       (start_kb & ~mask) * KiB, PAGE_SHIFT, 0);
}

void mmu_disable_range(u32 start_mb, u32 size_mb)
{
	printk(BIOS_DEBUG, "Setting address range [%#.8x:%#.8x) as unmapped\n",
	       start_mb * MiB, (start_mb + size_mb) * MiB);
	assert(start_mb + size_mb <= 4 * (GiB/MiB));
	mmu_fill_table(ttb_buff, start_mb / (BLOCK_SIZE/MiB),
		       DIV_ROUND_UP(start_mb + size_mb, BLOCK_SIZE/MiB),
		       0, BLOCK_SHIFT, 0);
}

void mmu_config_range(u32 start_mb, u32 size_mb, enum dcache_policy policy)
{
	printk(BIOS_DEBUG, "Mapping address range [%#.8x:%#.8x) as %s\n",
	       start_mb * MiB, (start_mb + size_mb) * MiB, attrs[policy].name);
	assert(start_mb + size_mb <= 4 * (GiB/MiB));
	mmu_fill_table(ttb_buff, start_mb / (BLOCK_SIZE/MiB),
		       DIV_ROUND_UP(start_mb + size_mb, BLOCK_SIZE/MiB),
		       0, BLOCK_SHIFT, ATTR_BLOCK | attrs[policy].value);
}
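/* A typical boot-time sequence (illustrative; real ranges come from SoC or
 * mainboard code):
 *
 *	mmu_init();
 *	mmu_config_range(0, 4096, DCACHE_OFF);		- default: 4GB uncached
 *	mmu_config_range(dram_base_mb, dram_size_mb, DCACHE_WRITEBACK);
 *
 * where dram_base_mb and dram_size_mb are hypothetical stand-ins for the
 * platform's DRAM window in MB. The MMU and dcache are enabled separately
 * (see <arch/cache.h>). */
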
/*
 * For coreboot's purposes, we will create a simple identity map.
 *
 * If LPAE is disabled, we will create an L1 page table in RAM with 1MB
 * section translation entries over the 4GB address space.
 * (ref: section 10.2 and example 15-4 in Cortex-A series programmer's guide)
 *
 * If LPAE is enabled, we do two-level translation with one L1 table with 4
 * entries, each covering a 1GB space, and four L2 tables with 512 entries,
 * each covering a 2MB space.
 */
void mmu_init(void)
{
	/* Initially mark all subtables as unused (first PTE == ATTR_UNUSED). */
	pte_t *table = (pte_t *)_ttb_subtables;
	for (; (pte_t *)_ettb_subtables - table > 0; table += SUBTABLE_PTES)
		table[0] = ATTR_UNUSED;

	if (CONFIG(ARM_LPAE)) {
		pte_t *const pgd_buff = (pte_t *)(_ttb + 16*KiB);
		pte_t *pmd = ttb_buff;
		int i;

		printk(BIOS_DEBUG, "LPAE Translation tables are @ %p\n",
		       ttb_buff);
		ASSERT((read_mmfr0() & 0xf) >= 5);

		/*
		 * Set MAIR
		 * See B4.1.104 of ARMv7 Architecture Reference Manual
		 */
		write_mair0(
			0x00 << (MAIR_INDX_NC*8) |	/* Strongly-ordered,
							 * Non-Cacheable */
			0xaa << (MAIR_INDX_WT*8) |	/* Write-Thru,
							 * Read-Allocate */
			0xff << (MAIR_INDX_WB*8)	/* Write-Back,
							 * Read/Write-Allocate */
			);

		/*
		 * Set up L1 table
		 * Once set here, L1 table won't be modified by coreboot.
		 * See B3.6.1 of ARMv7 Architecture Reference Manual
		 */
		for (i = 0; i < 4; i++) {
			pgd_buff[i] = ((uint32_t)pmd & NEXTLEVEL_MASK) |
				ATTR_NEXTLEVEL;
			pmd += BLOCK_SIZE / PAGE_SIZE;
		}

		/*
		 * Set TTBR0
		 */
		write_ttbr0((uintptr_t)pgd_buff);
	} else {
		printk(BIOS_DEBUG, "Translation table is @ %p\n", ttb_buff);

		/*
		 * Translation table base 0 address is in bits 31:14-N, where N
		 * is given by bits 2:0 in TTBCR (which we set to 0). All lower
		 * bits in this register should be zero for coreboot.
		 */
		write_ttbr0((uintptr_t)ttb_buff);
	}

	/*
	 * Set TTBCR
	 * See B4.1.153 of ARMv7 Architecture Reference Manual
	 * See B3.5.4 and B3.6.4 for how TTBR0 or TTBR1 is selected.
	 */
	write_ttbcr(
		CONFIG(ARM_LPAE) << 31 |	/* EAE. 1:Enable LPAE */
		0 << 16 | 0 << 0		/* Use TTBR0 for all addresses */
		);

	/* Set domain 0 to Client so XN bit works (to prevent prefetches) */
	write_dacr(0x5);
}