src/arch/arm64/armv8/mmu.c

/* SPDX-License-Identifier: BSD-3-Clause */

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <symbols.h>

#include <arch/barrier.h>
#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/mmu.h>
#include <console/console.h>

/* 12 hex digits (48 bits VA) plus 1 for exclusive upper bound. */
#define ADDR_FMT "0x%013lx"

/* This just caches the next free table slot (okay to do since they fill up from
 * bottom to top and can never be freed up again). It will reset to its initial
 * value on stage transition, so we still need to check it for UNUSED_DESC. */
static uint64_t *next_free_table = (void *)_ttb;

static void print_tag(int level, uint64_t tag)
{
        printk(level, tag & MA_MEM_NC ? "non-cacheable | " :
                                        "    cacheable | ");
        printk(level, tag & MA_RO ?     "read-only | " :
                                        "read-write | ");
        printk(level, tag & MA_NS ?     "non-secure | " :
                                        "    secure | ");
        printk(level, tag & MA_MEM ?    "normal\n" :
                                        "device\n");
}

/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in memrange
 * region
 */
static uint64_t get_block_attr(unsigned long tag)
{
        uint64_t attr;

        attr = (tag & MA_NS) ? BLOCK_NS : 0;
        attr |= (tag & MA_RO) ? BLOCK_AP_RO : BLOCK_AP_RW;
        attr |= BLOCK_ACCESS;

        if (tag & MA_MEM) {
                attr |= BLOCK_SH_INNER_SHAREABLE;
                if (tag & MA_MEM_NC)
                        attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
                else
                        attr |= BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT;
        } else {
                attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
                attr |= BLOCK_XN;
        }

        return attr;
}

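/*
 * Illustrative reading of the logic above (not an exhaustive list): a
 * cacheable, secure, read-write normal-memory tag (MA_MEM set; MA_NS, MA_RO
 * and MA_MEM_NC clear) resolves to
 *      BLOCK_AP_RW | BLOCK_ACCESS | BLOCK_SH_INNER_SHAREABLE |
 *      (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT)
 * whereas a device-memory tag (MA_MEM clear) selects the nGnRnE MAIR index
 * and additionally sets BLOCK_XN so the region is never executable.
 */
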
/* Func : table_level_name
 * Desc : Get the translation table level name for the given xlat size.
 */
static const char *table_level_name(size_t xlat_size)
{
        switch (xlat_size) {
        case L0_XLAT_SIZE:
                return "L0";
        case L1_XLAT_SIZE:
                return "L1";
        case L2_XLAT_SIZE:
                return "L2";
        case L3_XLAT_SIZE:
                return "L3";
        default:
                return "";
        }
}

/* Func : setup_new_table
 * Desc : Get next free table from TTB and set it up to match old parent entry.
 */
static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
        while (next_free_table[0] != UNUSED_DESC) {
                next_free_table += GRANULE_SIZE/sizeof(*next_free_table);
                if (_ettb - (u8 *)next_free_table <= 0)
                        die("Ran out of page table space!");
        }

        void *frame_base = (void *)(desc & XLAT_ADDR_MASK);
        const char *level_name = table_level_name(xlat_size);
        printk(BIOS_DEBUG,
               "Backing address range [" ADDR_FMT ":" ADDR_FMT ") with new %s table @%p\n",
               (uintptr_t)frame_base,
               (uintptr_t)frame_base + (xlat_size << BITS_RESOLVED_PER_LVL),
               level_name, next_free_table);

        if (!desc) {
                memset(next_free_table, 0, GRANULE_SIZE);
        } else {
                /* Can reuse old parent entry, but may need to adjust type. */
                if (xlat_size == L3_XLAT_SIZE)
                        desc |= PAGE_DESC;

                int i = 0;
                for (; i < GRANULE_SIZE/sizeof(*next_free_table); i++) {
                        next_free_table[i] = desc;
                        desc += xlat_size;
                }
        }

        return next_free_table;
}

/* Func: get_next_level_table
 * Desc: Check if the table entry is a valid descriptor. If not, initialize a
 * new table, update the entry and return the table addr. If valid, return the
 * addr the entry already points to.
 */
static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
{
        uint64_t desc = *ptr;

        if ((desc & DESC_MASK) != TABLE_DESC) {
                uint64_t *new_table = setup_new_table(desc, xlat_size);
                desc = ((uint64_t)new_table) | TABLE_DESC;
                *ptr = desc;
        }
        return (uint64_t *)(desc & XLAT_ADDR_MASK);
}

/* Func : init_xlat_table
 * Desc : Given a base address and size, it identifies the indices within
 * different level XLAT tables which map the given base addr. Similar to table
 * walk, except that all invalid entries during the walk are updated
 * accordingly. On success, it returns the size of the block/page addressed by
 * the final table.
 */
static uint64_t init_xlat_table(uint64_t base_addr,
                                uint64_t size,
                                uint64_t tag)
{
        uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
        uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
        uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
        uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
        uint64_t *table = (uint64_t *)_ttb;
        uint64_t desc;
        uint64_t attr = get_block_attr(tag);

        /* L0 entry stores a table descriptor (doesn't support blocks) */
        table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);

        /* L1 table lookup */
        if ((size >= L1_XLAT_SIZE) &&
            IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
                /* If block address is aligned and size is greater than
                 * or equal to size addressed by each L1 entry, we can
                 * directly store a block desc */
                desc = base_addr | BLOCK_DESC | attr;
                table[l1_index] = desc;
                /* L2 lookup is not required */
                return L1_XLAT_SIZE;
        }

        /* L1 entry stores a table descriptor */
        table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);

        /* L2 table lookup */
        if ((size >= L2_XLAT_SIZE) &&
            IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
                /* If block address is aligned and size is greater than
                 * or equal to size addressed by each L2 entry, we can
                 * directly store a block desc */
                desc = base_addr | BLOCK_DESC | attr;
                table[l2_index] = desc;
                /* L3 lookup is not required */
                return L2_XLAT_SIZE;
        }

        /* L2 entry stores a table descriptor */
        table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);

        /* L3 table lookup */
        desc = base_addr | PAGE_DESC | attr;
        table[l3_index] = desc;
        return L3_XLAT_SIZE;
}

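/*
 * Worked example, assuming the 4 KiB granule configured in mmu_init() below:
 * an L1 block maps 1 GiB, an L2 block maps 2 MiB and an L3 page maps 4 KiB.
 * Mapping 8 MiB starting at a 2 MiB-aligned base therefore takes four calls,
 * each returning L2_XLAT_SIZE, while the head or tail of a range that is not
 * 2 MiB-aligned falls through to individual 4 KiB L3 pages.
 */
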
/* Func : sanity_check
 * Desc : Check address/size alignment of a table or page.
 */
static void sanity_check(uint64_t addr, uint64_t size)
{
        assert(!(addr & GRANULE_SIZE_MASK) &&
               !(size & GRANULE_SIZE_MASK) &&
               (addr + size < (1UL << BITS_PER_VA)) &&
               size >= GRANULE_SIZE);
}

/* Func : get_pte
 * Desc : Returns the page table entry governing a specific address. */
static uint64_t get_pte(void *addr)
{
        int shift = L0_ADDR_SHIFT;
        uint64_t *pte = (uint64_t *)_ttb;

        while (1) {
                int index = ((uintptr_t)addr >> shift) &
                            ((1UL << BITS_RESOLVED_PER_LVL) - 1);

                if ((pte[index] & DESC_MASK) != TABLE_DESC ||
                    shift <= GRANULE_SIZE_SHIFT)
                        return pte[index];

                pte = (uint64_t *)(pte[index] & XLAT_ADDR_MASK);
                shift -= BITS_RESOLVED_PER_LVL;
        }
}

/* Func : assert_correct_ttb_mapping
 * Desc : Asserts that mapping for addr matches the access type used by the
 * page table walk (i.e. addr is correctly mapped to be part of the TTB). */
static void assert_correct_ttb_mapping(void *addr)
{
        uint64_t pte = get_pte(addr);
        assert(((pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
               == BLOCK_INDEX_MEM_NORMAL && !(pte & BLOCK_NS));
}

/* Func : mmu_config_range
 * Desc : This function repeatedly calls init_xlat_table with the base
 * address. Based on size returned from init_xlat_table, base_addr is updated
 * and subsequent calls are made for initializing the xlat table until the whole
 * region is initialized.
 */
void mmu_config_range(void *start, size_t size, uint64_t tag)
{
        uint64_t base_addr = (uintptr_t)start;
        uint64_t temp_size = size;

        printk(BIOS_INFO, "Mapping address range [" ADDR_FMT ":" ADDR_FMT ") as ",
               (uintptr_t)start, (uintptr_t)start + size);
        print_tag(BIOS_INFO, tag);

        sanity_check(base_addr, temp_size);

        while (temp_size)
                temp_size -= init_xlat_table(base_addr + (size - temp_size),
                                             temp_size, tag);

        /* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
        dsb();
        tlbiall();
        dsb();
        isb();
}

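/*
 * Example of how the loop above decomposes a range (the address and tag are
 * hypothetical, sizes assume the 4 KiB granule): a call such as
 *
 *      mmu_config_range((void *)0x80000000, 2*MiB + 4*KiB, tag);
 *
 * is satisfied by one 2 MiB L2 block followed by one 4 KiB L3 page, with
 * init_xlat_table() reporting the size it consumed on each iteration.
 */
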
/* Func : mmu_init
 * Desc : Initialize MMU registers and page table memory region. This must be
 * called exactly ONCE PER BOOT before trying to configure any mappings.
 */
void mmu_init(void)
{
        /* Initially mark all table slots unused (first PTE == UNUSED_DESC). */
        uint64_t *table = (uint64_t *)_ttb;
        for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE/sizeof(*table))
                table[0] = UNUSED_DESC;

        /* Initialize the root table (L0) to be completely unmapped. */
        uint64_t *root = setup_new_table(INVALID_DESC, L0_XLAT_SIZE);
        assert((u8 *)root == _ttb);

        /* Initialize TTBR */
        raw_write_ttbr0((uintptr_t)root);

        /* Initialize MAIR indices */
        raw_write_mair(MAIR_ATTRIBUTES);

        /* Initialize TCR flags */
        raw_write_tcr(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
                      TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
                      TCR_TBI_USED);
}

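/*
 * Note on the TCR value above: TCR_TOSZ sets T0SZ so that TTBR0 covers the
 * full BITS_PER_VA-bit (48-bit) virtual address space, the IRGN0/ORGN0/SH0
 * fields make hardware table walks write-back cacheable and inner-shareable,
 * TCR_TG0_4KB selects the 4 KiB translation granule and TCR_PS_256TB allows
 * 48-bit physical addresses.
 */
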
/* Func : mmu_save_context
 * Desc : Save mmu context (registers and ttbr base).
 */
void mmu_save_context(struct mmu_context *mmu_context)
{
        assert(mmu_context);

        /* Back-up MAIR_ATTRIBUTES */
        mmu_context->mair = raw_read_mair();

        /* Back-up TCR value */
        mmu_context->tcr = raw_read_tcr();
}

/* Func : mmu_restore_context
 * Desc : Restore mmu context using input backed-up context
 */
void mmu_restore_context(const struct mmu_context *mmu_context)
{
        assert(mmu_context);

        /* Restore TTBR */
        raw_write_ttbr0((uintptr_t)_ttb);

        /* Restore MAIR indices */
        raw_write_mair(mmu_context->mair);

        /* Restore TCR flags */
        raw_write_tcr(mmu_context->tcr);

        /* invalidate tlb since ttbr is updated. */
        tlb_invalidate_all();
}

void mmu_enable(void)
{
        assert_correct_ttb_mapping(_ttb);
        assert_correct_ttb_mapping((void *)((uintptr_t)_ettb - 1));

        uint32_t sctlr = raw_read_sctlr();
        raw_write_sctlr(sctlr | SCTLR_C | SCTLR_M | SCTLR_I);

        isb();
}
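
/*
 * Typical boot-time usage, sketched here purely for illustration (the exact
 * regions and tags are platform-specific):
 *
 *      mmu_init();
 *      mmu_config_range((void *)0, (uintptr_t)4 * GiB, MA_DEV | MA_S | MA_RW);
 *      mmu_config_range(_dram, dram_size, MA_MEM | MA_NS | MA_RW);
 *      mmu_enable();
 *
 * i.e. set up the table pool and translation registers once per boot,
 * describe each region of the address space, then turn on the MMU and caches.
 * (dram_size stands in for however the platform determines its DRAM size.)
 */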