/* SPDX-License-Identifier: BSD-3-Clause */

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <symbols.h>

#include <arch/barrier.h>
#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/mmu.h>
#include <console/console.h>

/* 12 hex digits (48 bits VA) plus 1 for exclusive upper bound. */
#define ADDR_FMT "0x%013lx"

/* This just caches the next free table slot (okay to do since they fill up from
 * bottom to top and can never be freed up again). It will reset to its initial
 * value on stage transition, so we still need to check it for UNUSED_DESC. */
static uint64_t *next_free_table = (void *)_ttb;

static void print_tag(int level, uint64_t tag)
{
	printk(level, tag & MA_MEM_NC ?	"non-cacheable | " :
					"    cacheable | ");
	printk(level, tag & MA_RO ?	"read-only | " :
					"read-write | ");
	printk(level, tag & MA_NS ?	"non-secure | " :
					"    secure | ");
	printk(level, tag & MA_MEM ?	"normal\n" :
					"device\n");
}

/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in memrange
 * region
 */
static uint64_t get_block_attr(unsigned long tag)
{
	uint64_t attr;

	attr = (tag & MA_NS) ? BLOCK_NS : 0;
	attr |= (tag & MA_RO) ? BLOCK_AP_RO : BLOCK_AP_RW;
	/* Pre-set the access flag since access faults are not handled. */
	attr |= BLOCK_ACCESS;
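
	/* MA_MEM vs. MA_DEV and MA_MEM_NC select which MAIR attribute index
	 * the descriptor points at: normal write-back memory, normal
	 * non-cacheable memory, or device (nGnRnE) memory. Only normal memory
	 * is marked inner-shareable. */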
	if (tag & MA_MEM) {
		attr |= BLOCK_SH_INNER_SHAREABLE;
		if (tag & MA_MEM_NC)
			attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
		else
			attr |= BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT;
	} else {
		attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
		attr |= BLOCK_XN;
	}

	return attr;
}

/* Func : table_level_name
 * Desc : Get the translation table level name for the given size.
 */
static const char *table_level_name(size_t xlat_size)
{
	switch (xlat_size) {
	case L0_XLAT_SIZE:
		return "L0";
	case L1_XLAT_SIZE:
		return "L1";
	case L2_XLAT_SIZE:
		return "L2";
	case L3_XLAT_SIZE:
		return "L3";
	default:
		return "";
	}
}

/* Func : setup_new_table
 * Desc : Get next free table from TTB and set it up to match old parent entry.
 */
static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
	while (next_free_table[0] != UNUSED_DESC) {
		next_free_table += GRANULE_SIZE/sizeof(*next_free_table);
		if (_ettb - (u8 *)next_free_table <= 0)
			die("Ran out of page table space!");
	}

	void *frame_base = (void *)(desc & XLAT_ADDR_MASK);
	const char *level_name = table_level_name(xlat_size);
	printk(BIOS_DEBUG,
	       "Backing address range [" ADDR_FMT ":" ADDR_FMT ") with new %s table @%p\n",
	       (uintptr_t)frame_base,
	       (uintptr_t)frame_base + (xlat_size << BITS_RESOLVED_PER_LVL),
	       level_name, next_free_table);
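
	/* An invalid parent descriptor means nothing was mapped here before,
	 * so the fresh table starts out fully unmapped. Otherwise the parent's
	 * block mapping is split into equivalent finer-grained entries so that
	 * existing translations remain unchanged. */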
	if (!desc) {
		memset(next_free_table, 0, GRANULE_SIZE);
	} else {
		/* Can reuse old parent entry, but may need to adjust type. */
		if (xlat_size == L3_XLAT_SIZE)
			desc |= PAGE_DESC;

		int i = 0;
		for (; i < GRANULE_SIZE/sizeof(*next_free_table); i++) {
			next_free_table[i] = desc;
			desc += xlat_size;
		}
	}

	return next_free_table;
}

/* Func: get_next_level_table
 * Desc: Check if the table entry is a valid descriptor. If not, initialize new
 * table, update the entry and return the table addr. If valid, return the addr
 */
static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
{
	uint64_t desc = *ptr;

	if ((desc & DESC_MASK) != TABLE_DESC) {
		uint64_t *new_table = setup_new_table(desc, xlat_size);
		desc = ((uint64_t)new_table) | TABLE_DESC;
		*ptr = desc;
	}
	return (uint64_t *)(desc & XLAT_ADDR_MASK);
}

/* Func : init_xlat_table
 * Desc : Given a base address and size, it identifies the indices within
 * different level XLAT tables which map the given base addr. Similar to table
 * walk, except that all invalid entries during the walk are updated
 * accordingly. On success, it returns the size of the block/page addressed by
 * the final table.
 */
static uint64_t init_xlat_table(uint64_t base_addr,
				uint64_t size,
				uint64_t tag)
{
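	/* With a 4KiB granule and 48-bit VAs, each level resolves 9 address
	 * bits: an L1 block maps 1GiB, an L2 block 2MiB and an L3 page 4KiB. */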
	uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
	uint64_t *table = (uint64_t *)_ttb;
	uint64_t desc;
	uint64_t attr = get_block_attr(tag);

	/* L0 entry stores a table descriptor (doesn't support blocks) */
	table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);

	/* L1 table lookup */
	if ((size >= L1_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L1 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l1_index] = desc;
		/* L2 lookup is not required */
		return L1_XLAT_SIZE;
	}

	/* L1 entry stores a table descriptor */
	table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);

	/* L2 table lookup */
	if ((size >= L2_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L2 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l2_index] = desc;
		/* L3 lookup is not required */
		return L2_XLAT_SIZE;
	}

	/* L2 entry stores a table descriptor */
	table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);

	/* L3 table lookup */
	desc = base_addr | PAGE_DESC | attr;
	table[l3_index] = desc;
	return L3_XLAT_SIZE;
}

/* Func : sanity_check
 * Desc : Check address/size alignment of a table or page.
 */
static void sanity_check(uint64_t addr, uint64_t size)
{
	assert(!(addr & GRANULE_SIZE_MASK) &&
	       !(size & GRANULE_SIZE_MASK) &&
	       (addr + size < (1UL << BITS_PER_VA)) &&
	       size >= GRANULE_SIZE);
}

/* Func : get_pte
 * Desc : Returns the page table entry governing a specific address. */
static uint64_t get_pte(void *addr)
{
	int shift = L0_ADDR_SHIFT;
	uint64_t *pte = (uint64_t *)_ttb;
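
	/* Walk down from the root, consuming BITS_RESOLVED_PER_LVL address
	 * bits per level, until the entry is not a table descriptor or the
	 * last level (4KiB granule) has been reached. */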
	while (1) {
		int index = ((uintptr_t)addr >> shift) &
			    ((1UL << BITS_RESOLVED_PER_LVL) - 1);

		if ((pte[index] & DESC_MASK) != TABLE_DESC ||
		    shift <= GRANULE_SIZE_SHIFT)
			return pte[index];

		pte = (uint64_t *)(pte[index] & XLAT_ADDR_MASK);
		shift -= BITS_RESOLVED_PER_LVL;
	}
}

/* Func : assert_correct_ttb_mapping
 * Desc : Asserts that mapping for addr matches the access type used by the
 * page table walk (i.e. addr is correctly mapped to be part of the TTB). */
static void assert_correct_ttb_mapping(void *addr)
{
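	/* Table walks use the (cacheable, secure) attributes programmed for
	 * this translation regime; mapping the TTB region with mismatched
	 * attributes could leave the walker incoherent with the CPU's cached
	 * updates to the tables. */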
	uint64_t pte = get_pte(addr);
	assert(((pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
	       == BLOCK_INDEX_MEM_NORMAL && !(pte & BLOCK_NS));
}

/* Func : mmu_config_range
 * Desc : This function repeatedly calls init_xlat_table with the base
 * address. Based on size returned from init_xlat_table, base_addr is updated
 * and subsequent calls are made for initializing the xlat table until the whole
 * region is initialized.
 */
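/* Example (hypothetical address; tag macros from arch/mmu.h):
 *	mmu_config_range((void *)0x10000000, 0x200000, MA_DEV | MA_S | MA_RW);
 * maps 2MiB of MMIO as secure, read-write device memory.
 */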
void mmu_config_range(void *start, size_t size, uint64_t tag)
{
	uint64_t base_addr = (uintptr_t)start;
	uint64_t temp_size = size;

	printk(BIOS_INFO, "Mapping address range [" ADDR_FMT ":" ADDR_FMT ") as ",
	       (uintptr_t)start, (uintptr_t)start + size);
	print_tag(BIOS_INFO, tag);

	sanity_check(base_addr, temp_size);
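
	/* Each pass maps the largest naturally aligned block or page that fits
	 * at the current offset and returns its size, so the loop converges on
	 * the full region. */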
	while (temp_size)
		temp_size -= init_xlat_table(base_addr + (size - temp_size),
					     temp_size, tag);

	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
	dsb();
	tlb_invalidate_all();
	dsb();
	isb();
}

/* Func : mmu_init
 * Desc : Initialize MMU registers and page table memory region. This must be
 * called exactly ONCE PER BOOT before trying to configure any mappings.
 */
void mmu_init(void)
{
	/* Initially mark all table slots unused (first PTE == UNUSED_DESC). */
	uint64_t *table = (uint64_t *)_ttb;
	for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE/sizeof(*table))
		table[0] = UNUSED_DESC;

	/* Initialize the root table (L0) to be completely unmapped. */
	uint64_t *root = setup_new_table(INVALID_DESC, L0_XLAT_SIZE);
	assert((u8 *)root == _ttb);
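
	/* The root table must be the very first granule at _ttb: TTBR0 always
	 * points at _ttb (see mmu_restore_context), and the slot allocator
	 * hands out tables in order starting there. */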

	/* Initialize TTBR */
	raw_write_ttbr0((uintptr_t)root);

	/* Initialize MAIR indices */
	raw_write_mair(MAIR_ATTRIBUTES);

	/* Initialize TCR flags */
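	/* 48-bit VA space for TTBR0, inner/outer write-back write-allocate
	 * cacheable and inner-shareable table walks, 4KiB granule, 256TB
	 * (48-bit) physical range. */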
	raw_write_tcr(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
		      TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
		      TCR_TBI_USED);
}

/* Func : mmu_save_context
 * Desc : Save mmu context (registers and ttbr base).
 */
void mmu_save_context(struct mmu_context *mmu_context)
{
	assert(mmu_context);

	/* Back-up MAIR_ATTRIBUTES */
	mmu_context->mair = raw_read_mair();

	/* Back-up TCR value */
	mmu_context->tcr = raw_read_tcr();
}

/* Func : mmu_restore_context
 * Desc : Restore mmu context using input backed-up context
 */
void mmu_restore_context(const struct mmu_context *mmu_context)
{
	assert(mmu_context);
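
	/* TTBR0 is not part of the saved context: the page tables always live
	 * at _ttb, so restoring simply points TTBR0 back there. */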
	raw_write_ttbr0((uintptr_t)_ttb);

	/* Restore MAIR indices */
	raw_write_mair(mmu_context->mair);

	/* Restore TCR flags */
	raw_write_tcr(mmu_context->tcr);

	/* Invalidate TLB since TTBR is updated. */
	tlb_invalidate_all();
}

void mmu_enable(void)
{
	assert_correct_ttb_mapping(_ttb);
	assert_correct_ttb_mapping((void *)((uintptr_t)_ettb - 1));
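
	/* The page tables are in place and mapped with the attributes the
	 * walker expects, so the MMU can be enabled together with the
	 * instruction and data caches. */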
	uint32_t sctlr = raw_read_sctlr();
	raw_write_sctlr(sctlr | SCTLR_C | SCTLR_M | SCTLR_I);

	isb();
}