/*
 * Copyright 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#include <arch/mmu.h>
#include <arch/lib_helpers.h>
#include <arch/cache.h>
/* Maximum number of XLAT Tables available based on ttb buffer size */
static unsigned int max_tables;
/* Address of ttb buffer */
static uint64_t *xlat_addr;
/*
 * The section ".bss.ttb_buffer" is referenced in the linker script of
 * ChromeOS's depthcharge payload. Please DO NOT change the section name
 * without discussing it with us.
 * Please contact: jwerner@chromium.org or yich@chromium.org
 */
static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __aligned(GRANULE_SIZE)
	__section(".bss.ttb_buffer");
static const char * const tag_to_string[] = {
	[TYPE_NORMAL_MEM] = "normal",
	[TYPE_DEV_MEM] = "device",
	[TYPE_DMA_MEM] = "uncached",
};
/*
 * The usedmem_ranges structure describes all the memory ranges that are
 * actually used by the payload, i.e. _start -> _end in the linker script
 * plus the coreboot tables. This is required for two purposes:
 * 1) During pre_sysinfo_scan_mmu_setup, these are the only ranges
 * initialized in the page table, since we do not know the entire memory map.
 * 2) During post_sysinfo_scan_mmu_setup, these ranges are used to check that
 * the DMA buffer is being placed in a sane location and does not overlap any
 * of the used memory ranges.
 */
static struct mmu_ranges usedmem_ranges;
static void __attribute__((noreturn)) mmu_error(void)
/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in the
 * memrange region
 */
static uint64_t get_block_attr(unsigned long tag)
{
	uint64_t attr;

	/* We should be in EL2 (which is non-secure only) or EL1 (non-secure) */
	/* Assuming whole memory is read-write */

	switch (tag) {
	case TYPE_NORMAL_MEM:
		attr |= BLOCK_SH_INNER_SHAREABLE;
		attr |= (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT);
		break;
	case TYPE_DEV_MEM:
		attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
		break;
	case TYPE_DMA_MEM:
		attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
		break;
	}

	return attr;
}
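
/*
 * For reference: TYPE_NORMAL_MEM ends up cached and inner-shareable,
 * TYPE_DEV_MEM selects the strongly-ordered device (nGnRnE) attribute index,
 * and TYPE_DMA_MEM selects the non-cacheable normal-memory index so that
 * buffers shared with devices need no explicit cache maintenance.
 */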
/* Func : table_desc_valid
 * Desc : Check if a table entry contains a valid descriptor
 */
static uint64_t table_desc_valid(uint64_t desc)
{
	return ((desc & TABLE_DESC) == TABLE_DESC);
}
/* Func : setup_new_table
 * Desc : Get next free table from TTB and set it up to match old parent entry.
 */
static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
	uint64_t *new, *entry;

	assert(free_idx < max_tables);

	new = (uint64_t *)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);

	memset(new, 0, GRANULE_SIZE);

	/* Can reuse old parent entry, but may need to adjust type. */
	if (xlat_size == L3_XLAT_SIZE)
		desc |= PAGE_DESC;

	for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
	     entry++, desc += xlat_size)
		*entry = desc;

	return new;
}
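
/*
 * Note on the loop above: each entry of the newly allocated table covers
 * xlat_size bytes of the region the parent entry used to map, so advancing
 * desc by xlat_size per entry reproduces the parent's mapping at the finer
 * granularity.
 */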
/* Func : get_table_from_desc
 * Desc : Get next level table address from table descriptor
 */
static uint64_t *get_table_from_desc(uint64_t desc)
{
	uint64_t *ptr = (uint64_t *)(desc & XLAT_TABLE_MASK);

	return ptr;
}
/* Func: get_next_level_table
 * Desc: Check if the table entry is a valid descriptor. If not, initialize a
 * new table, update the entry and return the table address. If valid, simply
 * return the address.
 */
static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
{
	uint64_t desc = *ptr;

	if (!table_desc_valid(desc)) {
		uint64_t *new_table = setup_new_table(desc, xlat_size);

		desc = ((uint64_t)new_table) | TABLE_DESC;
		*ptr = desc;
	}

	return get_table_from_desc(desc);
}
/* Func : init_xlat_table
 * Desc : Given a base address and size, it identifies the indices within the
 * different level XLAT tables which map the given base addr. Similar to a
 * table walk, except that all invalid entries during the walk are updated
 * accordingly. On success, it returns the size of the block/page addressed by
 * the final table.
 */
static uint64_t init_xlat_table(uint64_t base_addr,
				uint64_t size,
				uint64_t tag)
{
	uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
	uint64_t *table = xlat_addr;
	uint64_t desc;
	uint64_t attr = get_block_attr(tag);

	/* L0 entry stores a table descriptor (doesn't support blocks) */
	table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);

	/* L1 table lookup */
	if ((size >= L1_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L1 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l1_index] = desc;
		/* L2 lookup is not required */
		return L1_XLAT_SIZE;
	}

	/* L1 entry stores a table descriptor */
	table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);

	/* L2 table lookup */
	if ((size >= L2_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L2 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l2_index] = desc;
		/* L3 lookup is not required */
		return L2_XLAT_SIZE;
	}

	/* L2 entry stores a table descriptor */
	table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);

	/* L3 table lookup */
	desc = base_addr | PAGE_DESC | attr;
	table[l3_index] = desc;

	return L3_XLAT_SIZE;
}
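
/*
 * Worked example (illustrative, assuming the usual ARMv8 4KB-granule shifts
 * of 39/30/21/12 for L0/L1/L2/L3): mapping base_addr = 0x80000000 with
 * size >= L1_XLAT_SIZE yields l0_index = 0 and l1_index = 2, so a single
 * 1GiB block descriptor is written at L1 index 2 and no L2/L3 tables are
 * allocated for that gigabyte.
 */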
/* Func : sanity_check
 * Desc : Check address/size alignment of a table or page.
 */
static void sanity_check(uint64_t addr, uint64_t size)
{
	assert(!(addr & GRANULE_SIZE_MASK) &&
	       !(size & GRANULE_SIZE_MASK) &&
	       (addr + size < (1UL << BITS_PER_VA)) &&
	       size >= GRANULE_SIZE);
}
/* Func : mmu_config_range
 * Desc : This function repeatedly calls init_xlat_table with the base
 * address. Based on the size returned from init_xlat_table, base_addr is
 * updated and subsequent calls are made for initializing the xlat table until
 * the whole region is initialized.
 */
void mmu_config_range(void *start, size_t size, uint64_t tag)
{
	uint64_t base_addr = (uintptr_t)start;
	uint64_t temp_size = size;

	assert(tag < ARRAY_SIZE(tag_to_string));
	printf("Libpayload: ARM64 MMU: Mapping address range [%p:%p) as %s\n",
	       start, start + size, tag_to_string[tag]);
	sanity_check(base_addr, temp_size);

	while (temp_size)
		temp_size -= init_xlat_table(base_addr + (size - temp_size),
					     temp_size, tag);

	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
}
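
/*
 * Usage sketch (illustrative; the address and size are made-up values, not
 * taken from the original code): once the DRAM window is known, a caller can
 * refine the default device mapping like this:
 *
 *	mmu_config_range((void *)(uintptr_t)0x80000000, 0x40000000,
 *			 TYPE_NORMAL_MEM);
 */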
/*
 * Func: mmu_init
 * Desc: Initialize the MMU based on the mmu_ranges passed in. ttb_buffer is
 * used as the base address for the xlat tables. TTB_DEFAULT_SIZE defines the
 * max number of tables that can be used.
 * Assumes that memory 0-4GiB is device memory.
 */
uint64_t mmu_init(struct mmu_ranges *mmu_ranges)
{
	int i = 0;

	xlat_addr = (uint64_t *)&ttb_buffer;

	memset((void *)xlat_addr, 0, GRANULE_SIZE);
	max_tables = (TTB_DEFAULT_SIZE >> GRANULE_SIZE_SHIFT);

	printf("Libpayload ARM64: TTB_BUFFER: %p Max Tables: %d\n",
	       (void *)xlat_addr, max_tables);

	/*
	 * To keep things simple we start with mapping the entire base 4GB as
	 * device memory. This accommodates various architectures' default
	 * settings (for instance rk3399 mmio starts at 0xf8000000); it is
	 * fine tuned (e.g. mapping DRAM areas as write-back) later in the
	 * boot process.
	 */
	mmu_config_range(NULL, 0x100000000, TYPE_DEV_MEM);

	for (; i < mmu_ranges->used; i++)
		mmu_config_range((void *)mmu_ranges->entries[i].base,
				 mmu_ranges->entries[i].size,
				 mmu_ranges->entries[i].type);

	printf("Libpayload ARM64: MMU init done\n");
	return 0;
}
static uint32_t is_mmu_enabled(void)
{
	uint32_t sctlr;

	sctlr = raw_read_sctlr_el2();

	return (sctlr & SCTLR_M);
}
/*
 * Func: mmu_enable
 * Desc: Initialize MAIR, TCR, TTBR and enable the MMU by setting the
 * appropriate bits in SCTLR.
 */
void mmu_enable(void)
{
	uint32_t sctlr;

	/* Initialize MAIR indices */
	raw_write_mair_el2(MAIR_ATTRIBUTES);

	/* Invalidate TLBs */

	/* Initialize TCR flags */
	raw_write_tcr_el2(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
			  TCR_TBI_USED);

	/* Initialize TTBR */
	raw_write_ttbr0_el2((uintptr_t)xlat_addr);

	/* Ensure system register writes are committed before enabling MMU */

	/* Enable MMU */
	sctlr = raw_read_sctlr_el2();
	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
	raw_write_sctlr_el2(sctlr);

	if (is_mmu_enabled())
		printf("ARM64: MMU enable done\n");
	else
		printf("ARM64: MMU enable failed\n");
}
/*
 * Func: mmu_add_memrange
 * Desc: Adds a new memory range
 */
static struct mmu_memrange *mmu_add_memrange(struct mmu_ranges *r,
					     uint64_t base, uint64_t size,
					     uint64_t type)
{
	struct mmu_memrange *curr = NULL;
	int i = r->used;

	if (i < ARRAY_SIZE(r->entries)) {
		curr = &r->entries[i];
		curr->base = base;
		curr->size = size;
		curr->type = type;

		r->used = i + 1;
	}

	return curr;
}
/* Structure to define properties of a new memrange request */
struct mmu_new_range_prop {
	/* Type of memrange */
	uint64_t type;
	/* Size of the range */
	uint64_t size;
	/*
	 * If any restrictions on the max addr limit (this addr is exclusive
	 * for the range), else 0
	 */
	uint64_t lim_excl;
	/* If any restrictions on alignment of the range base, else 0 */
	uint64_t align;
	/*
	 * Function to test whether the selected range is fine.
	 * NULL = any range is fine
	 * Return value: 1 = valid range, 0 = otherwise
	 */
	int (*is_valid_range)(uint64_t, uint64_t);
	/* From what type of source range should this range be extracted */
	uint64_t src_type;
};
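
/*
 * For a fully populated example of this request structure, see
 * mmu_add_dma_range() below, which asks for a GRANULE_SIZE-aligned
 * TYPE_DMA_MEM range carved out of TYPE_NORMAL_MEM below a fixed limit.
 */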
/*
 * Func: mmu_is_range_free
 * Desc: We need to ensure that the new range being allocated does not overlap
 * with any used memory range. Basically:
 * 1. Memory ranges used by the payload (usedmem_ranges)
 * 2. Any area that falls below the _end symbol in the linker script (the
 * kernel needs to be loaded in lower areas of memory, so the payload linker
 * script can place kernel memory below _start and _end; thus we want to make
 * sure we do not step into those areas either).
 * Returns: 1 on success, 0 on error
 * ASSUMPTION: All the memory used by the payload resides below the program
 * proper. If there is any memory used above the _end symbol, then it should be
 * marked as used memory in usedmem_ranges during the presysinfo_scan.
 */
static int mmu_is_range_free(uint64_t r_base,
			     uint64_t r_end)
{
	int i;
	uint64_t payload_end = (uint64_t)&_end;
	struct mmu_memrange *r = &usedmem_ranges.entries[0];

	/* Allocate memranges only above payload */
	if ((r_base <= payload_end) || (r_end <= payload_end))
		return 0;

	for (i = 0; i < usedmem_ranges.used; i++) {
		uint64_t start = r[i].base;
		uint64_t end = start + r[i].size;

		if ((start < r_end) && (end > r_base))
			return 0;
	}

	return 1;
}
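
/*
 * Example (illustrative values): with usedmem_ranges holding the single
 * entry [0x80000000, 0x80100000), mmu_is_range_free(0x80080000, 0x80200000)
 * returns 0 because the two half-open intervals overlap, while
 * mmu_is_range_free(0x80200000, 0x80300000) returns 1, provided both
 * addresses also lie above the payload's _end.
 */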
/*
 * Func: mmu_get_new_range
 * Desc: Add a requested new memrange. We take as input the set of all
 * memranges and a structure defining the new memrange properties, i.e. its
 * type, size, the max addr it can grow up to, alignment restrictions, the
 * source type to take the range from and finally a function pointer to check
 * whether the chosen range is valid.
 */
static struct mmu_memrange *mmu_get_new_range(struct mmu_ranges *mmu_ranges,
					      struct mmu_new_range_prop *new)
{
	int i = 0;
	struct mmu_memrange *r = &mmu_ranges->entries[0];

	if (new->size == 0) {
		printf("MMU Error: Invalid range size\n");
		return NULL;
	}

	for (; i < mmu_ranges->used; i++) {

		if ((r[i].type != new->src_type) ||
		    (r[i].size < new->size) ||
		    (new->lim_excl && (r[i].base >= new->lim_excl)))
			continue;

		uint64_t base_addr;
		uint64_t range_end_addr = r[i].base + r[i].size;
		uint64_t end_addr = range_end_addr;

		/* Make sure we do not go above max if it is non-zero */
		if (new->lim_excl && (end_addr >= new->lim_excl))
			end_addr = new->lim_excl;

		while (1) {
			/*
			 * In case of an alignment requirement,
			 * if end_addr is aligned, then base_addr will be too.
			 */
			if (new->align)
				end_addr = ALIGN_DOWN(end_addr, new->align);

			base_addr = end_addr - new->size;

			if (base_addr < r[i].base)
				break;

			/*
			 * If the selected range is not used and valid for the
			 * user, move ahead with it.
			 */
			if (mmu_is_range_free(base_addr, end_addr) &&
			    ((new->is_valid_range == NULL) ||
			     new->is_valid_range(base_addr, end_addr)))
				break;

			/* Drop to the next address. */
			end_addr = base_addr - 1;
		}

		if (base_addr < r[i].base)
			continue;

		if (end_addr != range_end_addr) {
			/* Add a new memrange since we split up one
			 * range crossing the 4GiB boundary or doing an
			 * ALIGN_DOWN on end_addr.
			 */
			r[i].size -= (range_end_addr - end_addr);
			if (mmu_add_memrange(mmu_ranges, end_addr,
					     range_end_addr - end_addr,
					     r[i].type) == NULL)
				mmu_error();
		}

		if (r[i].size == new->size) {
			r[i].type = new->type;
			return &r[i];
		}

		r[i].size -= new->size;

		r = mmu_add_memrange(mmu_ranges, base_addr, new->size,
				     new->type);

		if (r == NULL)
			mmu_error();

		return r;
	}

	/* Should never reach here if everything went fine */
	printf("ARM64 ERROR: No region allocated\n");
	return NULL;
}
/*
 * Func: mmu_alloc_range
 * Desc: Call get_new_range to get a new memrange which is unused, and mark it
 * as used to avoid the same range being allocated for different purposes.
 */
static struct mmu_memrange *mmu_alloc_range(struct mmu_ranges *mmu_ranges,
					    struct mmu_new_range_prop *p)
{
	struct mmu_memrange *r = mmu_get_new_range(mmu_ranges, p);

	if (r == NULL)
		return NULL;

	/*
	 * Mark this memrange as used memory. Important since the function
	 * can be called multiple times and we do not want to reuse a
	 * range already allocated.
	 */
	if (mmu_add_memrange(&usedmem_ranges, r->base, r->size, r->type)
	    == NULL)
		mmu_error();

	return r;
}
/*
 * Func: mmu_add_dma_range
 * Desc: Add a memrange for dma operations. This is special because we want to
 * initialize this memory as non-cacheable. We have a constraint that the DMA
 * buffer should be below 4GiB (32-bit only). So, we look up a TYPE_NORMAL_MEM
 * range from the available addresses and align it to the page size, i.e.
 * GRANULE_SIZE.
 */
static struct mmu_memrange *mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
{
	struct mmu_new_range_prop prop;

	prop.type = TYPE_DMA_MEM;
	/* DMA_DEFAULT_SIZE is multiple of GRANULE_SIZE */
	assert((DMA_DEFAULT_SIZE % GRANULE_SIZE) == 0);
	prop.size = DMA_DEFAULT_SIZE;
	prop.lim_excl = (uint64_t)CONFIG_LP_DMA_LIM_EXCL * MiB;
	prop.align = GRANULE_SIZE;
	prop.is_valid_range = NULL;
	prop.src_type = TYPE_NORMAL_MEM;

	return mmu_alloc_range(mmu_ranges, &prop);
}
static struct mmu_memrange *_mmu_add_fb_range(
		uint32_t size,
		struct mmu_ranges *mmu_ranges)
{
	struct mmu_new_range_prop prop;

	prop.type = TYPE_DMA_MEM;

	prop.size = size;
	prop.lim_excl = MIN_64_BIT_ADDR;
	prop.align = MB_SIZE;
	prop.is_valid_range = NULL;
	prop.src_type = TYPE_NORMAL_MEM;

	return mmu_alloc_range(mmu_ranges, &prop);
}
/*
 * Func: mmu_extract_ranges
 * Desc: Assumption is that coreboot tables have memranges in sorted
 * order. So, if there is an opportunity to combine ranges, we do that as
 * well. Memranges are initialized for both CB_MEM_RAM and CB_MEM_TABLE as
 * TYPE_NORMAL_MEM.
 */
static void mmu_extract_ranges(struct memrange *cb_ranges,
			       uint64_t ncb,
			       struct mmu_ranges *mmu_ranges)
{
	int i = 0;
	struct mmu_memrange *prev_range = NULL;

	/* Extract memory ranges to be mapped */
	for (; i < ncb; i++) {
		switch (cb_ranges[i].type) {
		case CB_MEM_RAM:
		case CB_MEM_TABLE:
			if (prev_range && (prev_range->base + prev_range->size
					   == cb_ranges[i].base)) {
				prev_range->size += cb_ranges[i].size;
			} else {
				prev_range = mmu_add_memrange(mmu_ranges,
							      cb_ranges[i].base,
							      cb_ranges[i].size,
							      TYPE_NORMAL_MEM);
				if (prev_range == NULL)
					mmu_error();
			}
			break;
		default:
			break;
		}
	}
}
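
/*
 * Example (illustrative): if the coreboot tables list RAM
 * [0x80000000, 0xC0000000) immediately followed by TABLE
 * [0xC0000000, 0xC0010000), the two are merged into one TYPE_NORMAL_MEM
 * entry [0x80000000, 0xC0010000), because the second range starts exactly
 * where the first one ends.
 */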
static void mmu_add_fb_range(struct mmu_ranges *mmu_ranges)
{
	struct mmu_memrange *fb_range;
	struct cb_framebuffer *framebuffer = &lib_sysinfo.framebuffer;
	uint32_t fb_size;

	/* Check whether framebuffer is needed */
	fb_size = framebuffer->bytes_per_line * framebuffer->y_resolution;
	if (!fb_size)
		return;

	/* make sure to allocate a size of multiple of GRANULE_SIZE */
	fb_size = ALIGN_UP(fb_size, GRANULE_SIZE);

	/* framebuffer address has been set already, so just add it as DMA */
	if (framebuffer->physical_address) {
		if (mmu_add_memrange(mmu_ranges,
				     framebuffer->physical_address,
				     fb_size,
				     TYPE_DMA_MEM) == NULL)
			mmu_error();
		return;
	}

	/* Allocate framebuffer */
	fb_range = _mmu_add_fb_range(fb_size, mmu_ranges);
	if (fb_range == NULL)
		mmu_error();

	framebuffer->physical_address = fb_range->base;
}
/*
 * Func: mmu_init_ranges_from_sysinfo
 * Desc: Initialize mmu_memranges based on the memranges obtained from coreboot
 * tables. Also initialize the dma memrange and xlat_addr for the ttb buffer.
 */
struct mmu_memrange *mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
						  uint64_t ncb,
						  struct mmu_ranges *mmu_ranges)
{
	struct mmu_memrange *dma_range;

	/* Initialize mmu_ranges to contain no entries. */
	mmu_ranges->used = 0;

	/* Extract ranges from memrange in lib_sysinfo */
	mmu_extract_ranges(cb_ranges, ncb, mmu_ranges);

	/* Get a range for dma */
	dma_range = mmu_add_dma_range(mmu_ranges);

	/* Get a range for framebuffer */
	mmu_add_fb_range(mmu_ranges);

	if (dma_range == NULL)
		mmu_error();

	return dma_range;
}
/*
 * Func: mmu_presysinfo_memory_used
 * Desc: Initializes all the memory used for presysinfo page table
 * initialization and enabling of the MMU. All these ranges are stored in
 * usedmem_ranges. usedmem_ranges also plays an important role in selecting
 * the dma buffer, since we check the dma buffer range against the used memory
 * ranges to prevent any overstepping.
 */
void mmu_presysinfo_memory_used(uint64_t base, uint64_t size)
{
	uint64_t range_base;

	range_base = ALIGN_DOWN(base, GRANULE_SIZE);

	size += (base - range_base);
	size = ALIGN_UP(size, GRANULE_SIZE);

	mmu_add_memrange(&usedmem_ranges, range_base, size, TYPE_NORMAL_MEM);
}
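
/*
 * Usage sketch (illustrative; cb_table_base/cb_table_size are hypothetical
 * names for the coreboot table location discovered by the caller): any
 * region touched before the sysinfo scan, such as the coreboot tables
 * themselves, should be registered here so that the later DMA/framebuffer
 * allocations avoid it:
 *
 *	mmu_presysinfo_memory_used(cb_table_base, cb_table_size);
 */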
void mmu_presysinfo_enable(void)
{
	mmu_init(&usedmem_ranges);
	mmu_enable();
}
const struct mmu_ranges *mmu_get_used_ranges(void)
{
	return &usedmem_ranges;
}