1 /* SPDX-License-Identifier: GPL-2.0-only */
4 #include <boot_device.h>
7 #include <commonlib/bsd/cbfs_private.h>
8 #include <commonlib/bsd/compression.h>
9 #include <console/console.h>
13 #include <metadata_hash.h>
14 #include <security/tpm/tspi/crtm.h>
15 #include <security/vboot/misc.h>
20 #include <timestamp.h>
/* Scratch memory pool used for CBFS decompression and cbfs_map() allocations.
   When the stage has a .data section it is backed by the CBFS_CACHE region from
   memlayout; otherwise it is an empty pool (allocations will simply fail).
   NOTE(review): the #else/#endif of this conditional are missing from this
   extraction — confirm against the original file. */
22 #if ENV_HAS_DATA_SECTION
23 struct mem_pool cbfs_cache
=
24 MEM_POOL_INIT(_cbfs_cache
, REGION_SIZE(cbfs_cache
), CONFIG_CBFS_CACHE_ALIGN
);
/* No data section available: zero-sized pool, mem_pool_alloc() returns NULL. */
26 struct mem_pool cbfs_cache
= MEM_POOL_INIT(NULL
, 0, 0);
/* CBMEM creation hook: once RAM is up, re-point cbfs_cache at the (usually
   larger) post-RAM cache region, unless pre-RAM and post-RAM caches already
   share the same backing memory. */
29 static void switch_to_postram_cache(int unused
)
31 if (_preram_cbfs_cache
!= _postram_cbfs_cache
)
32 mem_pool_init(&cbfs_cache
, _postram_cbfs_cache
, REGION_SIZE(postram_cbfs_cache
),
33 CONFIG_CBFS_CACHE_ALIGN
);
35 CBMEM_CREATION_HOOK(switch_to_postram_cache
);
/* Look up a file by name in the boot CBFS (RW unless force_ro), filling in
   *mdata with its metadata and chaining *rdev over its data region.
   Tries the mcache first; on CB_CBFS_CACHE_FULL falls back to walking flash
   directly (with TOCTOU-safety checks where configured). May recurse once to
   fall back from RW to RO CBFS when VBOOT_ENABLE_CBFS_FALLBACK is set.
   NOTE(review): this extraction is lossy — several original lines (error
   returns, braces) are missing from view. */
37 enum cb_err
_cbfs_boot_lookup(const char *name
, bool force_ro
,
38 union cbfs_mdata
*mdata
, struct region_device
*rdev
)
40 const struct cbfs_boot_device
*cbd
= cbfs_get_boot_device(force_ro
);
/* Default to CACHE_FULL so the flash-walk fallback below runs when no
   usable mcache exists for this boot device. */
45 enum cb_err err
= CB_CBFS_CACHE_FULL
;
46 if (!CONFIG(NO_CBFS_MCACHE
) && !ENV_SMM
&& cbd
->mcache_size
)
47 err
= cbfs_mcache_lookup(cbd
->mcache
, cbd
->mcache_size
,
48 name
, mdata
, &data_offset
);
/* mcache missing or overflowed: walk the CBFS on flash directly. */
49 if (err
== CB_CBFS_CACHE_FULL
) {
50 struct vb2_hash
*metadata_hash
= NULL
;
51 if (CONFIG(TOCTOU_SAFETY
)) {
52 if (ENV_SMM
) /* Cannot provide TOCTOU safety for SMM */
54 if (!cbd
->mcache_size
)
55 die("Cannot access CBFS TOCTOU-safely in " ENV_STRING
" before CBMEM init!\n");
56 /* We can only reach this for the RW CBFS -- an mcache overflow in the
57 RO CBFS would have been caught when building the mcache in cbfs_get
58 boot_device(). (Note that TOCTOU_SAFETY implies !NO_CBFS_MCACHE.) */
59 assert(cbd
== vboot_get_cbfs_boot_device());
60 die("TODO: set metadata_hash to RW metadata hash here.\n");
62 err
= cbfs_lookup(&cbd
->rdev
, name
, mdata
, &data_offset
, metadata_hash
);
/* Optional fallback: file absent in RW CBFS -> retry against RO CBFS. */
65 if (CONFIG(VBOOT_ENABLE_CBFS_FALLBACK
) && !force_ro
&& err
== CB_CBFS_NOT_FOUND
) {
66 printk(BIOS_INFO
, "CBFS: Fall back to RO region for %s\n", name
);
67 return _cbfs_boot_lookup(name
, true, mdata
, rdev
);
70 if (err
== CB_CBFS_NOT_FOUND
)
71 printk(BIOS_WARNING
, "CBFS: '%s' not found.\n", name
);
72 else if (err
== CB_CBFS_HASH_MISMATCH
)
73 printk(BIOS_ERR
, "CBFS ERROR: metadata hash mismatch!\n");
75 printk(BIOS_ERR
, "CBFS ERROR: error %d when looking up '%s'\n",
/* Chain the caller's rdev over just the file data (offset + big-endian
   length from the metadata header). */
80 if (rdev_chain(rdev
, &cbd
->rdev
, data_offset
, be32toh(mdata
->h
.len
)))
/* Release a mapping previously returned by cbfs_map()/rdev mmap paths. */
86 void cbfs_unmap(void *mapping
)
89 * This is safe to call with mappings that weren't allocated in the cache (e.g. x86
90 * direct mappings) -- mem_pool_free() just does nothing for addresses it doesn't
91 * recognize. This hardcodes the assumption that if platforms implement an rdev_mmap()
92 * that requires a free() for the boot_device, they need to implement it via the
93 * cbfs_cache mem_pool.
95 mem_pool_free(&cbfs_cache
, mapping
);
/* Whether the current stage is the one that loads FSP-S.
   NOTE(review): body lines are missing from this extraction. */
98 static inline bool fsps_env(void)
100 /* FSP-S is assumed to be loaded in ramstage. */
/* Whether the current stage is the one that loads FSP-M.
   NOTE(review): body lines are missing from this extraction. */
106 static inline bool fspm_env(void)
108 /* FSP-M is assumed to be loaded in romstage. */
/* Whether LZ4 decompression support should be linked into this stage.
   Gated per-stage so unused decompressors don't bloat pre-RAM stages. */
114 static inline bool cbfs_lz4_enabled(void)
116 if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZ4
))
118 if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZ4
))
/* Pre-RAM stages only need LZ4 when COMPRESS_PRERAM_STAGES is enabled. */
121 if ((ENV_BOOTBLOCK
|| ENV_SEPARATE_VERSTAGE
) && !CONFIG(COMPRESS_PRERAM_STAGES
))
/* Whether LZMA decompression support should be linked into this stage.
   Mirrors cbfs_lz4_enabled() but for the LZMA-compressed payload/stage paths. */
130 static inline bool cbfs_lzma_enabled(void)
132 if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZMA
))
134 if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZMA
))
136 /* We assume here romstage and postcar are never compressed. */
137 if (ENV_BOOTBLOCK
|| ENV_SEPARATE_VERSTAGE
)
139 if (ENV_ROMSTAGE
&& CONFIG(POSTCAR_STAGE
))
141 if ((ENV_ROMSTAGE
|| ENV_POSTCAR
) && !CONFIG(COMPRESS_RAMSTAGE
))
/* Verify the file-body hash of a fully loaded/mapped CBFS file and measure it
   into the TPM log. Returns true on a verification mismatch (measurement
   failures do NOT fail the boot). `buffer`/`size` must cover the whole file
   contents that the hash in `mdata` was computed over. */
148 static bool cbfs_file_hash_mismatch(const void *buffer
, size_t size
,
149 const union cbfs_mdata
*mdata
, bool skip_verification
)
151 /* Avoid linking hash functions when verification and measurement are disabled. */
152 if (!CONFIG(CBFS_VERIFICATION
) && !CONFIG(TPM_MEASURED_BOOT
))
155 const struct vb2_hash
*hash
= NULL
;
157 if (CONFIG(CBFS_VERIFICATION
) && !skip_verification
) {
158 hash
= cbfs_file_hash(mdata
);
/* Verification enabled but file carries no hash attribute: treat as error. */
160 ERROR("'%s' does not have a file hash!\n", mdata
->h
.filename
);
163 if (vb2_hash_verify(vboot_hwcrypto_allowed(), buffer
, size
, hash
)) {
164 ERROR("'%s' file hash mismatch!\n", mdata
->h
.filename
);
/* Measured boot: extend the file's hash into the TPM event log (not in SMM). */
169 if (CONFIG(TPM_MEASURED_BOOT
) && !ENV_SMM
) {
170 struct vb2_hash calculated_hash
;
172 /* No need to re-hash file if we already have it from verification. */
173 if (!hash
|| hash
->algo
!= TPM_MEASURE_ALGO
) {
174 if (vb2_hash_calculate(vboot_hwcrypto_allowed(), buffer
, size
,
175 TPM_MEASURE_ALGO
, &calculated_hash
))
178 hash
= &calculated_hash
;
182 tspi_cbfs_measurement(mdata
->h
.filename
, be32toh(mdata
->h
.type
), hash
))
183 ERROR("failed to measure '%s' into TCPA log\n", mdata
->h
.filename
);
184 /* We intentionally continue to boot on measurement errors. */
/* Read a CBFS file's data from `rdev` into `buffer`, decompressing per
   `compression`, verifying/measuring the (compressed) source bytes via
   cbfs_file_hash_mismatch(). Returns the number of bytes written to `buffer`
   (0 on failure — NOTE(review): the error-return lines are missing from this
   extraction, so that is inferred; confirm against the original). */
190 static size_t cbfs_load_and_decompress(const struct region_device
*rdev
, void *buffer
,
191 size_t buffer_size
, uint32_t compression
,
192 const union cbfs_mdata
*mdata
, bool skip_verification
)
194 size_t in_size
= region_device_sz(rdev
);
198 DEBUG("Decompressing %zu bytes from '%s' to %p with algo %d\n",
199 in_size
, mdata
->h
.filename
, buffer
, compression
);
201 switch (compression
) {
202 case CBFS_COMPRESS_NONE
:
/* Uncompressed: straight bounded copy, then verify what was read. */
203 if (buffer_size
< in_size
)
205 if (rdev_readat(rdev
, buffer
, 0, in_size
) != in_size
)
207 if (cbfs_file_hash_mismatch(buffer
, in_size
, mdata
, skip_verification
))
211 case CBFS_COMPRESS_LZ4
:
212 if (!cbfs_lz4_enabled())
215 /* cbfs_prog_stage_load() takes care of in-place LZ4 decompression by
216 setting up the rdev to be in memory. */
217 map
= rdev_mmap_full(rdev
);
/* Verify the compressed image before decompressing it. */
221 if (!cbfs_file_hash_mismatch(map
, in_size
, mdata
, skip_verification
)) {
222 timestamp_add_now(TS_ULZ4F_START
);
223 out_size
= ulz4fn(map
, in_size
, buffer
, buffer_size
);
224 timestamp_add_now(TS_ULZ4F_END
);
227 rdev_munmap(rdev
, map
);
231 case CBFS_COMPRESS_LZMA
:
232 if (!cbfs_lzma_enabled())
234 map
= rdev_mmap_full(rdev
);
238 if (!cbfs_file_hash_mismatch(map
, in_size
, mdata
, skip_verification
)) {
239 /* Note: timestamp not useful for memory-mapped media (x86) */
240 timestamp_add_now(TS_ULZMA_START
);
241 out_size
= ulzman(map
, in_size
, buffer
, buffer_size
);
242 timestamp_add_now(TS_ULZMA_END
);
245 rdev_munmap(rdev
, map
);
/* Per-file bookkeeping for an in-flight asynchronous CBFS preload: the source
   rdev, the worker-thread handle, and linkage into the global preload list.
   A flexible buffer/name tail presumably follows — NOTE(review): trailing
   members are missing from this extraction (context->buffer and
   context->name are referenced elsewhere in this file). */
254 struct cbfs_preload_context
{
255 struct region_device rdev
;
256 struct thread_handle handle
;
257 struct list_node list_node
;
/* All currently outstanding preload contexts. */
262 static struct list_node cbfs_preload_context_list
;
/* Allocate a zeroed preload context from the cbfs_cache pool, with
   `additional` extra bytes for the trailing file-name storage.
   NOTE(review): the NULL-check/return lines are missing from this
   extraction. */
264 static struct cbfs_preload_context
*alloc_cbfs_preload_context(size_t additional
)
266 struct cbfs_preload_context
*context
;
267 size_t size
= sizeof(*context
) + additional
;
269 context
= mem_pool_alloc(&cbfs_cache
, size
);
274 memset(context
, 0, size
);
/* Register a context on the global list of outstanding preloads. */
279 static void append_cbfs_preload_context(struct cbfs_preload_context
*context
)
281 list_append(&context
->list_node
, &cbfs_preload_context_list
);
/* Unlink a context from the preload list and return it to the cbfs_cache
   pool. (The separately allocated context->buffer is freed by callers.) */
284 static void free_cbfs_preload_context(struct cbfs_preload_context
*context
)
286 list_remove(&context
->list_node
);
288 mem_pool_free(&cbfs_cache
, context
);
/* Worker-thread entry point: read the whole preloaded file's data from its
   rdev into the context's buffer. `arg` is the cbfs_preload_context. */
291 static enum cb_err
cbfs_preload_thread_entry(void *arg
)
293 struct cbfs_preload_context
*context
= arg
;
295 if (rdev_read_full(&context
->rdev
, context
->buffer
) < 0) {
296 ERROR("%s(name='%s') readat failed\n", __func__
, context
->name
);
/* Kick off an asynchronous read of CBFS file `name` into a cbfs_cache buffer
   so a later _cbfs_alloc() for the same name can consume it without waiting
   on flash I/O. Best-effort: any failure just falls back to the normal
   synchronous path. NOTE(review): several early-return lines are missing
   from this extraction. */
303 void cbfs_preload(const char *name
)
305 struct region_device rdev
;
306 union cbfs_mdata mdata
;
307 struct cbfs_preload_context
*context
;
308 bool force_ro
= false;
311 if (!CONFIG(CBFS_PRELOAD
))
314 /* We don't want to cross the vboot boundary */
315 if (ENV_ROMSTAGE
&& CONFIG(VBOOT_STARTS_IN_ROMSTAGE
))
318 DEBUG("%s(name='%s')\n", __func__
, name
);
320 if (_cbfs_boot_lookup(name
, force_ro
, &mdata
, &rdev
))
323 size
= region_device_sz(&rdev
);
/* +1 for the trailing NUL of the copied file name. */
325 context
= alloc_cbfs_preload_context(strlen(name
) + 1);
327 ERROR("%s(name='%s') failed to allocate preload context\n", __func__
, name
);
331 context
->buffer
= mem_pool_alloc(&cbfs_cache
, size
);
332 if (context
->buffer
== NULL
) {
333 ERROR("%s(name='%s') failed to allocate %zu bytes for preload buffer\n",
334 __func__
, name
, size
);
338 context
->rdev
= rdev
;
/* Safe: context was allocated with room for strlen(name) + 1. */
339 strcpy(context
->name
, name
);
341 append_cbfs_preload_context(context
);
343 if (thread_run(&context
->handle
, cbfs_preload_thread_entry
, context
) == 0)
/* Thread failed to start: undo the buffer and context allocations. */
346 ERROR("%s(name='%s') failed to start preload thread\n", __func__
, name
);
347 mem_pool_free(&cbfs_cache
, context
->buffer
);
350 free_cbfs_preload_context(context
);
/* Find an outstanding preload context by file name, or (presumably) NULL if
   none exists — NOTE(review): the return lines are missing from this
   extraction. */
353 static struct cbfs_preload_context
*find_cbfs_preload_context(const char *name
)
355 struct cbfs_preload_context
*context
;
357 list_for_each(context
, cbfs_preload_context_list
, list_node
) {
358 if (strcmp(context
->name
, name
) == 0)
/* If `name` was previously preloaded, wait for the preload thread to finish
   and rechain *rdev over the in-memory buffer. Returns CB_SUCCESS only when
   the caller should consume the preloaded copy; the context is freed before
   returning either way once found. */
365 static enum cb_err
get_preload_rdev(struct region_device
*rdev
, const char *name
)
368 struct cbfs_preload_context
*context
;
370 if (!CONFIG(CBFS_PRELOAD
) || !ENV_SUPPORTS_COOP
)
373 context
= find_cbfs_preload_context(name
);
/* Block until the background read has completed (or failed). */
377 err
= thread_join(&context
->handle
);
378 if (err
!= CB_SUCCESS
) {
379 ERROR("%s(name='%s') Preload thread failed: %u\n", __func__
, name
, err
);
/* Point the caller's rdev at the preloaded in-memory copy. */
384 if (rdev_chain_mem(rdev
, context
->buffer
, region_device_sz(&context
->rdev
)) != 0) {
385 ERROR("%s(name='%s') chaining failed\n", __func__
, name
);
393 DEBUG("%s(name='%s') preload successful\n", __func__
, name
);
396 free_cbfs_preload_context(context
);
/* Common back end for _cbfs_alloc()/_cbfs_unverified_area_alloc(): determine
   the (decompressed) size from the compression attribute, obtain a
   destination buffer (caller's allocator, a direct mmap for uncompressed
   maps, or the cbfs_cache pool), then load/verify/decompress into it.
   Returns the buffer, with *size_out updated — NOTE(review): the final
   return/size_out lines are missing from this extraction. */
401 static void *do_alloc(union cbfs_mdata
*mdata
, struct region_device
*rdev
,
402 cbfs_allocator_t allocator
, void *arg
, size_t *size_out
,
403 bool skip_verification
)
405 size_t size
= region_device_sz(rdev
);
408 uint32_t compression
= CBFS_COMPRESS_NONE
;
409 const struct cbfs_file_attr_compression
*cattr
= cbfs_find_attr(mdata
,
410 CBFS_FILE_ATTR_TAG_COMPRESSION
, sizeof(*cattr
));
/* Attribute fields are stored big-endian in the CBFS metadata. */
412 compression
= be32toh(cattr
->compression
);
413 size
= be32toh(cattr
->decompressed_size
);
419 /* allocator == NULL means do a cbfs_map() */
421 loc
= allocator(arg
, size
, mdata
);
422 } else if (compression
== CBFS_COMPRESS_NONE
) {
/* Uncompressed map: hand back the boot device mapping directly. */
423 void *mapping
= rdev_mmap_full(rdev
);
426 if (cbfs_file_hash_mismatch(mapping
, size
, mdata
, skip_verification
)) {
427 rdev_munmap(rdev
, mapping
);
431 } else if (!cbfs_cache
.size
) {
432 /* In order to use the cbfs_cache you need to add a CBFS_CACHE to your
433 * memlayout. For stages that don't have .data sections (x86 pre-RAM),
434 * it is not possible to add a CBFS_CACHE. */
435 ERROR("Cannot map compressed file %s without cbfs_cache\n", mdata
->h
.filename
);
438 loc
= mem_pool_alloc(&cbfs_cache
, size
);
442 ERROR("'%s' allocation failure\n", mdata
->h
.filename
);
446 size
= cbfs_load_and_decompress(rdev
, loc
, size
, compression
, mdata
, skip_verification
);
/* Public entry point behind cbfs_alloc()/cbfs_map()-style accessors: look up
   `name`, enforce the expected file `type` (CBFS_TYPE_QUERY accepts any and
   reports the actual type back), prefer preloaded content when available,
   and delegate to do_alloc(). Returns the loaded buffer or (presumably)
   NULL on failure — NOTE(review): error-return lines are missing from this
   extraction. */
453 void *_cbfs_alloc(const char *name
, cbfs_allocator_t allocator
, void *arg
,
454 size_t *size_out
, bool force_ro
, enum cbfs_type
*type
)
456 struct region_device rdev
;
457 bool preload_successful
= false;
458 union cbfs_mdata mdata
;
460 DEBUG("%s(name='%s', alloc=%p(%p), force_ro=%s, type=%d)\n", __func__
, name
, allocator
,
461 arg
, force_ro
? "true" : "false", type
? *type
: -1);
463 if (_cbfs_boot_lookup(name
, force_ro
, &mdata
, &rdev
))
467 const enum cbfs_type real_type
= be32toh(mdata
.h
.type
);
468 if (*type
== CBFS_TYPE_QUERY
)
470 else if (*type
!= real_type
) {
471 ERROR("'%s' type mismatch (is %u, expected %u)\n",
472 mdata
.h
.filename
, real_type
, *type
);
477 /* Update the rdev with the preload content */
478 if (!force_ro
&& get_preload_rdev(&rdev
, name
) == CB_SUCCESS
)
479 preload_successful
= true;
481 void *ret
= do_alloc(&mdata
, &rdev
, allocator
, arg
, size_out
, false);
483 /* When using cbfs_preload we need to free the preload buffer after populating the
484 * destination buffer. We know we must have a mem_rdev here, so extra mmap is fine. */
485 if (preload_successful
)
486 cbfs_unmap(rdev_mmap_full(&rdev
));
/* Like _cbfs_alloc() but for a file inside an arbitrary FMAP `area` rather
   than the boot CBFS, and with file-hash verification skipped (hence
   "unverified"). Looks up the area, finds the file, chains an rdev over its
   data, and delegates to do_alloc(). */
491 void *_cbfs_unverified_area_alloc(const char *area
, const char *name
,
492 cbfs_allocator_t allocator
, void *arg
, size_t *size_out
)
494 struct region_device area_rdev
, file_rdev
;
495 union cbfs_mdata mdata
;
498 DEBUG("%s(area='%s', name='%s', alloc=%p(%p))\n", __func__
, area
, name
, allocator
, arg
);
500 if (fmap_locate_area_as_rdev(area
, &area_rdev
))
/* NULL metadata_hash: this path provides no CBFS verification. */
503 if (cbfs_lookup(&area_rdev
, name
, &mdata
, &data_offset
, NULL
)) {
504 ERROR("'%s' not found in '%s'\n", name
, area
);
508 if (rdev_chain(&file_rdev
, &area_rdev
, data_offset
, be32toh(mdata
.h
.len
)))
/* skip_verification=true: caller accepts unverified contents. */
511 return do_alloc(&mdata
, &file_rdev
, allocator
, arg
, size_out
, true);
/* cbfs_allocator_t that hands out the caller-provided fixed buffer wrapped
   in a _cbfs_default_allocator_arg, rejecting files larger than buf_size.
   NOTE(review): the return lines are missing from this extraction. */
514 void *_cbfs_default_allocator(void *arg
, size_t size
, const union cbfs_mdata
*unused
)
516 struct _cbfs_default_allocator_arg
*darg
= arg
;
517 if (size
> darg
->buf_size
)
/* cbfs_allocator_t that allocates the destination as a CBMEM entry; `arg`
   carries the CBMEM id (smuggled through the void pointer). */
522 void *_cbfs_cbmem_allocator(void *arg
, size_t size
, const union cbfs_mdata
*unused
)
524 return cbmem_add((uintptr_t)arg
, size
);
/* Load a stage program from CBFS into `pstage`: resolve the file, read its
   compression and stageheader attributes, set the load area and entry point,
   then either execute in place (XIP early stages on memory-mapped media) or
   load+decompress into the destination and zero the remainder.
   NOTE(review): several error-return lines are missing from this
   extraction. */
527 enum cb_err
cbfs_prog_stage_load(struct prog
*pstage
)
529 union cbfs_mdata mdata
;
530 struct region_device rdev
;
533 prog_locate_hook(pstage
);
535 if ((err
= _cbfs_boot_lookup(prog_name(pstage
), false, &mdata
, &rdev
)))
538 assert(be32toh(mdata
.h
.type
) == CBFS_TYPE_STAGE
);
539 pstage
->cbfs_type
= CBFS_TYPE_STAGE
;
541 enum cbfs_compression compression
= CBFS_COMPRESS_NONE
;
542 const struct cbfs_file_attr_compression
*cattr
= cbfs_find_attr(&mdata
,
543 CBFS_FILE_ATTR_TAG_COMPRESSION
, sizeof(*cattr
));
545 compression
= be32toh(cattr
->compression
);
547 const struct cbfs_file_attr_stageheader
*sattr
= cbfs_find_attr(&mdata
,
548 CBFS_FILE_ATTR_TAG_STAGEHEADER
, sizeof(*sattr
));
/* Stageheader fields are big-endian: 64-bit load address, 32-bit sizes. */
551 prog_set_area(pstage
, (void *)(uintptr_t)be64toh(sattr
->loadaddr
),
552 be32toh(sattr
->memlen
));
553 prog_set_entry(pstage
, prog_start(pstage
) +
554 be32toh(sattr
->entry_offset
), NULL
);
556 /* Hacky way to not load programs over read only media. The stages
557 * that would hit this path initialize themselves. */
558 if ((ENV_BOOTBLOCK
|| ENV_SEPARATE_VERSTAGE
) &&
559 !CONFIG(NO_XIP_EARLY_STAGES
) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED
)) {
560 void *mapping
= rdev_mmap_full(&rdev
);
561 rdev_munmap(&rdev
, mapping
);
562 if (cbfs_file_hash_mismatch(mapping
, region_device_sz(&rdev
), &mdata
, false))
563 return CB_CBFS_HASH_MISMATCH
;
/* Mapping already sits at the stage's link address: execute in place. */
564 if (mapping
== prog_start(pstage
))
568 /* LZ4 stages can be decompressed in-place to save mapping scratch space. Load the
569 compressed data to the end of the buffer and point &rdev to that memory location. */
570 if (cbfs_lz4_enabled() && compression
== CBFS_COMPRESS_LZ4
) {
571 size_t in_size
= region_device_sz(&rdev
);
572 void *compr_start
= prog_start(pstage
) + prog_size(pstage
) - in_size
;
573 if (rdev_readat(&rdev
, compr_start
, 0, in_size
) != in_size
)
575 rdev_chain_mem(&rdev
, compr_start
, in_size
);
578 size_t fsize
= cbfs_load_and_decompress(&rdev
, prog_start(pstage
), prog_size(pstage
),
579 compression
, &mdata
, false);
583 /* Clear area not covered by file. */
584 memset(prog_start(pstage
) + fsize
, 0, prog_size(pstage
) - fsize
);
/* Flush caches / notify platform that executable code was written. */
586 prog_segment_loaded((uintptr_t)prog_start(pstage
), prog_size(pstage
),
/* Locate the metadata cache (mcache) for a boot device: prefer the migrated
   copy in CBMEM (identified by `id`); before RAM/CBMEM, carve the static
   _cbfs_mcache region into an RO part and an RW part (RW percentage from
   Kconfig), aligned down to CBFS_MCACHE_ALIGNMENT. No-op if mcache is
   disabled, in SMM, or already found. */
592 void cbfs_boot_device_find_mcache(struct cbfs_boot_device
*cbd
, uint32_t id
)
594 if (CONFIG(NO_CBFS_MCACHE
) || ENV_SMM
)
597 if (cbd
->mcache_size
)
600 const struct cbmem_entry
*entry
;
601 if (cbmem_possibly_online() &&
602 (entry
= cbmem_entry_find(id
))) {
603 cbd
->mcache
= cbmem_entry_start(entry
);
604 cbd
->mcache_size
= cbmem_entry_size(entry
);
605 } else if (ENV_ROMSTAGE_OR_BEFORE
) {
/* Boundary between the RO part (front) and RW part (tail) of the
   static mcache region. */
606 u8
*boundary
= _ecbfs_mcache
- REGION_SIZE(cbfs_mcache
) *
607 CONFIG_CBFS_MCACHE_RW_PERCENTAGE
/ 100;
608 boundary
= (u8
*)ALIGN_DOWN((uintptr_t)boundary
, CBFS_MCACHE_ALIGNMENT
);
609 if (id
== CBMEM_ID_CBFS_RO_MCACHE
) {
610 cbd
->mcache
= _cbfs_mcache
;
611 cbd
->mcache_size
= boundary
- _cbfs_mcache
;
612 } else if (id
== CBMEM_ID_CBFS_RW_MCACHE
) {
613 cbd
->mcache
= boundary
;
614 cbd
->mcache_size
= _ecbfs_mcache
- boundary
;
/* Initialize a CBFS boot device: build its mcache if one is configured
   (which also verifies mdata_hash), otherwise — when verification is
   enabled and a hash was supplied — walk the CBFS once purely to check the
   metadata hash. */
619 enum cb_err
cbfs_init_boot_device(const struct cbfs_boot_device
*cbd
,
620 struct vb2_hash
*mdata_hash
)
622 /* If we have an mcache, mcache_build() will also check mdata hash. */
623 if (!CONFIG(NO_CBFS_MCACHE
) && !ENV_SMM
&& cbd
->mcache_size
> 0)
624 return cbfs_mcache_build(&cbd
->rdev
, cbd
->mcache
, cbd
->mcache_size
, mdata_hash
);
626 /* No mcache and no verification means we have nothing special to do. */
627 if (!CONFIG(CBFS_VERIFICATION
) || !mdata_hash
)
630 /* Verification only: use cbfs_walk() without a walker() function to just run through
631 the CBFS once, will return NOT_FOUND by default. */
632 enum cb_err err
= cbfs_walk(&cbd
->rdev
, NULL
, NULL
, mdata_hash
, 0);
/* NOT_FOUND here is the expected "walked everything, hash OK" outcome. */
633 if (err
== CB_CBFS_NOT_FOUND
)
/* Return the active CBFS boot device: the vboot-selected RW device when
   available and not force_ro, otherwise the (lazily initialized, statically
   cached) RO "COREBOOT" FMAP region. In the initial stage this also builds
   the RO mcache and verifies the RO metadata hash, dying on any
   verification failure. */
638 const struct cbfs_boot_device
*cbfs_get_boot_device(bool force_ro
)
640 static struct cbfs_boot_device ro
;
642 /* Ensure we always init RO mcache, even if the first file is from the RW CBFS.
643 Otherwise it may not be available when needed in later stages. */
644 if (ENV_INITIAL_STAGE
&& !force_ro
&& !region_device_sz(&ro
.rdev
))
645 cbfs_get_boot_device(true);
648 const struct cbfs_boot_device
*rw
= vboot_get_cbfs_boot_device();
649 /* This will return NULL if vboot isn't enabled, didn't run yet or decided to
650 boot into recovery mode. */
655 /* In rare cases post-RAM stages may run this before cbmem_initialize(), so we can't
656 lock in the result of find_mcache() on the first try and should keep trying every
657 time until an mcache is found. */
658 cbfs_boot_device_find_mcache(&ro
, CBMEM_ID_CBFS_RO_MCACHE
);
/* Already located on an earlier call: reuse the cached rdev. */
660 if (region_device_sz(&ro
.rdev
))
663 if (fmap_locate_area_as_rdev("COREBOOT", &ro
.rdev
))
664 die("Cannot locate primary CBFS");
666 if (ENV_INITIAL_STAGE
) {
667 enum cb_err err
= cbfs_init_boot_device(&ro
, metadata_hash_get());
668 if (err
== CB_CBFS_HASH_MISMATCH
)
669 die("RO CBFS metadata hash verification failure");
670 else if (CONFIG(TOCTOU_SAFETY
) && err
== CB_CBFS_CACHE_FULL
)
671 die("RO mcache overflow breaks TOCTOU safety!\n");
672 else if (err
&& err
!= CB_CBFS_CACHE_FULL
)
673 die("RO CBFS initialization error: %d", err
);
/* Migrate one boot device's mcache from its pre-RAM static region into a
   right-sized CBMEM entry so later stages can find it via cbmem_entry_find().
   NOTE(review): the early-return on allocation failure and a NULL-cbd guard
   appear to be among the lines missing from this extraction. */
679 #if !CONFIG(NO_CBFS_MCACHE)
680 static void mcache_to_cbmem(const struct cbfs_boot_device
*cbd
, u32 cbmem_id
)
/* Copy only the used portion of the mcache, not the whole static region. */
685 size_t real_size
= cbfs_mcache_real_size(cbd
->mcache
, cbd
->mcache_size
);
686 void *cbmem_mcache
= cbmem_add(cbmem_id
, real_size
);
688 printk(BIOS_ERR
, "Cannot allocate CBMEM mcache %#x (%#zx bytes)!\n",
689 cbmem_id
, real_size
);
692 memcpy(cbmem_mcache
, cbd
->mcache
, real_size
);
/* CBMEM creation hook: migrate both the RW (vboot) and RO mcaches into
   CBMEM once RAM and CBMEM are available. */
695 static void cbfs_mcache_migrate(int unused
)
697 mcache_to_cbmem(vboot_get_cbfs_boot_device(), CBMEM_ID_CBFS_RW_MCACHE
);
698 mcache_to_cbmem(cbfs_get_boot_device(true), CBMEM_ID_CBFS_RO_MCACHE
);
700 CBMEM_CREATION_HOOK(cbfs_mcache_migrate
);