/* SPDX-License-Identifier: GPL-2.0-only */

#include <string.h>
#include <boot_device.h>
#include <bootstate.h>
#include <bootmode.h>
#include <console/console.h>
#include <cbmem.h>
#include <elog.h>
#include <fmap.h>
#include <region_file.h>
#include <security/vboot/antirollback.h>
#include <security/vboot/mrc_cache_hash_tpm.h>
#include <security/vboot/vboot_common.h>
#include <spi_flash.h>
#include <xxhash.h>

#include "mrc_cache.h"

#define DEFAULT_MRC_CACHE	"RW_MRC_CACHE"
#define VARIABLE_MRC_CACHE	"RW_VAR_MRC_CACHE"
#define RECOVERY_MRC_CACHE	"RECOVERY_MRC_CACHE"
#define UNIFIED_MRC_CACHE	"UNIFIED_MRC_CACHE"

/* Signature "MRCD" was used for older header format before CB:67670. */
#define MRC_DATA_SIGNATURE (('M'<<0)|('R'<<8)|('C'<<16)|('d'<<24))

static const uint32_t mrc_invalid_sig = ~MRC_DATA_SIGNATURE;

struct mrc_metadata {
	uint32_t signature;
	uint32_t data_size;
	uint32_t data_hash;
	uint32_t header_hash;
	uint32_t version;
} __packed;

enum result {
	UPDATE_FAILURE		= -1,
	UPDATE_SUCCESS		= 0,
	ALREADY_UPTODATE	= 1
};

#define NORMAL_FLAG (1 << 0)
#define RECOVERY_FLAG (1 << 1)

struct cache_region {
	const char *name;
	uint32_t cbmem_id;
	int type;
	int elog_slot;
	uint32_t tpm_hash_index;
	int flags;
};

static const struct cache_region recovery_training = {
	.name = RECOVERY_MRC_CACHE,
	.cbmem_id = CBMEM_ID_MRCDATA,
	.type = MRC_TRAINING_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_RECOVERY,
	.tpm_hash_index = MRC_REC_HASH_NV_INDEX,
#if CONFIG(HAS_RECOVERY_MRC_CACHE)
	.flags = RECOVERY_FLAG,
#else
	.flags = 0,
#endif
};

static const struct cache_region normal_training = {
	.name = DEFAULT_MRC_CACHE,
	.cbmem_id = CBMEM_ID_MRCDATA,
	.type = MRC_TRAINING_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_NORMAL,
	.tpm_hash_index = MRC_RW_HASH_NV_INDEX,
#if CONFIG(VBOOT_STARTS_IN_ROMSTAGE)
	/*
	 * If VBOOT_STARTS_IN_ROMSTAGE is selected, this means that
	 * memory training happens before vboot (in RO) and the
	 * mrc_cache data is always safe to use.
	 */
	.flags = NORMAL_FLAG | RECOVERY_FLAG,
#else
	/*
	 * If !VBOOT_STARTS_IN_ROMSTAGE, this means that memory training happens after
	 * vboot (in RW code) and is never safe to use in recovery.
	 */
	.flags = NORMAL_FLAG,
#endif
};

static const struct cache_region variable_data = {
	.name = VARIABLE_MRC_CACHE,
	.cbmem_id = CBMEM_ID_VAR_MRCDATA,
	.type = MRC_VARIABLE_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_VARIABLE,
	.tpm_hash_index = 0,
#if CONFIG(VBOOT_STARTS_IN_ROMSTAGE)
	/*
	 * If VBOOT_STARTS_IN_ROMSTAGE is selected, this means that
	 * memory training happens before vboot (in RO) and the
	 * mrc_cache data is always safe to use.
	 */
	.flags = NORMAL_FLAG | RECOVERY_FLAG,
#else
	/*
	 * If !VBOOT_STARTS_IN_ROMSTAGE, this means that memory training happens after
	 * vboot (in RW code) and is never safe to use in recovery.
	 */
	.flags = NORMAL_FLAG,
#endif
};

/* Order matters here for priority in matching. */
static const struct cache_region *cache_regions[] = {
	&recovery_training,
	&normal_training,
	&variable_data,
};

/* TPM MRC hash functionality depends on vboot starting before memory init. */
_Static_assert(!CONFIG(MRC_SAVE_HASH_IN_TPM) ||
	       CONFIG(VBOOT_STARTS_IN_BOOTBLOCK),
	       "for TPM MRC hash functionality, vboot must start in bootblock");

static int lookup_region_by_name(const char *name, struct region *r)
{
	if (fmap_locate_area(name, r) == 0)
		return 0;
	return -1;
}

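/*
 * Return the highest-priority cache_region of the requested type whose flags
 * allow use in the current boot mode (RECOVERY_FLAG when vboot is in recovery
 * mode, NORMAL_FLAG otherwise), or NULL if no region qualifies.
 */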
static const struct cache_region *lookup_region_type(int type)
{
	int i;
	int flags;

	if (CONFIG(VBOOT_STARTS_IN_BOOTBLOCK) && vboot_recovery_mode_enabled())
		flags = RECOVERY_FLAG;
	else
		flags = NORMAL_FLAG;

	for (i = 0; i < ARRAY_SIZE(cache_regions); i++) {
		if (cache_regions[i]->type != type)
			continue;
		if ((cache_regions[i]->flags & flags) == flags)
			return cache_regions[i];
	}

	return NULL;
}

static const struct cache_region *lookup_region(struct region *r, int type)
{
	const struct cache_region *cr;

	cr = lookup_region_type(type);

	if (cr == NULL) {
		/* There will be no recovery MRC cache region if (!HAS_RECOVERY_MRC_CACHE &&
		   !VBOOT_STARTS_IN_ROMSTAGE). */
		printk(BIOS_DEBUG, "MRC: failed to locate region type %d\n", type);
		return NULL;
	}

	if (lookup_region_by_name(cr->name, r) < 0)
		return NULL;

	return cr;
}

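/*
 * Validate the stored metadata header: check the signature and the xxh32
 * header hash, then re-chain the region device so it covers exactly the
 * metadata plus its data payload.
 */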
static int mrc_header_valid(struct region_device *rdev, struct mrc_metadata *md)
{
	uint32_t hash;
	uint32_t hash_result;
	size_t size;

	if (rdev_readat(rdev, md, 0, sizeof(*md)) < 0) {
		/* When the metadata was invalidated intentionally (for example from the
		   previous recovery boot), print a warning instead of an error. */
		if (rdev_readat(rdev, md, 0, sizeof(mrc_invalid_sig)) >= 0 &&
		    md->signature == mrc_invalid_sig) {
			printk(BIOS_INFO, "MRC: metadata was invalidated\n");
			return -1;
		}

		printk(BIOS_ERR, "MRC: couldn't read metadata\n");
		return -1;
	}

	if (md->signature != MRC_DATA_SIGNATURE) {
		printk(BIOS_ERR, "MRC: invalid header signature\n");
		return -1;
	}

	/* Compute hash over header with 0 as the value. */
	hash = md->header_hash;
	md->header_hash = 0;
	hash_result = xxh32(md, sizeof(*md), 0);

	if (hash != hash_result) {
		printk(BIOS_ERR, "MRC: header hash mismatch: %x vs %x\n",
		       hash_result, hash);
		return -1;
	}

	/* Put back original. */
	md->header_hash = hash;

	/* Re-size the region device according to the metadata as a region_file
	 * does block allocation. */
	size = sizeof(*md) + md->data_size;
	if (rdev_chain(rdev, rdev, 0, size) < 0) {
		printk(BIOS_ERR, "MRC: size exceeds rdev size: %zx vs %zx\n",
		       size, region_device_sz(rdev));
		return -1;
	}

	return 0;
}

static int mrc_data_valid(int type, const struct mrc_metadata *md,
			  void *data, size_t data_size)
{
	uint32_t hash;
	const struct cache_region *cr = lookup_region_type(type);
	uint32_t hash_idx;

	if (cr == NULL)
		return -1;

	if (md->data_size != data_size)
		return -1;

	hash_idx = cr->tpm_hash_index;
	if (hash_idx && CONFIG(MRC_SAVE_HASH_IN_TPM)) {
		if (!mrc_cache_verify_hash(hash_idx, data, data_size))
			return -1;
	} else {
		hash = xxh32(data, data_size, 0);

		if (md->data_hash != hash) {
			printk(BIOS_ERR, "MRC: data hash mismatch: %x vs %x\n",
			       md->data_hash, hash);
			return -1;
		}
	}

	return 0;
}

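/*
 * Locate the latest slot in the backing region_file and chain 'rdev' over it.
 * When fail_bad_data is false, missing or invalid data is not treated as an
 * error so that a subsequent update can simply overwrite the slot.
 */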
static int mrc_cache_get_latest_slot_info(const char *name,
					  const struct region_device *backing_rdev,
					  struct mrc_metadata *md,
					  struct region_file *cache_file,
					  struct region_device *rdev,
					  bool fail_bad_data)
{
	/* Init and obtain a handle to the file data. */
	if (region_file_init(cache_file, backing_rdev) < 0) {
		printk(BIOS_ERR, "MRC: region file invalid in '%s'\n", name);
		return -1;
	}

	/* Provide a 0 sized region_device from here on out so the caller
	 * has a valid yet unusable region_device. */
	rdev_chain(rdev, backing_rdev, 0, 0);

	/* No data to return. */
	if (region_file_data(cache_file, rdev) < 0) {
		printk(BIOS_NOTICE, "MRC: no data in '%s'\n", name);
		return fail_bad_data ? -1 : 0;
	}

	/* Validate header and resize region to reflect actual usage on the
	 * saved medium (including metadata and data). */
	if (mrc_header_valid(rdev, md) < 0)
		return fail_bad_data ? -1 : 0;

	return 0;
}

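/*
 * Find the current cache entry for the given type and version and chain
 * 'rdev' over the data portion only (metadata stripped). Returns < 0 if no
 * usable entry exists or a retrain was requested in recovery mode.
 */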
static int mrc_cache_find_current(int type, uint32_t version,
				  struct region_device *rdev,
				  struct mrc_metadata *md)
{
	const struct cache_region *cr;
	struct region region;
	struct region_device read_rdev;
	struct region_file cache_file;
	size_t data_size;
	const size_t md_size = sizeof(*md);
	const bool fail_bad_data = true;

	/*
	 * In recovery mode, force retraining if the memory retrain
	 * switch is set.
	 */
	if (CONFIG(VBOOT_STARTS_IN_BOOTBLOCK) && vboot_recovery_mode_enabled()
	    && get_recovery_mode_retrain_switch())
		return -1;

	cr = lookup_region(&region, type);

	if (cr == NULL)
		return -1;

	if (boot_device_ro_subregion(&region, &read_rdev) < 0)
		return -1;

	if (mrc_cache_get_latest_slot_info(cr->name,
					   &read_rdev,
					   md,
					   &cache_file,
					   rdev,
					   fail_bad_data) < 0)
		return -1;

	if (version != md->version) {
		printk(BIOS_INFO, "MRC: version mismatch: %x vs %x\n",
		       md->version, version);
		return -1;
	}

	/* Re-size rdev to only contain the data. i.e. remove metadata. */
	data_size = md->data_size;
	return rdev_chain(rdev, rdev, md_size, data_size);
}

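/*
 * Copy the current cache entry for 'type'/'version' into 'buffer' and return
 * the number of bytes loaded, or -1 on failure. A minimal caller sketch
 * (MRC_CACHE_VERSION and the buffer size are hypothetical, platform-chosen
 * values):
 *
 *	uint8_t blob[8 * KiB];
 *	ssize_t sz = mrc_cache_load_current(MRC_TRAINING_DATA, MRC_CACHE_VERSION,
 *					    blob, sizeof(blob));
 *	if (sz < 0)
 *		fall back to full memory training
 */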
ssize_t mrc_cache_load_current(int type, uint32_t version, void *buffer,
			       size_t buffer_size)
{
	struct region_device rdev;
	struct mrc_metadata md;
	ssize_t data_size;

	if (mrc_cache_find_current(type, version, &rdev, &md) < 0)
		return -1;

	data_size = region_device_sz(&rdev);
	if (buffer_size < data_size)
		return -1;

	if (rdev_readat(&rdev, buffer, 0, data_size) != data_size)
		return -1;

	if (mrc_data_valid(type, &md, buffer, data_size) < 0)
		return -1;

	return data_size;
}

void *mrc_cache_current_mmap_leak(int type, uint32_t version,
				  size_t *data_size)
{
	struct region_device rdev;
	void *data;
	size_t region_device_size;
	struct mrc_metadata md;

	if (mrc_cache_find_current(type, version, &rdev, &md) < 0)
		return NULL;

	region_device_size = region_device_sz(&rdev);
	if (data_size)
		*data_size = region_device_size;
	data = rdev_mmap_full(&rdev);

	if (data == NULL) {
		printk(BIOS_INFO, "MRC: mmap failure.\n");
		return NULL;
	}

	if (mrc_data_valid(type, &md, data, region_device_size) < 0)
		return NULL;

	return data;
}

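/*
 * Compare the stored entry against the newly generated metadata. The data
 * hash inside the metadata stands in for the payload itself, so comparing
 * only the metadata is sufficient to detect changed training data.
 */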
static bool mrc_cache_needs_update(const struct region_device *rdev,
				   const struct mrc_metadata *new_md,
				   size_t new_data_size)
{
	void *mapping;
	size_t old_data_size = region_device_sz(rdev) - sizeof(struct mrc_metadata);
	bool need_update = false;

	if (new_data_size != old_data_size)
		return true;

	mapping = rdev_mmap_full(rdev);
	if (mapping == NULL) {
		printk(BIOS_ERR, "MRC: cannot mmap existing cache.\n");
		return true;
	}

	/*
	 * Compare the old and new metadata only. If the data hashes don't
	 * match, the comparison will fail.
	 */
	if (memcmp(new_md, mapping, sizeof(struct mrc_metadata)))
		need_update = true;

	rdev_munmap(rdev, mapping);

	return need_update;
}

static void log_event_cache_update(uint8_t slot, enum result res)
{
	const int type = ELOG_TYPE_MEM_CACHE_UPDATE;
	struct elog_event_mem_cache_update event = {
		.slot = slot
	};

	/* Filter through interesting events only */
	switch (res) {
	case UPDATE_FAILURE:
		event.status = ELOG_MEM_CACHE_UPDATE_STATUS_FAIL;
		break;
	case UPDATE_SUCCESS:
		event.status = ELOG_MEM_CACHE_UPDATE_STATUS_SUCCESS;
		break;
	default:
		return;
	}

	if (elog_add_event_raw(type, &event, sizeof(event)) < 0)
		printk(BIOS_ERR, "Failed to log mem cache update event.\n");
}

/* During ramstage this code purposefully uses incoherent transactions between
 * read and write. The read assumes a memory-mapped boot device that can be used
 * to quickly locate and compare the up-to-date data. However, when an update
 * is required it uses the writable region access to perform the update. */
static void update_mrc_cache_by_type(int type,
				     struct mrc_metadata *new_md,
				     const void *new_data,
				     size_t new_data_size)
{
	const struct cache_region *cr;
	struct region region;
	struct region_device read_rdev;
	struct region_device write_rdev;
	struct region_file cache_file;
	struct mrc_metadata md;
	struct incoherent_rdev backing_irdev;
	const struct region_device *backing_rdev;
	struct region_device latest_rdev;
	const bool fail_bad_data = false;
	uint32_t hash_idx;

	cr = lookup_region(&region, type);

	if (cr == NULL)
		return;

	printk(BIOS_DEBUG, "MRC: Checking cached data update for '%s'.\n",
	       cr->name);

	if (boot_device_ro_subregion(&region, &read_rdev) < 0)
		return;

	if (boot_device_rw_subregion(&region, &write_rdev) < 0)
		return;

	backing_rdev = incoherent_rdev_init(&backing_irdev, &region, &read_rdev,
					    &write_rdev);

	if (backing_rdev == NULL)
		return;

	/* Note that mrc_cache_get_latest_slot_info doesn't check the
	 * validity of the current slot. If the slot is invalid,
	 * we'll overwrite it anyway when we update the mrc_cache.
	 */
	if (mrc_cache_get_latest_slot_info(cr->name,
					   backing_rdev,
					   &md,
					   &cache_file,
					   &latest_rdev,
					   fail_bad_data) < 0)
		return;

	if (!mrc_cache_needs_update(&latest_rdev, new_md, new_data_size)) {
		printk(BIOS_DEBUG, "MRC: '%s' does not need update.\n", cr->name);
		log_event_cache_update(cr->elog_slot, ALREADY_UPTODATE);
		return;
	}

	printk(BIOS_DEBUG, "MRC: cache data '%s' needs update.\n", cr->name);

	struct update_region_file_entry entries[] = {
		[0] = {
			.size = sizeof(*new_md),
			.data = new_md,
		},
		[1] = {
			.size = new_data_size,
			.data = new_data,
		},
	};
	if (region_file_update_data_arr(&cache_file, entries, ARRAY_SIZE(entries)) < 0) {
		printk(BIOS_ERR, "MRC: failed to update '%s'.\n", cr->name);
		log_event_cache_update(cr->elog_slot, UPDATE_FAILURE);
	} else {
		printk(BIOS_DEBUG, "MRC: updated '%s'.\n", cr->name);
		log_event_cache_update(cr->elog_slot, UPDATE_SUCCESS);
		hash_idx = cr->tpm_hash_index;
		if (hash_idx && CONFIG(MRC_SAVE_HASH_IN_TPM))
			mrc_cache_update_hash(hash_idx, new_data, new_data_size);
	}
}

/* Read flash status register to determine if write protect is active */
static int nvm_is_write_protected(void)
{
	u8 sr1;
	u8 wp_gpio;
	u8 wp_spi;

	if (!CONFIG(CHROMEOS))
		return 0;

	if (!CONFIG(BOOT_DEVICE_SPI_FLASH))
		return 0;

	/* Read Write Protect GPIO if available */
	wp_gpio = get_write_protect_state();

	/* Read Status Register 1 */
	if (spi_flash_status(boot_device_spi_flash(), &sr1) < 0) {
		printk(BIOS_ERR, "Failed to read SPI status register 1\n");
		return -1;
	}

	wp_spi = !!(sr1 & 0x80);

	printk(BIOS_DEBUG, "SPI flash protection: WPSW=%d SRP0=%d\n",
	       wp_gpio, wp_spi);

	return wp_gpio && wp_spi;
}

/* Apply protection to a range of flash */
static int nvm_protect(const struct region *r)
{
	if (!CONFIG(MRC_SETTINGS_PROTECT))
		return 0;

	if (!CONFIG(BOOT_DEVICE_SPI_FLASH))
		return 0;

	return spi_flash_ctrlr_protect_region(boot_device_spi_flash(), r, WRITE_PROTECT);
}

/* Protect mrc region with a Protected Range Register */
static int protect_mrc_cache(const char *name)
{
	struct region region;

	if (!CONFIG(MRC_SETTINGS_PROTECT))
		return 0;

	if (lookup_region_by_name(name, &region) < 0) {
		printk(BIOS_INFO, "MRC: Could not find region '%s'\n", name);
		return -1;
	}

	if (nvm_is_write_protected() <= 0) {
		printk(BIOS_INFO, "MRC: NOT enabling PRR for '%s'.\n", name);
		return 0;
	}

	if (nvm_protect(&region) < 0) {
		printk(BIOS_ERR, "MRC: ERROR setting PRR for '%s'.\n", name);
		return -1;
	}

	printk(BIOS_INFO, "MRC: Enabled Protected Range on '%s'.\n", name);

	return 0;
}

static void protect_mrc_region(void)
{
	/*
	 * Check if there is a single unified region that encompasses both
	 * RECOVERY_MRC_CACHE and DEFAULT_MRC_CACHE. In that case protect the
	 * entire region using a single PRR.
	 *
	 * If we are not able to protect the entire region, try protecting
	 * individual regions next.
	 */
	if (protect_mrc_cache(UNIFIED_MRC_CACHE) == 0)
		return;

	if (CONFIG(HAS_RECOVERY_MRC_CACHE))
		protect_mrc_cache(RECOVERY_MRC_CACHE);

	protect_mrc_cache(DEFAULT_MRC_CACHE);
}

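/*
 * In recovery mode, invalidate the normal cache by pushing a 4-byte entry
 * carrying mrc_invalid_sig so that a subsequent normal boot retrains. On
 * platforms with a recovery cache this only happens when the retrain switch
 * is set.
 */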
static void invalidate_normal_cache(void)
{
	struct region_file cache_file;
	struct region_device rdev;
	const char *name = DEFAULT_MRC_CACHE;

	/*
	 * If !HAS_RECOVERY_MRC_CACHE and VBOOT_STARTS_IN_ROMSTAGE is
	 * selected, this means that memory training occurs before
	 * verified boot (in RO), so normal mode cache does not need
	 * to be invalidated.
	 */
	if (!CONFIG(HAS_RECOVERY_MRC_CACHE) && CONFIG(VBOOT_STARTS_IN_ROMSTAGE))
		return;

	/* We only invalidate the normal cache in recovery mode. */
	if (!vboot_recovery_mode_enabled())
		return;

	/*
	 * For platforms with a recovery mrc_cache, no need to
	 * invalidate when retrain switch is not set.
	 */
	if (CONFIG(HAS_RECOVERY_MRC_CACHE) && !get_recovery_mode_retrain_switch())
		return;

	if (fmap_locate_area_as_rdev_rw(name, &rdev) < 0) {
		printk(BIOS_ERR, "MRC: Couldn't find '%s' region. Invalidation failed\n",
		       name);
		return;
	}

	if (region_file_init(&cache_file, &rdev) < 0) {
		printk(BIOS_ERR, "MRC: region file invalid for '%s'. Invalidation failed\n",
		       name);
		return;
	}

	/* Push an update that consists of 4 bytes that is smaller than the
	 * MRC metadata as well as an invalid signature. */
	if (region_file_update_data(&cache_file, &mrc_invalid_sig,
				    sizeof(mrc_invalid_sig)) < 0)
		printk(BIOS_ERR, "MRC: invalidation failed for '%s'.\n", name);
}

static void update_mrc_cache_from_cbmem(int type)
{
	const struct cache_region *cr;
	struct region region;
	const struct cbmem_entry *to_be_updated;

	cr = lookup_region(&region, type);

	if (cr == NULL) {
		printk(BIOS_INFO, "MRC: could not find cache_region type %d\n", type);
		return;
	}

	to_be_updated = cbmem_entry_find(cr->cbmem_id);

	if (to_be_updated == NULL) {
		printk(BIOS_INFO, "MRC: No data in cbmem for '%s'.\n",
		       cr->name);
		return;
	}

	update_mrc_cache_by_type(type,
				 /* pointer to mrc_cache entry metadata header */
				 cbmem_entry_start(to_be_updated),
				 /* pointer to start of mrc_cache entry data */
				 cbmem_entry_start(to_be_updated) +
				 sizeof(struct mrc_metadata),
				 /* size of just data portion of the entry */
				 cbmem_entry_size(to_be_updated) -
				 sizeof(struct mrc_metadata));
}

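/*
 * Boot-state hook: flush any mrc_cache data stashed in cbmem out to flash,
 * invalidate the normal cache if required, and apply write protection.
 */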
static void finalize_mrc_cache(void *unused)
{
	if (CONFIG(MRC_STASH_TO_CBMEM)) {
		update_mrc_cache_from_cbmem(MRC_TRAINING_DATA);

		if (CONFIG(MRC_SETTINGS_VARIABLE_DATA))
			update_mrc_cache_from_cbmem(MRC_VARIABLE_DATA);
	}

	invalidate_normal_cache();

	protect_mrc_region();
}

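/*
 * Build the metadata (signature, size, version and xxh32 hashes) for freshly
 * generated data and either stash it in cbmem for ramstage to write out later
 * (MRC_STASH_TO_CBMEM) or write it to the mrc_cache region immediately.
 */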
int mrc_cache_stash_data(int type, uint32_t version, const void *data,
			 size_t size)
{
	const struct cache_region *cr;

	struct mrc_metadata md = {
		.signature = MRC_DATA_SIGNATURE,
		.data_size = size,
		.version = version,
		.data_hash = xxh32(data, size, 0),
	};
	md.header_hash = xxh32(&md, sizeof(md), 0);

	if (CONFIG(MRC_STASH_TO_CBMEM)) {
		/* Store data in cbmem for use in ramstage */
		struct mrc_metadata *cbmem_md;
		size_t cbmem_size;
		cbmem_size = sizeof(*cbmem_md) + size;

		cr = lookup_region_type(type);
		if (cr == NULL) {
			printk(BIOS_INFO, "MRC: No region type found. Skip adding to cbmem for type %d.\n",
			       type);
			return 0;
		}

		cbmem_md = cbmem_add(cr->cbmem_id, cbmem_size);

		if (cbmem_md == NULL) {
			printk(BIOS_ERR, "MRC: failed to add '%s' to cbmem.\n",
			       cr->name);
			return -1;
		}

		memcpy(cbmem_md, &md, sizeof(*cbmem_md));
		/* cbmem_md + 1 is the pointer to the mrc_cache data */
		memcpy(cbmem_md + 1, data, size);
	} else {
		/* Otherwise store to mrc_cache right away */
		update_mrc_cache_by_type(type, &md, data, size);
	}

	return 0;
}

/*
 * Ensures MRC training data is stored into SPI after PCI enumeration is done.
 * Some implementations may require this to be later than others.
 */
#if CONFIG(MRC_WRITE_NV_LATE)
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME_CHECK, BS_ON_ENTRY, finalize_mrc_cache, NULL);
#else
BOOT_STATE_INIT_ENTRY(BS_DEV_ENUMERATE, BS_ON_EXIT, finalize_mrc_cache, NULL);
#endif