mb/google/brya: Create rull variant
[coreboot2.git] / src / drivers / mrc_cache / mrc_cache.c
blob17f5fee72753ff1aaff11ceb78342fd6e7603026
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <string.h>
4 #include <boot_device.h>
5 #include <bootstate.h>
6 #include <bootmode.h>
7 #include <console/console.h>
8 #include <cbmem.h>
9 #include <elog.h>
10 #include <fmap.h>
11 #include <region_file.h>
12 #include <security/vboot/antirollback.h>
13 #include <security/vboot/mrc_cache_hash_tpm.h>
14 #include <security/vboot/vboot_common.h>
15 #include <spi_flash.h>
16 #include <xxhash.h>
18 #include "mrc_cache.h"
/* FMAP region names holding the caches. */
#define DEFAULT_MRC_CACHE	"RW_MRC_CACHE"
#define VARIABLE_MRC_CACHE	"RW_VAR_MRC_CACHE"
#define RECOVERY_MRC_CACHE	"RECOVERY_MRC_CACHE"
#define UNIFIED_MRC_CACHE	"UNIFIED_MRC_CACHE"

/* Signature "MRCD" was used for older header format before CB:67670. */
#define MRC_DATA_SIGNATURE (('M'<<0)|('R'<<8)|('C'<<16)|('d'<<24))

/* Complement of the valid signature; written over a slot to mark it
 * intentionally invalidated (see invalidate_normal_cache()). */
static const uint32_t mrc_invalid_sig = ~MRC_DATA_SIGNATURE;
/* Header stored in front of each saved MRC data blob (on flash and in cbmem). */
struct mrc_metadata {
	uint32_t signature;	/* MRC_DATA_SIGNATURE (or ~signature when invalidated) */
	uint32_t data_size;	/* size in bytes of the data following this header */
	uint32_t data_hash;	/* xxh32 over the data */
	uint32_t header_hash;	/* xxh32 over this header with this field set to 0 */
	uint32_t version;	/* caller-supplied version for compatibility checks */
} __packed;
/* Outcome of a cache update attempt, used for event-log reporting. */
enum result {
	UPDATE_FAILURE = -1,
	UPDATE_SUCCESS = 0,
	ALREADY_UPTODATE = 1
};
/* Boot modes in which a cache region may be consulted. */
#define NORMAL_FLAG (1 << 0)
#define RECOVERY_FLAG (1 << 1)

/* Static description of one mrc_cache region. */
struct cache_region {
	const char *name;		/* FMAP region name */
	uint32_t cbmem_id;		/* cbmem entry id used when stashing to cbmem */
	int type;			/* MRC_TRAINING_DATA or MRC_VARIABLE_DATA */
	int elog_slot;			/* slot identifier for event-log entries */
	uint32_t tpm_hash_index;	/* TPM NV index for the data hash; 0 = none */
	int flags;			/* NORMAL_FLAG and/or RECOVERY_FLAG */
};
/* Training data for recovery boots; only present (and only usable) on
 * boards selecting HAS_RECOVERY_MRC_CACHE. */
static const struct cache_region recovery_training = {
	.name = RECOVERY_MRC_CACHE,
	.cbmem_id = CBMEM_ID_MRCDATA,
	.type = MRC_TRAINING_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_RECOVERY,
	.tpm_hash_index = MRC_REC_HASH_NV_INDEX,
#if CONFIG(HAS_RECOVERY_MRC_CACHE)
	.flags = RECOVERY_FLAG,
#else
	.flags = 0,
#endif
};
/* Training data for normal boots (RW_MRC_CACHE). */
static const struct cache_region normal_training = {
	.name = DEFAULT_MRC_CACHE,
	.cbmem_id = CBMEM_ID_MRCDATA,
	.type = MRC_TRAINING_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_NORMAL,
	.tpm_hash_index = MRC_RW_HASH_NV_INDEX,
#if CONFIG(VBOOT_STARTS_IN_ROMSTAGE)
	/*
	 * If VBOOT_STARTS_IN_ROMSTAGE is selected, this means that
	 * memory training happens before vboot (in RO) and the
	 * mrc_cache data is always safe to use.
	 */
	.flags = NORMAL_FLAG | RECOVERY_FLAG,
#else
	/*
	 * If !VBOOT_STARTS_IN_ROMSTAGE, this means that memory training happens after
	 * vboot (in RW code) and is never safe to use in recovery.
	 */
	.flags = NORMAL_FLAG,
#endif
};
/* Variable (non-training) MRC data; no TPM hash index is assigned. */
static const struct cache_region variable_data = {
	.name = VARIABLE_MRC_CACHE,
	.cbmem_id = CBMEM_ID_VAR_MRCDATA,
	.type = MRC_VARIABLE_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_VARIABLE,
	.tpm_hash_index = 0,
#if CONFIG(VBOOT_STARTS_IN_ROMSTAGE)
	/*
	 * If VBOOT_STARTS_IN_ROMSTAGE is selected, this means that
	 * memory training happens before vboot (in RO) and the
	 * mrc_cache data is always safe to use.
	 */
	.flags = NORMAL_FLAG | RECOVERY_FLAG,
#else
	/*
	 * If !VBOOT_STARTS_IN_ROMSTAGE, this means that memory training happens after
	 * vboot (in RW code) and is never safe to use in recovery.
	 */
	.flags = NORMAL_FLAG,
#endif
};
/* Order matters here for priority in matching. */
static const struct cache_region *cache_regions[] = {
	&recovery_training,
	&normal_training,
	&variable_data,
};

/* TPM MRC hash functionality depends on vboot starting before memory init. */
_Static_assert(!CONFIG(MRC_SAVE_HASH_IN_TPM) ||
	       CONFIG(VBOOT_STARTS_IN_BOOTBLOCK),
	       "for TPM MRC hash functionality, vboot must start in bootblock");
/* Resolve the FMAP area 'name' into *r. Returns 0 on success, -1 if the
 * area cannot be located. */
static int lookup_region_by_name(const char *name, struct region *r)
{
	return fmap_locate_area(name, r) ? -1 : 0;
}
132 static const struct cache_region *lookup_region_type(int type)
134 int i;
135 int flags;
137 if (CONFIG(VBOOT_STARTS_IN_BOOTBLOCK) && vboot_recovery_mode_enabled())
138 flags = RECOVERY_FLAG;
139 else
140 flags = NORMAL_FLAG;
142 for (i = 0; i < ARRAY_SIZE(cache_regions); i++) {
143 if (cache_regions[i]->type != type)
144 continue;
145 if ((cache_regions[i]->flags & flags) == flags)
146 return cache_regions[i];
149 return NULL;
152 static const struct cache_region *lookup_region(struct region *r, int type)
154 const struct cache_region *cr;
156 cr = lookup_region_type(type);
158 if (cr == NULL) {
159 /* There will be no recovery MRC cache region if (!HAS_RECOVERY_MRC_CACHE &&
160 !VBOOT_STARTS_IN_ROMSTAGE). */
161 printk(BIOS_DEBUG, "MRC: failed to locate region type %d\n", type);
162 return NULL;
165 if (lookup_region_by_name(cr->name, r) < 0)
166 return NULL;
168 return cr;
/*
 * Read and validate the mrc_metadata header at offset 0 of rdev into *md.
 * On success, rdev is re-chained to cover exactly header + data.
 * Returns 0 on success, -1 on any read/signature/hash/size failure.
 */
static int mrc_header_valid(struct region_device *rdev, struct mrc_metadata *md)
{
	uint32_t hash;
	uint32_t hash_result;
	size_t size;

	if (rdev_readat(rdev, md, 0, sizeof(*md)) < 0) {
		/* When the metadata was invalidated intentionally (for example from the
		   previous recovery boot), print a warning instead of an error. */
		if (rdev_readat(rdev, md, 0, sizeof(mrc_invalid_sig)) >= 0 &&
		    md->signature == mrc_invalid_sig) {
			printk(BIOS_INFO, "MRC: metadata was invalidated\n");
			return -1;
		}
		printk(BIOS_ERR, "MRC: couldn't read metadata\n");
		return -1;
	}

	if (md->signature != MRC_DATA_SIGNATURE) {
		printk(BIOS_ERR, "MRC: invalid header signature\n");
		return -1;
	}

	/* Compute hash over header with 0 as the value. */
	hash = md->header_hash;
	md->header_hash = 0;
	hash_result = xxh32(md, sizeof(*md), 0);

	if (hash != hash_result) {
		printk(BIOS_ERR, "MRC: header hash mismatch: %x vs %x\n",
		       hash, hash_result);
		return -1;
	}

	/* Put back original. */
	md->header_hash = hash;

	/* Re-size the region device according to the metadata as a region_file
	 * does block allocation. */
	size = sizeof(*md) + md->data_size;
	if (rdev_chain(rdev, rdev, 0, size) < 0) {
		printk(BIOS_ERR, "MRC: size exceeds rdev size: %zx vs %zx\n",
		       size, region_device_sz(rdev));
		return -1;
	}

	return 0;
}
/*
 * Verify 'data'/'data_size' against the metadata 'md' for the given cache
 * type. When the region has a TPM hash index and MRC_SAVE_HASH_IN_TPM is
 * enabled, the TPM-stored hash is used; otherwise the xxh32 hash embedded
 * in the metadata is checked. Returns 0 when valid, -1 otherwise.
 */
static int mrc_data_valid(int type, const struct mrc_metadata *md,
			  void *data, size_t data_size)
{
	uint32_t hash;
	const struct cache_region *cr = lookup_region_type(type);
	uint32_t hash_idx;

	if (cr == NULL)
		return -1;

	if (md->data_size != data_size)
		return -1;

	hash_idx = cr->tpm_hash_index;
	if (hash_idx && CONFIG(MRC_SAVE_HASH_IN_TPM)) {
		if (!mrc_cache_verify_hash(hash_idx, data, data_size))
			return -1;
	} else {
		hash = xxh32(data, data_size, 0);

		if (md->data_hash != hash) {
			printk(BIOS_ERR, "MRC: data hash mismatch: %x vs %x\n",
			       md->data_hash, hash);
			return -1;
		}
	}

	return 0;
}
/*
 * Initialize cache_file on backing_rdev and chain rdev over the latest data
 * slot, validating its header into *md.
 *
 * With fail_bad_data == true, missing or invalid data is an error (-1).
 * With false, those conditions return 0 and rdev is left 0-sized so an
 * updater can still use cache_file to write a fresh slot.
 */
static int mrc_cache_get_latest_slot_info(const char *name,
					  const struct region_device *backing_rdev,
					  struct mrc_metadata *md,
					  struct region_file *cache_file,
					  struct region_device *rdev,
					  bool fail_bad_data)
{
	/* Init and obtain a handle to the file data. */
	if (region_file_init(cache_file, backing_rdev) < 0) {
		printk(BIOS_ERR, "MRC: region file invalid in '%s'\n", name);
		return -1;
	}

	/* Provide a 0 sized region_device from here on out so the caller
	 * has a valid yet unusable region_device. */
	rdev_chain(rdev, backing_rdev, 0, 0);

	/* No data to return. */
	if (region_file_data(cache_file, rdev) < 0) {
		printk(BIOS_NOTICE, "MRC: no data in '%s'\n", name);
		return fail_bad_data ? -1 : 0;
	}

	/* Validate header and resize region to reflect actual usage on the
	 * saved medium (including metadata and data). */
	if (mrc_header_valid(rdev, md) < 0)
		return fail_bad_data ? -1 : 0;

	return 0;
}
/*
 * Locate the current valid cache slot for 'type', verify its version, and
 * chain rdev over just the data portion (metadata stripped). *md receives
 * the validated header. Returns 0 on success, -1 on lookup/validation/
 * version failure, or when a recovery-mode retrain was requested.
 */
static int mrc_cache_find_current(int type, uint32_t version,
				  struct region_device *rdev,
				  struct mrc_metadata *md)
{
	const struct cache_region *cr;
	struct region region;
	struct region_device read_rdev;
	struct region_file cache_file;
	size_t data_size;
	const size_t md_size = sizeof(*md);
	const bool fail_bad_data = true;

	/*
	 * In recovery mode, force retraining if the memory retrain
	 * switch is set.
	 */
	if (CONFIG(VBOOT_STARTS_IN_BOOTBLOCK) && vboot_recovery_mode_enabled()
	    && get_recovery_mode_retrain_switch())
		return -1;

	cr = lookup_region(&region, type);

	if (cr == NULL)
		return -1;

	if (boot_device_ro_subregion(&region, &read_rdev) < 0)
		return -1;

	if (mrc_cache_get_latest_slot_info(cr->name,
					   &read_rdev,
					   md,
					   &cache_file,
					   rdev,
					   fail_bad_data) < 0)
		return -1;

	if (version != md->version) {
		printk(BIOS_INFO, "MRC: version mismatch: %x vs %x\n",
		       md->version, version);
		return -1;
	}

	/* Re-size rdev to only contain the data. i.e. remove metadata. */
	data_size = md->data_size;
	return rdev_chain(rdev, rdev, md_size, data_size);
}
/*
 * Copy the current cache data of the given type/version into 'buffer'.
 * Returns the number of data bytes copied, or -1 when no matching valid
 * cache exists, buffer_size is too small, or validation fails.
 */
ssize_t mrc_cache_load_current(int type, uint32_t version, void *buffer,
			       size_t buffer_size)
{
	struct region_device rdev;
	struct mrc_metadata md;
	ssize_t data_size;

	if (mrc_cache_find_current(type, version, &rdev, &md) < 0)
		return -1;

	data_size = region_device_sz(&rdev);
	if (buffer_size < data_size)
		return -1;

	if (rdev_readat(&rdev, buffer, 0, data_size) != data_size)
		return -1;

	if (mrc_data_valid(type, &md, buffer, data_size) < 0)
		return -1;

	return data_size;
}
/*
 * Map the current cache data of the given type/version and return a pointer
 * to it (optionally reporting its size via *data_size). As the name says,
 * the mapping is never unmapped here — the caller keeps using it for the
 * rest of the boot. Returns NULL on lookup, mmap, or validation failure.
 */
void *mrc_cache_current_mmap_leak(int type, uint32_t version,
				  size_t *data_size)
{
	struct region_device rdev;
	void *data;
	size_t region_device_size;
	struct mrc_metadata md;

	if (mrc_cache_find_current(type, version, &rdev, &md) < 0)
		return NULL;

	region_device_size = region_device_sz(&rdev);
	if (data_size)
		*data_size = region_device_size;
	data = rdev_mmap_full(&rdev);

	if (data == NULL) {
		printk(BIOS_INFO, "MRC: mmap failure.\n");
		return NULL;
	}

	if (mrc_data_valid(type, &md, data, region_device_size) < 0)
		return NULL;

	return data;
}
/*
 * Decide whether the stored cache (rdev covers metadata + data) differs
 * from the new metadata/data. Comparing only the metadata suffices since it
 * embeds the data hash. Returns true when an update is needed, including
 * when the existing cache cannot be mapped.
 */
static bool mrc_cache_needs_update(const struct region_device *rdev,
				   const struct mrc_metadata *new_md,
				   size_t new_data_size)
{
	void *mapping;
	/* NOTE(review): if rdev is smaller than the metadata (e.g. an empty
	   slot from the non-failing lookup path), this unsigned subtraction
	   wraps; the size comparison below then reports "needs update",
	   which appears to be the intended outcome — confirm. */
	size_t old_data_size = region_device_sz(rdev) - sizeof(struct mrc_metadata);
	bool need_update = false;

	if (new_data_size != old_data_size)
		return true;

	mapping = rdev_mmap_full(rdev);
	if (mapping == NULL) {
		printk(BIOS_ERR, "MRC: cannot mmap existing cache.\n");
		return true;
	}

	/*
	 * Compare the old and new metadata only. If the data hashes don't
	 * match, the comparison will fail.
	 */
	if (memcmp(new_md, mapping, sizeof(struct mrc_metadata)))
		need_update = true;

	rdev_munmap(rdev, mapping);

	return need_update;
}
408 static void log_event_cache_update(uint8_t slot, enum result res)
410 const int type = ELOG_TYPE_MEM_CACHE_UPDATE;
411 struct elog_event_mem_cache_update event = {
412 .slot = slot
415 /* Filter through interesting events only */
416 switch (res) {
417 case UPDATE_FAILURE:
418 event.status = ELOG_MEM_CACHE_UPDATE_STATUS_FAIL;
419 break;
420 case UPDATE_SUCCESS:
421 event.status = ELOG_MEM_CACHE_UPDATE_STATUS_SUCCESS;
422 break;
423 default:
424 return;
427 if (elog_add_event_raw(type, &event, sizeof(event)) < 0)
428 printk(BIOS_ERR, "Failed to log mem cache update event.\n");
/* During ramstage this code purposefully uses incoherent transactions between
 * read and write. The read assumes a memory-mapped boot device that can be used
 * to quickly locate and compare the up-to-date data. However, when an update
 * is required it uses the writable region access to perform the update. */
static void update_mrc_cache_by_type(int type,
				     struct mrc_metadata *new_md,
				     const void *new_data,
				     size_t new_data_size)
{
	const struct cache_region *cr;
	struct region region;
	struct region_device read_rdev;
	struct region_device write_rdev;
	struct region_file cache_file;
	struct mrc_metadata md;
	struct incoherent_rdev backing_irdev;
	const struct region_device *backing_rdev;
	struct region_device latest_rdev;
	/* Allow an empty/invalid current slot; it gets overwritten below. */
	const bool fail_bad_data = false;
	uint32_t hash_idx;

	cr = lookup_region(&region, type);

	if (cr == NULL)
		return;

	printk(BIOS_DEBUG, "MRC: Checking cached data update for '%s'.\n",
	       cr->name);

	if (boot_device_ro_subregion(&region, &read_rdev) < 0)
		return;

	if (boot_device_rw_subregion(&region, &write_rdev) < 0)
		return;

	/* Pair the RO (fast, memory-mapped) and RW views of the region. */
	backing_rdev = incoherent_rdev_init(&backing_irdev, &region, &read_rdev,
					    &write_rdev);

	if (backing_rdev == NULL)
		return;

	/* Note that mrc_cache_get_latest_slot_info doesn't check the
	 * validity of the current slot. If the slot is invalid,
	 * we'll overwrite it anyway when we update the mrc_cache.
	 */
	if (mrc_cache_get_latest_slot_info(cr->name,
					   backing_rdev,
					   &md,
					   &cache_file,
					   &latest_rdev,
					   fail_bad_data) < 0)
		return;

	if (!mrc_cache_needs_update(&latest_rdev, new_md, new_data_size)) {
		printk(BIOS_DEBUG, "MRC: '%s' does not need update.\n", cr->name);
		log_event_cache_update(cr->elog_slot, ALREADY_UPTODATE);
		return;
	}

	printk(BIOS_DEBUG, "MRC: cache data '%s' needs update.\n", cr->name);

	/* Write metadata and data as one logical record. */
	struct update_region_file_entry entries[] = {
		[0] = {
			.size = sizeof(*new_md),
			.data = new_md,
		},
		[1] = {
			.size = new_data_size,
			.data = new_data,
		},
	};
	if (region_file_update_data_arr(&cache_file, entries, ARRAY_SIZE(entries)) < 0) {
		printk(BIOS_ERR, "MRC: failed to update '%s'.\n", cr->name);
		log_event_cache_update(cr->elog_slot, UPDATE_FAILURE);
	} else {
		printk(BIOS_DEBUG, "MRC: updated '%s'.\n", cr->name);
		log_event_cache_update(cr->elog_slot, UPDATE_SUCCESS);
		hash_idx = cr->tpm_hash_index;
		if (hash_idx && CONFIG(MRC_SAVE_HASH_IN_TPM))
			mrc_cache_update_hash(hash_idx, new_data, new_data_size);
	}
}
/* Read flash status register to determine if write protect is active.
 * Returns 1 when both the WP GPIO and the SRP0 status bit are set,
 * 0 when not protected (or not applicable), -1 on status-read failure. */
static int nvm_is_write_protected(void)
{
	u8 sr1;
	u8 wp_gpio;
	u8 wp_spi;

	if (!CONFIG(CHROMEOS))
		return 0;

	if (!CONFIG(BOOT_DEVICE_SPI_FLASH))
		return 0;

	/* Read Write Protect GPIO if available */
	wp_gpio = get_write_protect_state();

	/* Read Status Register 1 */
	if (spi_flash_status(boot_device_spi_flash(), &sr1) < 0) {
		printk(BIOS_ERR, "Failed to read SPI status register 1\n");
		return -1;
	}

	/* Bit 7 (0x80) of SR1 is reported below as SRP0. */
	wp_spi = !!(sr1 & 0x80);

	printk(BIOS_DEBUG, "SPI flash protection: WPSW=%d SRP0=%d\n",
	       wp_gpio, wp_spi);

	/* Protected only when both hardware WP and SRP0 are asserted. */
	return wp_gpio && wp_spi;
}
544 /* Apply protection to a range of flash */
545 static int nvm_protect(const struct region *r)
547 if (!CONFIG(MRC_SETTINGS_PROTECT))
548 return 0;
550 if (!CONFIG(BOOT_DEVICE_SPI_FLASH))
551 return 0;
553 return spi_flash_ctrlr_protect_region(boot_device_spi_flash(), r, WRITE_PROTECT);
/* Protect mrc region with a Protected Range Register.
 * Returns 0 when protected or when protection is skipped (feature off, or
 * flash not hardware write-protected), -1 on lookup or PRR failure. */
static int protect_mrc_cache(const char *name)
{
	struct region region;

	if (!CONFIG(MRC_SETTINGS_PROTECT))
		return 0;

	if (lookup_region_by_name(name, &region) < 0) {
		printk(BIOS_INFO, "MRC: Could not find region '%s'\n", name);
		return -1;
	}

	/* <= 0 also covers the status-read failure case (-1). */
	if (nvm_is_write_protected() <= 0) {
		printk(BIOS_INFO, "MRC: NOT enabling PRR for '%s'.\n", name);
		return 0;
	}

	if (nvm_protect(&region) < 0) {
		printk(BIOS_ERR, "MRC: ERROR setting PRR for '%s'.\n", name);
		return -1;
	}

	printk(BIOS_INFO, "MRC: Enabled Protected Range on '%s'.\n", name);
	return 0;
}
/* Protect the mrc_cache FMAP regions, preferring one unified PRR. */
static void protect_mrc_region(void)
{
	/*
	 * Check if there is a single unified region that encompasses both
	 * RECOVERY_MRC_CACHE and DEFAULT_MRC_CACHE. In that case protect the
	 * entire region using a single PRR.
	 *
	 * If we are not able to protect the entire region, try protecting
	 * individual regions next.
	 */
	if (protect_mrc_cache(UNIFIED_MRC_CACHE) == 0)
		return;

	if (CONFIG(HAS_RECOVERY_MRC_CACHE))
		protect_mrc_cache(RECOVERY_MRC_CACHE);

	protect_mrc_cache(DEFAULT_MRC_CACHE);
}
/*
 * In recovery mode, mark the normal (RW) training cache invalid by writing
 * a record smaller than mrc_metadata carrying the inverted signature, so
 * the next normal boot retrains instead of trusting RW data.
 */
static void invalidate_normal_cache(void)
{
	struct region_file cache_file;
	struct region_device rdev;
	const char *name = DEFAULT_MRC_CACHE;

	/*
	 * If !HAS_RECOVERY_MRC_CACHE and VBOOT_STARTS_IN_ROMSTAGE is
	 * selected, this means that memory training occurs before
	 * verified boot (in RO), so normal mode cache does not need
	 * to be invalidated.
	 */
	if (!CONFIG(HAS_RECOVERY_MRC_CACHE) && CONFIG(VBOOT_STARTS_IN_ROMSTAGE))
		return;

	/* We only invalidate the normal cache in recovery mode. */
	if (!vboot_recovery_mode_enabled())
		return;

	/*
	 * For platforms with a recovery mrc_cache, no need to
	 * invalidate when retrain switch is not set.
	 */
	if (CONFIG(HAS_RECOVERY_MRC_CACHE) && !get_recovery_mode_retrain_switch())
		return;

	if (fmap_locate_area_as_rdev_rw(name, &rdev) < 0) {
		printk(BIOS_ERR, "MRC: Couldn't find '%s' region. Invalidation failed\n",
		       name);
		return;
	}

	if (region_file_init(&cache_file, &rdev) < 0) {
		printk(BIOS_ERR, "MRC: region file invalid for '%s'. Invalidation failed\n",
		       name);
		return;
	}

	/* Push an update that consists of 4 bytes that is smaller than the
	 * MRC metadata as well as an invalid signature. */
	if (region_file_update_data(&cache_file, &mrc_invalid_sig,
				    sizeof(mrc_invalid_sig)) < 0)
		printk(BIOS_ERR, "MRC: invalidation failed for '%s'.\n", name);
}
/*
 * Flush a cbmem-stashed cache entry (metadata header followed by data, as
 * laid out by mrc_cache_stash_data()) out to the flash region for 'type'.
 * Silently returns when no region or no cbmem entry exists.
 */
static void update_mrc_cache_from_cbmem(int type)
{
	const struct cache_region *cr;
	struct region region;
	const struct cbmem_entry *to_be_updated;

	cr = lookup_region(&region, type);

	if (cr == NULL) {
		printk(BIOS_INFO, "MRC: could not find cache_region type %d\n", type);
		return;
	}

	to_be_updated = cbmem_entry_find(cr->cbmem_id);

	if (to_be_updated == NULL) {
		printk(BIOS_INFO, "MRC: No data in cbmem for '%s'.\n",
		       cr->name);
		return;
	}

	update_mrc_cache_by_type(type,
				 /* pointer to mrc_cache entry metadata header */
				 cbmem_entry_start(to_be_updated),
				 /* pointer to start of mrc_cache entry data */
				 cbmem_entry_start(to_be_updated) +
					 sizeof(struct mrc_metadata),
				 /* size of just data portion of the entry */
				 cbmem_entry_size(to_be_updated) -
					 sizeof(struct mrc_metadata));
}
/*
 * Boot-state hook: flush any cbmem-stashed cache data to flash, invalidate
 * the normal cache when recovery requires it, then arm write protection on
 * the mrc regions.
 */
static void finalize_mrc_cache(void *unused)
{
	if (CONFIG(MRC_STASH_TO_CBMEM)) {
		update_mrc_cache_from_cbmem(MRC_TRAINING_DATA);

		if (CONFIG(MRC_SETTINGS_VARIABLE_DATA))
			update_mrc_cache_from_cbmem(MRC_VARIABLE_DATA);
	}

	invalidate_normal_cache();

	protect_mrc_region();
}
/*
 * Save MRC data of the given type/version. Builds the mrc_metadata header
 * (data and header xxh32 hashes) and either stashes header+data into cbmem
 * for a later ramstage flush (MRC_STASH_TO_CBMEM) or writes the flash
 * region immediately. Returns 0 on success or benign skip, -1 when the
 * cbmem entry cannot be added.
 */
int mrc_cache_stash_data(int type, uint32_t version, const void *data,
			 size_t size)
{
	const struct cache_region *cr;

	struct mrc_metadata md = {
		.signature = MRC_DATA_SIGNATURE,
		.data_size = size,
		.version = version,
		.data_hash = xxh32(data, size, 0),
	};
	/* Header hash is computed with the header_hash field still zero. */
	md.header_hash = xxh32(&md, sizeof(md), 0);

	if (CONFIG(MRC_STASH_TO_CBMEM)) {
		/* Store data in cbmem for use in ramstage */
		struct mrc_metadata *cbmem_md;
		size_t cbmem_size;
		cbmem_size = sizeof(*cbmem_md) + size;

		cr = lookup_region_type(type);
		if (cr == NULL) {
			printk(BIOS_INFO, "MRC: No region type found. Skip adding to cbmem for type %d.\n",
			       type);
			return 0;
		}

		cbmem_md = cbmem_add(cr->cbmem_id, cbmem_size);

		if (cbmem_md == NULL) {
			printk(BIOS_ERR, "MRC: failed to add '%s' to cbmem.\n",
			       cr->name);
			return -1;
		}

		memcpy(cbmem_md, &md, sizeof(*cbmem_md));
		/* cbmem_md + 1 is the pointer to the mrc_cache data */
		memcpy(cbmem_md + 1, data, size);
	} else {
		/* Otherwise store to mrc_cache right away */
		update_mrc_cache_by_type(type, &md, data, size);
	}
	return 0;
}
/*
 * Ensures MRC training data is stored into SPI after PCI enumeration is done.
 * Some implementations may require this to be later than others.
 */
#if CONFIG(MRC_WRITE_NV_LATE)
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME_CHECK, BS_ON_ENTRY, finalize_mrc_cache, NULL);
#else
BOOT_STATE_INIT_ENTRY(BS_DEV_ENUMERATE, BS_ON_EXIT, finalize_mrc_cache, NULL);
#endif