/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi.h>
#include <boot/coreboot_tables.h>
#include <bootmem.h>
#include <bootstate.h>
#include <cbmem.h>
#include <console/console.h>
#include <cpu/x86/pae.h>
#include <drivers/efi/efivars.h>
#include <drivers/efi/capsules.h>
#include <memrange.h>
#include <smmstore.h>
#include <stdio.h>
#include <string.h>
#include <types.h>

#include <Uefi/UefiSpec.h>
#include <Guid/GlobalVariable.h>
#include <Guid/FmpCapsule.h>
#include <IndustryStandard/WindowsUxCapsule.h>

/*
 * SG stands for scatter-gather.  An SG list consists of SG blocks that
 * describe a potentially discontinuous sequence of memory blocks while not
 * necessarily lying in contiguous memory themselves.
 *
 * An SG list is basically a linked list of arrays of block descriptors (SG
 * blocks).  Each SG block can be:
 *  - a data block, which points to a capsule's data
 *  - a continuation block, which says where other SG blocks are to be found
 *  - an end-of-list block, which indicates there are no more blocks
 *
 * Each of the CapsuleUpdateData* EFI variables points to some SG list which
 * might contain one or more update capsules.  An SG block never contains data
 * of more than one capsule.  The boundary between capsules in an SG list is
 * determined by parsing capsule headers and counting the amount of data seen
 * so far.
 *
 * There can be multiple CapsuleUpdateData* variables (CapsuleUpdateData,
 * CapsuleUpdateData1, etc.), in which case their SG lists are chained together
 * after sanity checks.
 */
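
/*
 * For illustration (this sketch is ours, not from the UEFI spec), one
 * CapsuleUpdateData variable describing two capsules could look like:
 *
 *   CapsuleUpdateData
 *     -> [ data block         ] -> bytes 0..N-1 of capsule A
 *        [ data block         ] -> remaining bytes of capsule A
 *        [ continuation block ]
 *          -> [ data block    ] -> all bytes of capsule B
 *             [ end-of-list   ]
 */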

/* This should be more than enough. */
#define MAX_CAPSULES 32

/* 4 should be enough, but 8 won't hurt. */
#define CAPSULE_ALIGNMENT 8

/*
 * A helper structure which bundles a physical block address with its data.
 * It's necessary because 32-bit code can't easily access anything beyond the
 * 4 GiB boundary, and this structure allows reading the data, passing it
 * around and, if necessary, updating it.
 *
 * Usage:
 *  1) Set .self to the physical address
 *  2) Check the block's address with is_good_block()
 *  3) Use load_block() to fetch or store_block() to update data
 */
struct block_descr {
	/* Where the data comes from. */
	uint64_t self;

	/*
	 * Data read from the self address above.  Three cases:
	 *  - len != 0 && addr != 0  =>  len bytes of capsule data at addr;
	 *                               next block_descr follows this one (self + 16)
	 *  - len == 0 && addr != 0  =>  no data;
	 *                               next block_descr is at addr
	 *  - len == 0 && addr == 0  =>  no data;
	 *                               this is the end of the list
	 */
	uint64_t len;
	uint64_t addr;
};
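
/*
 * A minimal traversal sketch (ours, for illustration only); the parsing code
 * below follows this pattern:
 *
 *	struct block_descr block = { .self = start_phys_addr };
 *	if (is_good_block(&block)) {
 *		load_block(&block);
 *		while (!is_final_block(&block)) {
 *			...
 *			advance_block(&block);
 *			load_block(&block);
 *		}
 *	}
 */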

/* For passing data from efi_parse_capsules() to bootmem and CBMEM callbacks. */
struct memory_range {
	uint32_t base;
	uint32_t len;
};

static const EFI_GUID capsule_vendor_guid = {
	0x711C703F, 0xC285, 0x4B10, { 0xA3, 0xB0, 0x36, 0xEC, 0xBD, 0x3C, 0x8B, 0xE2 }
};
static const EFI_GUID windows_ux_capsule_guid = WINDOWS_UX_CAPSULE_GUID;
static const EFI_GUID edk2_capsule_on_disk_name_guid = {
	0x98C80A4F, 0xE16B, 0x4D11, { 0x93, 0x9A, 0xAB, 0xE5, 0x61, 0x26, 0x3, 0x30 }
};
static const EFI_GUID efi_fmp_capsule_guid = EFI_FIRMWARE_MANAGEMENT_CAPSULE_ID_GUID;

/* Memory map to keep track of unused or reserved ranges. */
static struct memranges memory_map;

/* Page tables required for pae_map_2M_page(). */
static char pae_page_tables[20 * KiB] __aligned(4 * KiB);

/* Where all coalesced capsules are located. */
static struct memory_range coalesce_buffer;

/* Where individual coalesced capsules are located and their count. */
static struct memory_range uefi_capsules[MAX_CAPSULES];
static int uefi_capsule_count;

static bool is_data_block(const struct block_descr *block)
{
	return (block->len != 0);
}

static bool is_final_block(const struct block_descr *block)
{
	return (block->len == 0 && block->addr == 0);
}

static void *map_range(uint64_t base, uint32_t len)
{
	static uint64_t last_mapping_base = UINT64_MAX;

	/* Using MMCONF should be safe as long as we don't do any device
	   initialization while parsing capsules and don't forget to call
	   paging_disable_pae() at the end. */
	_Static_assert(IS_ALIGNED(CONFIG_ECAM_MMCONF_BASE_ADDRESS, 2 * MiB),
		       "MMCONF window must be 2 MiB-aligned");
	uintptr_t window_base = CONFIG_ECAM_MMCONF_BASE_ADDRESS;
	size_t window_size = 2 * MiB;

	printk(BIOS_SPEW, "capsules: mapping %#010x bytes at %#010llx.\n", len, base);

	if (base + len <= 4ULL * GiB &&
	    (base + len <= window_base || base >= window_base + window_size)) {
		/* Don't bother with the mapping, the whole range must be
		   already accessible without it. */
		printk(BIOS_SPEW, "capsules: no need to map anything.\n");
		return (void *)(uintptr_t)base;
	}

	uint64_t aligned_base = ALIGN_DOWN(base, 2 * MiB);
	if (base - aligned_base + len > 2 * MiB)
		die("capsules: memory range map request can't be satisfied.\n");

	/* No need to map the same data again. */
	if (aligned_base != last_mapping_base) {
		printk(BIOS_SPEW, "capsules: mapping from %#010llx.\n", aligned_base);
		pae_map_2M_page(&pae_page_tables, aligned_base, (void *)window_base);
		last_mapping_base = aligned_base;
	}

	return (uint8_t *)window_base + (base - aligned_base);
}
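
/*
 * A worked example of the mapping arithmetic above (made-up numbers): a
 * request for 16 bytes at 0x100200008 lies above 4 GiB, so it aligns down to
 * 0x100200000, that 2 MiB page gets mapped at the MMCONF window, and the
 * function returns window_base + 0x8.
 */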

/*
 * The alignment requirement on EFI_CAPSULE_BLOCK_DESCRIPTOR seems to be 8
 * bytes, which means that it can be cut in half by a mapping.  We could map
 * two 2 MiB pages instead, but it's easier to simply read those 16 bytes and
 * pass them around.
 *
 * `volatile` is to guard against a hypothetical statement reordering.
 */
static void load_block(struct block_descr *block)
{
	volatile uint64_t *len = map_range(block->self, sizeof(uint64_t));
	block->len = *len;
	volatile uint64_t *addr = map_range(block->self + sizeof(uint64_t), sizeof(uint64_t));
	block->addr = *addr;
}

static void store_block(const struct block_descr *block)
{
	volatile uint64_t *len = map_range(block->self, sizeof(uint64_t));
	*len = block->len;
	volatile uint64_t *addr = map_range(block->self + sizeof(uint64_t), sizeof(uint64_t));
	*addr = block->addr;
}
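
/* Sanity check (our addition, not part of the original flow): the code above
   and below assumes the in-memory descriptor is exactly two 64-bit words. */
_Static_assert(sizeof(EFI_CAPSULE_BLOCK_DESCRIPTOR) == 2 * sizeof(uint64_t),
	       "unexpected EFI_CAPSULE_BLOCK_DESCRIPTOR layout");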

static void advance_block(struct block_descr *block)
{
	if (is_final_block(block))
		die("capsules: attempt to advance beyond final SG block of UEFI capsules.\n");

	if (is_data_block(block)) {
		/* That was at least part of a capsule. */
		block->self = block->self + sizeof(EFI_CAPSULE_BLOCK_DESCRIPTOR);
	} else {
		/* End of a continuous sequence of descriptors, but there are more. */
		block->self = block->addr;
	}
}

static bool is_good_capsule(const EFI_CAPSULE_HEADER *capsule)
{
	if (capsule->HeaderSize < sizeof(*capsule)) {
		printk(BIOS_ERR, "capsules: capsule header size is too small: %#010x.\n",
		       capsule->HeaderSize);
		return false;
	}
	if (capsule->CapsuleImageSize <= capsule->HeaderSize) {
		printk(BIOS_ERR, "capsules: capsule image size is too small: %#010x.\n",
		       capsule->CapsuleImageSize);
		return false;
	}
	if (!(capsule->Flags & CAPSULE_FLAGS_PERSIST_ACROSS_RESET)) {
		printk(BIOS_ERR,
		       "capsules: this capsule should not have persisted, flags: %#010x.\n",
		       capsule->Flags);
		return false;
	}

	const EFI_GUID *guid = &capsule->CapsuleGuid;
	if (memcmp(guid, &windows_ux_capsule_guid, sizeof(*guid)) == 0)
		return true;
	if (memcmp(guid, &edk2_capsule_on_disk_name_guid, sizeof(*guid)) == 0)
		return true;
	if (memcmp(guid, &efi_fmp_capsule_guid, sizeof(*guid)) == 0)
		return true;

	printk(BIOS_ERR, "capsules: unrecognized capsule GUID.\n");
	return false;
}

static bool is_in_unused_ram(uint64_t base, uint64_t len)
{
	if (len == 0) {
		die("capsules: %s() was passed an empty range: %#010llx:%#010llx.\n",
		    __func__, base, len);
	}
	if (base + len < base) {
		die("capsules: %s() was passed an invalid range: %#010llx:%#010llx.\n",
		    __func__, base, len);
	}

	const struct range_entry *r;
	memranges_each_entry(r, &memory_map) {
		if (range_entry_tag(r) != BM_MEM_RAM)
			continue;

		if (base >= range_entry_base(r) && base + len <= range_entry_end(r))
			return true;
	}

	return false;
}

static bool is_good_block(struct block_descr *block)
{
	if (!IS_ALIGNED(block->self, sizeof(uint64_t))) {
		printk(BIOS_ERR, "capsules: misaligned SG block at %#010llx.\n", block->self);
		return false;
	}

	if (!is_in_unused_ram(block->self, sizeof(*block))) {
		printk(BIOS_ERR, "capsules: SG block is not in unused memory.\n");
		return false;
	}

	return true;
}

static bool is_good_capsule_head(struct block_descr *block)
{
	if (!is_data_block(block)) {
		printk(BIOS_ERR, "capsules: first capsule SG block is not a data block.\n");
		return false;
	}

	if (block->len < sizeof(EFI_CAPSULE_HEADER)) {
		printk(BIOS_ERR, "capsules: first SG block of a capsule is too small.\n");
		return false;
	}

	if (!is_in_unused_ram(block->addr, block->len)) {
		printk(BIOS_ERR, "capsules: capsule header is not in unused memory.\n");
		return false;
	}

	return true;
}

static bool is_good_capsule_block(struct block_descr *block, uint32_t size_left)
{
	if (is_final_block(block)) {
		printk(BIOS_ERR, "capsules: not enough SG blocks to cover a capsule.\n");
		return false;
	}

	if (!is_data_block(block)) {
		printk(BIOS_ERR, "capsules: capsule SG block is not a data block.\n");
		return false;
	}

	if (block->len > size_left) {
		printk(BIOS_ERR, "capsules: SG blocks reach beyond a capsule.\n");
		return false;
	}

	if (!is_in_unused_ram(block->addr, block->len)) {
		printk(BIOS_ERR, "capsules: capsule data is not in unused memory.\n");
		return false;
	}

	return true;
}

/* Checks a single SG list for sanity.  Returns its end-of-list descriptor or
   an empty one on error. */
static struct block_descr check_capsule_block(struct block_descr first_block,
					      uint64_t *total_data_size)
{
	struct block_descr block = first_block;
	if (!is_good_block(&block)) {
		printk(BIOS_ERR, "capsules: bad capsule block start.\n");
		goto error;
	}
	load_block(&block);

	uint64_t data_size = 0;
	while (!is_final_block(&block)) {
		/*
		 * This results in dropping this whole capsule block if any of
		 * the contained capsule headers looks weird.  An alternative
		 * is to cut the capsule block upon finding a bad header.
		 * Maybe we could even jump over a broken capsule, temporarily
		 * trusting the size field in its header (an invalid value
		 * should not break parsing anyway), and then cut it out of
		 * the sequence of blocks.  EDK doesn't bother, so only noting
		 * the possibility.
		 */
		if (!is_good_capsule_head(&block)) {
			printk(BIOS_ERR, "capsules: bad capsule header @ %#010llx.\n",
			       block.addr);
			goto error;
		}

		const EFI_CAPSULE_HEADER *capsule_hdr =
			map_range(block.addr, sizeof(*capsule_hdr));
		if (!is_good_capsule(capsule_hdr)) {
			printk(BIOS_ERR, "capsules: bad capsule header @ %#010llx.\n",
			       block.addr);
			goto error;
		}

		data_size += ALIGN_UP(capsule_hdr->CapsuleImageSize, CAPSULE_ALIGNMENT);

		uint32_t size_left = capsule_hdr->CapsuleImageSize;
		while (size_left != 0) {
			/* is_good_block() holds here whether it's the first
			   iteration or a subsequent one, so there is no need
			   to check again. */
			if (!is_good_capsule_block(&block, size_left))
				goto error;

			size_left -= block.len;

			advance_block(&block);
			if (!is_good_block(&block)) {
				printk(BIOS_ERR, "capsules: capsule body has a bad block.\n");
				goto error;
			}
			load_block(&block);
		}

		if (!is_final_block(&block) && !is_data_block(&block)) {
			/* Advance to the next page of block descriptors. */
			advance_block(&block);
			if (!is_good_block(&block)) {
				printk(BIOS_ERR, "capsules: bad SG continuation.\n");
				goto error;
			}
			load_block(&block);

			/* Not expecting a continuation to be followed by another
			   continuation or an end-of-list. */
			if (!is_data_block(&block)) {
				printk(BIOS_ERR, "capsules: chained SG continuations.\n");
				goto error;
			}
		}
	}

	/* Increase the size only on successful parsing of the capsule block. */
	*total_data_size += data_size;

	return block;

error:
	return (struct block_descr){ .self = 0 };
}
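
/*
 * A worked example (ours) of the accounting above: a capsule with
 * CapsuleImageSize == 0x3000 spread over data blocks of 0x1000 and 0x2000
 * bytes makes size_left go 0x3000 -> 0x2000 -> 0, at which point the next
 * descriptor must start a new capsule or end the list, and data_size grows
 * by ALIGN_UP(0x3000, CAPSULE_ALIGNMENT) == 0x3000.
 */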

/* Fills an array with pointers to capsule blocks.  Returns the number of
   discovered capsule blocks or -1 on error. */
static int discover_capsule_blocks(struct region_device *rdev,
				   struct block_descr *blocks,
				   int max_blocks)
{
	int block_count = 0;

	for (int i = 0; block_count < max_blocks; ++i) {
		char var_name[32];
		if (i == 0)
			strcpy(var_name, "CapsuleUpdateData");
		else
			snprintf(var_name, sizeof(var_name), "CapsuleUpdateData%d", i);

		struct block_descr block;
		uint32_t size = sizeof(block.self);
		enum cb_err ret = efi_fv_get_option(rdev, &capsule_vendor_guid, var_name,
						    &block.self, &size);
		if (ret != CB_SUCCESS) {
			/* No more variables. */
			break;
		}
		if (size != sizeof(block.self)) {
			printk(BIOS_ERR, "capsules: unexpected capsule data size (%u).\n",
			       size);
			return -1;
		}

		/*
		 * EDK2 checks for duplicates, probably because we'd get into
		 * trouble with chaining if there were any, so do the check.
		 *
		 * This, however, won't handle all possible situations which
		 * lead to loops or processing the same capsule more than once.
		 */
		int j;
		for (j = 0; j < block_count; ++j) {
			if (blocks[j].self == block.self)
				break;
		}
		if (j < block_count) {
			printk(BIOS_INFO, "capsules: skipping duplicated %s.\n", var_name);
			continue;
		}

		printk(BIOS_INFO, "capsules: capsule block #%d at %#010llx.\n",
		       block_count, block.self);
		blocks[block_count++] = block;
	}

	return block_count;
}
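
/*
 * Note the naming quirk, which follows EDK2's convention: the first variable
 * has no numeric suffix and indices start at 1, so the lookup sequence is
 * CapsuleUpdateData, CapsuleUpdateData1, CapsuleUpdateData2, and so on.
 */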

/*
 * This function connects the tail of one block of descriptors with the head
 * of the next one and returns the head of the whole chain.  While at it:
 *  - validate structures and pointers for sanity
 *  - compute the total amount of memory needed for coalesced capsules
 *
 * Returns a block that starts at 0 on error.
 */
static struct block_descr verify_and_chain_blocks(struct block_descr *blocks,
						  int block_count,
						  uint64_t *total_data_size)
{
	/* This won't be blocks[0] if there is something wrong with the first capsule block. */
	struct block_descr head = {0};

	/* End-of-list descriptor of the last chained block. */
	struct block_descr tail = {0};

	*total_data_size = 0;

	for (int i = 0; i < block_count; ++i) {
		struct block_descr last_block = check_capsule_block(blocks[i], total_data_size);
		if (last_block.self == 0) {
			/* Fail hard instead?  EDK just keeps going, as if capsule
			   blocks are always independent. */
			printk(BIOS_WARNING,
			       "capsules: skipping damaged capsule block #%d @ %#010llx.\n",
			       i, blocks[i].self);
			continue;
		}

		if (head.self == 0) {
			head = blocks[i];
		} else {
			/* Turn the previous end-of-list descriptor into a
			   continuation pointing at this SG list. */
			tail.addr = blocks[i].self;
			store_block(&tail);
		}

		tail = last_block;
	}

	return head;
}
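
/*
 * Illustration of the chaining (ours): given two verified SG lists A and B,
 *
 *   before:  A: [ data ]...[ end-of-list ]      B: [ data ]...[ end-of-list ]
 *   after:   A: [ data ]...[ continuation ] --> B: [ data ]...[ end-of-list ]
 *
 * so the whole chain can be traversed from a single head descriptor.
 */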

/* Marks structures and data of SG lists as BM_MEM_RESERVED so we don't step on
   them when looking for usable memory. */
static void reserve_capsules(struct block_descr block_chain)
{
	struct block_descr block = block_chain;

	/* This is the first block of a continuous sequence of blocks. */
	struct block_descr seq_start = {0};

	/* The code reserves sequences of blocks to avoid invoking
	   memranges_insert() on each of a bunch of adjacent 16-byte blocks. */

	load_block(&block);
	for (; !is_final_block(&block); advance_block(&block), load_block(&block)) {
		if (seq_start.self == 0)
			seq_start = block;

		if (is_data_block(&block)) {
			/* Reserve capsule data. */
			memranges_insert(&memory_map, block.addr, block.len, BM_MEM_RESERVED);
		} else {
			/* This isn't the final or a data block, so it must be the
			   last block of a continuous sequence.  Reserve the whole
			   sequence. */
			memranges_insert(&memory_map,
					 seq_start.self,
					 block.self - seq_start.self +
					 sizeof(EFI_CAPSULE_BLOCK_DESCRIPTOR),
					 BM_MEM_RESERVED);

			/* Will be set on the next iteration if there is one. */
			seq_start.self = 0;
		}
	}

	/* If continuations never show up in a row, as is checked by
	   check_capsule_block(), seq_start must be non-zero here. */
	memranges_insert(&memory_map,
			 seq_start.self,
			 block.self - seq_start.self + sizeof(EFI_CAPSULE_BLOCK_DESCRIPTOR),
			 BM_MEM_RESERVED);
}
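
/*
 * Example (ours) of the sequence optimization: four adjacent data descriptors
 * at 0x1000..0x103f followed by a continuation at 0x1040 get reserved as one
 * 0x1000..0x104f range (five 16-byte descriptors) instead of five separate
 * memranges_insert() calls.
 */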

/*
 * Find a buffer below 4 GiB for coalesced capsules.
 *
 * Keeping it simple and allocating a single buffer.  However, there is no
 * requirement to put all the capsules together, only that each of them is
 * contiguous in memory.  So if this is bad for some reason, a separate block
 * can be allocated for each.
 *
 * Returns a buffer that starts at 0 on error.
 */
static struct memory_range pick_buffer(uint64_t total_data_size)
{
	struct memory_range buffer = {0};

	/* 4 * KiB is the alignment set by memranges_init(). */
	total_data_size = ALIGN_UP(total_data_size, 4 * KiB);

	const struct range_entry *r;
	memranges_each_entry(r, &memory_map) {
		if (range_entry_tag(r) != BM_MEM_RAM)
			continue;

		resource_t base = range_entry_base(r);
		if (base >= 4ULL * GiB)
			break;

		/* Possibly reduce size to not deal with ranges that cross the 4 GiB boundary. */
		resource_t size = range_entry_size(r);
		if (base + size > 4ULL * GiB)
			size -= base + size - 4ULL * GiB;

		if (size >= total_data_size) {
			/*
			 * To not create trouble for payloads prefer higher addresses:
			 *  - use the top part of a suitable range
			 *  - exit the loop only after hitting the 4 GiB boundary or the
			 *    end of the list
			 */
			buffer.base = base + size - total_data_size;
			buffer.len = total_data_size;
		}
	}

	return buffer;
}
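
/*
 * Example (ours): with usable RAM at [0x40000000, 0xC0000000) and 8 MiB of
 * capsule data, the buffer ends up at 0xBF800000, i.e. at the top of the
 * last suitable range below 4 GiB.
 */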

/* Puts capsules into contiguous physical memory. */
static void coalesce_capsules(struct block_descr block_chain, uint8_t *target)
{
	struct block_descr block = block_chain;
	uint8_t *capsule_start = NULL;
	uint32_t size_left = 0;

	/* No safety checks in this function, as all of them were done earlier. */

	load_block(&block);
	for (; !is_final_block(&block); advance_block(&block), load_block(&block)) {
		/* Advance over a continuation. */
		if (!is_data_block(&block))
			continue;

		/* This must be the first block of a capsule. */
		if (size_left == 0) {
			const EFI_CAPSULE_HEADER *capsule_hdr =
				map_range(block.addr, sizeof(*capsule_hdr));
			size_left = capsule_hdr->CapsuleImageSize;
			capsule_start = target;
		}

		/* Copy the block's data piece by piece, never crossing a
		   2 MiB mapping boundary within a single memcpy(). */
		uint64_t addr = block.addr;
		uint64_t data_left = block.len;
		while (data_left != 0) {
			uint64_t piece_len = MIN(data_left, 2 * MiB - addr % (2 * MiB));
			void *data = map_range(addr, piece_len);

			memcpy(target, data, piece_len);

			addr += piece_len;
			target += piece_len;
			data_left -= piece_len;
		}

		size_left -= block.len;

		/* This must be the last block of a capsule, record it. */
		if (size_left == 0) {
			/* If we can just ignore corrupted capsules, then we can simply
			   drop those which don't fit as well. */
			if (uefi_capsule_count == MAX_CAPSULES) {
				printk(BIOS_WARNING,
				       "capsules: ignoring all capsules after #%d.\n",
				       uefi_capsule_count);
				break;
			}

			uint32_t capsule_size = target - capsule_start;
			uefi_capsules[uefi_capsule_count].base = (uintptr_t)capsule_start;
			uefi_capsules[uefi_capsule_count].len = capsule_size;
			uefi_capsule_count++;

			/* This is to align the start of the next capsule (assumes that
			   the initial value of target was suitably aligned). */
			if (!IS_ALIGNED(capsule_size, CAPSULE_ALIGNMENT))
				target += ALIGN_UP(capsule_size, CAPSULE_ALIGNMENT) - capsule_size;
		}
	}

	printk(BIOS_INFO, "capsules: found %d capsule(s).\n", uefi_capsule_count);
}
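
/*
 * Example (ours) of the alignment step: after a capsule of 0x1234 bytes, the
 * next capsule starts at capsule_start + 0x1238, keeping every coalesced
 * capsule aligned to CAPSULE_ALIGNMENT (8 bytes).
 */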

void efi_parse_capsules(void)
{
	/* EDK2 starts with 20 items and then grows the list, but it's unlikely
	   to be necessary in practice. */
	enum { MAX_CAPSULE_BLOCKS = MAX_CAPSULES };

	struct region_device rdev;
	if (smmstore_lookup_region(&rdev)) {
		printk(BIOS_INFO, "capsules: no SMMSTORE region, no update capsules.\n");
		return;
	}

	memranges_init(&memory_map, IORESOURCE_MEM | IORESOURCE_FIXED | IORESOURCE_STORED |
		       IORESOURCE_ASSIGNED | IORESOURCE_CACHEABLE, IORESOURCE_MEM |
		       IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED |
		       IORESOURCE_CACHEABLE, BM_MEM_RAM);

	init_pae_pagetables(&pae_page_tables);

	/* Blocks are collected here when traversing CapsuleUpdateData*
	   variables, duplicates are skipped. */
	struct block_descr blocks[MAX_CAPSULE_BLOCKS];
	int block_count = discover_capsule_blocks(&rdev, blocks, ARRAY_SIZE(blocks));
	if (block_count <= 0) {
		if (block_count == 0)
			printk(BIOS_INFO, "capsules: no UEFI capsules were discovered.\n");
		goto exit;
	}

	printk(BIOS_INFO, "capsules: processing %d capsule block(s).\n", block_count);

	/* Broken capsules are ignored, so ignore those which didn't fit as well. */
	if (block_count == ARRAY_SIZE(blocks)) {
		printk(BIOS_WARNING,
		       "capsules: hit limit on capsule blocks, some might be ignored.\n");
	}

	/* Chaining is done to not pass around and update an array of pointers. */
	uint64_t total_data_size;
	struct block_descr block_chain =
		verify_and_chain_blocks(blocks, block_count, &total_data_size);
	if (block_chain.self == 0) {
		printk(BIOS_ERR, "capsules: no valid capsules to process.\n");
		goto exit;
	}

	printk(BIOS_DEBUG, "capsules: chained capsule blocks.\n");

	/* Reserve all blocks and the data they point to to avoid checking for
	   overlaps when looking for a buffer. */
	reserve_capsules(block_chain);

	printk(BIOS_DEBUG, "capsules: reserved capsule blocks.\n");

	/* Also reserve the memory range for CBMEM.  Since it will still grow
	   in size by an unknown amount, try to account for that by reserving
	   at least 4 MiB below its current base. */
	void *cbmem_current;
	size_t cbmem_size;
	cbmem_get_region(&cbmem_current, &cbmem_size);
	uintptr_t cbmem_future_base = ALIGN_DOWN((uintptr_t)cbmem_current - 4 * MiB, MiB);
	memranges_insert(&memory_map,
			 cbmem_future_base,
			 (uintptr_t)cbmem_current + cbmem_size - cbmem_future_base,
			 BM_MEM_RESERVED);

	coalesce_buffer = pick_buffer(total_data_size);
	if (coalesce_buffer.base == 0) {
		printk(BIOS_ERR,
		       "capsules: failed to find a buffer (%#llx bytes) for coalesced UEFI capsules.\n",
		       total_data_size);
		goto exit;
	}

	printk(BIOS_DEBUG, "capsules: coalescing capsules data @ %#010x.\n",
	       coalesce_buffer.base);
	coalesce_capsules(block_chain, (void *)(uintptr_t)coalesce_buffer.base);

exit:
	paging_disable_pae();
	memranges_teardown(&memory_map);
}

void lb_efi_capsules(struct lb_header *header)
{
	int i;
	for (i = 0; i < uefi_capsule_count; ++i) {
		struct lb_range *capsule = (void *)lb_new_record(header);

		printk(BIOS_INFO, "capsules: publishing a capsule @ %#010x.\n",
		       uefi_capsules[i].base);

		capsule->tag = LB_TAG_CAPSULE;
		capsule->size = sizeof(*capsule);
		capsule->range_start = uefi_capsules[i].base;
		capsule->range_size = uefi_capsules[i].len;
	}
}

void efi_add_capsules_to_bootmem(void)
{
	if (coalesce_buffer.len != 0) {
		printk(BIOS_INFO, "capsules: reserving capsules data @ %#010x.\n",
		       coalesce_buffer.base);
		bootmem_add_range(coalesce_buffer.base, coalesce_buffer.len, BM_MEM_RESERVED);
	}
}

/*
 * The code from this unit is typically executed by clear_memory(), which is
 * run after DEV_INIT.  However, clear_memory() might not be compiled in, in
 * which case we still want to process capsules.
 *
 * The boot state machine doesn't enforce any particular ordering for
 * callbacks, and running before DEV_INIT is too early due to MTRRs not being
 * initialized.  Hence the invoking code is in two different places that
 * should be mutually exclusive (can't set a "done" flag due to unknown
 * ordering).
 */
#if !CONFIG(PLATFORM_HAS_DRAM_CLEAR)

static void parse_capsules(void *unused)
{
	if (!acpi_is_wakeup_s3())
		efi_parse_capsules();
}

BOOT_STATE_INIT_ENTRY(BS_DEV_INIT, BS_ON_EXIT, parse_capsules, NULL);

#endif