soc/intel: Remove blank lines before '}' and after '{'
[coreboot2.git] / src / lib / region_file.c
blobf3e66bfcfb48b20a9ef1569cecb15afda1d34d36
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <commonlib/helpers.h>
4 #include <console/console.h>
5 #include <region_file.h>
6 #include <string.h>
/*
 * A region file provides generic support for appending new data
 * within a storage region. The book keeping is tracked in metadata
 * blocks where an offset pointer points to the last byte of a newly
 * allocated byte sequence. Thus, by taking 2 block offsets one can
 * determine start and size of the latest update. The data does not
 * have to be the same consistent size, but the data size has to be small
 * enough to fit a metadata block and one data write within the region.
 *
 * The granularity of the block offsets is 16 bytes. By using 16-bit
 * block offsets a region's total size can be no larger than 1MiB.
 * However, the last 32 bytes cannot be used in the 1MiB maximum region
 * because one needs to put a block offset indicating the last byte written.
 * An unused block offset is the value 0xffff, i.e. byte offset 0xffff0. The
 * last block offset that can be written is 0xfffe, i.e. byte offset 0xfffe0.
 *
 * The goal of this library is to provide a simple mechanism for
 * allocating blocks of data for updates. The metadata is written first
 * followed by the data. That means a power event between the block offset
 * write and the data write results in blocks being allocated but not
 * entirely written. It's up to the user of the library to sanity check
 * data stored.
 */
/* Block offsets are in units of 16 bytes (1 << 4). */
#define REGF_BLOCK_SHIFT 4
#define REGF_BLOCK_GRANULARITY (1 << REGF_BLOCK_SHIFT)
/* One metadata block occupies exactly one granule. */
#define REGF_METADATA_BLOCK_SIZE REGF_BLOCK_GRANULARITY
/* Slot value of erased flash; marks a slot that has never been written. */
#define REGF_UNALLOCATED_BLOCK 0xffff
/* Number of 16-bit slot entries that fit in one metadata block. */
#define REGF_UPDATES_PER_METADATA_BLOCK \
	(REGF_METADATA_BLOCK_SIZE / sizeof(uint16_t))
/*
 * Sentinel values kept in region_file::slot. Non-negative values are real
 * slot indices; the negative values below encode the file's state.
 */
enum {
	RF_ONLY_METADATA = 0,	/* Metadata allocated, no data committed yet. */
	RF_EMPTY = -1,		/* Region erased; nothing allocated. */
	RF_NEED_TO_EMPTY = -2,	/* Contents inconsistent; erase required. */
	RF_FATAL = -3,		/* Unrecoverable I/O failure observed. */
};
46 struct metadata_block {
47 uint16_t blocks[REGF_UPDATES_PER_METADATA_BLOCK];
50 static size_t block_to_bytes(uint16_t offset)
52 return (size_t)offset << REGF_BLOCK_SHIFT;
55 static size_t bytes_to_block(size_t bytes)
57 return bytes >> REGF_BLOCK_SHIFT;
60 static inline int block_offset_unallocated(uint16_t offset)
62 return offset == REGF_UNALLOCATED_BLOCK;
65 static inline size_t region_file_data_begin(const struct region_file *f)
67 return f->data_blocks[0];
70 static inline size_t region_file_data_end(const struct region_file *f)
72 return f->data_blocks[1];
75 static int all_block_offsets_unallocated(const struct metadata_block *mb)
77 size_t i;
79 for (i = 0; i < ARRAY_SIZE(mb->blocks); i++) {
80 if (!block_offset_unallocated(mb->blocks[i]))
81 return 0;
84 return 1;
87 /* Read metadata block at block i. */
88 static int read_mb(size_t i, struct metadata_block *mb,
89 const struct region_file *f)
91 size_t offset = block_to_bytes(i);
93 if (rdev_readat(&f->metadata, mb, offset, sizeof(*mb)) < 0)
94 return -1;
96 return 0;
99 /* Locate metadata block with the latest update */
100 static int find_latest_mb(struct metadata_block *mb, size_t num_mb_blocks,
101 struct region_file *f)
103 size_t l = 0;
104 size_t r = num_mb_blocks;
106 while (l + 1 < r) {
107 size_t mid = (l + r) / 2;
109 if (read_mb(mid, mb, f) < 0)
110 return -1;
111 if (all_block_offsets_unallocated(mb))
112 r = mid;
113 else
114 l = mid;
117 /* Set the base block slot. */
118 f->slot = l * REGF_UPDATES_PER_METADATA_BLOCK;
120 /* Re-read metadata block with the latest update. */
121 if (read_mb(l, mb, f) < 0)
122 return -1;
124 return 0;
127 static void find_latest_slot(struct metadata_block *mb, struct region_file *f)
129 size_t i;
131 for (i = REGF_UPDATES_PER_METADATA_BLOCK - 1; i > 0; i--) {
132 if (!block_offset_unallocated(mb->blocks[i]))
133 break;
136 f->slot += i;
/*
 * Derive f->data_blocks[] (start and end blocks of the latest update)
 * from the slot located by the metadata scan. Inconsistent boundaries
 * flag the region for emptying (f->slot = RF_NEED_TO_EMPTY) but still
 * return 0; only a read failure returns -1.
 */
static int fill_data_boundaries(struct region_file *f)
{
	struct region_device slots;
	size_t offset;
	size_t size = sizeof(f->data_blocks);

	/* No data committed yet: both boundaries sit right after metadata. */
	if (f->slot == RF_ONLY_METADATA) {
		size_t start = bytes_to_block(region_device_sz(&f->metadata));
		f->data_blocks[0] = start;
		f->data_blocks[1] = start;
		return 0;
	}

	/* Sanity check the 2 slot sequence to read. If it's out of the
	 * metadata blocks' bounds then one needs to empty it. This is done
	 * to uniquely identify I/O vs data errors in the readat() below. */
	offset = (f->slot - 1) * sizeof(f->data_blocks[0]);
	if (rdev_chain(&slots, &f->metadata, offset, size)) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Read the previous slot and the current slot in one shot; they are
	 * adjacent 16-bit entries forming [begin, end). */
	if (rdev_readat(&slots, &f->data_blocks, 0, size) < 0) {
		printk(BIOS_ERR, "REGF failed to read data boundaries.\n");
		return -1;
	}

	/* All used blocks should be incrementing from previous write. */
	if (region_file_data_begin(f) >= region_file_data_end(f)) {
		printk(BIOS_ERR, "REGF data boundaries wrong. [%zd,%zd) Need to empty.\n",
			region_file_data_begin(f), region_file_data_end(f));
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Ensure data doesn't exceed the region. */
	if (region_file_data_end(f) >
		bytes_to_block(region_device_sz(&f->rdev))) {
		printk(BIOS_ERR, "REGF data exceeds region %zd > %zd\n",
			region_file_data_end(f),
			bytes_to_block(region_device_sz(&f->rdev)));
		f->slot = RF_NEED_TO_EMPTY;
	}

	return 0;
}
/*
 * Prepare a region_file backed by region device p: determine how much
 * metadata is allocated, locate the newest update and compute its data
 * boundaries. On return f->slot holds either a real slot number or one
 * of the RF_* state values. Returns 0 on success, -1 on I/O failure
 * (f->slot left at RF_FATAL).
 */
int region_file_init(struct region_file *f, const struct region_device *p)
{
	struct metadata_block mb;

	/* Total number of metadata blocks is found by reading the first
	 * block offset as the metadata is allocated first. At least one
	 * metadata block is available. */

	/* Assume the worst until initialization succeeds. */
	memset(f, 0, sizeof(*f));
	f->slot = RF_FATAL;

	/* Keep parent around for accessing data later. */
	if (rdev_chain_full(&f->rdev, p))
		return -1;

	if (rdev_readat(p, &mb, 0, sizeof(mb)) < 0) {
		printk(BIOS_ERR, "REGF fail reading first metadata block.\n");
		return -1;
	}

	/* No metadata has been allocated. Assume region is empty. */
	if (block_offset_unallocated(mb.blocks[0])) {
		f->slot = RF_EMPTY;
		return 0;
	}

	/* If metadata block is 0 in size then need to empty. */
	if (mb.blocks[0] == 0) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* The region needs to be emptied as the metadata is broken. */
	if (rdev_chain(&f->metadata, p, 0, block_to_bytes(mb.blocks[0]))) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Locate latest metadata block with latest update. */
	if (find_latest_mb(&mb, mb.blocks[0], f)) {
		printk(BIOS_ERR, "REGF fail locating latest metadata block.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	find_latest_slot(&mb, f);

	/* Fill in the data blocks marking the latest update. */
	if (fill_data_boundaries(f)) {
		printk(BIOS_ERR, "REGF fail locating data boundaries.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	return 0;
}
243 int region_file_data(const struct region_file *f, struct region_device *rdev)
246 size_t offset;
247 size_t size;
249 /* Slot indicates if any data is available. */
250 if (f->slot <= RF_ONLY_METADATA)
251 return -1;
253 offset = block_to_bytes(region_file_data_begin(f));
254 size = block_to_bytes(region_file_data_end(f)) - offset;
256 return rdev_chain(rdev, &f->rdev, offset, size);
/*
 * Allocate enough metadata blocks to maximize data updates. Do this in
 * terms of blocks. To solve the balance of metadata vs data, 2 linear
 * equations are solved in terms of blocks where 'x' is number of
 * data updates and 'y' is number of metadata blocks:
 *
 *   x = number of data updates
 *   y = number of metadata blocks
 *   T = total blocks in region
 *   D = data size in blocks
 *   M = metadata size in blocks
 *   A = updates accounted for in each metadata block
 *
 *   T = D * x + M * y
 *   y = x / A
 *   -----------------
 *   T = D * x + M * x / A = x * (D + M / A)
 *   T * A = x * (D * A + M)
 *   x = T * A / (D * A + M)
 */
static int allocate_metadata(struct region_file *f, size_t data_blks)
{
	size_t t, m;
	size_t x, y;
	uint16_t tot_metadata;
	const size_t a = REGF_UPDATES_PER_METADATA_BLOCK;
	const size_t d = data_blks;

	t = bytes_to_block(ALIGN_DOWN(region_device_sz(&f->rdev),
					REGF_BLOCK_GRANULARITY));
	m = bytes_to_block(ALIGN_UP(REGF_METADATA_BLOCK_SIZE,
					REGF_BLOCK_GRANULARITY));

	/* Ensure at least one data update can fit with 1 metadata block
	 * within the region. */
	if (d > t - m)
		return -1;

	/* Maximize number of updates by aligning up to the number updates in
	 * a metadata block. May not really be able to achieve the number of
	 * updates in practice, but it ensures enough metadata blocks are
	 * allocated. */
	x = ALIGN_UP(t * a / (d * a + m), a);

	/* One data block has to fit. */
	if (x == 0)
		x = 1;

	/* Now calculate how many metadata blocks are needed. */
	y = ALIGN_UP(x, a) / a;

	/* Need to commit the metadata allocation. The first slot records the
	 * total metadata size in blocks. */
	tot_metadata = m * y;
	if (rdev_writeat(&f->rdev, &tot_metadata, 0, sizeof(tot_metadata)) < 0)
		return -1;

	if (rdev_chain(&f->metadata, &f->rdev, 0,
			block_to_bytes(tot_metadata)))
		return -1;

	/* Initialize a 0 data block to start appending from. */
	f->data_blocks[0] = tot_metadata;
	f->data_blocks[1] = tot_metadata;

	return 0;
}
326 static int update_can_fit(const struct region_file *f, size_t data_blks)
328 size_t metadata_slots;
329 size_t end_blk;
331 metadata_slots = region_device_sz(&f->metadata) / sizeof(uint16_t);
333 /* No more slots. */
334 if ((size_t)f->slot + 1 >= metadata_slots)
335 return 0;
337 /* See where the last block lies from the current one. */
338 end_blk = data_blks + region_file_data_end(f);
340 /* Update would have exceeded block addressing. */
341 if (end_blk >= REGF_UNALLOCATED_BLOCK)
342 return 0;
344 /* End block exceeds size of region. */
345 if (end_blk > bytes_to_block(region_device_sz(&f->rdev)))
346 return 0;
348 return 1;
/*
 * Claim the next metadata slot and advance the data window: the new
 * data begins where the previous update ended. Writes the new end
 * offset into the slot. Returns 0 on success, -1 on write failure.
 * NOTE: data_blocks[0] must be updated before data_blocks[1] is
 * computed, since region_file_data_begin() reads data_blocks[0].
 */
static int commit_data_allocation(struct region_file *f, size_t data_blks)
{
	size_t offset;

	f->slot++;

	offset = f->slot * sizeof(uint16_t);
	f->data_blocks[0] = region_file_data_end(f);
	f->data_blocks[1] = region_file_data_begin(f) + data_blks;

	if (rdev_writeat(&f->metadata, &f->data_blocks[1], offset,
			sizeof(f->data_blocks[1])) < 0)
		return -1;

	return 0;
}
368 static int commit_data(const struct region_file *f,
369 const struct update_region_file_entry *entries,
370 size_t num_entries)
372 size_t offset = block_to_bytes(region_file_data_begin(f));
373 for (int i = 0; i < num_entries; i++) {
374 if (rdev_writeat(&f->rdev, entries[i].data, offset, entries[i].size) < 0)
375 return -1;
376 offset += entries[i].size;
378 return 0;
381 static int handle_empty(struct region_file *f, size_t data_blks)
383 if (allocate_metadata(f, data_blks)) {
384 printk(BIOS_ERR, "REGF metadata allocation failed: %zd data blocks %zd total blocks\n",
385 data_blks, bytes_to_block(region_device_sz(&f->rdev)));
386 return -1;
389 f->slot = RF_ONLY_METADATA;
391 return 0;
394 static int handle_need_to_empty(struct region_file *f)
396 if (rdev_eraseat(&f->rdev, 0, region_device_sz(&f->rdev)) < 0) {
397 printk(BIOS_ERR, "REGF empty failed.\n");
398 return -1;
401 f->slot = RF_EMPTY;
403 return 0;
406 static int handle_update(struct region_file *f, size_t blocks,
407 const struct update_region_file_entry *entries,
408 size_t num_entries)
410 if (!update_can_fit(f, blocks)) {
411 printk(BIOS_INFO, "REGF update can't fit. Will empty.\n");
412 f->slot = RF_NEED_TO_EMPTY;
413 return 0;
416 if (commit_data_allocation(f, blocks)) {
417 printk(BIOS_ERR, "REGF failed to commit data allocation.\n");
418 return -1;
421 if (commit_data(f, entries, num_entries)) {
422 printk(BIOS_ERR, "REGF failed to commit data.\n");
423 return -1;
426 return 0;
429 int region_file_update_data_arr(struct region_file *f,
430 const struct update_region_file_entry *entries,
431 size_t num_entries)
433 int ret;
434 size_t blocks;
435 size_t size = 0;
437 for (int i = 0; i < num_entries; i++)
438 size += entries[i].size;
439 blocks = bytes_to_block(ALIGN_UP(size, REGF_BLOCK_GRANULARITY));
441 while (1) {
442 int prev_slot = f->slot;
444 switch (f->slot) {
445 case RF_EMPTY:
446 ret = handle_empty(f, blocks);
447 break;
448 case RF_NEED_TO_EMPTY:
449 ret = handle_need_to_empty(f);
450 break;
451 case RF_FATAL:
452 ret = -1;
453 break;
454 default:
455 ret = handle_update(f, blocks, entries, num_entries);
456 break;
459 /* Failing case. No more updates allowed to be attempted. */
460 if (ret) {
461 f->slot = RF_FATAL;
462 break;
465 /* No more state changes and data committed. */
466 if (f->slot > RF_ONLY_METADATA && prev_slot != f->slot)
467 break;
470 return ret;
473 int region_file_update_data(struct region_file *f, const void *buf, size_t size)
475 struct update_region_file_entry entry = {
476 .size = size,
477 .data = buf,
479 return region_file_update_data_arr(f, &entry, 1);