/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <console/console.h>
#include <region_file.h>
#include <string.h>
8 /*
9 * A region file provides generic support for appending new data
10 * within a storage region. The book keeping is tracked in metadata
11 * blocks where an offset pointer points to the last byte of a newly
12 * allocated byte sequence. Thus, by taking 2 block offsets one can
13 * determine start and size of the latest update. The data does not
14 * have to be the same consistent size, but the data size has be small
15 * enough to fit a metadata block and one data write within the region.
17 * The granularity of the block offsets are 16 bytes. By using 16-bit
18 * block offsets a region's total size can be no larger than 1MiB.
19 * However, the last 32 bytes cannot be used in the 1MiB maximum region
20 * because one needs to put a block offset indicating last byte written.
21 * An unused block offset is the value 0xffff or 0xffff0 bytes. The last
22 * block offset that can be written is 0xfffe or 0xfffe0 byte offset.
24 * The goal of this library is to provide a simple mechanism for
25 * allocating blocks of data for updates. The metadata is written first
26 * followed by the data. That means a power event between the block offset
27 * write and the data write results in blocks being allocated but not
28 * entirely written. It's up to the user of the library to sanity check
29 * data stored.
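
/*
 * A minimal usage sketch (illustrative only; 'backing_rdev' is a
 * hypothetical region_device owned by the caller). Append a buffer,
 * then chain a region_device over the latest update to read it back:
 *
 *	struct region_file f;
 *	struct region_device latest;
 *	char buf[64];
 *
 *	if (region_file_init(&f, backing_rdev) < 0)
 *		return;
 *	if (region_file_update_data(&f, buf, sizeof(buf)) < 0)
 *		return;
 *	if (region_file_data(&f, &latest) == 0)
 *		rdev_readat(&latest, buf, 0, region_device_sz(&latest));
 */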

#define REGF_BLOCK_SHIFT		4
#define REGF_BLOCK_GRANULARITY		(1 << REGF_BLOCK_SHIFT)
#define REGF_METADATA_BLOCK_SIZE	REGF_BLOCK_GRANULARITY
#define REGF_UNALLOCATED_BLOCK		0xffff
#define REGF_UPDATES_PER_METADATA_BLOCK	\
	(REGF_METADATA_BLOCK_SIZE / sizeof(uint16_t))
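
/*
 * Derived sizes, for reference: a block is 1 << 4 = 16 bytes, so the
 * largest writable block offset 0xfffe maps to byte offset 0xfffe0
 * (just under 1MiB), and each 16-byte metadata block holds
 * 16 / sizeof(uint16_t) = 8 update slots.
 */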

enum {
	RF_ONLY_METADATA = 0,
	RF_EMPTY = -1,
	RF_NEED_TO_EMPTY = -2,
	RF_FATAL = -3,
};

struct metadata_block {
	uint16_t blocks[REGF_UPDATES_PER_METADATA_BLOCK];
};

static size_t block_to_bytes(uint16_t offset)
{
	return (size_t)offset << REGF_BLOCK_SHIFT;
}

static size_t bytes_to_block(size_t bytes)
{
	return bytes >> REGF_BLOCK_SHIFT;
}

static inline int block_offset_unallocated(uint16_t offset)
{
	return offset == REGF_UNALLOCATED_BLOCK;
}

static inline size_t region_file_data_begin(const struct region_file *f)
{
	return f->data_blocks[0];
}

static inline size_t region_file_data_end(const struct region_file *f)
{
	return f->data_blocks[1];
}

static int all_block_offsets_unallocated(const struct metadata_block *mb)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mb->blocks); i++) {
		if (!block_offset_unallocated(mb->blocks[i]))
			return 0;
	}

	return 1;
}

/* Read metadata block at block i. */
static int read_mb(size_t i, struct metadata_block *mb,
			const struct region_file *f)
{
	size_t offset = block_to_bytes(i);

	if (rdev_readat(&f->metadata, mb, offset, sizeof(*mb)) < 0)
		return -1;

	return 0;
}

/* Locate metadata block with the latest update. */
static int find_latest_mb(struct metadata_block *mb, size_t num_mb_blocks,
				struct region_file *f)
{
	size_t l = 0;
	size_t r = num_mb_blocks;

	while (l + 1 < r) {
		size_t mid = (l + r) / 2;

		if (read_mb(mid, mb, f) < 0)
			return -1;
		if (all_block_offsets_unallocated(mb))
			r = mid;
		else
			l = mid;
	}

	/* Set the base block slot. */
	f->slot = l * REGF_UPDATES_PER_METADATA_BLOCK;

	/* Re-read metadata block with the latest update. */
	if (read_mb(l, mb, f) < 0)
		return -1;

	return 0;
}
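
/*
 * Example of the search above with hypothetical contents: given 4
 * metadata blocks where blocks 0-1 contain allocated offsets and
 * blocks 2-3 are erased (all 0xffff), [l, r) narrows from [0, 4) to
 * [1, 2). Block 1 is then re-read as the latest metadata block and
 * the base slot becomes 1 * REGF_UPDATES_PER_METADATA_BLOCK = 8.
 */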

static void find_latest_slot(struct metadata_block *mb, struct region_file *f)
{
	size_t i;

	for (i = REGF_UPDATES_PER_METADATA_BLOCK - 1; i > 0; i--) {
		if (!block_offset_unallocated(mb->blocks[i]))
			break;
	}

	f->slot += i;
}

static int fill_data_boundaries(struct region_file *f)
{
	struct region_device slots;
	size_t offset;
	size_t size = sizeof(f->data_blocks);

	if (f->slot == RF_ONLY_METADATA) {
		size_t start = bytes_to_block(region_device_sz(&f->metadata));
		f->data_blocks[0] = start;
		f->data_blocks[1] = start;
		return 0;
	}

	/* Sanity check the 2 slot sequence to read. If it's out of the
	 * metadata blocks' bounds then one needs to empty it. This is done
	 * to uniquely identify I/O vs data errors in the readat() below. */
	offset = (f->slot - 1) * sizeof(f->data_blocks[0]);
	if (rdev_chain(&slots, &f->metadata, offset, size)) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	if (rdev_readat(&slots, &f->data_blocks, 0, size) < 0) {
		printk(BIOS_ERR, "REGF failed to read data boundaries.\n");
		return -1;
	}

	/* All used blocks should be incrementing from previous write. */
	if (region_file_data_begin(f) >= region_file_data_end(f)) {
		printk(BIOS_ERR, "REGF data boundaries wrong. [%zd,%zd) Need to empty.\n",
			region_file_data_begin(f), region_file_data_end(f));
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Ensure data doesn't exceed the region. */
	if (region_file_data_end(f) >
			bytes_to_block(region_device_sz(&f->rdev))) {
		printk(BIOS_ERR, "REGF data exceeds region %zd > %zd\n",
			region_file_data_end(f),
			bytes_to_block(region_device_sz(&f->rdev)));
		f->slot = RF_NEED_TO_EMPTY;
	}

	return 0;
}

int region_file_init(struct region_file *f, const struct region_device *p)
{
	struct metadata_block mb;

	/* Total number of metadata blocks is found by reading the first
	 * block offset as the metadata is allocated first. At least one
	 * metadata block is available. */

	memset(f, 0, sizeof(*f));
	f->slot = RF_FATAL;

	/* Keep parent around for accessing data later. */
	if (rdev_chain_full(&f->rdev, p))
		return -1;

	if (rdev_readat(p, &mb, 0, sizeof(mb)) < 0) {
		printk(BIOS_ERR, "REGF fail reading first metadata block.\n");
		return -1;
	}

	/* No metadata has been allocated. Assume region is empty. */
	if (block_offset_unallocated(mb.blocks[0])) {
		f->slot = RF_EMPTY;
		return 0;
	}

	/* If metadata block is 0 in size then need to empty. */
	if (mb.blocks[0] == 0) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* The region needs to be emptied as the metadata is broken. */
	if (rdev_chain(&f->metadata, p, 0, block_to_bytes(mb.blocks[0]))) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Locate latest metadata block with latest update. */
	if (find_latest_mb(&mb, mb.blocks[0], f)) {
		printk(BIOS_ERR, "REGF fail locating latest metadata block.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	find_latest_slot(&mb, f);

	/* Fill in the data blocks marking the latest update. */
	if (fill_data_boundaries(f)) {
		printk(BIOS_ERR, "REGF fail locating data boundaries.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	return 0;
}

int region_file_data(const struct region_file *f, struct region_device *rdev)
{
	size_t offset;
	size_t size;

	/* Slot indicates if any data is available. */
	if (f->slot <= RF_ONLY_METADATA)
		return -1;

	offset = block_to_bytes(region_file_data_begin(f));
	size = block_to_bytes(region_file_data_end(f)) - offset;

	return rdev_chain(rdev, &f->rdev, offset, size);
}

/*
 * Allocate enough metadata blocks to maximize data updates. Do this in
 * terms of blocks. To solve the balance of metadata vs data, 2 linear
 * equations are solved in terms of blocks, where 'x' is the number of
 * data updates and 'y' is the number of metadata blocks:
 *
 * x = number of data updates
 * y = number of metadata blocks
 * T = total blocks in region
 * D = data size in blocks
 * M = metadata size in blocks
 * A = updates accounted for in each metadata block
 *
 * T = D * x + M * y
 * y = x / A
 * -----------------
 * T = D * x + M * x / A = x * (D + M / A)
 * T * A = x * (D * A + M)
 * x = T * A / (D * A + M)
 */
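
/*
 * Worked example with hypothetical numbers: a 64KiB region gives
 * T = 4096 blocks. For 64-byte updates (D = 4), with A = 8 and M = 1:
 *
 * x = 4096 * 8 / (4 * 8 + 1) = 992 (already a multiple of A)
 * y = 992 / 8 = 124 metadata blocks, i.e. 1984 bytes of metadata
 *
 * Capacity check: 124 + 992 * 4 = 4092 blocks <= 4096 total.
 */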

static int allocate_metadata(struct region_file *f, size_t data_blks)
{
	size_t t, m;
	size_t x, y;
	uint16_t tot_metadata;
	const size_t a = REGF_UPDATES_PER_METADATA_BLOCK;
	const size_t d = data_blks;

	t = bytes_to_block(ALIGN_DOWN(region_device_sz(&f->rdev),
					REGF_BLOCK_GRANULARITY));
	m = bytes_to_block(ALIGN_UP(REGF_METADATA_BLOCK_SIZE,
					REGF_BLOCK_GRANULARITY));

	/* Ensure at least one data update can fit with 1 metadata block
	 * within the region. */
	if (d > t - m)
		return -1;

	/* Maximize the number of updates by aligning up to the number of
	 * updates in a metadata block. The full number of updates may not be
	 * achievable in practice, but this ensures enough metadata blocks
	 * are allocated. */
	x = ALIGN_UP(t * a / (d * a + m), a);

	/* One data block has to fit. */
	if (x == 0)
		x = 1;

	/* Now calculate how many metadata blocks are needed. */
	y = ALIGN_UP(x, a) / a;

	/* Need to commit the metadata allocation. */
	tot_metadata = m * y;
	if (rdev_writeat(&f->rdev, &tot_metadata, 0, sizeof(tot_metadata)) < 0)
		return -1;

	if (rdev_chain(&f->metadata, &f->rdev, 0,
			block_to_bytes(tot_metadata)))
		return -1;

	/* Initialize a 0 data block to start appending from. */
	f->data_blocks[0] = tot_metadata;
	f->data_blocks[1] = tot_metadata;

	return 0;
}

static int update_can_fit(const struct region_file *f, size_t data_blks)
{
	size_t metadata_slots;
	size_t end_blk;

	metadata_slots = region_device_sz(&f->metadata) / sizeof(uint16_t);

	/* No more slots. */
	if ((size_t)f->slot + 1 >= metadata_slots)
		return 0;

	/* See where the last block lies from the current one. */
	end_blk = data_blks + region_file_data_end(f);

	/* Update would have exceeded block addressing. */
	if (end_blk >= REGF_UNALLOCATED_BLOCK)
		return 0;

	/* End block exceeds size of region. */
	if (end_blk > bytes_to_block(region_device_sz(&f->rdev)))
		return 0;

	return 1;
}

static int commit_data_allocation(struct region_file *f, size_t data_blks)
{
	size_t offset;

	f->slot++;

	offset = f->slot * sizeof(uint16_t);
	f->data_blocks[0] = region_file_data_end(f);
	f->data_blocks[1] = region_file_data_begin(f) + data_blks;

	if (rdev_writeat(&f->metadata, &f->data_blocks[1], offset,
				sizeof(f->data_blocks[1])) < 0)
		return -1;

	return 0;
}

static int commit_data(const struct region_file *f,
			const struct update_region_file_entry *entries,
			size_t num_entries)
{
	size_t offset = block_to_bytes(region_file_data_begin(f));

	for (int i = 0; i < num_entries; i++) {
		if (rdev_writeat(&f->rdev, entries[i].data, offset, entries[i].size) < 0)
			return -1;
		offset += entries[i].size;
	}

	return 0;
}

static int handle_empty(struct region_file *f, size_t data_blks)
{
	if (allocate_metadata(f, data_blks)) {
		printk(BIOS_ERR, "REGF metadata allocation failed: %zd data blocks %zd total blocks\n",
			data_blks, bytes_to_block(region_device_sz(&f->rdev)));
		return -1;
	}

	f->slot = RF_ONLY_METADATA;

	return 0;
}

static int handle_need_to_empty(struct region_file *f)
{
	if (rdev_eraseat(&f->rdev, 0, region_device_sz(&f->rdev)) < 0) {
		printk(BIOS_ERR, "REGF empty failed.\n");
		return -1;
	}

	f->slot = RF_EMPTY;

	return 0;
}

static int handle_update(struct region_file *f, size_t blocks,
			const struct update_region_file_entry *entries,
			size_t num_entries)
{
	if (!update_can_fit(f, blocks)) {
		printk(BIOS_INFO, "REGF update can't fit. Will empty.\n");
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	if (commit_data_allocation(f, blocks)) {
		printk(BIOS_ERR, "REGF failed to commit data allocation.\n");
		return -1;
	}

	if (commit_data(f, entries, num_entries)) {
		printk(BIOS_ERR, "REGF failed to commit data.\n");
		return -1;
	}

	return 0;
}

int region_file_update_data_arr(struct region_file *f,
				const struct update_region_file_entry *entries,
				size_t num_entries)
{
	int ret;
	size_t blocks;
	size_t size = 0;

	for (int i = 0; i < num_entries; i++)
		size += entries[i].size;
	blocks = bytes_to_block(ALIGN_UP(size, REGF_BLOCK_GRANULARITY));

	while (1) {
		int prev_slot = f->slot;

		switch (f->slot) {
		case RF_EMPTY:
			ret = handle_empty(f, blocks);
			break;
		case RF_NEED_TO_EMPTY:
			ret = handle_need_to_empty(f);
			break;
		case RF_FATAL:
			ret = -1;
			break;
		default:
			ret = handle_update(f, blocks, entries, num_entries);
			break;
		}

		/* Failing case. No more updates allowed to be attempted. */
		if (ret) {
			f->slot = RF_FATAL;
			break;
		}

		/* No more state changes and data committed. */
		if (f->slot > RF_ONLY_METADATA && prev_slot != f->slot)
			break;
	}

	return ret;
}

int region_file_update_data(struct region_file *f, const void *buf, size_t size)
{
	struct update_region_file_entry entry = {
		.size = size,
		.data = buf,
	};

	return region_file_update_data_arr(f, &entry, 1);
}
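
/*
 * Sketch of a multi-entry update (illustrative; 'f', 'header', 'payload'
 * and 'payload_size' are hypothetical). The entries are written
 * back-to-back into a single allocation, so a header and its payload
 * land in one contiguous update:
 *
 *	struct update_region_file_entry entries[] = {
 *		{ .size = sizeof(header), .data = &header },
 *		{ .size = payload_size, .data = payload },
 *	};
 *	region_file_update_data_arr(&f, entries, ARRAY_SIZE(entries));
 */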