1 /* SPDX-License-Identifier: GPL-2.0-only */
#include <commonlib/helpers.h>
#include <console/console.h>
#include <region_file.h>
#include <string.h>
/*
 * A region file provides generic support for appending new data
 * within a storage region. The book keeping is tracked in metadata
 * blocks where an offset pointer points to the last byte of a newly
 * allocated byte sequence. Thus, by taking 2 block offsets one can
 * determine start and size of the latest update. The data does not
 * have to be the same consistent size, but the data size has to be small
 * enough to fit a metadata block and one data write within the region.
 *
 * The granularity of the block offsets is 16 bytes. By using 16-bit
 * block offsets a region's total size can be no larger than 1MiB.
 * However, the last 32 bytes cannot be used in the 1MiB maximum region
 * because one needs to put a block offset indicating the last byte written.
 * An unused block offset is the value 0xffff or 0xffff0 bytes. The last
 * block offset that can be written is 0xfffe or 0xfffe0 byte offset.
 *
 * The goal of this library is to provide a simple mechanism for
 * allocating blocks of data for updates. The metadata is written first
 * followed by the data. That means a power event between the block offset
 * write and the data write results in blocks being allocated but not
 * entirely written. It's up to the user of the library to sanity check
 * the contents of the latest update.
 */
/* Block offsets are in units of 16-byte granules. */
#define REGF_BLOCK_SHIFT		4
#define REGF_BLOCK_GRANULARITY		(1 << REGF_BLOCK_SHIFT)
/* A metadata block occupies exactly one granule. */
#define REGF_METADATA_BLOCK_SIZE	REGF_BLOCK_GRANULARITY
/* Sentinel marking an unused (never written) block offset slot. */
#define REGF_UNALLOCATED_BLOCK		0xffff
/* Number of uint16_t block offset slots held by one metadata block. */
#define REGF_UPDATES_PER_METADATA_BLOCK	\
	(REGF_METADATA_BLOCK_SIZE / sizeof(uint16_t))
42 RF_NEED_TO_EMPTY
= -2,
/* One metadata block: an array of 16-bit block offsets, one slot per update. */
struct metadata_block {
	uint16_t blocks[REGF_UPDATES_PER_METADATA_BLOCK];
};
/* Convert a block offset to a byte offset. */
static size_t block_to_bytes(uint16_t offset)
{
	return (size_t)offset << REGF_BLOCK_SHIFT;
}
/* Convert a byte count to a block count (truncating to granularity). */
static size_t bytes_to_block(size_t bytes)
{
	return bytes >> REGF_BLOCK_SHIFT;
}
/* Return non-zero if the block offset slot has never been written. */
static inline int block_offset_unallocated(uint16_t offset)
{
	return offset == REGF_UNALLOCATED_BLOCK;
}
/* First block (inclusive) of the latest data update. */
static inline size_t region_file_data_begin(const struct region_file *f)
{
	return f->data_blocks[0];
}
/* End block (exclusive) of the latest data update. */
static inline size_t region_file_data_end(const struct region_file *f)
{
	return f->data_blocks[1];
}
75 static int all_block_offsets_unallocated(const struct metadata_block
*mb
)
79 for (i
= 0; i
< ARRAY_SIZE(mb
->blocks
); i
++) {
80 if (!block_offset_unallocated(mb
->blocks
[i
]))
87 /* Read metadata block at block i. */
88 static int read_mb(size_t i
, struct metadata_block
*mb
,
89 const struct region_file
*f
)
91 size_t offset
= block_to_bytes(i
);
93 if (rdev_readat(&f
->metadata
, mb
, offset
, sizeof(*mb
)) < 0)
99 /* Locate metadata block with the latest update */
100 static int find_latest_mb(struct metadata_block
*mb
, size_t num_mb_blocks
,
101 struct region_file
*f
)
104 size_t r
= num_mb_blocks
;
107 size_t mid
= (l
+ r
) / 2;
109 if (read_mb(mid
, mb
, f
) < 0)
111 if (all_block_offsets_unallocated(mb
))
117 /* Set the base block slot. */
118 f
->slot
= l
* REGF_UPDATES_PER_METADATA_BLOCK
;
120 /* Re-read metadata block with the latest update. */
121 if (read_mb(l
, mb
, f
) < 0)
127 static void find_latest_slot(struct metadata_block
*mb
, struct region_file
*f
)
131 for (i
= REGF_UPDATES_PER_METADATA_BLOCK
- 1; i
> 0; i
--) {
132 if (!block_offset_unallocated(mb
->blocks
[i
]))
139 static int fill_data_boundaries(struct region_file
*f
)
141 struct region_device slots
;
143 size_t size
= sizeof(f
->data_blocks
);
145 if (f
->slot
== RF_ONLY_METADATA
) {
146 size_t start
= bytes_to_block(region_device_sz(&f
->metadata
));
147 f
->data_blocks
[0] = start
;
148 f
->data_blocks
[1] = start
;
152 /* Sanity check the 2 slot sequence to read. If it's out of the
153 * metadata blocks' bounds then one needs to empty it. This is done
154 * to uniquely identify I/O vs data errors in the readat() below. */
155 offset
= (f
->slot
- 1) * sizeof(f
->data_blocks
[0]);
156 if (rdev_chain(&slots
, &f
->metadata
, offset
, size
)) {
157 f
->slot
= RF_NEED_TO_EMPTY
;
161 if (rdev_readat(&slots
, &f
->data_blocks
, 0, size
) < 0) {
162 printk(BIOS_ERR
, "REGF failed to read data boundaries.\n");
166 /* All used blocks should be incrementing from previous write. */
167 if (region_file_data_begin(f
) >= region_file_data_end(f
)) {
168 printk(BIOS_ERR
, "REGF data boundaries wrong. [%zd,%zd) Need to empty.\n",
169 region_file_data_begin(f
), region_file_data_end(f
));
170 f
->slot
= RF_NEED_TO_EMPTY
;
174 /* Ensure data doesn't exceed the region. */
175 if (region_file_data_end(f
) >
176 bytes_to_block(region_device_sz(&f
->rdev
))) {
177 printk(BIOS_ERR
, "REGF data exceeds region %zd > %zd\n",
178 region_file_data_end(f
),
179 bytes_to_block(region_device_sz(&f
->rdev
)));
180 f
->slot
= RF_NEED_TO_EMPTY
;
186 int region_file_init(struct region_file
*f
, const struct region_device
*p
)
188 struct metadata_block mb
;
190 /* Total number of metadata blocks is found by reading the first
191 * block offset as the metadata is allocated first. At least one
192 * metadata block is available. */
194 memset(f
, 0, sizeof(*f
));
197 /* Keep parent around for accessing data later. */
198 if (rdev_chain_full(&f
->rdev
, p
))
201 if (rdev_readat(p
, &mb
, 0, sizeof(mb
)) < 0) {
202 printk(BIOS_ERR
, "REGF fail reading first metadata block.\n");
206 /* No metadata has been allocated. Assume region is empty. */
207 if (block_offset_unallocated(mb
.blocks
[0])) {
212 /* If metadata block is 0 in size then need to empty. */
213 if (mb
.blocks
[0] == 0) {
214 f
->slot
= RF_NEED_TO_EMPTY
;
218 /* The region needs to be emptied as the metadata is broken. */
219 if (rdev_chain(&f
->metadata
, p
, 0, block_to_bytes(mb
.blocks
[0]))) {
220 f
->slot
= RF_NEED_TO_EMPTY
;
224 /* Locate latest metadata block with latest update. */
225 if (find_latest_mb(&mb
, mb
.blocks
[0], f
)) {
226 printk(BIOS_ERR
, "REGF fail locating latest metadata block.\n");
231 find_latest_slot(&mb
, f
);
233 /* Fill in the data blocks marking the latest update. */
234 if (fill_data_boundaries(f
)) {
235 printk(BIOS_ERR
, "REGF fail locating data boundaries.\n");
243 int region_file_data(const struct region_file
*f
, struct region_device
*rdev
)
248 /* Slot indicates if any data is available. */
249 if (f
->slot
<= RF_ONLY_METADATA
)
252 offset
= block_to_bytes(region_file_data_begin(f
));
253 size
= block_to_bytes(region_file_data_end(f
)) - offset
;
255 return rdev_chain(rdev
, &f
->rdev
, offset
, size
);
/*
 * Allocate enough metadata blocks to maximize data updates. Do this in
 * terms of blocks. To solve the balance of metadata vs data, 2 linear
 * equations are solved in terms of blocks where 'x' is number of
 * data updates and 'y' is number of metadata blocks:
 *
 *   x = number of data updates
 *   y = number of metadata blocks
 *   T = total blocks in region
 *   D = data size in blocks
 *   M = metadata size in blocks
 *   A = updates accounted for in each metadata block
 *
 *   T = D * x + M * y, with y = x / A, therefore:
 *   T = D * x + M * x / A = x * (D + M / A)
 *   T * A = x * (D * A + M)
 *   x = T * A / (D * A + M)
 */
278 static int allocate_metadata(struct region_file
*f
, size_t data_blks
)
282 uint16_t tot_metadata
;
283 const size_t a
= REGF_UPDATES_PER_METADATA_BLOCK
;
284 const size_t d
= data_blks
;
286 t
= bytes_to_block(ALIGN_DOWN(region_device_sz(&f
->rdev
),
287 REGF_BLOCK_GRANULARITY
));
288 m
= bytes_to_block(ALIGN_UP(REGF_METADATA_BLOCK_SIZE
,
289 REGF_BLOCK_GRANULARITY
));
291 /* Ensure at least one data update can fit with 1 metadata block
292 * within the region. */
296 /* Maximize number of updates by aligning up to the number updates in
297 * a metadata block. May not really be able to achieve the number of
298 * updates in practice, but it ensures enough metadata blocks are
300 x
= ALIGN_UP(t
* a
/ (d
* a
+ m
), a
);
302 /* One data block has to fit. */
306 /* Now calculate how many metadata blocks are needed. */
307 y
= ALIGN_UP(x
, a
) / a
;
309 /* Need to commit the metadata allocation. */
310 tot_metadata
= m
* y
;
311 if (rdev_writeat(&f
->rdev
, &tot_metadata
, 0, sizeof(tot_metadata
)) < 0)
314 if (rdev_chain(&f
->metadata
, &f
->rdev
, 0,
315 block_to_bytes(tot_metadata
)))
318 /* Initialize a 0 data block to start appending from. */
319 f
->data_blocks
[0] = tot_metadata
;
320 f
->data_blocks
[1] = tot_metadata
;
325 static int update_can_fit(const struct region_file
*f
, size_t data_blks
)
327 size_t metadata_slots
;
330 metadata_slots
= region_device_sz(&f
->metadata
) / sizeof(uint16_t);
333 if ((size_t)f
->slot
+ 1 >= metadata_slots
)
336 /* See where the last block lies from the current one. */
337 end_blk
= data_blks
+ region_file_data_end(f
);
339 /* Update would have exceeded block addressing. */
340 if (end_blk
>= REGF_UNALLOCATED_BLOCK
)
343 /* End block exceeds size of region. */
344 if (end_blk
> bytes_to_block(region_device_sz(&f
->rdev
)))
350 static int commit_data_allocation(struct region_file
*f
, size_t data_blks
)
356 offset
= f
->slot
* sizeof(uint16_t);
357 f
->data_blocks
[0] = region_file_data_end(f
);
358 f
->data_blocks
[1] = region_file_data_begin(f
) + data_blks
;
360 if (rdev_writeat(&f
->metadata
, &f
->data_blocks
[1], offset
,
361 sizeof(f
->data_blocks
[1])) < 0)
367 static int commit_data(const struct region_file
*f
,
368 const struct update_region_file_entry
*entries
,
371 size_t offset
= block_to_bytes(region_file_data_begin(f
));
372 for (int i
= 0; i
< num_entries
; i
++) {
373 if (rdev_writeat(&f
->rdev
, entries
[i
].data
, offset
, entries
[i
].size
) < 0)
375 offset
+= entries
[i
].size
;
380 static int handle_empty(struct region_file
*f
, size_t data_blks
)
382 if (allocate_metadata(f
, data_blks
)) {
383 printk(BIOS_ERR
, "REGF metadata allocation failed: %zd data blocks %zd total blocks\n",
384 data_blks
, bytes_to_block(region_device_sz(&f
->rdev
)));
388 f
->slot
= RF_ONLY_METADATA
;
393 static int handle_need_to_empty(struct region_file
*f
)
395 if (rdev_eraseat(&f
->rdev
, 0, region_device_sz(&f
->rdev
)) < 0) {
396 printk(BIOS_ERR
, "REGF empty failed.\n");
405 static int handle_update(struct region_file
*f
, size_t blocks
,
406 const struct update_region_file_entry
*entries
,
409 if (!update_can_fit(f
, blocks
)) {
410 printk(BIOS_INFO
, "REGF update can't fit. Will empty.\n");
411 f
->slot
= RF_NEED_TO_EMPTY
;
415 if (commit_data_allocation(f
, blocks
)) {
416 printk(BIOS_ERR
, "REGF failed to commit data allocation.\n");
420 if (commit_data(f
, entries
, num_entries
)) {
421 printk(BIOS_ERR
, "REGF failed to commit data.\n");
428 int region_file_update_data_arr(struct region_file
*f
,
429 const struct update_region_file_entry
*entries
,
436 for (int i
= 0; i
< num_entries
; i
++)
437 size
+= entries
[i
].size
;
438 blocks
= bytes_to_block(ALIGN_UP(size
, REGF_BLOCK_GRANULARITY
));
441 int prev_slot
= f
->slot
;
445 ret
= handle_empty(f
, blocks
);
447 case RF_NEED_TO_EMPTY
:
448 ret
= handle_need_to_empty(f
);
454 ret
= handle_update(f
, blocks
, entries
, num_entries
);
458 /* Failing case. No more updates allowed to be attempted. */
464 /* No more state changes and data committed. */
465 if (f
->slot
> RF_ONLY_METADATA
&& prev_slot
!= f
->slot
)
472 int region_file_update_data(struct region_file
*f
, const void *buf
, size_t size
)
474 struct update_region_file_entry entry
= {
478 return region_file_update_data_arr(f
, &entry
, 1);