// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

#include "index-layout.h"

#include <linux/random.h>

#include "logger.h"
#include "memory-alloc.h"
#include "murmurhash3.h"
#include "numeric.h"
#include "time-utils.h"

#include "config.h"
#include "open-chapter.h"
#include "volume-index.h"
/*
 * The UDS layout on storage media is divided into a number of fixed-size regions, the sizes of
 * which are computed when the index is created. Every header and region begins on a 4K block
 * boundary. Save regions are further sub-divided into regions of their own.
 *
 * Each region has a kind and an instance number. Some kinds only have one instance and therefore
 * use RL_SOLE_INSTANCE (-1) as the instance number. The RL_KIND_INDEX used to use instances to
 * represent sub-indices; now, however, there is only ever one sub-index and therefore one
 * instance. The RL_KIND_VOLUME_INDEX uses instances to record which zone is being saved.
 *
 * Every region header has a type and version.
 *
 *     +-+-+---------+--------+--------+-+
 *     | | |   I N D E X  0   101, 0   | |
 *     |H|C+---------+--------+--------+S|
 *     |D|f| Volume  | Save   | Save   |e|
 *     |R|g| Region  | Region | Region |a|
 *     | | | 201, -1 | 202, 0 | 202, 1 |l|
 *     +-+-+---------+--------+--------+-+
 *
 * The header contains the encoded region layout table as well as some index configuration data.
 * The sub-index region and its subdivisions are maintained in the same table.
 *
 * There are two save regions to preserve the old state in case saving the new state is
 * incomplete. They are used in alternation. Each save region is further divided into sub-regions.
 *
 *     +-+-----+------+------+-----+-----+
 *     |H| IPM | MI   | MI   |     | OC  |
 *     |D|     | zone | zone | ... |     |
 *     |R| 301 | 302  | 302  |     | 303 |
 *     | | -1  |  0   |  1   |     | -1  |
 *     +-+-----+------+------+-----+-----+
 *
 * The header contains the encoded region layout table as well as index state data for that save.
 * Each save also has a unique nonce.
 */
#define NONCE_INFO_SIZE 32
#define MAX_SAVES 2

enum region_kind {
        RL_KIND_EMPTY = 0,
        RL_KIND_HEADER = 1,
        RL_KIND_CONFIG = 100,
        RL_KIND_INDEX = 101,
        RL_KIND_SEAL = 102,
        RL_KIND_VOLUME = 201,
        RL_KIND_SAVE = 202,
        RL_KIND_INDEX_PAGE_MAP = 301,
        RL_KIND_VOLUME_INDEX = 302,
        RL_KIND_OPEN_CHAPTER = 303,
};

/* Some region types are historical and are no longer used. */
enum region_type {
        RH_TYPE_FREE = 0, /* unused */
        RH_TYPE_SUPER = 1,
        RH_TYPE_SAVE = 2,
        RH_TYPE_CHECKPOINT = 3, /* unused */
        RH_TYPE_UNSAVED = 4,
};

#define RL_SOLE_INSTANCE 65535
/*
 * Super block version 2 is the first released version.
 *
 * Super block version 3 is the normal version used from RHEL 8.2 onwards.
 *
 * Super block versions 4 through 6 were incremental development versions and are not supported.
 *
 * Super block version 7 is used for volumes which have been reduced in size by one chapter in
 * order to make room to prepend LVM metadata to a volume originally created without lvm. This
 * allows the index to retain most of its deduplication records.
 */
#define SUPER_VERSION_MINIMUM 3
#define SUPER_VERSION_CURRENT 3
#define SUPER_VERSION_MAXIMUM 7

static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */
struct region_header {
        u64 magic;
        u64 region_blocks;
        u16 type;
        /* Currently always version 1 */
        u16 version;
        u16 region_count;
        u16 payload;
};

struct layout_region {
        u64 start_block;
        u64 block_count;
        u32 __unused;
        u16 kind;
        u16 instance;
};

struct region_table {
        size_t encoded_size;
        struct region_header header;
        struct layout_region regions[];
};

struct index_save_data {
        u64 timestamp;
        u64 nonce;
        /* Currently always version 1 */
        u32 version;
        u32 unused__;
};

struct index_state_version {
        s32 signature;
        s32 version_id;
};

static const struct index_state_version INDEX_STATE_VERSION_301 = {
        .signature = -1,
        .version_id = 301,
};

struct index_state_data301 {
        struct index_state_version version;
        u64 newest_chapter;
        u64 oldest_chapter;
        u64 last_save;
        u32 unused;
        u32 padding;
};

struct index_save_layout {
        unsigned int zone_count;
        struct layout_region index_save;
        struct layout_region header;
        struct layout_region index_page_map;
        struct layout_region free_space;
        struct layout_region volume_index_zones[MAX_ZONES];
        struct layout_region open_chapter;
        struct index_save_data save_data;
        struct index_state_data301 state_data;
};

struct sub_index_layout {
        u64 nonce;
        struct layout_region sub_index;
        struct layout_region volume;
        struct index_save_layout *saves;
};

struct super_block_data {
        u8 magic_label[MAGIC_SIZE];
        u8 nonce_info[NONCE_INFO_SIZE];
        u64 nonce;
        u32 version;
        u32 block_size;
        u16 index_count;
        u16 max_saves;
        /* Padding reflects a blank field on permanent storage */
        u8 padding[4];
        u64 open_chapter_blocks;
        u64 page_map_blocks;
        u64 volume_offset;
        u64 start_offset;
};

struct index_layout {
        struct io_factory *factory;
        size_t factory_size;
        off_t offset;
        struct super_block_data super;
        struct layout_region header;
        struct layout_region config;
        struct sub_index_layout index;
        struct layout_region seal;
        u64 total_blocks;
};

struct save_layout_sizes {
        unsigned int save_count;
        size_t block_size;
        u64 volume_blocks;
        u64 volume_index_blocks;
        u64 page_map_blocks;
        u64 open_chapter_blocks;
        u64 save_blocks;
        u64 sub_index_blocks;
        u64 total_blocks;
        u64 total_size;
};
static inline bool is_converted_super_block(struct super_block_data *super)
{
        return super->version == 7;
}
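
/* Compute the block counts and total size of every layout region for this configuration. */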
static int __must_check compute_sizes(const struct uds_configuration *config,
                                      struct save_layout_sizes *sls)
{
        int result;
        struct index_geometry *geometry = config->geometry;

        memset(sls, 0, sizeof(*sls));
        sls->save_count = MAX_SAVES;
        sls->block_size = UDS_BLOCK_SIZE;
        sls->volume_blocks = geometry->bytes_per_volume / sls->block_size;

        result = uds_compute_volume_index_save_blocks(config, sls->block_size,
                                                      &sls->volume_index_blocks);
        if (result != UDS_SUCCESS)
                return vdo_log_error_strerror(result, "cannot compute index save size");

        sls->page_map_blocks =
                DIV_ROUND_UP(uds_compute_index_page_map_save_size(geometry),
                             sls->block_size);
        sls->open_chapter_blocks =
                DIV_ROUND_UP(uds_compute_saved_open_chapter_size(geometry),
                             sls->block_size);
        sls->save_blocks =
                1 + (sls->volume_index_blocks + sls->page_map_blocks + sls->open_chapter_blocks);
        sls->sub_index_blocks = sls->volume_blocks + (sls->save_count * sls->save_blocks);
        sls->total_blocks = 3 + sls->sub_index_blocks;
        sls->total_size = sls->total_blocks * sls->block_size;

        return UDS_SUCCESS;
}
/* Create unique data using the current time and a pseudorandom number. */
static void create_unique_nonce_data(u8 *buffer)
{
        ktime_t now = current_time_ns(CLOCK_REALTIME);
        u32 rand;
        size_t offset = 0;

        get_random_bytes(&rand, sizeof(u32));
        memcpy(buffer + offset, &now, sizeof(now));
        offset += sizeof(now);
        memcpy(buffer + offset, &rand, sizeof(rand));
        offset += sizeof(rand);
        while (offset < NONCE_INFO_SIZE) {
                size_t len = min(NONCE_INFO_SIZE - offset, offset);

                memcpy(buffer + offset, buffer, len);
                offset += len;
        }
}
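
/* Hash the provided data with MurmurHash3 and fold the result down to a 64-bit value. */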
static u64 hash_stuff(u64 start, const void *data, size_t len)
{
        u32 seed = start ^ (start >> 27);
        u8 hash_buffer[16];

        murmurhash3_128(data, len, seed, hash_buffer);
        return get_unaligned_le64(hash_buffer + 4);
}

/* Generate a primary nonce from the provided data. */
static u64 generate_primary_nonce(const void *data, size_t len)
{
        return hash_stuff(0xa1b1e0fc, data, len);
}

/*
 * Deterministically generate a secondary nonce from an existing nonce and some arbitrary data by
 * hashing the original nonce and the data to produce a new nonce.
 */
static u64 generate_secondary_nonce(u64 nonce, const void *data, size_t len)
{
        return hash_stuff(nonce + 1, data, len);
}
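
/*
 * The following helpers open buffered readers and writers on a layout region, optionally shifted
 * by a block offset.
 */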
static int __must_check open_layout_reader(struct index_layout *layout,
                                           struct layout_region *lr, off_t offset,
                                           struct buffered_reader **reader_ptr)
{
        return uds_make_buffered_reader(layout->factory, lr->start_block + offset,
                                        lr->block_count, reader_ptr);
}

static int open_region_reader(struct index_layout *layout, struct layout_region *region,
                              struct buffered_reader **reader_ptr)
{
        return open_layout_reader(layout, region, -layout->super.start_offset,
                                  reader_ptr);
}

static int __must_check open_layout_writer(struct index_layout *layout,
                                           struct layout_region *lr, off_t offset,
                                           struct buffered_writer **writer_ptr)
{
        return uds_make_buffered_writer(layout->factory, lr->start_block + offset,
                                        lr->block_count, writer_ptr);
}

static int open_region_writer(struct index_layout *layout, struct layout_region *region,
                              struct buffered_writer **writer_ptr)
{
        return open_layout_writer(layout, region, -layout->super.start_offset,
                                  writer_ptr);
}
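
/* Initialize the superblock fields, including a fresh nonce, for a newly created layout. */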
static void generate_super_block_data(struct save_layout_sizes *sls,
                                      struct super_block_data *super)
{
        memset(super, 0, sizeof(*super));
        memcpy(super->magic_label, LAYOUT_MAGIC, MAGIC_SIZE);
        create_unique_nonce_data(super->nonce_info);

        super->nonce = generate_primary_nonce(super->nonce_info,
                                              sizeof(super->nonce_info));
        super->version = SUPER_VERSION_CURRENT;
        super->block_size = sls->block_size;
        super->index_count = 1;
        super->max_saves = sls->save_count;
        super->open_chapter_blocks = sls->open_chapter_blocks;
        super->page_map_blocks = sls->page_map_blocks;
        super->volume_offset = 0;
        super->start_offset = 0;
}
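
/* Derive the sub-index nonce from the superblock nonce and the sub-index start block. */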
static void define_sub_index_nonce(struct index_layout *layout)
{
        struct sub_index_nonce_data {
                u64 offset;
                u16 index_id;
        };
        struct sub_index_layout *sil = &layout->index;
        u64 primary_nonce = layout->super.nonce;
        u8 buffer[sizeof(struct sub_index_nonce_data)] = { 0 };
        size_t offset = 0;

        encode_u64_le(buffer, &offset, sil->sub_index.start_block);
        encode_u16_le(buffer, &offset, 0);
        sil->nonce = generate_secondary_nonce(primary_nonce, buffer, sizeof(buffer));
        if (sil->nonce == 0) {
                sil->nonce = generate_secondary_nonce(~primary_nonce + 1, buffer,
                                                      sizeof(buffer));
        }
}
static void setup_sub_index(struct index_layout *layout, u64 start_block,
                            struct save_layout_sizes *sls)
{
        struct sub_index_layout *sil = &layout->index;
        u64 next_block = start_block;
        unsigned int i;

        sil->sub_index = (struct layout_region) {
                .start_block = start_block,
                .block_count = sls->sub_index_blocks,
                .kind = RL_KIND_INDEX,
                .instance = 0,
        };

        sil->volume = (struct layout_region) {
                .start_block = next_block,
                .block_count = sls->volume_blocks,
                .kind = RL_KIND_VOLUME,
                .instance = RL_SOLE_INSTANCE,
        };

        next_block += sls->volume_blocks;

        for (i = 0; i < sls->save_count; i++) {
                sil->saves[i].index_save = (struct layout_region) {
                        .start_block = next_block,
                        .block_count = sls->save_blocks,
                        .kind = RL_KIND_SAVE,
                        .instance = i,
                };

                next_block += sls->save_blocks;
        }

        define_sub_index_nonce(layout);
}
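
/* Lay out the header, config, sub-index, and seal regions for a new index. */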
static void initialize_layout(struct index_layout *layout, struct save_layout_sizes *sls)
{
        u64 next_block = layout->offset / sls->block_size;

        layout->total_blocks = sls->total_blocks;
        generate_super_block_data(sls, &layout->super);
        layout->header = (struct layout_region) {
                .start_block = next_block++,
                .block_count = 1,
                .kind = RL_KIND_HEADER,
                .instance = RL_SOLE_INSTANCE,
        };

        layout->config = (struct layout_region) {
                .start_block = next_block++,
                .block_count = 1,
                .kind = RL_KIND_CONFIG,
                .instance = RL_SOLE_INSTANCE,
        };

        setup_sub_index(layout, next_block, sls);
        next_block += sls->sub_index_blocks;

        layout->seal = (struct layout_region) {
                .start_block = next_block,
                .block_count = 1,
                .kind = RL_KIND_SEAL,
                .instance = RL_SOLE_INSTANCE,
        };
}
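
/* Build the region table describing a single index save slot. */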
static int __must_check make_index_save_region_table(struct index_save_layout *isl,
                                                     struct region_table **table_ptr)
{
        int result;
        unsigned int z;
        struct region_table *table;
        struct layout_region *lr;
        u16 region_count;
        size_t payload;
        size_t type;

        if (isl->zone_count > 0) {
                /*
                 * Normal save regions: header, page map, volume index zones,
                 * open chapter, and possibly free space.
                 */
                region_count = 3 + isl->zone_count;
                if (isl->free_space.block_count > 0)
                        region_count++;

                payload = sizeof(isl->save_data) + sizeof(isl->state_data);
                type = RH_TYPE_SAVE;
        } else {
                /* Empty save regions: header, page map, free space. */
                region_count = 3;
                payload = sizeof(isl->save_data);
                type = RH_TYPE_UNSAVED;
        }

        result = vdo_allocate_extended(struct region_table, region_count,
                                       struct layout_region,
                                       "layout region table for ISL", &table);
        if (result != VDO_SUCCESS)
                return result;

        lr = &table->regions[0];
        *lr++ = isl->header;
        *lr++ = isl->index_page_map;
        for (z = 0; z < isl->zone_count; z++)
                *lr++ = isl->volume_index_zones[z];

        if (isl->zone_count > 0)
                *lr++ = isl->open_chapter;

        if (isl->free_space.block_count > 0)
                *lr++ = isl->free_space;

        table->header = (struct region_header) {
                .magic = REGION_MAGIC,
                .region_blocks = isl->index_save.block_count,
                .type = type,
                .version = 1,
                .region_count = region_count,
                .payload = payload,
        };

        table->encoded_size = (sizeof(struct region_header) + payload +
                               region_count * sizeof(struct layout_region));
        *table_ptr = table;
        return UDS_SUCCESS;
}
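
/* Encode a region table header and its regions in the little-endian on-disk format. */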
static void encode_region_table(u8 *buffer, size_t *offset, struct region_table *table)
{
        unsigned int i;

        encode_u64_le(buffer, offset, REGION_MAGIC);
        encode_u64_le(buffer, offset, table->header.region_blocks);
        encode_u16_le(buffer, offset, table->header.type);
        encode_u16_le(buffer, offset, table->header.version);
        encode_u16_le(buffer, offset, table->header.region_count);
        encode_u16_le(buffer, offset, table->header.payload);

        for (i = 0; i < table->header.region_count; i++) {
                encode_u64_le(buffer, offset, table->regions[i].start_block);
                encode_u64_le(buffer, offset, table->regions[i].block_count);
                encode_u32_le(buffer, offset, 0);
                encode_u16_le(buffer, offset, table->regions[i].kind);
                encode_u16_le(buffer, offset, table->regions[i].instance);
        }
}
static int __must_check write_index_save_header(struct index_save_layout *isl,
                                                struct region_table *table,
                                                struct buffered_writer *writer)
{
        int result;
        u8 *buffer;
        size_t offset = 0;

        result = vdo_allocate(table->encoded_size, u8, "index save data", &buffer);
        if (result != VDO_SUCCESS)
                return result;

        encode_region_table(buffer, &offset, table);
        encode_u64_le(buffer, &offset, isl->save_data.timestamp);
        encode_u64_le(buffer, &offset, isl->save_data.nonce);
        encode_u32_le(buffer, &offset, isl->save_data.version);
        encode_u32_le(buffer, &offset, 0);
        if (isl->zone_count > 0) {
                encode_u32_le(buffer, &offset, INDEX_STATE_VERSION_301.signature);
                encode_u32_le(buffer, &offset, INDEX_STATE_VERSION_301.version_id);
                encode_u64_le(buffer, &offset, isl->state_data.newest_chapter);
                encode_u64_le(buffer, &offset, isl->state_data.oldest_chapter);
                encode_u64_le(buffer, &offset, isl->state_data.last_save);
                encode_u64_le(buffer, &offset, 0);
        }

        result = uds_write_to_buffered_writer(writer, buffer, offset);
        vdo_free(buffer);
        if (result != UDS_SUCCESS)
                return result;

        return uds_flush_buffered_writer(writer);
}
static int write_index_save_layout(struct index_layout *layout,
                                   struct index_save_layout *isl)
{
        int result;
        struct region_table *table;
        struct buffered_writer *writer;

        result = make_index_save_region_table(isl, &table);
        if (result != UDS_SUCCESS)
                return result;

        result = open_region_writer(layout, &isl->header, &writer);
        if (result != UDS_SUCCESS) {
                vdo_free(table);
                return result;
        }

        result = write_index_save_header(isl, table, writer);
        vdo_free(table);
        uds_free_buffered_writer(writer);

        return result;
}
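
/* Reset a save slot to the empty state: a header, an index page map, and free space. */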
static void reset_index_save_layout(struct index_save_layout *isl, u64 page_map_blocks)
{
        u64 free_blocks;
        u64 next_block = isl->index_save.start_block;

        isl->zone_count = 0;
        memset(&isl->save_data, 0, sizeof(isl->save_data));

        isl->header = (struct layout_region) {
                .start_block = next_block++,
                .block_count = 1,
                .kind = RL_KIND_HEADER,
                .instance = RL_SOLE_INSTANCE,
        };

        isl->index_page_map = (struct layout_region) {
                .start_block = next_block,
                .block_count = page_map_blocks,
                .kind = RL_KIND_INDEX_PAGE_MAP,
                .instance = RL_SOLE_INSTANCE,
        };

        next_block += page_map_blocks;

        free_blocks = isl->index_save.block_count - page_map_blocks - 1;
        isl->free_space = (struct layout_region) {
                .start_block = next_block,
                .block_count = free_blocks,
                .kind = RL_KIND_EMPTY,
                .instance = RL_SOLE_INSTANCE,
        };
}
static int __must_check invalidate_old_save(struct index_layout *layout,
                                            struct index_save_layout *isl)
{
        reset_index_save_layout(isl, layout->super.page_map_blocks);
        return write_index_save_layout(layout, isl);
}
static int discard_index_state_data(struct index_layout *layout)
{
        int result;
        int saved_result = UDS_SUCCESS;
        unsigned int i;

        for (i = 0; i < layout->super.max_saves; i++) {
                result = invalidate_old_save(layout, &layout->index.saves[i]);
                if (result != UDS_SUCCESS)
                        saved_result = result;
        }

        if (saved_result != UDS_SUCCESS) {
                return vdo_log_error_strerror(saved_result,
                                              "%s: cannot destroy all index saves",
                                              __func__);
        }

        return UDS_SUCCESS;
}
static int __must_check make_layout_region_table(struct index_layout *layout,
                                                 struct region_table **table_ptr)
{
        int result;
        unsigned int i;
        /* Regions: header, config, index, volume, saves, seal */
        u16 region_count = 5 + layout->super.max_saves;
        u16 payload;
        struct region_table *table;
        struct layout_region *lr;

        result = vdo_allocate_extended(struct region_table, region_count,
                                       struct layout_region, "layout region table",
                                       &table);
        if (result != VDO_SUCCESS)
                return result;

        lr = &table->regions[0];
        *lr++ = layout->header;
        *lr++ = layout->config;
        *lr++ = layout->index.sub_index;
        *lr++ = layout->index.volume;

        for (i = 0; i < layout->super.max_saves; i++)
                *lr++ = layout->index.saves[i].index_save;

        *lr++ = layout->seal;

        if (is_converted_super_block(&layout->super)) {
                payload = sizeof(struct super_block_data);
        } else {
                payload = (sizeof(struct super_block_data) -
                           sizeof(layout->super.volume_offset) -
                           sizeof(layout->super.start_offset));
        }

        table->header = (struct region_header) {
                .magic = REGION_MAGIC,
                .region_blocks = layout->total_blocks,
                .type = RH_TYPE_SUPER,
                .version = 1,
                .region_count = region_count,
                .payload = payload,
        };

        table->encoded_size = (sizeof(struct region_header) + payload +
                               region_count * sizeof(struct layout_region));
        *table_ptr = table;
        return UDS_SUCCESS;
}
static int __must_check write_layout_header(struct index_layout *layout,
                                            struct region_table *table,
                                            struct buffered_writer *writer)
{
        int result;
        u8 *buffer;
        size_t offset = 0;

        result = vdo_allocate(table->encoded_size, u8, "layout data", &buffer);
        if (result != VDO_SUCCESS)
                return result;

        encode_region_table(buffer, &offset, table);
        memcpy(buffer + offset, &layout->super.magic_label, MAGIC_SIZE);
        offset += MAGIC_SIZE;
        memcpy(buffer + offset, &layout->super.nonce_info, NONCE_INFO_SIZE);
        offset += NONCE_INFO_SIZE;
        encode_u64_le(buffer, &offset, layout->super.nonce);
        encode_u32_le(buffer, &offset, layout->super.version);
        encode_u32_le(buffer, &offset, layout->super.block_size);
        encode_u16_le(buffer, &offset, layout->super.index_count);
        encode_u16_le(buffer, &offset, layout->super.max_saves);
        encode_u32_le(buffer, &offset, 0);
        encode_u64_le(buffer, &offset, layout->super.open_chapter_blocks);
        encode_u64_le(buffer, &offset, layout->super.page_map_blocks);

        if (is_converted_super_block(&layout->super)) {
                encode_u64_le(buffer, &offset, layout->super.volume_offset);
                encode_u64_le(buffer, &offset, layout->super.start_offset);
        }

        result = uds_write_to_buffered_writer(writer, buffer, offset);
        vdo_free(buffer);
        if (result != UDS_SUCCESS)
                return result;

        return uds_flush_buffered_writer(writer);
}
static int __must_check write_uds_index_config(struct index_layout *layout,
                                               struct uds_configuration *config,
                                               off_t offset)
{
        int result;
        struct buffered_writer *writer = NULL;

        result = open_layout_writer(layout, &layout->config, offset, &writer);
        if (result != UDS_SUCCESS)
                return vdo_log_error_strerror(result, "failed to open config region");

        result = uds_write_config_contents(writer, config, layout->super.version);
        if (result != UDS_SUCCESS) {
                uds_free_buffered_writer(writer);
                return vdo_log_error_strerror(result, "failed to write config region");
        }

        result = uds_flush_buffered_writer(writer);
        if (result != UDS_SUCCESS) {
                uds_free_buffered_writer(writer);
                return vdo_log_error_strerror(result, "cannot flush config writer");
        }

        uds_free_buffered_writer(writer);
        return UDS_SUCCESS;
}
static int __must_check save_layout(struct index_layout *layout, off_t offset)
{
        int result;
        struct buffered_writer *writer = NULL;
        struct region_table *table;

        result = make_layout_region_table(layout, &table);
        if (result != UDS_SUCCESS)
                return result;

        result = open_layout_writer(layout, &layout->header, offset, &writer);
        if (result != UDS_SUCCESS) {
                vdo_free(table);
                return result;
        }

        result = write_layout_header(layout, table, writer);
        vdo_free(table);
        uds_free_buffered_writer(writer);

        return result;
}
static int create_index_layout(struct index_layout *layout, struct uds_configuration *config)
{
        int result;
        struct save_layout_sizes sizes;

        result = compute_sizes(config, &sizes);
        if (result != UDS_SUCCESS)
                return result;

        result = vdo_allocate(sizes.save_count, struct index_save_layout, __func__,
                              &layout->index.saves);
        if (result != VDO_SUCCESS)
                return result;

        initialize_layout(layout, &sizes);

        result = discard_index_state_data(layout);
        if (result != UDS_SUCCESS)
                return result;

        result = write_uds_index_config(layout, config, 0);
        if (result != UDS_SUCCESS)
                return result;

        return save_layout(layout, 0);
}
static u64 generate_index_save_nonce(u64 volume_nonce, struct index_save_layout *isl)
{
        struct save_nonce_data {
                struct index_save_data data;
                u64 offset;
        } nonce_data;
        u8 buffer[sizeof(nonce_data)];
        size_t offset = 0;

        encode_u64_le(buffer, &offset, isl->save_data.timestamp);
        encode_u64_le(buffer, &offset, 0);
        encode_u32_le(buffer, &offset, isl->save_data.version);
        encode_u32_le(buffer, &offset, 0U);
        encode_u64_le(buffer, &offset, isl->index_save.start_block);
        VDO_ASSERT_LOG_ONLY(offset == sizeof(nonce_data),
                            "%zu bytes encoded of %zu expected",
                            offset, sizeof(nonce_data));
        return generate_secondary_nonce(volume_nonce, buffer, sizeof(buffer));
}
static u64 validate_index_save_layout(struct index_save_layout *isl, u64 volume_nonce)
{
        if ((isl->zone_count == 0) || (isl->save_data.timestamp == 0))
                return 0;

        if (isl->save_data.nonce != generate_index_save_nonce(volume_nonce, isl))
                return 0;

        return isl->save_data.timestamp;
}
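
/* Find the save slot with the most recent valid save, if there is one. */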
static int find_latest_uds_index_save_slot(struct index_layout *layout,
                                           struct index_save_layout **isl_ptr)
{
        struct index_save_layout *latest = NULL;
        struct index_save_layout *isl;
        unsigned int i;
        u64 save_time = 0;
        u64 latest_time = 0;

        for (i = 0; i < layout->super.max_saves; i++) {
                isl = &layout->index.saves[i];
                save_time = validate_index_save_layout(isl, layout->index.nonce);
                if (save_time > latest_time) {
                        latest = isl;
                        latest_time = save_time;
                }
        }

        if (latest == NULL) {
                vdo_log_error("No valid index save found");
                return UDS_INDEX_NOT_SAVED_CLEANLY;
        }

        *isl_ptr = latest;
        return UDS_SUCCESS;
}
int uds_discard_open_chapter(struct index_layout *layout)
{
        int result;
        struct index_save_layout *isl;
        struct buffered_writer *writer;

        result = find_latest_uds_index_save_slot(layout, &isl);
        if (result != UDS_SUCCESS)
                return result;

        result = open_region_writer(layout, &isl->open_chapter, &writer);
        if (result != UDS_SUCCESS)
                return result;

        result = uds_write_to_buffered_writer(writer, NULL, UDS_BLOCK_SIZE);
        if (result != UDS_SUCCESS) {
                uds_free_buffered_writer(writer);
                return result;
        }

        result = uds_flush_buffered_writer(writer);
        uds_free_buffered_writer(writer);
        return result;
}
int uds_load_index_state(struct index_layout *layout, struct uds_index *index)
{
        int result;
        unsigned int zone;
        struct index_save_layout *isl;
        struct buffered_reader *readers[MAX_ZONES];

        result = find_latest_uds_index_save_slot(layout, &isl);
        if (result != UDS_SUCCESS)
                return result;

        index->newest_virtual_chapter = isl->state_data.newest_chapter;
        index->oldest_virtual_chapter = isl->state_data.oldest_chapter;
        index->last_save = isl->state_data.last_save;

        result = open_region_reader(layout, &isl->open_chapter, &readers[0]);
        if (result != UDS_SUCCESS)
                return result;

        result = uds_load_open_chapter(index, readers[0]);
        uds_free_buffered_reader(readers[0]);
        if (result != UDS_SUCCESS)
                return result;

        for (zone = 0; zone < isl->zone_count; zone++) {
                result = open_region_reader(layout, &isl->volume_index_zones[zone],
                                            &readers[zone]);
                if (result != UDS_SUCCESS) {
                        for (; zone > 0; zone--)
                                uds_free_buffered_reader(readers[zone - 1]);

                        return result;
                }
        }

        result = uds_load_volume_index(index->volume_index, readers, isl->zone_count);
        for (zone = 0; zone < isl->zone_count; zone++)
                uds_free_buffered_reader(readers[zone]);
        if (result != UDS_SUCCESS)
                return result;

        result = open_region_reader(layout, &isl->index_page_map, &readers[0]);
        if (result != UDS_SUCCESS)
                return result;

        result = uds_read_index_page_map(index->volume->index_page_map, readers[0]);
        uds_free_buffered_reader(readers[0]);
        return result;
}
static struct index_save_layout *select_oldest_index_save_layout(struct index_layout *layout)
{
        struct index_save_layout *oldest = NULL;
        struct index_save_layout *isl;
        unsigned int i;
        u64 save_time = 0;
        u64 oldest_time = 0;

        for (i = 0; i < layout->super.max_saves; i++) {
                isl = &layout->index.saves[i];
                save_time = validate_index_save_layout(isl, layout->index.nonce);
                if (oldest == NULL || save_time < oldest_time) {
                        oldest = isl;
                        oldest_time = save_time;
                }
        }

        return oldest;
}
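
/* Lay out the sub-regions of a save slot for a new save with the given zone count. */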
static void instantiate_index_save_layout(struct index_save_layout *isl,
                                          struct super_block_data *super,
                                          u64 volume_nonce, unsigned int zone_count)
{
        unsigned int z;
        u64 next_block;
        u64 free_blocks;
        u64 volume_index_blocks;

        isl->zone_count = zone_count;
        memset(&isl->save_data, 0, sizeof(isl->save_data));
        isl->save_data.timestamp = ktime_to_ms(current_time_ns(CLOCK_REALTIME));
        isl->save_data.version = 1;
        isl->save_data.nonce = generate_index_save_nonce(volume_nonce, isl);

        next_block = isl->index_save.start_block;
        isl->header = (struct layout_region) {
                .start_block = next_block++,
                .block_count = 1,
                .kind = RL_KIND_HEADER,
                .instance = RL_SOLE_INSTANCE,
        };

        isl->index_page_map = (struct layout_region) {
                .start_block = next_block,
                .block_count = super->page_map_blocks,
                .kind = RL_KIND_INDEX_PAGE_MAP,
                .instance = RL_SOLE_INSTANCE,
        };
        next_block += super->page_map_blocks;

        free_blocks = (isl->index_save.block_count - 1 -
                       super->page_map_blocks -
                       super->open_chapter_blocks);
        volume_index_blocks = free_blocks / isl->zone_count;
        for (z = 0; z < isl->zone_count; z++) {
                isl->volume_index_zones[z] = (struct layout_region) {
                        .start_block = next_block,
                        .block_count = volume_index_blocks,
                        .kind = RL_KIND_VOLUME_INDEX,
                        .instance = z,
                };

                next_block += volume_index_blocks;
                free_blocks -= volume_index_blocks;
        }

        isl->open_chapter = (struct layout_region) {
                .start_block = next_block,
                .block_count = super->open_chapter_blocks,
                .kind = RL_KIND_OPEN_CHAPTER,
                .instance = RL_SOLE_INSTANCE,
        };

        next_block += super->open_chapter_blocks;

        isl->free_space = (struct layout_region) {
                .start_block = next_block,
                .block_count = free_blocks,
                .kind = RL_KIND_EMPTY,
                .instance = RL_SOLE_INSTANCE,
        };
}
static int setup_uds_index_save_slot(struct index_layout *layout,
                                     unsigned int zone_count,
                                     struct index_save_layout **isl_ptr)
{
        int result;
        struct index_save_layout *isl;

        isl = select_oldest_index_save_layout(layout);
        result = invalidate_old_save(layout, isl);
        if (result != UDS_SUCCESS)
                return result;

        instantiate_index_save_layout(isl, &layout->super, layout->index.nonce,
                                      zone_count);
        *isl_ptr = isl;
        return UDS_SUCCESS;
}

static void cancel_uds_index_save(struct index_save_layout *isl)
{
        memset(&isl->save_data, 0, sizeof(isl->save_data));
        memset(&isl->state_data, 0, sizeof(isl->state_data));
        isl->zone_count = 0;
}
int uds_save_index_state(struct index_layout *layout, struct uds_index *index)
{
        int result;
        unsigned int zone;
        struct index_save_layout *isl;
        struct buffered_writer *writers[MAX_ZONES];

        result = setup_uds_index_save_slot(layout, index->zone_count, &isl);
        if (result != UDS_SUCCESS)
                return result;

        isl->state_data = (struct index_state_data301) {
                .newest_chapter = index->newest_virtual_chapter,
                .oldest_chapter = index->oldest_virtual_chapter,
                .last_save = index->last_save,
        };

        result = open_region_writer(layout, &isl->open_chapter, &writers[0]);
        if (result != UDS_SUCCESS) {
                cancel_uds_index_save(isl);
                return result;
        }

        result = uds_save_open_chapter(index, writers[0]);
        uds_free_buffered_writer(writers[0]);
        if (result != UDS_SUCCESS) {
                cancel_uds_index_save(isl);
                return result;
        }

        for (zone = 0; zone < index->zone_count; zone++) {
                result = open_region_writer(layout, &isl->volume_index_zones[zone],
                                            &writers[zone]);
                if (result != UDS_SUCCESS) {
                        for (; zone > 0; zone--)
                                uds_free_buffered_writer(writers[zone - 1]);

                        cancel_uds_index_save(isl);
                        return result;
                }
        }

        result = uds_save_volume_index(index->volume_index, writers, index->zone_count);
        for (zone = 0; zone < index->zone_count; zone++)
                uds_free_buffered_writer(writers[zone]);
        if (result != UDS_SUCCESS) {
                cancel_uds_index_save(isl);
                return result;
        }

        result = open_region_writer(layout, &isl->index_page_map, &writers[0]);
        if (result != UDS_SUCCESS) {
                cancel_uds_index_save(isl);
                return result;
        }

        result = uds_write_index_page_map(index->volume->index_page_map, writers[0]);
        uds_free_buffered_writer(writers[0]);
        if (result != UDS_SUCCESS) {
                cancel_uds_index_save(isl);
                return result;
        }

        return write_index_save_layout(layout, isl);
}
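
/* Read a region table from storage and decode it into a newly allocated structure. */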
static int __must_check load_region_table(struct buffered_reader *reader,
                                          struct region_table **table_ptr)
{
        int result;
        unsigned int i;
        struct region_header header;
        struct region_table *table;
        u8 buffer[sizeof(struct region_header)];
        size_t offset = 0;

        result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer));
        if (result != UDS_SUCCESS)
                return vdo_log_error_strerror(result, "cannot read region table header");

        decode_u64_le(buffer, &offset, &header.magic);
        decode_u64_le(buffer, &offset, &header.region_blocks);
        decode_u16_le(buffer, &offset, &header.type);
        decode_u16_le(buffer, &offset, &header.version);
        decode_u16_le(buffer, &offset, &header.region_count);
        decode_u16_le(buffer, &offset, &header.payload);

        if (header.magic != REGION_MAGIC)
                return UDS_NO_INDEX;

        if (header.version != 1) {
                return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
                                              "unknown region table version %hu",
                                              header.version);
        }

        result = vdo_allocate_extended(struct region_table, header.region_count,
                                       struct layout_region,
                                       "single file layout region table", &table);
        if (result != VDO_SUCCESS)
                return result;

        table->header = header;
        for (i = 0; i < header.region_count; i++) {
                u8 region_buffer[sizeof(struct layout_region)];

                offset = 0;
                result = uds_read_from_buffered_reader(reader, region_buffer,
                                                       sizeof(region_buffer));
                if (result != UDS_SUCCESS) {
                        vdo_free(table);
                        return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                                      "cannot read region table layouts");
                }

                decode_u64_le(region_buffer, &offset, &table->regions[i].start_block);
                decode_u64_le(region_buffer, &offset, &table->regions[i].block_count);
                offset += sizeof(u32);
                decode_u16_le(region_buffer, &offset, &table->regions[i].kind);
                decode_u16_le(region_buffer, &offset, &table->regions[i].instance);
        }

        *table_ptr = table;
        return UDS_SUCCESS;
}
static int __must_check read_super_block_data(struct buffered_reader *reader,
                                              struct index_layout *layout,
                                              size_t saved_size)
{
        int result;
        struct super_block_data *super = &layout->super;
        u8 *buffer;
        size_t offset = 0;

        result = vdo_allocate(saved_size, u8, "super block data", &buffer);
        if (result != VDO_SUCCESS)
                return result;

        result = uds_read_from_buffered_reader(reader, buffer, saved_size);
        if (result != UDS_SUCCESS) {
                vdo_free(buffer);
                return vdo_log_error_strerror(result, "cannot read region table header");
        }

        memcpy(&super->magic_label, buffer, MAGIC_SIZE);
        offset += MAGIC_SIZE;
        memcpy(&super->nonce_info, buffer + offset, NONCE_INFO_SIZE);
        offset += NONCE_INFO_SIZE;
        decode_u64_le(buffer, &offset, &super->nonce);
        decode_u32_le(buffer, &offset, &super->version);
        decode_u32_le(buffer, &offset, &super->block_size);
        decode_u16_le(buffer, &offset, &super->index_count);
        decode_u16_le(buffer, &offset, &super->max_saves);
        offset += sizeof(u32);
        decode_u64_le(buffer, &offset, &super->open_chapter_blocks);
        decode_u64_le(buffer, &offset, &super->page_map_blocks);

        if (is_converted_super_block(super)) {
                decode_u64_le(buffer, &offset, &super->volume_offset);
                decode_u64_le(buffer, &offset, &super->start_offset);
        } else {
                super->volume_offset = 0;
                super->start_offset = 0;
        }

        vdo_free(buffer);

        if (memcmp(super->magic_label, LAYOUT_MAGIC, MAGIC_SIZE) != 0)
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "unknown superblock magic label");

        if ((super->version < SUPER_VERSION_MINIMUM) ||
            (super->version == 4) || (super->version == 5) || (super->version == 6) ||
            (super->version > SUPER_VERSION_MAXIMUM)) {
                return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
                                              "unknown superblock version number %u",
                                              super->version);
        }

        if (super->volume_offset < super->start_offset) {
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "inconsistent offsets (start %llu, volume %llu)",
                                              (unsigned long long) super->start_offset,
                                              (unsigned long long) super->volume_offset);
        }

        /* Sub-indexes are no longer used but the layout retains this field. */
        if (super->index_count != 1) {
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "invalid subindex count %u",
                                              super->index_count);
        }

        if (generate_primary_nonce(super->nonce_info, sizeof(super->nonce_info)) != super->nonce) {
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "inconsistent superblock nonce");
        }

        return UDS_SUCCESS;
}
static int __must_check verify_region(struct layout_region *lr, u64 start_block,
                                      enum region_kind kind, unsigned int instance)
{
        if (lr->start_block != start_block)
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "incorrect layout region offset");

        if (lr->kind != kind)
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "incorrect layout region kind");

        if (lr->instance != instance) {
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "incorrect layout region instance");
        }

        return UDS_SUCCESS;
}
static int __must_check verify_sub_index(struct index_layout *layout, u64 start_block,
                                         struct region_table *table)
{
        int result;
        unsigned int i;
        struct sub_index_layout *sil = &layout->index;
        u64 next_block = start_block;

        sil->sub_index = table->regions[2];
        result = verify_region(&sil->sub_index, next_block, RL_KIND_INDEX, 0);
        if (result != UDS_SUCCESS)
                return result;

        define_sub_index_nonce(layout);

        sil->volume = table->regions[3];
        result = verify_region(&sil->volume, next_block, RL_KIND_VOLUME,
                               RL_SOLE_INSTANCE);
        if (result != UDS_SUCCESS)
                return result;

        next_block += sil->volume.block_count + layout->super.volume_offset;

        for (i = 0; i < layout->super.max_saves; i++) {
                sil->saves[i].index_save = table->regions[i + 4];
                result = verify_region(&sil->saves[i].index_save, next_block,
                                       RL_KIND_SAVE, i);
                if (result != UDS_SUCCESS)
                        return result;

                next_block += sil->saves[i].index_save.block_count;
        }

        next_block -= layout->super.volume_offset;
        if (next_block != start_block + sil->sub_index.block_count) {
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "sub index region does not span all saves");
        }

        return UDS_SUCCESS;
}
static int __must_check reconstitute_layout(struct index_layout *layout,
                                            struct region_table *table, u64 first_block)
{
        int result;
        u64 next_block = first_block;

        result = vdo_allocate(layout->super.max_saves, struct index_save_layout,
                              __func__, &layout->index.saves);
        if (result != VDO_SUCCESS)
                return result;

        layout->total_blocks = table->header.region_blocks;

        layout->header = table->regions[0];
        result = verify_region(&layout->header, next_block++, RL_KIND_HEADER,
                               RL_SOLE_INSTANCE);
        if (result != UDS_SUCCESS)
                return result;

        layout->config = table->regions[1];
        result = verify_region(&layout->config, next_block++, RL_KIND_CONFIG,
                               RL_SOLE_INSTANCE);
        if (result != UDS_SUCCESS)
                return result;

        result = verify_sub_index(layout, next_block, table);
        if (result != UDS_SUCCESS)
                return result;

        next_block += layout->index.sub_index.block_count;

        layout->seal = table->regions[table->header.region_count - 1];
        result = verify_region(&layout->seal, next_block + layout->super.volume_offset,
                               RL_KIND_SEAL, RL_SOLE_INSTANCE);
        if (result != UDS_SUCCESS)
                return result;

        if (++next_block != (first_block + layout->total_blocks)) {
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "layout table does not span total blocks");
        }

        return UDS_SUCCESS;
}
static int __must_check load_super_block(struct index_layout *layout, size_t block_size,
                                         u64 first_block, struct buffered_reader *reader)
{
        int result;
        struct region_table *table = NULL;
        struct super_block_data *super = &layout->super;

        result = load_region_table(reader, &table);
        if (result != UDS_SUCCESS)
                return result;

        if (table->header.type != RH_TYPE_SUPER) {
                vdo_free(table);
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "not a superblock region table");
        }

        result = read_super_block_data(reader, layout, table->header.payload);
        if (result != UDS_SUCCESS) {
                vdo_free(table);
                return vdo_log_error_strerror(result, "unknown superblock format");
        }

        if (super->block_size != block_size) {
                vdo_free(table);
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "superblock saved block_size %u differs from supplied block_size %zu",
                                              super->block_size, block_size);
        }

        first_block -= (super->volume_offset - super->start_offset);
        result = reconstitute_layout(layout, table, first_block);
        vdo_free(table);
        return result;
}
static int __must_check read_index_save_data(struct buffered_reader *reader,
                                             struct index_save_layout *isl,
                                             size_t saved_size)
{
        int result;
        struct index_state_version file_version;
        u8 buffer[sizeof(struct index_save_data) + sizeof(struct index_state_data301)];
        size_t offset = 0;

        if (saved_size != sizeof(buffer)) {
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "unexpected index save data size %zu",
                                              saved_size);
        }

        result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer));
        if (result != UDS_SUCCESS)
                return vdo_log_error_strerror(result, "cannot read index save data");

        decode_u64_le(buffer, &offset, &isl->save_data.timestamp);
        decode_u64_le(buffer, &offset, &isl->save_data.nonce);
        decode_u32_le(buffer, &offset, &isl->save_data.version);
        offset += sizeof(u32);

        if (isl->save_data.version > 1) {
                return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
                                              "unknown index save version number %u",
                                              isl->save_data.version);
        }

        decode_s32_le(buffer, &offset, &file_version.signature);
        decode_s32_le(buffer, &offset, &file_version.version_id);

        if ((file_version.signature != INDEX_STATE_VERSION_301.signature) ||
            (file_version.version_id != INDEX_STATE_VERSION_301.version_id)) {
                return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
                                              "index state version %d,%d is unsupported",
                                              file_version.signature,
                                              file_version.version_id);
        }

        decode_u64_le(buffer, &offset, &isl->state_data.newest_chapter);
        decode_u64_le(buffer, &offset, &isl->state_data.oldest_chapter);
        decode_u64_le(buffer, &offset, &isl->state_data.last_save);
        /* Skip past some historical fields that are now unused. */
        offset += sizeof(u32) + sizeof(u32);

        return UDS_SUCCESS;
}
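
/* Rebuild and verify the in-memory layout of a save slot from its loaded region table. */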
static int __must_check reconstruct_index_save(struct index_save_layout *isl,
                                               struct region_table *table)
{
        int result;
        unsigned int z;
        struct layout_region *last_region;
        u64 next_block = isl->index_save.start_block;
        u64 last_block = next_block + isl->index_save.block_count;

        isl->zone_count = table->header.region_count - 3;

        last_region = &table->regions[table->header.region_count - 1];
        if (last_region->kind == RL_KIND_EMPTY) {
                isl->free_space = *last_region;
                isl->zone_count--;
        } else {
                isl->free_space = (struct layout_region) {
                        .start_block = last_block,
                        .block_count = 0,
                        .kind = RL_KIND_EMPTY,
                        .instance = RL_SOLE_INSTANCE,
                };
        }

        isl->header = table->regions[0];
        result = verify_region(&isl->header, next_block++, RL_KIND_HEADER,
                               RL_SOLE_INSTANCE);
        if (result != UDS_SUCCESS)
                return result;

        isl->index_page_map = table->regions[1];
        result = verify_region(&isl->index_page_map, next_block, RL_KIND_INDEX_PAGE_MAP,
                               RL_SOLE_INSTANCE);
        if (result != UDS_SUCCESS)
                return result;

        next_block += isl->index_page_map.block_count;

        for (z = 0; z < isl->zone_count; z++) {
                isl->volume_index_zones[z] = table->regions[z + 2];
                result = verify_region(&isl->volume_index_zones[z], next_block,
                                       RL_KIND_VOLUME_INDEX, z);
                if (result != UDS_SUCCESS)
                        return result;

                next_block += isl->volume_index_zones[z].block_count;
        }

        isl->open_chapter = table->regions[isl->zone_count + 2];
        result = verify_region(&isl->open_chapter, next_block, RL_KIND_OPEN_CHAPTER,
                               RL_SOLE_INSTANCE);
        if (result != UDS_SUCCESS)
                return result;

        next_block += isl->open_chapter.block_count;

        result = verify_region(&isl->free_space, next_block, RL_KIND_EMPTY,
                               RL_SOLE_INSTANCE);
        if (result != UDS_SUCCESS)
                return result;

        next_block += isl->free_space.block_count;
        if (next_block != last_block) {
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "index save layout table incomplete");
        }

        return UDS_SUCCESS;
}
static int __must_check load_index_save(struct index_save_layout *isl,
                                        struct buffered_reader *reader,
                                        unsigned int instance)
{
        int result;
        struct region_table *table = NULL;

        result = load_region_table(reader, &table);
        if (result != UDS_SUCCESS) {
                return vdo_log_error_strerror(result, "cannot read index save %u header",
                                              instance);
        }

        if (table->header.region_blocks != isl->index_save.block_count) {
                u64 region_blocks = table->header.region_blocks;

                vdo_free(table);
                return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                              "unexpected index save %u region block count %llu",
                                              instance,
                                              (unsigned long long) region_blocks);
        }

        if (table->header.type == RH_TYPE_UNSAVED) {
                vdo_free(table);
                reset_index_save_layout(isl, 0);
                return UDS_SUCCESS;
        }

        if (table->header.type != RH_TYPE_SAVE) {
                vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                       "unexpected index save %u header type %u",
                                       instance, table->header.type);
                vdo_free(table);
                return UDS_CORRUPT_DATA;
        }

        result = read_index_save_data(reader, isl, table->header.payload);
        if (result != UDS_SUCCESS) {
                vdo_free(table);
                return vdo_log_error_strerror(result,
                                              "unknown index save %u data format",
                                              instance);
        }

        result = reconstruct_index_save(isl, table);
        vdo_free(table);
        if (result != UDS_SUCCESS) {
                return vdo_log_error_strerror(result, "cannot reconstruct index save %u",
                                              instance);
        }

        return UDS_SUCCESS;
}
static int __must_check load_sub_index_regions(struct index_layout *layout)
{
        int result;
        unsigned int j;
        struct index_save_layout *isl;
        struct buffered_reader *reader;

        for (j = 0; j < layout->super.max_saves; j++) {
                isl = &layout->index.saves[j];
                result = open_region_reader(layout, &isl->index_save, &reader);
                if (result != UDS_SUCCESS) {
                        vdo_log_error_strerror(result,
                                               "cannot get reader for index 0 save %u",
                                               j);
                        return result;
                }

                result = load_index_save(isl, reader, j);
                uds_free_buffered_reader(reader);
                if (result != UDS_SUCCESS) {
                        /* Another save slot might be valid. */
                        reset_index_save_layout(isl, 0);
                        continue;
                }
        }

        return UDS_SUCCESS;
}
static int __must_check verify_uds_index_config(struct index_layout *layout,
                                                struct uds_configuration *config)
{
        int result;
        struct buffered_reader *reader = NULL;
        u64 offset;

        offset = layout->super.volume_offset - layout->super.start_offset;
        result = open_layout_reader(layout, &layout->config, offset, &reader);
        if (result != UDS_SUCCESS)
                return vdo_log_error_strerror(result, "failed to open config reader");

        result = uds_validate_config_contents(reader, config);
        if (result != UDS_SUCCESS) {
                uds_free_buffered_reader(reader);
                return vdo_log_error_strerror(result, "failed to read config region");
        }

        uds_free_buffered_reader(reader);
        return UDS_SUCCESS;
}
static int load_index_layout(struct index_layout *layout, struct uds_configuration *config)
{
        int result;
        struct buffered_reader *reader;

        result = uds_make_buffered_reader(layout->factory,
                                          layout->offset / UDS_BLOCK_SIZE, 1, &reader);
        if (result != UDS_SUCCESS)
                return vdo_log_error_strerror(result, "unable to read superblock");

        result = load_super_block(layout, UDS_BLOCK_SIZE,
                                  layout->offset / UDS_BLOCK_SIZE, reader);
        uds_free_buffered_reader(reader);
        if (result != UDS_SUCCESS)
                return result;

        result = verify_uds_index_config(layout, config);
        if (result != UDS_SUCCESS)
                return result;

        return load_sub_index_regions(layout);
}
static int create_layout_factory(struct index_layout *layout,
                                 const struct uds_configuration *config)
{
        int result;
        size_t writable_size;
        struct io_factory *factory = NULL;

        result = uds_make_io_factory(config->bdev, &factory);
        if (result != UDS_SUCCESS)
                return result;

        writable_size = uds_get_writable_size(factory) & -UDS_BLOCK_SIZE;
        if (writable_size < config->size + config->offset) {
                uds_put_io_factory(factory);
                vdo_log_error("index storage (%zu) is smaller than the requested size %zu",
                              writable_size, config->size + config->offset);
                return -ENOSPC;
        }

        layout->factory = factory;
        layout->factory_size = (config->size > 0) ? config->size : writable_size;
        layout->offset = config->offset;
        return UDS_SUCCESS;
}
int uds_make_index_layout(struct uds_configuration *config, bool new_layout,
                          struct index_layout **layout_ptr)
{
        int result;
        struct index_layout *layout = NULL;
        struct save_layout_sizes sizes;

        result = compute_sizes(config, &sizes);
        if (result != UDS_SUCCESS)
                return result;

        result = vdo_allocate(1, struct index_layout, __func__, &layout);
        if (result != VDO_SUCCESS)
                return result;

        result = create_layout_factory(layout, config);
        if (result != UDS_SUCCESS) {
                uds_free_index_layout(layout);
                return result;
        }

        if (layout->factory_size < sizes.total_size) {
                vdo_log_error("index storage (%zu) is smaller than the required size %llu",
                              layout->factory_size,
                              (unsigned long long) sizes.total_size);
                uds_free_index_layout(layout);
                return -ENOSPC;
        }

        if (new_layout)
                result = create_index_layout(layout, config);
        else
                result = load_index_layout(layout, config);
        if (result != UDS_SUCCESS) {
                uds_free_index_layout(layout);
                return result;
        }

        *layout_ptr = layout;
        return UDS_SUCCESS;
}
void uds_free_index_layout(struct index_layout *layout)
{
        if (layout == NULL)
                return;

        vdo_free(layout->index.saves);
        if (layout->factory != NULL)
                uds_put_io_factory(layout->factory);

        vdo_free(layout);
}

int uds_replace_index_layout_storage(struct index_layout *layout,
                                     struct block_device *bdev)
{
        return uds_replace_storage(layout->factory, bdev);
}

/* Obtain a dm_bufio_client for the volume region. */
int uds_open_volume_bufio(struct index_layout *layout, size_t block_size,
                          unsigned int reserved_buffers,
                          struct dm_bufio_client **client_ptr)
{
        off_t offset = (layout->index.volume.start_block +
                        layout->super.volume_offset -
                        layout->super.start_offset);

        return uds_make_bufio(layout->factory, offset, block_size, reserved_buffers,
                              client_ptr);
}

u64 uds_get_volume_nonce(struct index_layout *layout)
{
        return layout->index.nonce;
}