/*
 * Copyright (C) 2013 FUJITSU LIMITED.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include "kerncompat.h"
#include "androidcompat.h"

#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/types.h>
#include <uuid/uuid.h>

#include "list.h"
#include "radix-tree.h"
#include "ctree.h"
#include "extent-cache.h"
#include "disk-io.h"
#include "volumes.h"
#include "transaction.h"
#include "utils.h"
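/*
 * Top-level state for the whole recovery run: filesystem geometry read
 * from the superblock, the metadata records collected while scanning
 * (chunks, block groups, device extents, tree blocks), and the chunk
 * lists sorted by how recoverable they turned out to be.
 */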
struct recover_control {
	int verbose;
	int yes;

	u16 csum_size;
	u32 sectorsize;
	u32 nodesize;
	u64 generation;
	u64 chunk_root_generation;

	struct btrfs_fs_devices *fs_devices;

	struct cache_tree chunk;
	struct block_group_tree bg;
	struct device_extent_tree devext;
	struct cache_tree eb_cache;

	struct list_head good_chunks;
	struct list_head bad_chunks;
	struct list_head rebuild_chunks;
	struct list_head unrepaired_chunks;
	pthread_mutex_t rc_lock;
};
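/*
 * One tree block found during the device scan: its logical start/size,
 * generation and checksum, plus the device/offset pair of every mirror
 * it was seen on.
 */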
struct extent_record {
	struct cache_extent cache;
	u64 generation;
	u8 csum[BTRFS_CSUM_SIZE];
	struct btrfs_device *devices[BTRFS_MAX_MIRRORS];
	u64 offsets[BTRFS_MAX_MIRRORS];
	int nmirrors;
};
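/*
 * Per-thread argument for scan_one_device(); bytenr doubles as a
 * progress indicator that scan_devices() polls while the threads run.
 */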
struct device_scan {
	struct recover_control *rc;
	struct btrfs_device *dev;
	int fd;
	u64 bytenr;
};
static struct extent_record *btrfs_new_extent_record(struct extent_buffer *eb)
{
	struct extent_record *rec;

	rec = calloc(1, sizeof(*rec));
	if (!rec) {
		fprintf(stderr, "Fail to allocate memory for extent record.\n");
		exit(1);
	}

	rec->cache.start = btrfs_header_bytenr(eb);
	rec->cache.size = eb->len;
	rec->generation = btrfs_header_generation(eb);
	read_extent_buffer(eb, rec->csum, (unsigned long)btrfs_header_csum(eb),
			   BTRFS_CSUM_SIZE);
	return rec;
}
static int process_extent_buffer(struct cache_tree *eb_cache,
				 struct extent_buffer *eb,
				 struct btrfs_device *device, u64 offset)
{
	struct extent_record *rec;
	struct extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_extent_record(eb);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(eb_cache,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct extent_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			if (exist->cache.start != rec->cache.start ||
			    exist->cache.size != rec->cache.size ||
			    memcmp(exist->csum, rec->csum, BTRFS_CSUM_SIZE)) {
				ret = -EEXIST;
			} else {
				BUG_ON(exist->nmirrors >= BTRFS_MAX_MIRRORS);
				exist->devices[exist->nmirrors] = device;
				exist->offsets[exist->nmirrors] = offset;
				exist->nmirrors++;
			}
			goto free_out;
		}
		remove_cache_extent(eb_cache, cache);
		free(exist);
		goto again;
	}

	rec->devices[0] = device;
	rec->offsets[0] = offset;
	rec->nmirrors++;
	ret = insert_cache_extent(eb_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void free_extent_record(struct cache_extent *cache)
{
	struct extent_record *er;

	er = container_of(cache, struct extent_record, cache);
	free(er);
}

FREE_EXTENT_CACHE_BASED_TREE(extent_record, free_extent_record);
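/*
 * Convert an in-memory chunk_record into the on-disk btrfs_chunk item
 * layout (header plus one btrfs_stripe per stripe) so it can be written
 * into the chunk tree or the superblock's system chunk array.
 */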
static struct btrfs_chunk *create_chunk_item(struct chunk_record *record)
{
	struct btrfs_chunk *ret;
	struct btrfs_stripe *chunk_stripe;
	int i;

	if (!record || record->num_stripes == 0)
		return NULL;
	ret = malloc(btrfs_chunk_item_size(record->num_stripes));
	if (!ret)
		return NULL;
	btrfs_set_stack_chunk_length(ret, record->length);
	btrfs_set_stack_chunk_owner(ret, record->owner);
	btrfs_set_stack_chunk_stripe_len(ret, record->stripe_len);
	btrfs_set_stack_chunk_type(ret, record->type_flags);
	btrfs_set_stack_chunk_io_align(ret, record->io_align);
	btrfs_set_stack_chunk_io_width(ret, record->io_width);
	btrfs_set_stack_chunk_sector_size(ret, record->sector_size);
	btrfs_set_stack_chunk_num_stripes(ret, record->num_stripes);
	btrfs_set_stack_chunk_sub_stripes(ret, record->sub_stripes);
	for (i = 0, chunk_stripe = &ret->stripe; i < record->num_stripes;
	     i++, chunk_stripe++) {
		btrfs_set_stack_stripe_devid(chunk_stripe,
				record->stripes[i].devid);
		btrfs_set_stack_stripe_offset(chunk_stripe,
				record->stripes[i].offset);
		memcpy(chunk_stripe->dev_uuid, record->stripes[i].dev_uuid,
		       BTRFS_UUID_SIZE);
	}
	return ret;
}
static void init_recover_control(struct recover_control *rc, int verbose,
				 int yes)
{
	memset(rc, 0, sizeof(struct recover_control));
	cache_tree_init(&rc->chunk);
	cache_tree_init(&rc->eb_cache);
	block_group_tree_init(&rc->bg);
	device_extent_tree_init(&rc->devext);

	INIT_LIST_HEAD(&rc->good_chunks);
	INIT_LIST_HEAD(&rc->bad_chunks);
	INIT_LIST_HEAD(&rc->rebuild_chunks);
	INIT_LIST_HEAD(&rc->unrepaired_chunks);

	rc->verbose = verbose;
	rc->yes = yes;
	pthread_mutex_init(&rc->rc_lock, NULL);
}
static void free_recover_control(struct recover_control *rc)
{
	free_block_group_tree(&rc->bg);
	free_chunk_cache_tree(&rc->chunk);
	free_device_extent_tree(&rc->devext);
	free_extent_record_tree(&rc->eb_cache);
	pthread_mutex_destroy(&rc->rc_lock);
}
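/*
 * The three process_*_item() helpers below share one pattern: build a
 * record from the leaf item, look up any cached record covering the same
 * range, and keep whichever has the higher generation.  Records of equal
 * generation must match exactly, otherwise -EEXIST is returned.
 */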
static int process_block_group_item(struct block_group_tree *bg_cache,
				    struct extent_buffer *leaf,
				    struct btrfs_key *key, int slot)
{
	struct block_group_record *rec;
	struct block_group_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_block_group_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(&bg_cache->tree,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct block_group_record, cache);

		/* check the generation and replace if needed */
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct block_group_record,
					      generation);
			/*
			 * According to the current kernel code, the following
			 * case is impossible, or there is something wrong in
			 * the kernel code.
			 */
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&bg_cache->tree, cache);
		list_del_init(&exist->list);
		free(exist);
		/*
		 * We must do the search again to avoid the following cache.
		 * /--old bg 1--//--old bg 2--/
		 *        /--new bg--/
		 */
		goto again;
	}

	ret = insert_block_group_record(bg_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_chunk_item(struct cache_tree *chunk_cache,
			      struct extent_buffer *leaf, struct btrfs_key *key,
			      int slot)
{
	struct chunk_record *rec;
	struct chunk_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_chunk_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(chunk_cache, rec->offset, rec->length);
	if (cache) {
		exist = container_of(cache, struct chunk_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int num_stripes = rec->num_stripes;
			int rec_size = btrfs_chunk_record_size(num_stripes);
			int offset = offsetof(struct chunk_record, generation);

			if (exist->num_stripes != rec->num_stripes ||
			    memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   rec_size - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(chunk_cache, cache);
		free(exist);
		goto again;
	}
	ret = insert_cache_extent(chunk_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_device_extent_item(struct device_extent_tree *devext_cache,
				      struct extent_buffer *leaf,
				      struct btrfs_key *key, int slot)
{
	struct device_extent_record *rec;
	struct device_extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_device_extent_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent2(&devext_cache->tree,
				     rec->cache.objectid,
				     rec->cache.start,
				     rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct device_extent_record, cache);
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct device_extent_record,
					      generation);
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&devext_cache->tree, cache);
		list_del_init(&exist->chunk_list);
		list_del_init(&exist->device_list);
		free(exist);
		goto again;
	}

	ret = insert_device_extent_record(devext_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void print_block_group_info(struct block_group_record *rec, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Block Group: start = %llu, len = %llu, flag = %llx\n",
	       rec->objectid, rec->offset, rec->flags);
}
static void print_block_group_tree(struct block_group_tree *tree)
{
	struct cache_extent *cache;
	struct block_group_record *rec;

	printf("All Block Groups:\n");
	for (cache = first_cache_extent(&tree->tree); cache;
	     cache = next_cache_extent(cache)) {
		rec = container_of(cache, struct block_group_record, cache);
		print_block_group_info(rec, "\t");
	}
	printf("\n");
}
static void print_stripe_info(struct stripe *data, char *prefix1, char *prefix2,
			      int index)
{
	if (prefix1)
		printf("%s", prefix1);
	if (prefix2)
		printf("%s", prefix2);
	printf("[%2d] Stripe: devid = %llu, offset = %llu\n",
	       index, data->devid, data->offset);
}
static void print_chunk_self_info(struct chunk_record *rec, char *prefix)
{
	int i;

	if (prefix)
		printf("%s", prefix);
	printf("Chunk: start = %llu, len = %llu, type = %llx, num_stripes = %u\n",
	       rec->offset, rec->length, rec->type_flags, rec->num_stripes);
	if (prefix)
		printf("%s", prefix);
	printf("    Stripes list:\n");
	for (i = 0; i < rec->num_stripes; i++)
		print_stripe_info(&rec->stripes[i], prefix, "    ", i);
}
static void print_chunk_tree(struct cache_tree *tree)
{
	struct cache_extent *n;
	struct chunk_record *entry;

	printf("All Chunks:\n");
	for (n = first_cache_extent(tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct chunk_record, cache);
		print_chunk_self_info(entry, "\t");
	}
	printf("\n");
}
static void print_device_extent_info(struct device_extent_record *rec,
				     char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device extent: devid = %llu, start = %llu, len = %llu, chunk offset = %llu\n",
	       rec->objectid, rec->offset, rec->length, rec->chunk_offset);
}
static void print_device_extent_tree(struct device_extent_tree *tree)
{
	struct cache_extent *n;
	struct device_extent_record *entry;

	printf("All Device Extents:\n");
	for (n = first_cache_extent(&tree->tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct device_extent_record, cache);
		print_device_extent_info(entry, "\t");
	}
	printf("\n");
}
static void print_device_info(struct btrfs_device *device, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device: id = %llu, name = %s\n",
	       device->devid, device->name);
}
static void print_all_devices(struct list_head *devices)
{
	struct btrfs_device *dev;

	printf("All Devices:\n");
	list_for_each_entry(dev, devices, dev_list)
		print_device_info(dev, "\t");
	printf("\n");
}
static void print_scan_result(struct recover_control *rc)
{
	if (!rc->verbose)
		return;

	printf("DEVICE SCAN RESULT:\n");
	printf("Filesystem Information:\n");
	printf("\tsectorsize: %d\n", rc->sectorsize);
	printf("\tnodesize: %d\n", rc->nodesize);
	printf("\ttree root generation: %llu\n", rc->generation);
	printf("\tchunk root generation: %llu\n", rc->chunk_root_generation);
	printf("\n");

	print_all_devices(&rc->fs_devices->devices);
	print_block_group_tree(&rc->bg);
	print_chunk_tree(&rc->chunk);
	print_device_extent_tree(&rc->devext);
}
static void print_chunk_info(struct chunk_record *chunk, char *prefix)
{
	struct device_extent_record *devext;
	int i;

	print_chunk_self_info(chunk, prefix);
	if (prefix)
		printf("%s", prefix);
	if (chunk->bg_rec)
		print_block_group_info(chunk->bg_rec, "    ");
	else
		printf("    No block group.\n");
	if (prefix)
		printf("%s", prefix);
	if (list_empty(&chunk->dextents)) {
		printf("    No device extent.\n");
	} else {
		printf("    Device extent list:\n");
		i = 0;
		list_for_each_entry(devext, &chunk->dextents, chunk_list) {
			if (prefix)
				printf("%s", prefix);
			printf("%s[%2d]", "        ", i);
			print_device_extent_info(devext, NULL);
			i++;
		}
	}
}
static void print_check_result(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct device_extent_record *devext;
	int total = 0;
	int good = 0;
	int bad = 0;

	if (!rc->verbose)
		return;

	printf("CHECK RESULT:\n");
	printf("Recoverable Chunks:\n");
	list_for_each_entry(chunk, &rc->good_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	list_for_each_entry(chunk, &rc->rebuild_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	list_for_each_entry(chunk, &rc->unrepaired_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	printf("Unrecoverable Chunks:\n");
	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		print_chunk_info(chunk, "  ");
		bad++;
		total++;
	}
	printf("\n");
	printf("Total Chunks:\t\t%d\n", total);
	printf("  Recoverable:\t\t%d\n", good);
	printf("  Unrecoverable:\t%d\n", bad);

	printf("\n");
	printf("Orphan Block Groups:\n");
	list_for_each_entry(bg, &rc->bg.block_groups, list)
		print_block_group_info(bg, "  ");

	printf("\n");
	printf("Orphan Device Extents:\n");
	list_for_each_entry(devext, &rc->devext.no_chunk_orphans, chunk_list)
		print_device_extent_info(devext, "  ");
	printf("\n");
}
static int check_chunk_by_metadata(struct recover_control *rc,
				   struct btrfs_root *root,
				   struct chunk_record *chunk, int bg_only)
{
	int ret;
	int i;
	int slot;
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_root *dev_root;
	struct stripe *stripe;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_block_group_item *bg_ptr;
	struct extent_buffer *l;

	btrfs_init_path(&path);

	if (bg_only)
		goto bg_check;

	dev_root = root->fs_info->dev_root;
	for (i = 0; i < chunk->num_stripes; i++) {
		stripe = &chunk->stripes[i];

		key.objectid = stripe->devid;
		key.offset = stripe->offset;
		key.type = BTRFS_DEV_EXTENT_KEY;

		ret = btrfs_search_slot(NULL, dev_root, &key, &path, 0, 0);
		if (ret < 0) {
			fprintf(stderr, "Search device extent failed(%d)\n",
				ret);
			btrfs_release_path(&path);
			return ret;
		} else if (ret > 0) {
			if (rc->verbose)
				fprintf(stderr,
					"No device extent[%llu, %llu]\n",
					stripe->devid, stripe->offset);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		l = path.nodes[0];
		slot = path.slots[0];
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		if (chunk->offset !=
		    btrfs_dev_extent_chunk_offset(l, dev_extent)) {
			if (rc->verbose)
				fprintf(stderr,
					"Device tree mismatch with chunks dev_extent[%llu, %llu], chunk[%llu, %llu]\n",
					btrfs_dev_extent_chunk_offset(l,
								dev_extent),
					btrfs_dev_extent_length(l, dev_extent),
					chunk->offset, chunk->length);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		btrfs_release_path(&path);
	}

bg_check:
	key.objectid = chunk->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk->length;

	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, &path,
				0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search block group failed(%d)\n", ret);
		btrfs_release_path(&path);
		return ret;
	} else if (ret > 0) {
		if (rc->verbose)
			fprintf(stderr, "No block group[%llu, %llu]\n",
				key.objectid, key.offset);
		btrfs_release_path(&path);
		return -ENOENT;
	}

	l = path.nodes[0];
	slot = path.slots[0];
	bg_ptr = btrfs_item_ptr(l, slot, struct btrfs_block_group_item);
	if (chunk->type_flags != btrfs_disk_block_group_flags(l, bg_ptr)) {
		if (rc->verbose)
			fprintf(stderr,
				"Chunk[%llu, %llu]'s type(%llu) is different with Block Group's type(%llu)\n",
				chunk->offset, chunk->length, chunk->type_flags,
				btrfs_disk_block_group_flags(l, bg_ptr));
		btrfs_release_path(&path);
		return -ENOENT;
	}
	btrfs_release_path(&path);
	return 0;
}
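/*
 * Cross-check every recovered chunk against the device tree and extent
 * tree of the opened filesystem; chunks that have no matching metadata
 * are moved to the bad list.
 */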
static int check_all_chunks_by_metadata(struct recover_control *rc,
					struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	LIST_HEAD(orphan_chunks);
	int ret = 0;
	int err;

	list_for_each_entry_safe(chunk, next, &rc->good_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 0);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err != -ENOENT && !ret)
			ret = err ? err : -EINVAL;
	}
	list_splice(&orphan_chunks, &rc->bad_chunks);
	return ret;
}
static int extract_metadata_record(struct recover_control *rc,
				   struct extent_buffer *leaf)
{
	struct btrfs_key key;
	int ret = 0;
	int i;
	u32 nritems;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_BLOCK_GROUP_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_block_group_item(&rc->bg, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_CHUNK_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_chunk_item(&rc->chunk, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_DEV_EXTENT_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_device_extent_item(&rc->devext, leaf,
							 &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		}
		if (ret)
			break;
	}
	return ret;
}
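/*
 * The scanner below walks a device sector by sector, skipping superblock
 * copies, and treats anything with a matching fsid and a valid checksum
 * as a tree block worth recording.
 */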
static inline int is_super_block_address(u64 offset)
{
	int i;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		if (offset == btrfs_sb_offset(i))
			return 1;
	}
	return 0;
}
static int scan_one_device(void *dev_scan_struct)
{
	struct extent_buffer *buf;
	u64 bytenr;
	int ret = 0;
	struct device_scan *dev_scan = (struct device_scan *)dev_scan_struct;
	struct recover_control *rc = dev_scan->rc;
	struct btrfs_device *device = dev_scan->dev;
	int fd = dev_scan->fd;
	int oldtype;

	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
	if (ret)
		return 1;

	buf = malloc(sizeof(*buf) + rc->nodesize);
	if (!buf)
		return -ENOMEM;
	buf->len = rc->nodesize;

	bytenr = 0;
	while (1) {
		dev_scan->bytenr = bytenr;

		if (is_super_block_address(bytenr))
			bytenr += rc->sectorsize;

		if (pread64(fd, buf->data, rc->nodesize, bytenr) <
		    rc->nodesize)
			break;

		if (memcmp_extent_buffer(buf, rc->fs_devices->fsid,
					 btrfs_header_fsid(),
					 BTRFS_FSID_SIZE)) {
			bytenr += rc->sectorsize;
			continue;
		}

		if (verify_tree_block_csum_silent(buf, rc->csum_size)) {
			bytenr += rc->sectorsize;
			continue;
		}

		pthread_mutex_lock(&rc->rc_lock);
		ret = process_extent_buffer(&rc->eb_cache, buf, device, bytenr);
		pthread_mutex_unlock(&rc->rc_lock);
		if (ret)
			goto out;

		if (btrfs_header_level(buf) != 0)
			goto next_node;

		switch (btrfs_header_owner(buf)) {
		case BTRFS_EXTENT_TREE_OBJECTID:
		case BTRFS_DEV_TREE_OBJECTID:
			/* different trees use different generations */
			if (btrfs_header_generation(buf) > rc->generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		case BTRFS_CHUNK_TREE_OBJECTID:
			if (btrfs_header_generation(buf) >
			    rc->chunk_root_generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		}
next_node:
		bytenr += rc->nodesize;
	}
out:
	close(fd);
	free(buf);
	return ret;
}
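/*
 * Scan all devices in parallel, one thread per device, while the main
 * thread polls dev_scan.bytenr to print progress.
 */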
static int scan_devices(struct recover_control *rc)
{
	int ret = 0;
	int fd;
	struct btrfs_device *dev;
	struct device_scan *dev_scans;
	pthread_t *t_scans;
	long *t_rets;
	int devnr = 0;
	int devidx = 0;
	int i;
	int all_done;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list)
		devnr++;
	dev_scans = (struct device_scan *)malloc(sizeof(struct device_scan)
						 * devnr);
	if (!dev_scans)
		return -ENOMEM;
	t_scans = (pthread_t *)malloc(sizeof(pthread_t) * devnr);
	if (!t_scans) {
		free(dev_scans);
		return -ENOMEM;
	}
	t_rets = (long *)malloc(sizeof(long) * devnr);
	if (!t_rets) {
		free(dev_scans);
		free(t_scans);
		return -ENOMEM;
	}

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		fd = open(dev->name, O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "Failed to open device %s\n",
				dev->name);
			ret = 1;
			goto out2;
		}
		dev_scans[devidx].rc = rc;
		dev_scans[devidx].dev = dev;
		dev_scans[devidx].fd = fd;
		dev_scans[devidx].bytenr = -1;
		devidx++;
	}

	for (i = 0; i < devidx; i++) {
		ret = pthread_create(&t_scans[i], NULL,
				     (void *)scan_one_device,
				     (void *)&dev_scans[i]);
		if (ret)
			goto out1;

		dev_scans[i].bytenr = 0;
	}

	while (1) {
		all_done = 1;
		for (i = 0; i < devidx; i++) {
			if (dev_scans[i].bytenr == -1)
				continue;
			ret = pthread_tryjoin_np(t_scans[i],
						 (void **)&t_rets[i]);
			if (ret == EBUSY) {
				all_done = 0;
				continue;
			}
			if (ret || t_rets[i]) {
				ret = 1;
				goto out1;
			}
			dev_scans[i].bytenr = -1;
		}

		printf("\rScanning: ");
		for (i = 0; i < devidx; i++) {
			if (dev_scans[i].bytenr == -1)
				printf("%sDONE in dev%d",
				       i ? ", " : "", i);
			else
				printf("%s%llu in dev%d",
				       i ? ", " : "", dev_scans[i].bytenr, i);
		}
		/* clear chars if exist in tail */
		printf("                ");
		printf("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b");
		fflush(stdout);

		if (all_done) {
			printf("\n");
			break;
		}

		sleep(1);
	}
out1:
	for (i = 0; i < devidx; i++) {
		if (dev_scans[i].bytenr == -1)
			continue;
		pthread_cancel(t_scans[i]);
	}
out2:
	free(dev_scans);
	free(t_scans);
	free(t_rets);
	return !!ret;
}
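/*
 * Populate the logical->physical mapping tree from a recovered chunk
 * record, mirroring what reading the chunk tree would normally do.
 */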
static int build_device_map_by_chunk_record(struct btrfs_root *root,
					    struct chunk_record *chunk)
{
	int ret = 0;
	int i;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u16 num_stripes;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_mapping_tree *map_tree;
	struct map_lookup *map;
	struct stripe *stripe;

	map_tree = &fs_info->mapping_tree;
	num_stripes = chunk->num_stripes;
	map = malloc(btrfs_map_lookup_size(num_stripes));
	if (!map)
		return -ENOMEM;
	map->ce.start = chunk->offset;
	map->ce.size = chunk->length;
	map->num_stripes = num_stripes;
	map->io_width = chunk->io_width;
	map->io_align = chunk->io_align;
	map->sector_size = chunk->sector_size;
	map->stripe_len = chunk->stripe_len;
	map->type = chunk->type_flags;
	map->sub_stripes = chunk->sub_stripes;

	for (i = 0, stripe = chunk->stripes; i < num_stripes; i++, stripe++) {
		devid = stripe->devid;
		memcpy(uuid, stripe->dev_uuid, BTRFS_UUID_SIZE);
		map->stripes[i].physical = stripe->offset;
		map->stripes[i].dev = btrfs_find_device(fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev) {
			free(map);
			return -EIO;
		}
	}

	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	return ret;
}
static int build_device_maps_by_chunk_records(struct recover_control *rc,
					      struct btrfs_root *root)
{
	int ret = 0;
	struct chunk_record *chunk;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	list_for_each_entry(chunk, &rc->rebuild_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	return ret;
}
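/*
 * Delete every extent item inside the given block group and adjust the
 * bytes_used accounting in the superblock accordingly; used below when
 * the system chunks are rewritten.
 */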
static int block_group_remove_all_extent_items(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct block_group_record *bg)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path path;
	struct extent_buffer *leaf;
	u64 start = bg->objectid;
	u64 end = bg->objectid + bg->offset;
	u64 old_val;
	int nitems;
	int ret;
	int i;
	int del_s, del_nr;

	btrfs_init_path(&path);
	root = root->fs_info->extent_root;

	key.objectid = start;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
	if (ret < 0)
		goto err;
	else if (ret > 0)
		ret = 0;

	leaf = path.nodes[0];
	nitems = btrfs_header_nritems(leaf);
	if (!nitems) {
		/* The tree is empty. */
		ret = 0;
		goto err;
	}

	if (path.slots[0] >= nitems) {
		ret = btrfs_next_leaf(root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0) {
			ret = 0;
			goto err;
		}
		leaf = path.nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, 0);
		if (key.objectid >= end)
			goto err;
		btrfs_release_path(&path);
		goto again;
	}

	del_nr = 0;
	del_s = -1;
	for (i = path.slots[0]; i < nitems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid >= end)
			break;

		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (del_nr == 0)
				continue;
			else
				break;
		}

		if (del_s == -1)
			del_s = i;
		del_nr++;
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			old_val = btrfs_super_bytes_used(fs_info->super_copy);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				old_val += fs_info->nodesize;
			else
				old_val += key.offset;
			btrfs_set_super_bytes_used(fs_info->super_copy,
						   old_val);
		}
	}

	if (del_nr) {
		ret = btrfs_del_items(trans, root, &path, del_s, del_nr);
		if (ret)
			goto err;
	}

	if (key.objectid < end) {
		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			key.objectid += fs_info->sectorsize;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = 0;
		}
		btrfs_release_path(&path);
		goto again;
	}
err:
	btrfs_release_path(&path);
	return ret;
}
static int block_group_free_all_extent(struct btrfs_root *root,
				       struct block_group_record *bg)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info;
	u64 start;
	u64 end;

	info = root->fs_info;
	cache = btrfs_lookup_block_group(info, bg->objectid);
	if (!cache)
		return -ENOENT;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	set_extent_bits(&info->block_group_cache, start, end,
			BLOCK_GROUP_DIRTY);
	set_extent_dirty(&info->free_space_cache, start, end);

	btrfs_set_block_group_used(&cache->item, 0);

	return 0;
}
static int remove_chunk_extent_item(struct btrfs_trans_handle *trans,
				    struct recover_control *rc,
				    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	int ret = 0;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		if (!(chunk->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;
		ret = block_group_remove_all_extent_items(trans, root,
							  chunk->bg_rec);
		if (ret)
			return ret;

		ret = block_group_free_all_extent(root, chunk->bg_rec);
		if (ret)
			return ret;
	}
	return ret;
}
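/*
 * Start the new chunk tree from scratch: allocate an empty root node,
 * stamp it with the current transaction and the chunk tree owner, and
 * make it the chunk root.
 */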
static int __rebuild_chunk_root(struct btrfs_trans_handle *trans,
				struct recover_control *rc,
				struct btrfs_root *root)
{
	u64 min_devid = -1;
	struct btrfs_device *dev;
	struct extent_buffer *cow;
	struct btrfs_disk_key disk_key;
	int ret = 0;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		if (min_devid > dev->devid)
			min_devid = dev->devid;
	}
	btrfs_set_disk_key_objectid(&disk_key, BTRFS_DEV_ITEMS_OBJECTID);
	btrfs_set_disk_key_type(&disk_key, BTRFS_DEV_ITEM_KEY);
	btrfs_set_disk_key_offset(&disk_key, min_devid);

	cow = btrfs_alloc_free_block(trans, root, root->fs_info->nodesize,
				     BTRFS_CHUNK_TREE_OBJECTID,
				     &disk_key, 0, 0, 0);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_nritems(cow, 0);
	btrfs_set_header_level(cow, 0);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(cow, BTRFS_CHUNK_TREE_OBJECTID);

	write_extent_buffer(cow, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);

	write_extent_buffer(cow, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(cow),
			    BTRFS_UUID_SIZE);

	root->node = cow;
	btrfs_mark_buffer_dirty(cow);

	return ret;
}
static int __rebuild_device_items(struct btrfs_trans_handle *trans,
				  struct recover_control *rc,
				  struct btrfs_root *root)
{
	struct btrfs_device *dev;
	struct btrfs_key key;
	struct btrfs_dev_item dev_item_tmp;
	struct btrfs_dev_item *dev_item = &dev_item_tmp;
	int ret = 0;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
		key.type = BTRFS_DEV_ITEM_KEY;
		key.offset = dev->devid;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		ret = btrfs_insert_item(trans, root, &key,
					dev_item, sizeof(*dev_item));
	}

	return ret;
}
static int __insert_chunk_item(struct btrfs_trans_handle *trans,
			       struct chunk_record *chunk_rec,
			       struct btrfs_root *chunk_root)
{
	struct btrfs_key key;
	struct btrfs_chunk *chunk = NULL;
	int ret = 0;

	chunk = create_chunk_item(chunk_rec);
	if (!chunk)
		return -ENOMEM;
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_rec->offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(chunk_rec->num_stripes));
	free(chunk);
	return ret;
}
static int __rebuild_chunk_items(struct btrfs_trans_handle *trans,
				 struct recover_control *rc,
				 struct btrfs_root *root)
{
	struct btrfs_root *chunk_root;
	struct chunk_record *chunk_rec;
	int ret;

	chunk_root = root->fs_info->chunk_root;

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		ret = __insert_chunk_item(trans, chunk_rec, chunk_root);
		if (ret)
			return ret;
	}
	list_for_each_entry(chunk_rec, &rc->rebuild_chunks, list) {
		ret = __insert_chunk_item(trans, chunk_rec, chunk_root);
		if (ret)
			return ret;
	}
	return 0;
}
static int rebuild_chunk_tree(struct btrfs_trans_handle *trans,
			      struct recover_control *rc,
			      struct btrfs_root *root)
{
	int ret = 0;

	root = root->fs_info->chunk_root;

	ret = __rebuild_chunk_root(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_device_items(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_chunk_items(trans, rc, root);

	return ret;
}
static int rebuild_sys_array(struct recover_control *rc,
			     struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct chunk_record *chunk_rec;
	int ret = 0;
	u16 num_stripes;

	btrfs_set_super_sys_array_size(fs_info->super_copy, 0);

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		if (!(chunk_rec->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;

		num_stripes = chunk_rec->num_stripes;
		chunk = create_chunk_item(chunk_rec);
		if (!chunk) {
			ret = -ENOMEM;
			break;
		}

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_add_system_chunk(fs_info, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
		free(chunk);
		if (ret)
			break;
	}
	return ret;
}
static int calculate_bg_used(struct btrfs_root *extent_root,
			     struct chunk_record *chunk_rec,
			     struct btrfs_path *path,
			     u64 *used)
{
	struct extent_buffer *node;
	struct btrfs_key found_key;
	int slot;
	int ret = 0;
	u64 used_ret = 0;

	while (1) {
		node = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(node, &found_key, slot);
		if (found_key.objectid >= chunk_rec->offset + chunk_rec->length)
			break;
		if (found_key.type != BTRFS_METADATA_ITEM_KEY &&
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto next;
		if (found_key.type == BTRFS_METADATA_ITEM_KEY)
			used_ret += extent_root->fs_info->nodesize;
		else
			used_ret += found_key.offset;
next:
		if (slot + 1 < btrfs_header_nritems(node)) {
			path->slots[0]++;
		} else {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				ret = 0;
				break;
			}
		}
	}
out:
	if (!ret)
		*used = used_ret;
	return ret;
}
static int __insert_block_group(struct btrfs_trans_handle *trans,
				struct chunk_record *chunk_rec,
				struct btrfs_root *extent_root,
				u64 used)
{
	struct btrfs_block_group_item bg_item;
	struct btrfs_key key;
	int ret = 0;

	btrfs_set_block_group_used(&bg_item, used);
	btrfs_set_block_group_chunk_objectid(&bg_item, used);
	btrfs_set_block_group_flags(&bg_item, chunk_rec->type_flags);
	key.objectid = chunk_rec->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk_rec->length;

	ret = btrfs_insert_item(trans, extent_root, &key, &bg_item,
				sizeof(bg_item));
	return ret;
}
/*
 * Search through the extent tree to rebuild the 'used' member of the block
 * group.
 * However, since block group and extent items share the extent tree,
 * the extent items may also be missing.
 * In that case, we fill the 'used' with the length of the block group to
 * ensure no write into the block group.
 * Btrfsck will hate it, but we will inform the user to call
 * '--init-extent-tree' if possible, or just salvage as much data as
 * possible from the fs.
 */
static int rebuild_block_group(struct btrfs_trans_handle *trans,
			       struct recover_control *rc,
			       struct btrfs_root *root)
{
	struct chunk_record *chunk_rec;
	struct btrfs_key search_key;
	struct btrfs_path path;
	u64 used = 0;
	int ret = 0;

	if (list_empty(&rc->rebuild_chunks))
		return 0;

	btrfs_init_path(&path);
	list_for_each_entry(chunk_rec, &rc->rebuild_chunks, list) {
		search_key.objectid = chunk_rec->offset;
		search_key.type = BTRFS_EXTENT_ITEM_KEY;
		search_key.offset = 0;
		ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
					&search_key, &path, 0, 0);
		if (ret < 0)
			goto out;
		ret = calculate_bg_used(root->fs_info->extent_root,
					chunk_rec, &path, &used);
		/*
		 * Extent tree is damaged, better to rebuild the whole extent
		 * tree. Currently, change the used to chunk's len to prevent
		 * write/block reserve happening in that block group.
		 */
		if (ret < 0) {
			fprintf(stderr,
				"Fail to search extent tree for block group: [%llu,%llu]\n",
				chunk_rec->offset,
				chunk_rec->offset + chunk_rec->length);
			fprintf(stderr,
				"Mark the block group full to prevent block rsv problems\n");
			used = chunk_rec->length;
		}
		btrfs_release_path(&path);
		ret = __insert_block_group(trans, chunk_rec,
					   root->fs_info->extent_root,
					   used);
		if (ret < 0)
			goto out;
	}
out:
	btrfs_release_path(&path);
	return ret;
}
static struct btrfs_root *
open_ctree_with_broken_chunk(struct recover_control *rc)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_super_block *disk_super;
	struct extent_buffer *eb;
	int ret;

	fs_info = btrfs_new_fs_info(1, BTRFS_SUPER_INFO_OFFSET);
	if (!fs_info) {
		fprintf(stderr, "Failed to allocate memory for fs_info\n");
		return ERR_PTR(-ENOMEM);
	}
	fs_info->is_chunk_recover = 1;

	fs_info->fs_devices = rc->fs_devices;
	ret = btrfs_open_devices(fs_info->fs_devices, O_RDWR);
	if (ret)
		goto out;

	disk_super = fs_info->super_copy;
	ret = btrfs_read_dev_super(fs_info->fs_devices->latest_bdev,
				   disk_super, fs_info->super_bytenr,
				   SBREAD_RECOVER);
	if (ret) {
		fprintf(stderr, "No valid btrfs found\n");
		goto out_devices;
	}

	memcpy(fs_info->fsid, &disk_super->fsid, BTRFS_FSID_SIZE);
	fs_info->sectorsize = btrfs_super_sectorsize(disk_super);
	fs_info->nodesize = btrfs_super_nodesize(disk_super);
	fs_info->stripesize = btrfs_super_stripesize(disk_super);

	ret = btrfs_check_fs_compatibility(disk_super, OPEN_CTREE_WRITES);
	if (ret)
		goto out_devices;

	btrfs_setup_root(fs_info->chunk_root, fs_info,
			 BTRFS_CHUNK_TREE_OBJECTID);

	ret = build_device_maps_by_chunk_records(rc, fs_info->chunk_root);
	if (ret)
		goto out_cleanup;

	ret = btrfs_setup_all_roots(fs_info, 0, 0);
	if (ret)
		goto out_failed;

	eb = fs_info->tree_root->node;
	read_extent_buffer(eb, fs_info->chunk_tree_uuid,
			   btrfs_header_chunk_tree_uuid(eb),
			   BTRFS_UUID_SIZE);

	return fs_info->fs_root;
out_failed:
	btrfs_release_all_roots(fs_info);
out_cleanup:
	btrfs_cleanup_all_caches(fs_info);
out_devices:
	btrfs_close_devices(fs_info->fs_devices);
out:
	btrfs_free_fs_info(fs_info);
	return ERR_PTR(ret);
}
static int recover_prepare(struct recover_control *rc, char *path)
{
	int ret;
	int fd;
	struct btrfs_super_block *sb;
	char buf[BTRFS_SUPER_INFO_SIZE];
	struct btrfs_fs_devices *fs_devices;

	ret = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "open %s error\n", path);
		return -1;
	}

	sb = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, sb, BTRFS_SUPER_INFO_OFFSET,
				   SBREAD_RECOVER);
	if (ret) {
		fprintf(stderr, "read super block error\n");
		goto out_close_fd;
	}

	rc->sectorsize = btrfs_super_sectorsize(sb);
	rc->nodesize = btrfs_super_nodesize(sb);
	rc->generation = btrfs_super_generation(sb);
	rc->chunk_root_generation = btrfs_super_chunk_root_generation(sb);
	rc->csum_size = btrfs_super_csum_size(sb);

	/* if seed, the result of scanning below will be partial */
	if (btrfs_super_flags(sb) & BTRFS_SUPER_FLAG_SEEDING) {
		fprintf(stderr, "this device is seed device\n");
		ret = -1;
		goto out_close_fd;
	}

	ret = btrfs_scan_fs_devices(fd, path, &fs_devices, 0, SBREAD_RECOVER, 0);
	if (ret)
		goto out_close_fd;

	rc->fs_devices = fs_devices;

	if (rc->verbose)
		print_all_devices(&rc->fs_devices->devices);

out_close_fd:
	close(fd);
	return ret;
}
static int btrfs_get_device_extents(u64 chunk_object,
				    struct list_head *orphan_devexts,
				    struct list_head *ret_list)
{
	struct device_extent_record *devext;
	struct device_extent_record *next;
	int count = 0;

	list_for_each_entry_safe(devext, next, orphan_devexts, chunk_list) {
		if (devext->chunk_offset == chunk_object) {
			list_move_tail(&devext->chunk_list, ret_list);
			count++;
		}
	}
	return count;
}
static int calc_num_stripes(u64 type)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		return 0;
	else if (type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP))
		return 2;
	else
		return 1;
}
static inline int calc_sub_nstripes(u64 type)
{
	if (type & BTRFS_BLOCK_GROUP_RAID10)
		return 2;
	else
		return 1;
}
static int btrfs_verify_device_extents(struct block_group_record *bg,
				       struct list_head *devexts, int ndevexts)
{
	struct device_extent_record *devext;
	u64 stripe_length;
	int expected_num_stripes;

	expected_num_stripes = calc_num_stripes(bg->flags);
	if (expected_num_stripes && expected_num_stripes != ndevexts)
		return 1;

	if (check_num_stripes(bg->flags, ndevexts) < 0)
		return 1;

	stripe_length = calc_stripe_length(bg->flags, bg->offset, ndevexts);
	list_for_each_entry(devext, devexts, chunk_list) {
		if (devext->length != stripe_length)
			return 1;
	}
	return 0;
}
static int btrfs_rebuild_unordered_chunk_stripes(struct recover_control *rc,
						 struct chunk_record *chunk)
{
	struct device_extent_record *devext;
	struct btrfs_device *device;
	int i;

	devext = list_first_entry(&chunk->dextents, struct device_extent_record,
				  chunk_list);
	for (i = 0; i < chunk->num_stripes; i++) {
		chunk->stripes[i].devid = devext->objectid;
		chunk->stripes[i].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device)
			return -ENOENT;
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[i].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
		devext = list_next_entry(devext, chunk_list);
	}
	return 0;
}
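/*
 * Map a logical address inside the chunk to a stripe index.  For RAID0
 * it is a plain modulo; RAID10 addresses a sub-stripe group; RAID5/6
 * additionally rotate the index by the stripe number so parity moves
 * across devices.
 */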
static int btrfs_calc_stripe_index(struct chunk_record *chunk, u64 logical)
{
	u64 offset = logical - chunk->offset;
	int stripe_nr;
	int nr_data_stripes;
	int index;

	stripe_nr = offset / chunk->stripe_len;
	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID0) {
		index = stripe_nr % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID10) {
		index = stripe_nr % (chunk->num_stripes / chunk->sub_stripes);
		index *= chunk->sub_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5) {
		nr_data_stripes = chunk->num_stripes - 1;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6) {
		nr_data_stripes = chunk->num_stripes - 2;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else {
		return -1;
	}
	return index;
}

/* calc the logical offset which is the start of the next stripe. */
static inline u64 btrfs_next_stripe_logical_offset(struct chunk_record *chunk,
						   u64 logical)
{
	u64 offset = logical - chunk->offset;

	offset /= chunk->stripe_len;
	offset *= chunk->stripe_len;
	offset += chunk->stripe_len;

	return offset + chunk->offset;
}

static int is_extent_record_in_device_extent(struct extent_record *er,
					     struct device_extent_record *dext,
					     int *mirror)
{
	int i;

	for (i = 0; i < er->nmirrors; i++) {
		if (er->devices[i]->devid == dext->objectid &&
		    er->offsets[i] >= dext->offset &&
		    er->offsets[i] < dext->offset + dext->length) {
			*mirror = i;
			return 1;
		}
	}
	return 0;
}
static int
btrfs_rebuild_ordered_meta_chunk_stripes(struct recover_control *rc,
					 struct chunk_record *chunk)
{
	u64 start = chunk->offset;
	u64 end = chunk->offset + chunk->length;
	struct cache_extent *cache;
	struct extent_record *er;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *device;
	LIST_HEAD(devexts);
	int index;
	int mirror;
	int ret;

	cache = lookup_cache_extent(&rc->eb_cache,
				    start, chunk->length);
	if (!cache) {
		/* No used space, we can reorder the stripes freely. */
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);
		return ret;
	}

	list_splice_init(&chunk->dextents, &devexts);
again:
	er = container_of(cache, struct extent_record, cache);
	index = btrfs_calc_stripe_index(chunk, er->cache.start);
	BUG_ON(index == -1);
	if (chunk->stripes[index].devid)
		goto next;
	list_for_each_entry_safe(devext, next, &devexts, chunk_list) {
		if (is_extent_record_in_device_extent(er, devext, &mirror)) {
			chunk->stripes[index].devid = devext->objectid;
			chunk->stripes[index].offset = devext->offset;
			memcpy(chunk->stripes[index].dev_uuid,
			       er->devices[mirror]->uuid,
			       BTRFS_UUID_SIZE);
			list_move(&devext->chunk_list, &chunk->dextents);
			break;
		}
	}
next:
	start = btrfs_next_stripe_logical_offset(chunk, er->cache.start);
	if (start >= end)
		goto no_extent_record;

	cache = lookup_cache_extent(&rc->eb_cache, start, end - start);
	if (cache)
		goto again;
no_extent_record:
	if (list_empty(&devexts))
		return 0;

	if (chunk->type_flags & (BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_RAID6)) {
		/* Fixme: try to recover the order by the parity block. */
		list_splice_tail(&devexts, &chunk->dextents);
		return -EINVAL;
	}

	/* There is no data on the lost stripes, we can reorder them freely. */
	for (index = 0; index < chunk->num_stripes; index++) {
		if (chunk->stripes[index].devid)
			continue;

		devext = list_first_entry(&devexts,
					  struct device_extent_record,
					  chunk_list);
		list_move(&devext->chunk_list, &chunk->dextents);

		chunk->stripes[index].devid = devext->objectid;
		chunk->stripes[index].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device) {
			list_splice_tail(&devexts, &chunk->dextents);
			return -EINVAL;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[index].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
	}
	return 0;
}
#define BTRFS_ORDERED_RAID	(BTRFS_BLOCK_GROUP_RAID0 |	\
				 BTRFS_BLOCK_GROUP_RAID10 |	\
				 BTRFS_BLOCK_GROUP_RAID5 |	\
				 BTRFS_BLOCK_GROUP_RAID6)
static int btrfs_rebuild_chunk_stripes(struct recover_control *rc,
				       struct chunk_record *chunk)
{
	int ret;

	/*
	 * All the data in the system metadata chunk will be dropped,
	 * so we need not guarantee that the data is right; that is,
	 * we can reorder the stripes in the system metadata chunk.
	 */
	if ((chunk->type_flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = btrfs_rebuild_ordered_meta_chunk_stripes(rc, chunk);
	else if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA) &&
		 (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = 1;	/* Be handled after the fs is opened. */
	else
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);

	return ret;
}
static int next_csum(struct btrfs_root *root,
		     struct extent_buffer **leaf,
		     struct btrfs_path *path,
		     int *slot,
		     u64 *csum_offset,
		     u32 *tree_csum,
		     u64 end,
		     struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_root *csum_root = root->fs_info->csum_root;
	struct btrfs_csum_item *csum_item;
	u32 blocksize = root->fs_info->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item = btrfs_item_size_nr(*leaf, *slot) / csum_size;

	if (*csum_offset >= csums_in_item) {
		++(*slot);
		*csum_offset = 0;
		if (*slot >= btrfs_header_nritems(*leaf)) {
			ret = btrfs_next_leaf(csum_root, path);
			if (ret < 0)
				return -1;
			else if (ret > 0)
				return 1;
			*leaf = path->nodes[0];
			*slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(*leaf, key, *slot);
	}

	if (key->offset + (*csum_offset) * blocksize >= end)
		return 2;
	csum_item = btrfs_item_ptr(*leaf, *slot, struct btrfs_csum_item);
	csum_item = (struct btrfs_csum_item *)((unsigned char *)csum_item
					       + (*csum_offset) * csum_size);
	read_extent_buffer(*leaf, tree_csum,
			   (unsigned long)csum_item, csum_size);
	return ret;
}

static u64 calc_data_offset(struct btrfs_key *key,
			    struct chunk_record *chunk,
			    u64 dev_offset,
			    u64 csum_offset,
			    u32 blocksize)
{
	u64 data_offset;
	int logical_stripe_nr;
	int dev_stripe_nr;
	int nr_data_stripes;

	data_offset = key->offset + csum_offset * blocksize - chunk->offset;
	nr_data_stripes = chunk->num_stripes;

	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5)
		nr_data_stripes -= 1;
	else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6)
		nr_data_stripes -= 2;

	logical_stripe_nr = data_offset / chunk->stripe_len;
	dev_stripe_nr = logical_stripe_nr / nr_data_stripes;

	data_offset -= logical_stripe_nr * chunk->stripe_len;
	data_offset += dev_stripe_nr * chunk->stripe_len;

	return dev_offset + data_offset;
}
static int check_one_csum(int fd, u64 start, u32 len, u32 tree_csum)
{
	char *data;
	int ret = 0;
	u32 csum_result = ~(u32)0;

	data = malloc(len);
	if (!data)
		return -1;
	ret = pread64(fd, data, len, start);
	if (ret < 0 || ret != len) {
		ret = -1;
		goto out;
	}
	ret = 0;
	csum_result = btrfs_csum_data(data, csum_result, len);
	btrfs_csum_final(csum_result, (u8 *)&csum_result);
	if (csum_result != tree_csum)
		ret = 1;
out:
	free(data);
	return ret;
}

static u64 item_end_offset(struct btrfs_root *root, struct btrfs_key *key,
			   struct extent_buffer *leaf, int slot)
{
	u32 blocksize = root->fs_info->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	u64 offset = btrfs_item_size_nr(leaf, slot);
	offset /= csum_size;
	offset *= blocksize;
	offset += key->offset;

	return offset;
}
static int insert_stripe(struct list_head *devexts,
			 struct recover_control *rc,
			 struct chunk_record *chunk,
			 int index)
{
	struct device_extent_record *devext;
	struct btrfs_device *dev;

	devext = list_entry(devexts->next, struct device_extent_record,
			    chunk_list);
	dev = btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					 0);
	if (!dev)
		return -ENODEV;
	if (btrfs_find_device_by_devid(rc->fs_devices, devext->objectid, 1)) {
		error("unexpected: found another device with id %llu",
		      (unsigned long long)devext->objectid);
		return -EINVAL;
	}

	chunk->stripes[index].devid = devext->objectid;
	chunk->stripes[index].offset = devext->offset;
	memcpy(chunk->stripes[index].dev_uuid, dev->uuid, BTRFS_UUID_SIZE);

	list_move(&devext->chunk_list, &chunk->dextents);

	return 0;
}
static inline int count_devext_records(struct list_head *record_list)
{
	int num_of_records = 0;
	struct device_extent_record *devext;

	list_for_each_entry(devext, record_list, chunk_list)
		num_of_records++;

	return num_of_records;
}
static int fill_chunk_up(struct chunk_record *chunk, struct list_head *devexts,
			 struct recover_control *rc)
{
	int ret = 0;
	int i;

	for (i = 0; i < chunk->num_stripes; i++) {
		if (!chunk->stripes[i].devid) {
			ret = insert_stripe(devexts, rc, chunk, i);
			if (ret)
				break;
		}
	}

	return ret;
}
#define EQUAL_STRIPE (1 << 0)
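/*
 * Recover the stripe order of a RAID5/6 data chunk by probing: for each
 * logical stripe, read the data at the candidate device offsets and keep
 * only the device whose contents match the checksum tree.  Candidates
 * that fail are set aside and retried on later stripes; EQUAL_STRIPE is
 * reported when several devices carry identical data and the order
 * cannot be decided.
 */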
static int rebuild_raid_data_chunk_stripes(struct recover_control *rc,
					   struct btrfs_root *root,
					   struct chunk_record *chunk,
					   u8 *flags)
{
	int i;
	int ret = 0;
	int slot;
	struct btrfs_path path;
	struct btrfs_key prev_key;
	struct btrfs_key key;
	struct btrfs_root *csum_root;
	struct extent_buffer *leaf;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *dev;
	u64 start = chunk->offset;
	u64 end = start + chunk->stripe_len;
	u64 chunk_end = chunk->offset + chunk->length;
	u64 csum_offset = 0;
	u64 data_offset;
	u32 blocksize = root->fs_info->sectorsize;
	u32 tree_csum;
	int index = 0;
	int num_unordered = 0;
	LIST_HEAD(unordered);
	LIST_HEAD(candidates);

	csum_root = root->fs_info->csum_root;
	btrfs_init_path(&path);
	list_splice_init(&chunk->dextents, &candidates);
again:
	if (list_is_last(candidates.next, &candidates))
		goto out;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, csum_root, &key, &path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search csum failed(%d)\n", ret);
		goto fail_out;
	}
	leaf = path.nodes[0];
	slot = path.slots[0];
	if (ret > 0) {
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(csum_root, &path);
			if (ret < 0) {
				fprintf(stderr,
					"Walk tree failed(%d)\n", ret);
				goto fail_out;
			} else if (ret > 0) {
				slot = btrfs_header_nritems(leaf) - 1;
				btrfs_item_key_to_cpu(leaf, &key, slot);
				if (item_end_offset(root, &key, leaf, slot)
						> start) {
					csum_offset = start - key.offset;
					csum_offset /= blocksize;
					goto next_csum;
				}
				goto next_stripe;
			}
			leaf = path.nodes[0];
			slot = path.slots[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		ret = btrfs_previous_item(csum_root, &path, 0,
					  BTRFS_EXTENT_CSUM_KEY);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0) {
			if (key.offset >= end)
				goto next_stripe;
			else
				goto next_csum;
		}
		leaf = path.nodes[0];
		slot = path.slots[0];

		btrfs_item_key_to_cpu(leaf, &prev_key, slot);
		if (item_end_offset(root, &prev_key, leaf, slot) > start) {
			csum_offset = start - prev_key.offset;
			csum_offset /= blocksize;
			btrfs_item_key_to_cpu(leaf, &key, slot);
		} else {
			if (key.offset >= end)
				goto next_stripe;
		}

		if (key.offset + csum_offset * blocksize > chunk_end)
			goto out;
	}
next_csum:
	ret = next_csum(root, &leaf, &path, &slot, &csum_offset, &tree_csum,
			end, &key);
	if (ret < 0) {
		fprintf(stderr, "Fetch csum failed\n");
		goto fail_out;
	} else if (ret == 1) {
		if (!(*flags & EQUAL_STRIPE))
			*flags |= EQUAL_STRIPE;
		goto out;
	} else if (ret == 2)
		goto next_stripe;

	list_for_each_entry_safe(devext, next, &candidates, chunk_list) {
		data_offset = calc_data_offset(&key, chunk, devext->offset,
					       csum_offset, blocksize);
		dev = btrfs_find_device_by_devid(rc->fs_devices,
						 devext->objectid, 0);
		if (!dev) {
			ret = 1;
			goto fail_out;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid, 1));

		ret = check_one_csum(dev->fd, data_offset, blocksize,
				     tree_csum);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0)
			list_move(&devext->chunk_list, &unordered);
	}

	if (list_empty(&candidates)) {
		num_unordered = count_devext_records(&unordered);
		if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6
					&& num_unordered == 2) {
			btrfs_release_path(&path);
			ret = fill_chunk_up(chunk, &unordered, rc);
			return ret;
		}

		goto next_stripe;
	}

	if (list_is_last(candidates.next, &candidates)) {
		index = btrfs_calc_stripe_index(chunk,
			key.offset + csum_offset * blocksize);
		BUG_ON(index == -1);
		if (chunk->stripes[index].devid)
			goto next_stripe;
		ret = insert_stripe(&candidates, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		csum_offset++;
		goto next_csum;
	}
next_stripe:
	start = btrfs_next_stripe_logical_offset(chunk, start);
	end = min(start + chunk->stripe_len, chunk_end);
	list_splice_init(&unordered, &candidates);
	btrfs_release_path(&path);
	csum_offset = 0;
	if (end < chunk_end)
		goto again;
out:
	list_splice_init(&candidates, &unordered);
	num_unordered = count_devext_records(&unordered);
	if (num_unordered == 1) {
		for (i = 0; i < chunk->num_stripes; i++) {
			if (!chunk->stripes[i].devid) {
				index = i;
				break;
			}
		}
		ret = insert_stripe(&unordered, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		if ((num_unordered == 2 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID5)
		 || (num_unordered == 3 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID6)) {
			ret = fill_chunk_up(chunk, &unordered, rc);
		}
	}
fail_out:
	ret = !!ret || (list_empty(&unordered) ? 0 : 1);
	list_splice_init(&candidates, &chunk->dextents);
	list_splice_init(&unordered, &chunk->dextents);
	btrfs_release_path(&path);

	return ret;
}
static int btrfs_rebuild_ordered_data_chunk_stripes(struct recover_control *rc,
						    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	int ret = 0;
	int err;
	u8 flags;

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA)
		 && (chunk->type_flags & BTRFS_ORDERED_RAID)) {
			flags = 0;
			err = rebuild_raid_data_chunk_stripes(rc, root, chunk,
							      &flags);
			if (err) {
				list_move(&chunk->list, &rc->bad_chunks);
				if (flags & EQUAL_STRIPE)
					fprintf(stderr,
			"Failure: too many equal stripes in chunk[%llu %llu]\n",
						chunk->offset, chunk->length);
				if (!ret)
					ret = err;
			} else
				list_move(&chunk->list, &rc->good_chunks);
		}
	}
	return ret;
}
static int btrfs_recover_chunks(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct block_group_record *next;
	LIST_HEAD(new_chunks);
	LIST_HEAD(devexts);
	int nstripes;
	int ret = 0;

	/* create the chunk by block group */
	list_for_each_entry_safe(bg, next, &rc->bg.block_groups, list) {
		nstripes = btrfs_get_device_extents(bg->objectid,
						    &rc->devext.no_chunk_orphans,
						    &devexts);
		chunk = calloc(1, btrfs_chunk_record_size(nstripes));
		if (!chunk)
			return -ENOMEM;
		INIT_LIST_HEAD(&chunk->dextents);
		chunk->bg_rec = bg;
		chunk->cache.start = bg->objectid;
		chunk->cache.size = bg->offset;
		chunk->objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		chunk->type = BTRFS_CHUNK_ITEM_KEY;
		chunk->offset = bg->objectid;
		chunk->generation = bg->generation;
		chunk->length = bg->offset;
		chunk->owner = BTRFS_CHUNK_TREE_OBJECTID;
		chunk->stripe_len = BTRFS_STRIPE_LEN;
		chunk->type_flags = bg->flags;
		chunk->io_width = BTRFS_STRIPE_LEN;
		chunk->io_align = BTRFS_STRIPE_LEN;
		chunk->sector_size = rc->sectorsize;
		chunk->sub_stripes = calc_sub_nstripes(bg->flags);

		ret = insert_cache_extent(&rc->chunk, &chunk->cache);
		if (ret == -EEXIST) {
			error("duplicate entry in cache start %llu size %llu",
					(unsigned long long)chunk->cache.start,
					(unsigned long long)chunk->cache.size);
			free(chunk);
			return ret;
		}
		BUG_ON(ret);

		list_del_init(&bg->list);
		if (!nstripes) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		list_splice_init(&devexts, &chunk->dextents);

		ret = btrfs_verify_device_extents(bg, &devexts, nstripes);
		if (ret) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		chunk->num_stripes = nstripes;
		ret = btrfs_rebuild_chunk_stripes(rc, chunk);
		if (ret > 0)
			list_add_tail(&chunk->list, &rc->unrepaired_chunks);
		else if (ret < 0)
			list_add_tail(&chunk->list, &rc->bad_chunks);
		else
			list_add_tail(&chunk->list, &rc->good_chunks);
	}
	/*
	 * Don't worry about the lost orphan device extents; they don't
	 * have their chunk and block group, so they must be the old ones
	 * that we have dropped.
	 */
	return 0;
}
static inline int is_chunk_overlap(struct chunk_record *chunk1,
				   struct chunk_record *chunk2)
{
	if (chunk1->offset >= chunk2->offset + chunk2->length ||
	    chunk1->offset + chunk1->length <= chunk2->offset)
		return 0;
	return 1;
}
/* Move invalid (overlapping with good chunks) rebuild chunks to the bad chunk list */
static void validate_rebuild_chunks(struct recover_control *rc)
{
	struct chunk_record *good;
	struct chunk_record *rebuild;
	struct chunk_record *tmp;

	list_for_each_entry_safe(rebuild, tmp, &rc->rebuild_chunks, list) {
		list_for_each_entry(good, &rc->good_chunks, list) {
			if (is_chunk_overlap(rebuild, good)) {
				list_move_tail(&rebuild->list,
					       &rc->bad_chunks);
				break;
			}
		}
	}
}
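/*
 * Entry point: scan the devices, rebuild chunk/block-group/device-extent
 * records from what is left on disk, open the filesystem with those
 * in-memory maps, then rewrite the chunk tree, device items, system
 * array and block group items in one transaction.
 */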
/*
 * Return 0 when successful, < 0 on error and > 0 if aborted by user
 */
int btrfs_recover_chunk_tree(char *path, int verbose, int yes)
{
	int ret = 0;
	struct btrfs_root *root = NULL;
	struct btrfs_trans_handle *trans;
	struct recover_control rc;

	init_recover_control(&rc, verbose, yes);

	ret = recover_prepare(&rc, path);
	if (ret) {
		fprintf(stderr, "recover prepare error\n");
		return ret;
	}

	ret = scan_devices(&rc);
	if (ret) {
		fprintf(stderr, "scan chunk headers error\n");
		goto fail_rc;
	}

	if (cache_tree_empty(&rc.chunk) &&
	    cache_tree_empty(&rc.bg.tree) &&
	    cache_tree_empty(&rc.devext.tree)) {
		fprintf(stderr, "no recoverable chunk\n");
		goto fail_rc;
	}

	print_scan_result(&rc);

	ret = check_chunks(&rc.chunk, &rc.bg, &rc.devext, &rc.good_chunks,
			   &rc.bad_chunks, &rc.rebuild_chunks, 1);
	if (ret) {
		if (!list_empty(&rc.bg.block_groups) ||
		    !list_empty(&rc.devext.no_chunk_orphans)) {
			ret = btrfs_recover_chunks(&rc);
			if (ret)
				goto fail_rc;
		}
	} else {
		print_check_result(&rc);
		printf("Check chunks successfully with no orphans\n");
		goto fail_rc;
	}
	validate_rebuild_chunks(&rc);
	print_check_result(&rc);

	root = open_ctree_with_broken_chunk(&rc);
	if (IS_ERR(root)) {
		fprintf(stderr, "open with broken chunk error\n");
		ret = PTR_ERR(root);
		goto fail_rc;
	}

	ret = check_all_chunks_by_metadata(&rc, root);
	if (ret) {
		fprintf(stderr, "The chunks in memory can not match the metadata of the fs. Repair failed.\n");
		goto fail_close_ctree;
	}

	ret = btrfs_rebuild_ordered_data_chunk_stripes(&rc, root);
	if (ret) {
		fprintf(stderr, "Failed to rebuild ordered chunk stripes.\n");
		goto fail_close_ctree;
	}

	if (!rc.yes) {
		ret = ask_user("We are going to rebuild the chunk tree on disk, it might destroy the old metadata on the disk, Are you sure?");
		if (!ret) {
			ret = 1;
			goto fail_close_ctree;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	ret = remove_chunk_extent_item(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_chunk_tree(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_sys_array(&rc, root);
	BUG_ON(ret);

	ret = rebuild_block_group(trans, &rc, root);
	if (ret) {
		printf("Fail to rebuild block groups.\n");
		printf("Recommend to run 'btrfs check --init-extent-tree <dev>' after recovery\n");
	}

	btrfs_commit_transaction(trans, root);
fail_close_ctree:
	close_ctree(root);
fail_rc:
	free_recover_control(&rc);
	return ret;
}