/*
 * Copyright (C) 2013 FUJITSU LIMITED.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include "kerncompat.h"
#include "androidcompat.h"

#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include <pthread.h>

#include "list.h"
#include "radix-tree.h"
#include "ctree.h"
#include "extent-cache.h"
#include "disk-io.h"
#include "volumes.h"
#include "transaction.h"
#include "utils.h"
struct recover_control {
	int verbose;
	int yes;

	u16 csum_size;
	u32 sectorsize;
	u32 nodesize;
	u64 generation;
	u64 chunk_root_generation;

	struct btrfs_fs_devices *fs_devices;

	struct cache_tree chunk;
	struct block_group_tree bg;
	struct device_extent_tree devext;
	struct cache_tree eb_cache;

	struct list_head good_chunks;
	struct list_head bad_chunks;
	struct list_head rebuild_chunks;
	struct list_head unrepaired_chunks;
	pthread_mutex_t rc_lock;
};
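/*
 * Record of a tree block seen during the device scan.  One record tracks
 * every mirror (device + physical offset) on which a copy of the block
 * with the same generation and checksum was found.
 */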
struct extent_record {
	struct cache_extent cache;
	u64 generation;
	u8 csum[BTRFS_CSUM_SIZE];
	struct btrfs_device *devices[BTRFS_MAX_MIRRORS];
	u64 offsets[BTRFS_MAX_MIRRORS];
	int nmirrors;
};

struct device_scan {
	struct recover_control *rc;
	struct btrfs_device *dev;
	int fd;
	u64 bytenr;
};
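/* Allocate an extent record and fill it from the given tree block header. */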
static struct extent_record *btrfs_new_extent_record(struct extent_buffer *eb)
{
	struct extent_record *rec;

	rec = calloc(1, sizeof(*rec));
	if (!rec) {
		fprintf(stderr, "Fail to allocate memory for extent record.\n");
		exit(1);
	}

	rec->cache.start = btrfs_header_bytenr(eb);
	rec->cache.size = eb->len;
	rec->generation = btrfs_header_generation(eb);
	read_extent_buffer(eb, rec->csum, (unsigned long)btrfs_header_csum(eb),
			   BTRFS_CSUM_SIZE);
	return rec;
}
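/*
 * Insert a scanned tree block into the eb cache.  Blocks with a newer
 * generation replace stale copies; identical copies of the same generation
 * are recorded as additional mirrors of the existing record.
 */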
static int process_extent_buffer(struct cache_tree *eb_cache,
				 struct extent_buffer *eb,
				 struct btrfs_device *device, u64 offset)
{
	struct extent_record *rec;
	struct extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_extent_record(eb);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(eb_cache,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct extent_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			if (exist->cache.start != rec->cache.start ||
			    exist->cache.size != rec->cache.size ||
			    memcmp(exist->csum, rec->csum, BTRFS_CSUM_SIZE)) {
				ret = -EEXIST;
			} else {
				BUG_ON(exist->nmirrors >= BTRFS_MAX_MIRRORS);
				exist->devices[exist->nmirrors] = device;
				exist->offsets[exist->nmirrors] = offset;
				exist->nmirrors++;
			}
			goto free_out;
		}
		remove_cache_extent(eb_cache, cache);
		free(exist);
		goto again;
	}

	rec->devices[0] = device;
	rec->offsets[0] = offset;
	rec->nmirrors++;
	ret = insert_cache_extent(eb_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void free_extent_record(struct cache_extent *cache)
{
	struct extent_record *er;

	er = container_of(cache, struct extent_record, cache);
	free(er);
}

FREE_EXTENT_CACHE_BASED_TREE(extent_record, free_extent_record);
static struct btrfs_chunk *create_chunk_item(struct chunk_record *record)
{
	struct btrfs_chunk *ret;
	struct btrfs_stripe *chunk_stripe;
	int i;

	if (!record || record->num_stripes == 0)
		return NULL;
	ret = malloc(btrfs_chunk_item_size(record->num_stripes));
	if (!ret)
		return NULL;
	btrfs_set_stack_chunk_length(ret, record->length);
	btrfs_set_stack_chunk_owner(ret, record->owner);
	btrfs_set_stack_chunk_stripe_len(ret, record->stripe_len);
	btrfs_set_stack_chunk_type(ret, record->type_flags);
	btrfs_set_stack_chunk_io_align(ret, record->io_align);
	btrfs_set_stack_chunk_io_width(ret, record->io_width);
	btrfs_set_stack_chunk_sector_size(ret, record->sector_size);
	btrfs_set_stack_chunk_num_stripes(ret, record->num_stripes);
	btrfs_set_stack_chunk_sub_stripes(ret, record->sub_stripes);
	for (i = 0, chunk_stripe = &ret->stripe; i < record->num_stripes;
	     i++, chunk_stripe++) {
		btrfs_set_stack_stripe_devid(chunk_stripe,
				record->stripes[i].devid);
		btrfs_set_stack_stripe_offset(chunk_stripe,
				record->stripes[i].offset);
		memcpy(chunk_stripe->dev_uuid, record->stripes[i].dev_uuid,
		       BTRFS_UUID_SIZE);
	}
	return ret;
}
static void init_recover_control(struct recover_control *rc, int verbose,
				 int yes)
{
	memset(rc, 0, sizeof(struct recover_control));

	cache_tree_init(&rc->chunk);
	cache_tree_init(&rc->eb_cache);
	block_group_tree_init(&rc->bg);
	device_extent_tree_init(&rc->devext);

	INIT_LIST_HEAD(&rc->good_chunks);
	INIT_LIST_HEAD(&rc->bad_chunks);
	INIT_LIST_HEAD(&rc->rebuild_chunks);
	INIT_LIST_HEAD(&rc->unrepaired_chunks);

	rc->verbose = verbose;
	rc->yes = yes;
	pthread_mutex_init(&rc->rc_lock, NULL);
}

static void free_recover_control(struct recover_control *rc)
{
	free_block_group_tree(&rc->bg);
	free_chunk_cache_tree(&rc->chunk);
	free_device_extent_tree(&rc->devext);
	free_extent_record_tree(&rc->eb_cache);
	pthread_mutex_destroy(&rc->rc_lock);
}
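/*
 * The three process_*_item() helpers below share one pattern: build a new
 * record from the leaf item, look up an existing record covering the same
 * range, and keep whichever copy has the higher generation.
 */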
static int process_block_group_item(struct block_group_tree *bg_cache,
				    struct extent_buffer *leaf,
				    struct btrfs_key *key, int slot)
{
	struct block_group_record *rec;
	struct block_group_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_block_group_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(&bg_cache->tree,
				    rec->cache.start,
				    rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct block_group_record, cache);

		/* Check the generation and replace if needed. */
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct block_group_record,
					      generation);
			/*
			 * According to the current kernel code, the following
			 * case is impossible, or there is something wrong in
			 * the fs.
			 */
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&bg_cache->tree, cache);
		list_del_init(&exist->list);
		free(exist);
		/*
		 * We must do the search again to avoid the following cache:
		 *	/--old bg 1--//--old bg 2--/
		 *	/----------new bg---------/
		 */
		goto again;
	}

	ret = insert_block_group_record(bg_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_chunk_item(struct cache_tree *chunk_cache,
			      struct extent_buffer *leaf, struct btrfs_key *key,
			      int slot)
{
	struct chunk_record *rec;
	struct chunk_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_chunk_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent(chunk_cache, rec->offset, rec->length);
	if (cache) {
		exist = container_of(cache, struct chunk_record, cache);

		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int num_stripes = rec->num_stripes;
			int rec_size = btrfs_chunk_record_size(num_stripes);
			int offset = offsetof(struct chunk_record, generation);

			if (exist->num_stripes != rec->num_stripes ||
			    memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   rec_size - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(chunk_cache, cache);
		free(exist);
		goto again;
	}
	ret = insert_cache_extent(chunk_cache, &rec->cache);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static int process_device_extent_item(struct device_extent_tree *devext_cache,
				      struct extent_buffer *leaf,
				      struct btrfs_key *key, int slot)
{
	struct device_extent_record *rec;
	struct device_extent_record *exist;
	struct cache_extent *cache;
	int ret = 0;

	rec = btrfs_new_device_extent_record(leaf, key, slot);
	if (!rec->cache.size)
		goto free_out;
again:
	cache = lookup_cache_extent2(&devext_cache->tree,
				     rec->cache.objectid,
				     rec->cache.start,
				     rec->cache.size);
	if (cache) {
		exist = container_of(cache, struct device_extent_record, cache);
		if (exist->generation > rec->generation)
			goto free_out;
		if (exist->generation == rec->generation) {
			int offset = offsetof(struct device_extent_record,
					      generation);
			if (memcmp(((void *)exist) + offset,
				   ((void *)rec) + offset,
				   sizeof(*rec) - offset))
				ret = -EEXIST;
			goto free_out;
		}
		remove_cache_extent(&devext_cache->tree, cache);
		list_del_init(&exist->chunk_list);
		list_del_init(&exist->device_list);
		free(exist);
		goto again;
	}

	ret = insert_device_extent_record(devext_cache, rec);
	BUG_ON(ret);
out:
	return ret;
free_out:
	free(rec);
	goto out;
}
static void print_block_group_info(struct block_group_record *rec, char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Block Group: start = %llu, len = %llu, flag = %llx\n",
	       rec->objectid, rec->offset, rec->flags);
}
static void print_block_group_tree(struct block_group_tree *tree)
{
	struct cache_extent *cache;
	struct block_group_record *rec;

	printf("All Block Groups:\n");
	for (cache = first_cache_extent(&tree->tree); cache;
	     cache = next_cache_extent(cache)) {
		rec = container_of(cache, struct block_group_record, cache);
		print_block_group_info(rec, "\t");
	}
	printf("\n");
}
static void print_stripe_info(struct stripe *data, char *prefix1, char *prefix2,
			      int index)
{
	if (prefix1)
		printf("%s", prefix1);
	if (prefix2)
		printf("%s", prefix2);
	printf("[%2d] Stripe: devid = %llu, offset = %llu\n",
	       index, data->devid, data->offset);
}
static void print_chunk_self_info(struct chunk_record *rec, char *prefix)
{
	int i;

	if (prefix)
		printf("%s", prefix);
	printf("Chunk: start = %llu, len = %llu, type = %llx, num_stripes = %u\n",
	       rec->offset, rec->length, rec->type_flags, rec->num_stripes);
	if (prefix)
		printf("%s", prefix);
	printf("    Stripes list:\n");
	for (i = 0; i < rec->num_stripes; i++)
		print_stripe_info(&rec->stripes[i], prefix, "    ", i);
}
static void print_chunk_tree(struct cache_tree *tree)
{
	struct cache_extent *n;
	struct chunk_record *entry;

	printf("All Chunks:\n");
	for (n = first_cache_extent(tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct chunk_record, cache);
		print_chunk_self_info(entry, "\t");
	}
	printf("\n");
}
static void print_device_extent_info(struct device_extent_record *rec,
				     char *prefix)
{
	if (prefix)
		printf("%s", prefix);
	printf("Device extent: devid = %llu, start = %llu, len = %llu, chunk offset = %llu\n",
	       rec->objectid, rec->offset, rec->length, rec->chunk_offset);
}
static void print_device_extent_tree(struct device_extent_tree *tree)
{
	struct cache_extent *n;
	struct device_extent_record *entry;

	printf("All Device Extents:\n");
	for (n = first_cache_extent(&tree->tree); n;
	     n = next_cache_extent(n)) {
		entry = container_of(n, struct device_extent_record, cache);
		print_device_extent_info(entry, "\t");
	}
	printf("\n");
}
static void print_scan_result(struct recover_control *rc)
{
	if (!rc->verbose)
		return;

	printf("DEVICE SCAN RESULT:\n");
	printf("Filesystem Information:\n");
	printf("\tsectorsize: %d\n", rc->sectorsize);
	printf("\tnodesize: %d\n", rc->nodesize);
	printf("\ttree root generation: %llu\n", rc->generation);
	printf("\tchunk root generation: %llu\n", rc->chunk_root_generation);
	printf("\n");

	print_all_devices(&rc->fs_devices->devices);
	print_block_group_tree(&rc->bg);
	print_chunk_tree(&rc->chunk);
	print_device_extent_tree(&rc->devext);
}
static void print_chunk_info(struct chunk_record *chunk, char *prefix)
{
	struct device_extent_record *devext;
	int i;

	print_chunk_self_info(chunk, prefix);
	if (prefix)
		printf("%s", prefix);
	if (chunk->bg_rec)
		print_block_group_info(chunk->bg_rec, "    ");
	else
		printf("    No block group.\n");
	if (prefix)
		printf("%s", prefix);
	if (list_empty(&chunk->dextents)) {
		printf("    No device extent.\n");
	} else {
		printf("    Device extent list:\n");
		i = 0;
		list_for_each_entry(devext, &chunk->dextents, chunk_list) {
			if (prefix)
				printf("%s", prefix);
			printf("%s[%2d]", "        ", i);
			print_device_extent_info(devext, NULL);
			i++;
		}
	}
}
static void print_check_result(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct device_extent_record *devext;
	int total = 0;
	int good = 0;
	int bad = 0;

	if (!rc->verbose)
		return;

	printf("CHECK RESULT:\n");
	printf("Recoverable Chunks:\n");
	list_for_each_entry(chunk, &rc->good_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	list_for_each_entry(chunk, &rc->rebuild_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	list_for_each_entry(chunk, &rc->unrepaired_chunks, list) {
		print_chunk_info(chunk, "  ");
		good++;
		total++;
	}
	printf("Unrecoverable Chunks:\n");
	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		print_chunk_info(chunk, "  ");
		bad++;
		total++;
	}

	printf("\n");
	printf("Total Chunks:\t\t%d\n", total);
	printf("  Recoverable:\t\t%d\n", good);
	printf("  Unrecoverable:\t%d\n", bad);

	printf("\n");
	printf("Orphan Block Groups:\n");
	list_for_each_entry(bg, &rc->bg.block_groups, list)
		print_block_group_info(bg, "  ");

	printf("\n");
	printf("Orphan Device Extents:\n");
	list_for_each_entry(devext, &rc->devext.no_chunk_orphans, chunk_list)
		print_device_extent_info(devext, "  ");
	printf("\n");
}
static int check_chunk_by_metadata(struct recover_control *rc,
				   struct btrfs_root *root,
				   struct chunk_record *chunk, int bg_only)
{
	int ret;
	int i;
	int slot;
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_root *dev_root;
	struct stripe *stripe;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_block_group_item *bg_ptr;
	struct extent_buffer *l;

	btrfs_init_path(&path);

	if (bg_only)
		goto bg_check;

	dev_root = root->fs_info->dev_root;
	for (i = 0; i < chunk->num_stripes; i++) {
		stripe = &chunk->stripes[i];

		key.objectid = stripe->devid;
		key.offset = stripe->offset;
		key.type = BTRFS_DEV_EXTENT_KEY;

		ret = btrfs_search_slot(NULL, dev_root, &key, &path, 0, 0);
		if (ret < 0) {
			fprintf(stderr, "Search device extent failed(%d)\n",
				ret);
			btrfs_release_path(&path);
			return ret;
		} else if (ret > 0) {
			if (rc->verbose)
				fprintf(stderr,
					"No device extent[%llu, %llu]\n",
					stripe->devid, stripe->offset);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		l = path.nodes[0];
		slot = path.slots[0];
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		if (chunk->offset !=
		    btrfs_dev_extent_chunk_offset(l, dev_extent)) {
			if (rc->verbose)
				fprintf(stderr,
					"Device tree mismatch with chunks dev_extent[%llu, %llu], chunk[%llu, %llu]\n",
					btrfs_dev_extent_chunk_offset(l,
								dev_extent),
					btrfs_dev_extent_length(l, dev_extent),
					chunk->offset, chunk->length);
			btrfs_release_path(&path);
			return -ENOENT;
		}
		btrfs_release_path(&path);
	}

bg_check:
	key.objectid = chunk->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk->length;

	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, &path,
				0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search block group failed(%d)\n", ret);
		btrfs_release_path(&path);
		return ret;
	} else if (ret > 0) {
		if (rc->verbose)
			fprintf(stderr, "No block group[%llu, %llu]\n",
				key.objectid, key.offset);
		btrfs_release_path(&path);
		return -ENOENT;
	}

	l = path.nodes[0];
	slot = path.slots[0];
	bg_ptr = btrfs_item_ptr(l, slot, struct btrfs_block_group_item);
	if (chunk->type_flags != btrfs_disk_block_group_flags(l, bg_ptr)) {
		if (rc->verbose)
			fprintf(stderr,
				"Chunk[%llu, %llu]'s type(%llu) is different with Block Group's type(%llu)\n",
				chunk->offset, chunk->length, chunk->type_flags,
				btrfs_disk_block_group_flags(l, bg_ptr));
		btrfs_release_path(&path);
		return -ENOENT;
	}
	btrfs_release_path(&path);
	return 0;
}
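/*
 * Verify every recovered chunk against the metadata of the newly opened
 * filesystem; chunks whose device extents or block group items cannot be
 * found any more are moved to the bad chunk list.
 */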
static int check_all_chunks_by_metadata(struct recover_control *rc,
					struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	LIST_HEAD(orphan_chunks);
	int ret = 0;
	int err;

	list_for_each_entry_safe(chunk, next, &rc->good_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 0);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err == -ENOENT)
			list_move_tail(&chunk->list, &orphan_chunks);
		else if (err && !ret)
			ret = err;
	}

	list_for_each_entry(chunk, &rc->bad_chunks, list) {
		err = check_chunk_by_metadata(rc, root, chunk, 1);
		if (err != -ENOENT && !ret)
			ret = err ? err : -EINVAL;
	}
	list_splice(&orphan_chunks, &rc->bad_chunks);
	return ret;
}
static int extract_metadata_record(struct recover_control *rc,
				   struct extent_buffer *leaf)
{
	struct btrfs_key key;
	int ret = 0;
	int i;
	u32 nritems;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_BLOCK_GROUP_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_block_group_item(&rc->bg, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_CHUNK_ITEM_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_chunk_item(&rc->chunk, leaf, &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		case BTRFS_DEV_EXTENT_KEY:
			pthread_mutex_lock(&rc->rc_lock);
			ret = process_device_extent_item(&rc->devext, leaf,
							 &key, i);
			pthread_mutex_unlock(&rc->rc_lock);
			break;
		default:
			break;
		}
		if (ret)
			break;
	}
	return ret;
}
static inline int is_super_block_address(u64 offset)
{
	int i;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		if (offset == btrfs_sb_offset(i))
			return 1;
	}
	return 0;
}
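/*
 * Worker thread body: walk one device sector by sector, pick out the tree
 * blocks that carry this filesystem's fsid and a valid checksum, and feed
 * chunk, block group and device extent items into the recover_control
 * caches.
 */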
static int scan_one_device(void *dev_scan_struct)
{
	struct extent_buffer *buf;
	u64 bytenr;
	int ret = 0;
	struct device_scan *dev_scan = (struct device_scan *)dev_scan_struct;
	struct recover_control *rc = dev_scan->rc;
	struct btrfs_device *device = dev_scan->dev;
	int fd = dev_scan->fd;
	int oldtype;

	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
	if (ret)
		return 1;

	buf = malloc(sizeof(*buf) + rc->nodesize);
	if (!buf)
		return -ENOMEM;
	buf->len = rc->nodesize;

	bytenr = 0;
	while (1) {
		dev_scan->bytenr = bytenr;

		if (is_super_block_address(bytenr))
			bytenr += rc->sectorsize;

		if (pread64(fd, buf->data, rc->nodesize, bytenr) <
		    rc->nodesize)
			break;

		if (memcmp_extent_buffer(buf, rc->fs_devices->fsid,
					 btrfs_header_fsid(),
					 BTRFS_FSID_SIZE)) {
			bytenr += rc->sectorsize;
			continue;
		}

		if (verify_tree_block_csum_silent(buf, rc->csum_size)) {
			bytenr += rc->sectorsize;
			continue;
		}

		pthread_mutex_lock(&rc->rc_lock);
		ret = process_extent_buffer(&rc->eb_cache, buf, device, bytenr);
		pthread_mutex_unlock(&rc->rc_lock);
		if (ret)
			goto out;

		if (btrfs_header_level(buf) != 0)
			goto next_node;

		switch (btrfs_header_owner(buf)) {
		case BTRFS_EXTENT_TREE_OBJECTID:
		case BTRFS_DEV_TREE_OBJECTID:
			/* different tree use different generation */
			if (btrfs_header_generation(buf) > rc->generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		case BTRFS_CHUNK_TREE_OBJECTID:
			if (btrfs_header_generation(buf) >
			    rc->chunk_root_generation)
				break;
			ret = extract_metadata_record(rc, buf);
			if (ret)
				goto out;
			break;
		}
next_node:
		bytenr += rc->nodesize;
	}
out:
	close(fd);
	free(buf);
	return ret;
}
static int scan_devices(struct recover_control *rc)
{
	int ret = 0;
	int fd;
	struct btrfs_device *dev;
	struct device_scan *dev_scans;
	pthread_t *t_scans;
	long *t_rets;
	int devnr = 0;
	int devidx = 0;
	int i;
	int all_done;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list)
		devnr++;
	dev_scans = (struct device_scan *)malloc(sizeof(struct device_scan)
						 * devnr);
	if (!dev_scans)
		return -ENOMEM;
	t_scans = (pthread_t *)malloc(sizeof(pthread_t) * devnr);
	if (!t_scans) {
		free(dev_scans);
		return -ENOMEM;
	}
	t_rets = (long *)malloc(sizeof(long) * devnr);
	if (!t_rets) {
		free(dev_scans);
		free(t_scans);
		return -ENOMEM;
	}

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		fd = open(dev->name, O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "Failed to open device %s\n",
				dev->name);
			ret = 1;
			goto out2;
		}
		dev_scans[devidx].rc = rc;
		dev_scans[devidx].dev = dev;
		dev_scans[devidx].fd = fd;
		dev_scans[devidx].bytenr = -1;
		devidx++;
	}

	for (i = 0; i < devidx; i++) {
		ret = pthread_create(&t_scans[i], NULL,
				     (void *)scan_one_device,
				     (void *)&dev_scans[i]);
		if (ret)
			goto out1;

		dev_scans[i].bytenr = 0;
	}

	while (1) {
		all_done = 1;
		for (i = 0; i < devidx; i++) {
			if (dev_scans[i].bytenr == -1)
				continue;
			ret = pthread_tryjoin_np(t_scans[i],
						 (void **)&t_rets[i]);
			if (ret == EBUSY) {
				all_done = 0;
				continue;
			}
			if (ret || t_rets[i]) {
				ret = 1;
				goto out1;
			}
			dev_scans[i].bytenr = -1;
		}

		printf("\rScanning: ");
		for (i = 0; i < devidx; i++) {
			if (dev_scans[i].bytenr == -1)
				printf("%sDONE in dev%d",
				       i ? ", " : "", i);
			else
				printf("%s%llu in dev%d",
				       i ? ", " : "", dev_scans[i].bytenr, i);
		}
		/* clear chars if exist in tail */
		printf("                ");
		printf("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b");
		fflush(stdout);

		if (all_done) {
			printf("\n");
			break;
		}
		sleep(1);
	}

out1:
	for (i = 0; i < devidx; i++) {
		if (dev_scans[i].bytenr == -1)
			continue;
		pthread_cancel(t_scans[i]);
	}
out2:
	free(dev_scans);
	free(t_scans);
	free(t_rets);
	return !!ret;
}
static int build_device_map_by_chunk_record(struct btrfs_root *root,
					    struct chunk_record *chunk)
{
	int ret = 0;
	int i;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u16 num_stripes;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_mapping_tree *map_tree;
	struct map_lookup *map;
	struct stripe *stripe;

	map_tree = &fs_info->mapping_tree;
	num_stripes = chunk->num_stripes;
	map = malloc(btrfs_map_lookup_size(num_stripes));
	if (!map)
		return -ENOMEM;
	map->ce.start = chunk->offset;
	map->ce.size = chunk->length;
	map->num_stripes = num_stripes;
	map->io_width = chunk->io_width;
	map->io_align = chunk->io_align;
	map->sector_size = chunk->sector_size;
	map->stripe_len = chunk->stripe_len;
	map->type = chunk->type_flags;
	map->sub_stripes = chunk->sub_stripes;

	for (i = 0, stripe = chunk->stripes; i < num_stripes; i++, stripe++) {
		devid = stripe->devid;
		memcpy(uuid, stripe->dev_uuid, BTRFS_UUID_SIZE);
		map->stripes[i].physical = stripe->offset;
		map->stripes[i].dev = btrfs_find_device(fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev) {
			free(map);
			return -EIO;
		}
	}

	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	return ret;
}
static int build_device_maps_by_chunk_records(struct recover_control *rc,
					      struct btrfs_root *root)
{
	int ret = 0;
	struct chunk_record *chunk;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	list_for_each_entry(chunk, &rc->rebuild_chunks, list) {
		ret = build_device_map_by_chunk_record(root, chunk);
		if (ret)
			return ret;
	}
	return ret;
}
static int block_group_remove_all_extent_items(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct block_group_record *bg)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path path;
	struct extent_buffer *leaf;
	u64 start = bg->objectid;
	u64 end = bg->objectid + bg->offset;
	u64 old_val;
	int nitems;
	int ret;
	int i;
	int del_s, del_nr;

	btrfs_init_path(&path);
	root = root->fs_info->extent_root;

	key.objectid = start;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, &path, -1, 1);
	if (ret < 0)
		goto err;
	else if (ret > 0)
		ret = 0;

	leaf = path.nodes[0];
	nitems = btrfs_header_nritems(leaf);
	if (!nitems) {
		/* The tree is empty. */
		ret = 0;
		goto done;
	}

	if (path.slots[0] >= nitems) {
		ret = btrfs_next_leaf(root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0) {
			ret = 0;
			goto done;
		}
		leaf = path.nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, 0);
		if (key.objectid >= end)
			goto done;
		btrfs_release_path(&path);
		goto again;
	}

	del_nr = 0;
	del_s = -1;
	for (i = path.slots[0]; i < nitems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid >= end)
			break;

		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (del_nr == 0)
				continue;
			else
				break;
		}

		if (del_s == -1)
			del_s = i;
		del_nr++;
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			old_val = btrfs_super_bytes_used(fs_info->super_copy);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				old_val += fs_info->nodesize;
			else
				old_val += key.offset;
			btrfs_set_super_bytes_used(fs_info->super_copy,
						   old_val);
		}
	}

	if (del_nr) {
		ret = btrfs_del_items(trans, root, &path, del_s, del_nr);
		if (ret)
			goto err;
	}

	if (key.objectid < end) {
		if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			key.objectid += fs_info->sectorsize;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = 0;
		}
		btrfs_release_path(&path);
		goto again;
	}
done:
	btrfs_release_path(&path);
err:
	return ret;
}
static int block_group_free_all_extent(struct btrfs_root *root,
				       struct block_group_record *bg)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info;
	u64 start;
	u64 end;

	info = root->fs_info;
	cache = btrfs_lookup_block_group(info, bg->objectid);
	if (!cache)
		return -ENOENT;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	set_extent_bits(&info->block_group_cache, start, end,
			BLOCK_GROUP_DIRTY);
	set_extent_dirty(&info->free_space_cache, start, end);

	btrfs_set_block_group_used(&cache->item, 0);

	return 0;
}
static int remove_chunk_extent_item(struct btrfs_trans_handle *trans,
				    struct recover_control *rc,
				    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	int ret = 0;

	list_for_each_entry(chunk, &rc->good_chunks, list) {
		if (!(chunk->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;
		ret = block_group_remove_all_extent_items(trans, root,
							  chunk->bg_rec);
		if (ret)
			return ret;

		ret = block_group_free_all_extent(root, chunk->bg_rec);
		if (ret)
			return ret;
	}
	return ret;
}
static int __rebuild_chunk_root(struct btrfs_trans_handle *trans,
				struct recover_control *rc,
				struct btrfs_root *root)
{
	u64 min_devid = -1;
	struct btrfs_device *dev;
	struct extent_buffer *cow;
	struct btrfs_disk_key disk_key;
	int ret = 0;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		if (min_devid > dev->devid)
			min_devid = dev->devid;
	}
	btrfs_set_disk_key_objectid(&disk_key, BTRFS_DEV_ITEMS_OBJECTID);
	btrfs_set_disk_key_type(&disk_key, BTRFS_DEV_ITEM_KEY);
	btrfs_set_disk_key_offset(&disk_key, min_devid);

	cow = btrfs_alloc_free_block(trans, root, root->fs_info->nodesize,
				     BTRFS_CHUNK_TREE_OBJECTID,
				     &disk_key, 0, 0, 0);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_nritems(cow, 0);
	btrfs_set_header_level(cow, 0);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(cow, BTRFS_CHUNK_TREE_OBJECTID);

	write_extent_buffer(cow, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);

	write_extent_buffer(cow, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(cow),
			    BTRFS_UUID_SIZE);

	root->node = cow;
	btrfs_mark_buffer_dirty(cow);

	return ret;
}
static int __rebuild_device_items(struct btrfs_trans_handle *trans,
				  struct recover_control *rc,
				  struct btrfs_root *root)
{
	struct btrfs_device *dev;
	struct btrfs_key key;
	struct btrfs_dev_item dev_item_tmp;
	struct btrfs_dev_item *dev_item = &dev_item_tmp;
	int ret = 0;

	list_for_each_entry(dev, &rc->fs_devices->devices, dev_list) {
		key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
		key.type = BTRFS_DEV_ITEM_KEY;
		key.offset = dev->devid;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		ret = btrfs_insert_item(trans, root, &key,
					dev_item, sizeof(*dev_item));
		if (ret)
			return ret;
	}

	return ret;
}
static int __insert_chunk_item(struct btrfs_trans_handle *trans,
			       struct chunk_record *chunk_rec,
			       struct btrfs_root *chunk_root)
{
	struct btrfs_key key;
	struct btrfs_chunk *chunk = NULL;
	int ret = 0;

	chunk = create_chunk_item(chunk_rec);
	if (!chunk)
		return -ENOMEM;
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_rec->offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(chunk_rec->num_stripes));
	free(chunk);
	return ret;
}
static int __rebuild_chunk_items(struct btrfs_trans_handle *trans,
				 struct recover_control *rc,
				 struct btrfs_root *root)
{
	struct btrfs_root *chunk_root;
	struct chunk_record *chunk_rec;
	int ret;

	chunk_root = root->fs_info->chunk_root;

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		ret = __insert_chunk_item(trans, chunk_rec, chunk_root);
		if (ret)
			return ret;
	}
	list_for_each_entry(chunk_rec, &rc->rebuild_chunks, list) {
		ret = __insert_chunk_item(trans, chunk_rec, chunk_root);
		if (ret)
			return ret;
	}
	return 0;
}
static int rebuild_chunk_tree(struct btrfs_trans_handle *trans,
			      struct recover_control *rc,
			      struct btrfs_root *root)
{
	int ret = 0;

	root = root->fs_info->chunk_root;

	/* Rebuild an empty chunk root, then the device and chunk items. */
	ret = __rebuild_chunk_root(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_device_items(trans, rc, root);
	if (ret)
		return ret;

	ret = __rebuild_chunk_items(trans, rc, root);

	return ret;
}
static int rebuild_sys_array(struct recover_control *rc,
			     struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct chunk_record *chunk_rec;
	int ret = 0;
	u16 num_stripes;

	btrfs_set_super_sys_array_size(fs_info->super_copy, 0);

	list_for_each_entry(chunk_rec, &rc->good_chunks, list) {
		if (!(chunk_rec->type_flags & BTRFS_BLOCK_GROUP_SYSTEM))
			continue;

		num_stripes = chunk_rec->num_stripes;
		chunk = create_chunk_item(chunk_rec);
		if (!chunk) {
			ret = -ENOMEM;
			break;
		}

		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		key.type = BTRFS_CHUNK_ITEM_KEY;
		key.offset = chunk_rec->offset;

		ret = btrfs_add_system_chunk(fs_info, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
		free(chunk);
		if (ret)
			break;
	}
	return ret;
}
static int calculate_bg_used(struct btrfs_root *extent_root,
			     struct chunk_record *chunk_rec,
			     struct btrfs_path *path,
			     u64 *used)
{
	struct extent_buffer *node;
	struct btrfs_key found_key;
	int slot;
	int ret = 0;
	u64 used_ret = 0;

	while (1) {
		node = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(node, &found_key, slot);
		if (found_key.objectid >= chunk_rec->offset + chunk_rec->length)
			break;
		if (found_key.type != BTRFS_METADATA_ITEM_KEY &&
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto next;
		if (found_key.type == BTRFS_METADATA_ITEM_KEY)
			used_ret += extent_root->fs_info->nodesize;
		else
			used_ret += found_key.offset;
next:
		if (slot + 1 < btrfs_header_nritems(node)) {
			path->slots[0]++;
		} else {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				ret = 0;
				break;
			}
		}
	}
	*used = used_ret;
out:
	return ret;
}
static int __insert_block_group(struct btrfs_trans_handle *trans,
				struct chunk_record *chunk_rec,
				struct btrfs_root *extent_root,
				u64 used)
{
	struct btrfs_block_group_item bg_item;
	struct btrfs_key key;
	int ret = 0;

	btrfs_set_block_group_used(&bg_item, used);
	btrfs_set_block_group_chunk_objectid(&bg_item, used);
	btrfs_set_block_group_flags(&bg_item, chunk_rec->type_flags);
	key.objectid = chunk_rec->offset;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = chunk_rec->length;

	ret = btrfs_insert_item(trans, extent_root, &key, &bg_item,
				sizeof(bg_item));
	return ret;
}
/*
 * Search through the extent tree to rebuild the 'used' member of the block
 * group.
 * However, since block group and extent item share the extent tree,
 * the extent item may also be missing.
 * In that case, we fill the 'used' with the length of the block group to
 * ensure no write into the block group.
 * Btrfsck will hate it but we will inform the user to call
 * '--init-extent-tree' if possible, or just salvage as much data as
 * possible from the fs.
 */
static int rebuild_block_group(struct btrfs_trans_handle *trans,
			       struct recover_control *rc,
			       struct btrfs_root *root)
{
	struct chunk_record *chunk_rec;
	struct btrfs_key search_key;
	struct btrfs_path path;
	u64 used = 0;
	int ret = 0;

	if (list_empty(&rc->rebuild_chunks))
		return 0;

	btrfs_init_path(&path);
	list_for_each_entry(chunk_rec, &rc->rebuild_chunks, list) {
		search_key.objectid = chunk_rec->offset;
		search_key.type = BTRFS_EXTENT_ITEM_KEY;
		search_key.offset = 0;
		ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
					&search_key, &path, 0, 0);
		if (ret < 0)
			goto out;
		ret = calculate_bg_used(root->fs_info->extent_root,
					chunk_rec, &path, &used);
		/*
		 * Extent tree is damaged, better to rebuild the whole extent
		 * tree. Currently, change the used to chunk's len to prevent
		 * write/block reserve happening in that block group.
		 */
		if (ret < 0) {
			fprintf(stderr,
				"Fail to search extent tree for block group: [%llu,%llu]\n",
				chunk_rec->offset,
				chunk_rec->offset + chunk_rec->length);
			fprintf(stderr,
				"Mark the block group full to prevent block rsv problems\n");
			used = chunk_rec->length;
		}
		btrfs_release_path(&path);
		ret = __insert_block_group(trans, chunk_rec,
					   root->fs_info->extent_root,
					   used);
		if (ret < 0)
			goto out;
	}
out:
	btrfs_release_path(&path);
	return ret;
}
static struct btrfs_root *
open_ctree_with_broken_chunk(struct recover_control *rc)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_super_block *disk_super;
	struct extent_buffer *eb;
	int ret;

	fs_info = btrfs_new_fs_info(1, BTRFS_SUPER_INFO_OFFSET);
	if (!fs_info) {
		fprintf(stderr, "Failed to allocate memory for fs_info\n");
		return ERR_PTR(-ENOMEM);
	}
	fs_info->is_chunk_recover = 1;

	fs_info->fs_devices = rc->fs_devices;
	ret = btrfs_open_devices(fs_info->fs_devices, O_RDWR);
	if (ret)
		goto out;

	disk_super = fs_info->super_copy;
	ret = btrfs_read_dev_super(fs_info->fs_devices->latest_bdev,
				   disk_super, fs_info->super_bytenr,
				   SBREAD_RECOVER);
	if (ret) {
		fprintf(stderr, "No valid btrfs found\n");
		goto out_devices;
	}

	memcpy(fs_info->fsid, &disk_super->fsid, BTRFS_FSID_SIZE);
	fs_info->sectorsize = btrfs_super_sectorsize(disk_super);
	fs_info->nodesize = btrfs_super_nodesize(disk_super);
	fs_info->stripesize = btrfs_super_stripesize(disk_super);

	ret = btrfs_check_fs_compatibility(disk_super, OPEN_CTREE_WRITES);
	if (ret)
		goto out_devices;

	btrfs_setup_root(fs_info->chunk_root, fs_info,
			 BTRFS_CHUNK_TREE_OBJECTID);

	ret = build_device_maps_by_chunk_records(rc, fs_info->chunk_root);
	if (ret)
		goto out_cleanup;

	ret = btrfs_setup_all_roots(fs_info, 0, 0);
	if (ret)
		goto out_failed;

	eb = fs_info->tree_root->node;
	read_extent_buffer(eb, fs_info->chunk_tree_uuid,
			   btrfs_header_chunk_tree_uuid(eb),
			   BTRFS_UUID_SIZE);

	return fs_info->fs_root;
out_failed:
	btrfs_release_all_roots(fs_info);
out_cleanup:
	btrfs_cleanup_all_caches(fs_info);
out_devices:
	btrfs_close_devices(fs_info->fs_devices);
out:
	btrfs_free_fs_info(fs_info);
	return ERR_PTR(ret);
}
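/*
 * Read the super block of the given device and remember the geometry
 * (sector/node size, generations, csum size) needed by the scan.
 */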
static int recover_prepare(struct recover_control *rc, const char *path)
{
	int ret;
	int fd;
	struct btrfs_super_block *sb;
	char buf[BTRFS_SUPER_INFO_SIZE];
	struct btrfs_fs_devices *fs_devices;

	ret = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "open %s error\n", path);
		return -1;
	}

	sb = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, sb, BTRFS_SUPER_INFO_OFFSET,
				   SBREAD_RECOVER);
	if (ret) {
		fprintf(stderr, "read super block error\n");
		goto fail_close_fd;
	}

	rc->sectorsize = btrfs_super_sectorsize(sb);
	rc->nodesize = btrfs_super_nodesize(sb);
	rc->generation = btrfs_super_generation(sb);
	rc->chunk_root_generation = btrfs_super_chunk_root_generation(sb);
	rc->csum_size = btrfs_super_csum_size(sb);

	/* if seed, the result of scanning below will be partial */
	if (btrfs_super_flags(sb) & BTRFS_SUPER_FLAG_SEEDING) {
		fprintf(stderr, "this device is seed device\n");
		ret = -1;
		goto fail_close_fd;
	}

	ret = btrfs_scan_fs_devices(fd, path, &fs_devices, 0, SBREAD_RECOVER, 0);
	if (ret)
		goto fail_close_fd;

	rc->fs_devices = fs_devices;

	if (rc->verbose)
		print_all_devices(&rc->fs_devices->devices);

fail_close_fd:
	close(fd);
	return ret;
}
static int btrfs_get_device_extents(u64 chunk_object,
				    struct list_head *orphan_devexts,
				    struct list_head *ret_list)
{
	struct device_extent_record *devext;
	struct device_extent_record *next;
	int count = 0;

	list_for_each_entry_safe(devext, next, orphan_devexts, chunk_list) {
		if (devext->chunk_offset == chunk_object) {
			list_move_tail(&devext->chunk_list, ret_list);
			count++;
		}
	}
	return count;
}
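/*
 * Expected stripe count by RAID profile: striped and parity profiles vary
 * with the device count (return 0 for "unknown here"), RAID1/DUP keep two
 * copies, everything else is a single stripe.
 */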
static int calc_num_stripes(u64 type)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		return 0;
	else if (type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_DUP))
		return 2;
	else
		return 1;
}
static inline int calc_sub_nstripes(u64 type)
{
	if (type & BTRFS_BLOCK_GROUP_RAID10)
		return 2;
	else
		return 1;
}
static int btrfs_verify_device_extents(struct block_group_record *bg,
				       struct list_head *devexts, int ndevexts)
{
	struct device_extent_record *devext;
	u64 stripe_length;
	int expected_num_stripes;

	expected_num_stripes = calc_num_stripes(bg->flags);
	if (expected_num_stripes && expected_num_stripes != ndevexts)
		return 1;

	if (check_num_stripes(bg->flags, ndevexts) < 0)
		return 1;

	stripe_length = calc_stripe_length(bg->flags, bg->offset, ndevexts);
	list_for_each_entry(devext, devexts, chunk_list) {
		if (devext->length != stripe_length)
			return 1;
	}
	return 0;
}
static int btrfs_rebuild_unordered_chunk_stripes(struct recover_control *rc,
						 struct chunk_record *chunk)
{
	struct device_extent_record *devext;
	struct btrfs_device *device;
	int i;

	devext = list_first_entry(&chunk->dextents, struct device_extent_record,
				  chunk_list);
	for (i = 0; i < chunk->num_stripes; i++) {
		chunk->stripes[i].devid = devext->objectid;
		chunk->stripes[i].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device)
			return -ENOENT;
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[i].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
		devext = list_next_entry(devext, chunk_list);
	}
	return 0;
}
static int btrfs_calc_stripe_index(struct chunk_record *chunk, u64 logical)
{
	u64 offset = logical - chunk->offset;
	int stripe_nr;
	int nr_data_stripes;
	int index;

	stripe_nr = offset / chunk->stripe_len;
	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID0) {
		index = stripe_nr % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID10) {
		index = stripe_nr % (chunk->num_stripes / chunk->sub_stripes);
		index *= chunk->sub_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5) {
		nr_data_stripes = chunk->num_stripes - 1;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6) {
		nr_data_stripes = chunk->num_stripes - 2;
		index = stripe_nr % nr_data_stripes;
		stripe_nr /= nr_data_stripes;
		index = (index + stripe_nr) % chunk->num_stripes;
	} else {
		return -1;
	}
	return index;
}
/* calc the logical offset which is the start of the next stripe. */
static inline u64 btrfs_next_stripe_logical_offset(struct chunk_record *chunk,
						   u64 logical)
{
	u64 offset = logical - chunk->offset;

	offset /= chunk->stripe_len;
	offset *= chunk->stripe_len;
	offset += chunk->stripe_len;

	return offset + chunk->offset;
}
static int is_extent_record_in_device_extent(struct extent_record *er,
					     struct device_extent_record *dext,
					     int *mirror)
{
	int i;

	for (i = 0; i < er->nmirrors; i++) {
		if (er->devices[i]->devid == dext->objectid &&
		    er->offsets[i] >= dext->offset &&
		    er->offsets[i] < dext->offset + dext->length) {
			*mirror = i;
			return 1;
		}
	}
	return 0;
}
static int
btrfs_rebuild_ordered_meta_chunk_stripes(struct recover_control *rc,
					 struct chunk_record *chunk)
{
	u64 start = chunk->offset;
	u64 end = chunk->offset + chunk->length;
	struct cache_extent *cache;
	struct extent_record *er;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *device;
	LIST_HEAD(devexts);
	int index;
	int mirror;
	int ret;

	cache = lookup_cache_extent(&rc->eb_cache,
				    start, chunk->length);
	if (!cache) {
		/* No used space, we can reorder the stripes freely. */
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);
		return ret;
	}

	list_splice_init(&chunk->dextents, &devexts);
again:
	er = container_of(cache, struct extent_record, cache);
	index = btrfs_calc_stripe_index(chunk, er->cache.start);
	BUG_ON(index == -1);
	if (chunk->stripes[index].devid)
		goto next;
	list_for_each_entry_safe(devext, next, &devexts, chunk_list) {
		if (is_extent_record_in_device_extent(er, devext, &mirror)) {
			chunk->stripes[index].devid = devext->objectid;
			chunk->stripes[index].offset = devext->offset;
			memcpy(chunk->stripes[index].dev_uuid,
			       er->devices[mirror]->uuid,
			       BTRFS_UUID_SIZE);
			list_move(&devext->chunk_list, &chunk->dextents);
			break;
		}
	}
next:
	start = btrfs_next_stripe_logical_offset(chunk, er->cache.start);
	if (start >= end)
		goto no_extent_record;

	cache = lookup_cache_extent(&rc->eb_cache, start, end - start);
	if (cache)
		goto again;
no_extent_record:
	if (list_empty(&devexts))
		return 0;

	if (chunk->type_flags & (BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_RAID6)) {
		/* Fixme: try to recover the order by the parity block. */
		list_splice_tail(&devexts, &chunk->dextents);
		return -EINVAL;
	}

	/* There is no data on the lost stripes, we can reorder them freely. */
	for (index = 0; index < chunk->num_stripes; index++) {
		if (chunk->stripes[index].devid)
			continue;

		devext = list_first_entry(&devexts,
					  struct device_extent_record,
					  chunk_list);
		list_move(&devext->chunk_list, &chunk->dextents);

		chunk->stripes[index].devid = devext->objectid;
		chunk->stripes[index].offset = devext->offset;
		device = btrfs_find_device_by_devid(rc->fs_devices,
						    devext->objectid,
						    0);
		if (!device) {
			list_splice_tail(&devexts, &chunk->dextents);
			return -EINVAL;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid,
						  1));
		memcpy(chunk->stripes[index].dev_uuid, device->uuid,
		       BTRFS_UUID_SIZE);
	}
	return 0;
}
#define BTRFS_ORDERED_RAID	(BTRFS_BLOCK_GROUP_RAID0 |	\
				 BTRFS_BLOCK_GROUP_RAID10 |	\
				 BTRFS_BLOCK_GROUP_RAID5 |	\
				 BTRFS_BLOCK_GROUP_RAID6)
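/*
 * Dispatch stripe reconstruction by chunk type: metadata chunks on ordered
 * RAID profiles are rebuilt from the scanned tree blocks, data chunks on
 * ordered RAID are deferred until the fs is open, and everything else can
 * be reordered freely.
 */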
static int btrfs_rebuild_chunk_stripes(struct recover_control *rc,
				       struct chunk_record *chunk)
{
	int ret;

	/*
	 * All the data in the system metadata chunk will be dropped,
	 * so we need not guarantee that the data is right or not, that
	 * is we can reorder the stripes in the system metadata chunk.
	 */
	if ((chunk->type_flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = btrfs_rebuild_ordered_meta_chunk_stripes(rc, chunk);
	else if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA) &&
		 (chunk->type_flags & BTRFS_ORDERED_RAID))
		ret = 1;	/* Be handled after the fs is opened. */
	else
		ret = btrfs_rebuild_unordered_chunk_stripes(rc, chunk);

	return ret;
}
static int next_csum(struct btrfs_root *root,
		     struct extent_buffer **leaf,
		     struct btrfs_path *path,
		     int *slot,
		     u64 *csum_offset,
		     u32 *tree_csum,
		     u64 end,
		     struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_root *csum_root = root->fs_info->csum_root;
	struct btrfs_csum_item *csum_item;
	u32 blocksize = root->fs_info->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item = btrfs_item_size_nr(*leaf, *slot) / csum_size;

	if (*csum_offset >= csums_in_item) {
		++(*slot);
		*csum_offset = 0;
		if (*slot >= btrfs_header_nritems(*leaf)) {
			ret = btrfs_next_leaf(csum_root, path);
			if (ret < 0)
				return -1;
			else if (ret > 0)
				return 1;
			*leaf = path->nodes[0];
			*slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(*leaf, key, *slot);
	}

	if (key->offset + (*csum_offset) * blocksize >= end)
		return 2;
	csum_item = btrfs_item_ptr(*leaf, *slot, struct btrfs_csum_item);
	csum_item = (struct btrfs_csum_item *)((unsigned char *)csum_item
					       + (*csum_offset) * csum_size);
	read_extent_buffer(*leaf, tree_csum,
			   (unsigned long)csum_item, csum_size);
	return ret;
}
static u64 calc_data_offset(struct btrfs_key *key,
			    struct chunk_record *chunk,
			    u64 dev_offset,
			    u64 csum_offset,
			    u32 blocksize)
{
	u64 data_offset;
	int logical_stripe_nr;
	int dev_stripe_nr;
	int nr_data_stripes;

	data_offset = key->offset + csum_offset * blocksize - chunk->offset;
	nr_data_stripes = chunk->num_stripes;

	if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID5)
		nr_data_stripes -= 1;
	else if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6)
		nr_data_stripes -= 2;

	logical_stripe_nr = data_offset / chunk->stripe_len;
	dev_stripe_nr = logical_stripe_nr / nr_data_stripes;

	data_offset -= logical_stripe_nr * chunk->stripe_len;
	data_offset += dev_stripe_nr * chunk->stripe_len;

	return dev_offset + data_offset;
}
static int check_one_csum(int fd, u64 start, u32 len, u32 tree_csum)
{
	char *data;
	int ret = 0;
	u32 csum_result = ~(u32)0;

	data = malloc(len);
	if (!data)
		return -1;
	ret = pread64(fd, data, len, start);
	if (ret < 0 || ret != len) {
		ret = -1;
		goto out;
	}
	ret = 0;
	csum_result = btrfs_csum_data(data, csum_result, len);
	btrfs_csum_final(csum_result, (u8 *)&csum_result);
	if (csum_result != tree_csum)
		ret = 1;
out:
	free(data);
	return ret;
}
static u64 item_end_offset(struct btrfs_root *root, struct btrfs_key *key,
			   struct extent_buffer *leaf, int slot)
{
	u32 blocksize = root->fs_info->sectorsize;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	u64 offset = btrfs_item_size_nr(leaf, slot);
	offset /= csum_size;
	offset *= blocksize;
	offset += key->offset;

	return offset;
}
static int insert_stripe(struct list_head *devexts,
			 struct recover_control *rc,
			 struct chunk_record *chunk,
			 int index)
{
	struct device_extent_record *devext;
	struct btrfs_device *dev;

	devext = list_entry(devexts->next, struct device_extent_record,
			    chunk_list);
	dev = btrfs_find_device_by_devid(rc->fs_devices, devext->objectid,
					 0);
	if (!dev)
		return -ENOENT;
	if (btrfs_find_device_by_devid(rc->fs_devices, devext->objectid, 1)) {
		error("unexpected: found another device with id %llu",
		      (unsigned long long)devext->objectid);
		return -EINVAL;
	}

	chunk->stripes[index].devid = devext->objectid;
	chunk->stripes[index].offset = devext->offset;
	memcpy(chunk->stripes[index].dev_uuid, dev->uuid, BTRFS_UUID_SIZE);

	list_move(&devext->chunk_list, &chunk->dextents);

	return 0;
}
static inline int count_devext_records(struct list_head *record_list)
{
	int num_of_records = 0;
	struct device_extent_record *devext;

	list_for_each_entry(devext, record_list, chunk_list)
		num_of_records++;

	return num_of_records;
}
static int fill_chunk_up(struct chunk_record *chunk, struct list_head *devexts,
			 struct recover_control *rc)
{
	int ret = 0;
	int i;

	for (i = 0; i < chunk->num_stripes; i++) {
		if (!chunk->stripes[i].devid) {
			ret = insert_stripe(devexts, rc, chunk, i);
			if (ret)
				break;
		}
	}

	return ret;
}
#define EQUAL_STRIPE (1 << 0)
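/*
 * Recover the stripe order of a RAID5/6 data chunk by probing data
 * checksums: for each csum item covering the current stripe, read the
 * candidate position on every remaining device and keep the device whose
 * on-disk data actually matches the checksum from the csum tree.
 */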
static int rebuild_raid_data_chunk_stripes(struct recover_control *rc,
					   struct btrfs_root *root,
					   struct chunk_record *chunk,
					   u8 *flags)
{
	int i;
	int ret = 0;
	int slot;
	struct btrfs_path path;
	struct btrfs_key prev_key;
	struct btrfs_key key;
	struct btrfs_root *csum_root;
	struct extent_buffer *leaf;
	struct device_extent_record *devext;
	struct device_extent_record *next;
	struct btrfs_device *dev;
	u64 start = chunk->offset;
	u64 end = start + chunk->stripe_len;
	u64 chunk_end = chunk->offset + chunk->length;
	u64 csum_offset = 0;
	u64 data_offset;
	u32 blocksize = root->fs_info->sectorsize;
	u32 tree_csum;
	int index = 0;
	int num_unordered = 0;
	LIST_HEAD(unordered);
	LIST_HEAD(candidates);

	csum_root = root->fs_info->csum_root;
	btrfs_init_path(&path);
	list_splice_init(&chunk->dextents, &candidates);
again:
	if (list_is_last(candidates.next, &candidates))
		goto out;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, csum_root, &key, &path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Search csum failed(%d)\n", ret);
		goto fail_out;
	}
	leaf = path.nodes[0];
	slot = path.slots[0];
	if (ret > 0) {
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(csum_root, &path);
			if (ret < 0) {
				fprintf(stderr,
					"Walk tree failed(%d)\n", ret);
				goto fail_out;
			} else if (ret > 0) {
				slot = btrfs_header_nritems(leaf) - 1;
				btrfs_item_key_to_cpu(leaf, &key, slot);
				if (item_end_offset(root, &key, leaf, slot)
				    > start) {
					csum_offset = start - key.offset;
					csum_offset /= blocksize;
					goto next_csum;
				}
				goto next_stripe;
			}
			leaf = path.nodes[0];
			slot = path.slots[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		ret = btrfs_previous_item(csum_root, &path, 0,
					  BTRFS_EXTENT_CSUM_KEY);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0) {
			if (key.offset >= end)
				goto next_stripe;
			else
				goto next_csum;
		}
		leaf = path.nodes[0];
		slot = path.slots[0];

		btrfs_item_key_to_cpu(leaf, &prev_key, slot);
		if (item_end_offset(root, &prev_key, leaf, slot) > start) {
			csum_offset = start - prev_key.offset;
			csum_offset /= blocksize;
			btrfs_item_key_to_cpu(leaf, &key, slot);
		} else {
			if (key.offset >= end)
				goto next_stripe;
		}

		if (key.offset + csum_offset * blocksize > chunk_end)
			goto next_stripe;
	}
next_csum:
	ret = next_csum(root, &leaf, &path, &slot, &csum_offset, &tree_csum,
			end, &key);
	if (ret < 0) {
		fprintf(stderr, "Fetch csum failed\n");
		goto fail_out;
	} else if (ret == 1) {
		if (!(*flags & EQUAL_STRIPE))
			*flags |= EQUAL_STRIPE;
		goto out;
	} else if (ret == 2)
		goto next_stripe;

	list_for_each_entry_safe(devext, next, &candidates, chunk_list) {
		data_offset = calc_data_offset(&key, chunk, devext->offset,
					       csum_offset, blocksize);
		dev = btrfs_find_device_by_devid(rc->fs_devices,
						 devext->objectid, 0);
		if (!dev) {
			ret = 1;
			goto fail_out;
		}
		BUG_ON(btrfs_find_device_by_devid(rc->fs_devices,
						  devext->objectid, 1));

		ret = check_one_csum(dev->fd, data_offset, blocksize,
				     tree_csum);
		if (ret < 0)
			goto fail_out;
		else if (ret > 0)
			list_move(&devext->chunk_list, &unordered);
	}

	if (list_empty(&candidates)) {
		num_unordered = count_devext_records(&unordered);
		if (chunk->type_flags & BTRFS_BLOCK_GROUP_RAID6
					&& num_unordered == 2) {
			btrfs_release_path(&path);
			ret = fill_chunk_up(chunk, &unordered, rc);
			return ret;
		}

		goto next_stripe;
	}

	if (list_is_last(candidates.next, &candidates)) {
		index = btrfs_calc_stripe_index(chunk,
			key.offset + csum_offset * blocksize);
		BUG_ON(index == -1);
		if (chunk->stripes[index].devid)
			goto next_stripe;
		ret = insert_stripe(&candidates, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		csum_offset++;
		goto next_csum;
	}
next_stripe:
	start = btrfs_next_stripe_logical_offset(chunk, start);
	end = min(start + chunk->stripe_len, chunk_end);
	list_splice_init(&unordered, &candidates);
	btrfs_release_path(&path);
	csum_offset = 0;
	if (end < chunk_end)
		goto again;
out:
	list_splice_init(&candidates, &unordered);
	num_unordered = count_devext_records(&unordered);
	if (num_unordered == 1) {
		for (i = 0; i < chunk->num_stripes; i++) {
			if (!chunk->stripes[i].devid) {
				index = i;
				break;
			}
		}
		ret = insert_stripe(&unordered, rc, chunk, index);
		if (ret)
			goto fail_out;
	} else {
		if ((num_unordered == 2 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID5)
		 || (num_unordered == 3 && chunk->type_flags
			& BTRFS_BLOCK_GROUP_RAID6)) {
			ret = fill_chunk_up(chunk, &unordered, rc);
		}
	}
fail_out:
	ret = !!ret || (list_empty(&unordered) ? 0 : 1);
	list_splice_init(&candidates, &chunk->dextents);
	list_splice_init(&unordered, &chunk->dextents);
	btrfs_release_path(&path);

	return ret;
}
static int btrfs_rebuild_ordered_data_chunk_stripes(struct recover_control *rc,
						    struct btrfs_root *root)
{
	struct chunk_record *chunk;
	struct chunk_record *next;
	int ret = 0;
	int err;
	u8 flags;

	list_for_each_entry_safe(chunk, next, &rc->unrepaired_chunks, list) {
		if ((chunk->type_flags & BTRFS_BLOCK_GROUP_DATA)
		 && (chunk->type_flags & BTRFS_ORDERED_RAID)) {
			flags = 0;
			err = rebuild_raid_data_chunk_stripes(rc, root, chunk,
							      &flags);
			if (err) {
				list_move(&chunk->list, &rc->bad_chunks);
				if (flags & EQUAL_STRIPE)
					fprintf(stderr,
						"Failure: too many equal stripes in chunk[%llu %llu]\n",
						chunk->offset, chunk->length);
				if (!ret)
					ret = err;
			} else
				list_move(&chunk->list, &rc->good_chunks);
		}
	}
	return ret;
}
static int btrfs_recover_chunks(struct recover_control *rc)
{
	struct chunk_record *chunk;
	struct block_group_record *bg;
	struct block_group_record *next;
	LIST_HEAD(new_chunks);
	LIST_HEAD(devexts);
	int nstripes;
	int ret = 0;

	/* create the chunk by block group */
	list_for_each_entry_safe(bg, next, &rc->bg.block_groups, list) {
		nstripes = btrfs_get_device_extents(bg->objectid,
						    &rc->devext.no_chunk_orphans,
						    &devexts);
		chunk = calloc(1, btrfs_chunk_record_size(nstripes));
		if (!chunk)
			return -ENOMEM;
		INIT_LIST_HEAD(&chunk->dextents);
		chunk->bg_rec = bg;
		chunk->cache.start = bg->objectid;
		chunk->cache.size = bg->offset;
		chunk->objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		chunk->type = BTRFS_CHUNK_ITEM_KEY;
		chunk->offset = bg->objectid;
		chunk->generation = bg->generation;
		chunk->length = bg->offset;
		chunk->owner = BTRFS_CHUNK_TREE_OBJECTID;
		chunk->stripe_len = BTRFS_STRIPE_LEN;
		chunk->type_flags = bg->flags;
		chunk->io_width = BTRFS_STRIPE_LEN;
		chunk->io_align = BTRFS_STRIPE_LEN;
		chunk->sector_size = rc->sectorsize;
		chunk->sub_stripes = calc_sub_nstripes(bg->flags);

		ret = insert_cache_extent(&rc->chunk, &chunk->cache);
		if (ret == -EEXIST) {
			error("duplicate entry in cache start %llu size %llu",
			      (unsigned long long)chunk->cache.start,
			      (unsigned long long)chunk->cache.size);
			free(chunk);
			return ret;
		}
		BUG_ON(ret);

		list_del_init(&bg->list);
		if (!nstripes) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		list_splice_init(&devexts, &chunk->dextents);

		ret = btrfs_verify_device_extents(bg, &devexts, nstripes);
		if (ret) {
			list_add_tail(&chunk->list, &rc->bad_chunks);
			continue;
		}

		chunk->num_stripes = nstripes;
		ret = btrfs_rebuild_chunk_stripes(rc, chunk);
		if (ret > 0)
			list_add_tail(&chunk->list, &rc->unrepaired_chunks);
		else if (ret < 0)
			list_add_tail(&chunk->list, &rc->bad_chunks);
		else
			list_add_tail(&chunk->list, &rc->good_chunks);
	}
	/*
	 * Don't worry about the lost orphan device extents, they don't
	 * have their chunk and block group, they must be the old ones that
	 * we have dropped.
	 */
	return 0;
}
static inline int is_chunk_overlap(struct chunk_record *chunk1,
				   struct chunk_record *chunk2)
{
	if (chunk1->offset >= chunk2->offset + chunk2->length ||
	    chunk1->offset + chunk1->length <= chunk2->offset)
		return 0;
	return 1;
}
/* Move invalid (overlapping with good chunks) rebuild chunks to the bad chunk list */
static void validate_rebuild_chunks(struct recover_control *rc)
{
	struct chunk_record *good;
	struct chunk_record *rebuild;
	struct chunk_record *tmp;

	list_for_each_entry_safe(rebuild, tmp, &rc->rebuild_chunks, list) {
		list_for_each_entry(good, &rc->good_chunks, list) {
			if (is_chunk_overlap(rebuild, good)) {
				list_move_tail(&rebuild->list,
					       &rc->bad_chunks);
				break;
			}
		}
	}
}
/*
 * Return 0 when successful, < 0 on error and > 0 if aborted by user
 */
int btrfs_recover_chunk_tree(const char *path, int verbose, int yes)
{
	int ret = 0;
	struct btrfs_root *root = NULL;
	struct btrfs_trans_handle *trans;
	struct recover_control rc;

	init_recover_control(&rc, verbose, yes);

	ret = recover_prepare(&rc, path);
	if (ret) {
		fprintf(stderr, "recover prepare error\n");
		return ret;
	}

	ret = scan_devices(&rc);
	if (ret) {
		fprintf(stderr, "scan chunk headers error\n");
		goto fail_rc;
	}

	if (cache_tree_empty(&rc.chunk) &&
	    cache_tree_empty(&rc.bg.tree) &&
	    cache_tree_empty(&rc.devext.tree)) {
		fprintf(stderr, "no recoverable chunk\n");
		goto fail_rc;
	}

	print_scan_result(&rc);

	ret = check_chunks(&rc.chunk, &rc.bg, &rc.devext, &rc.good_chunks,
			   &rc.bad_chunks, &rc.rebuild_chunks, 1);
	if (ret) {
		if (!list_empty(&rc.bg.block_groups) ||
		    !list_empty(&rc.devext.no_chunk_orphans)) {
			ret = btrfs_recover_chunks(&rc);
			if (ret)
				goto fail_rc;
		}
	} else {
		print_check_result(&rc);
		printf("Check chunks successfully with no orphans\n");
		goto fail_rc;
	}
	validate_rebuild_chunks(&rc);
	print_check_result(&rc);

	root = open_ctree_with_broken_chunk(&rc);
	if (IS_ERR(root)) {
		fprintf(stderr, "open with broken chunk error\n");
		ret = PTR_ERR(root);
		goto fail_rc;
	}

	ret = check_all_chunks_by_metadata(&rc, root);
	if (ret) {
		fprintf(stderr, "The chunks in memory can not match the metadata of the fs. Repair failed.\n");
		goto fail_close_ctree;
	}

	ret = btrfs_rebuild_ordered_data_chunk_stripes(&rc, root);
	if (ret) {
		fprintf(stderr, "Failed to rebuild ordered chunk stripes.\n");
		goto fail_close_ctree;
	}

	if (!rc.yes) {
		ret = ask_user("We are going to rebuild the chunk tree on disk, it might destroy the old metadata on the disk, Are you sure?");
		if (!ret) {
			ret = 1;
			goto fail_close_ctree;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	ret = remove_chunk_extent_item(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_chunk_tree(trans, &rc, root);
	BUG_ON(ret);

	ret = rebuild_sys_array(&rc, root);
	BUG_ON(ret);

	ret = rebuild_block_group(trans, &rc, root);
	if (ret) {
		printf("Fail to rebuild block groups.\n");
		printf("Recommend to run 'btrfs check --init-extent-tree <dev>' after recovery\n");
	}

	btrfs_commit_transaction(trans, root);
fail_close_ctree:
	close_ctree(root);
fail_rc:
	free_recover_control(&rc);
	return ret;
}