2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
22 #include <sys/types.h>
30 #include "kerncompat.h"
34 #include "transaction.h"
37 #include "extent_io.h"
39 #define HEADER_MAGIC 0xbd5c25e27295668bULL
40 #define MAX_PENDING_SIZE (256 * 1024)
41 #define BLOCK_SIZE 1024
42 #define BLOCK_MASK (BLOCK_SIZE - 1)
44 #define COMPRESS_NONE 0
45 #define COMPRESS_ZLIB 1
47 struct meta_cluster_item
{
50 } __attribute__ ((__packed__
));
52 struct meta_cluster_header
{
57 } __attribute__ ((__packed__
));
59 /* cluster header + index items + buffers */
61 struct meta_cluster_header header
;
62 struct meta_cluster_item items
[];
63 } __attribute__ ((__packed__
));
65 #define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
66 sizeof(struct meta_cluster_item))
74 struct list_head list
;
78 struct list_head list
;
79 struct list_head ordered
;
87 struct metadump_struct
{
88 struct btrfs_root
*root
;
91 struct meta_cluster
*cluster
;
95 pthread_mutex_t mutex
;
97 struct rb_root name_tree
;
99 struct list_head list
;
100 struct list_head ordered
;
122 struct mdrestore_struct
{
128 pthread_mutex_t mutex
;
131 struct rb_root chunk_tree
;
132 struct rb_root physical_tree
;
133 struct list_head list
;
134 struct list_head overlapping_chunks
;
139 u64 last_physical_offset
;
140 u8 uuid
[BTRFS_UUID_SIZE
];
141 u8 fsid
[BTRFS_FSID_SIZE
];
149 int clear_space_cache
;
150 struct btrfs_fs_info
*info
;
153 static int search_for_chunk_blocks(struct mdrestore_struct
*mdres
,
154 u64 search
, u64 cluster_bytenr
);
155 static struct extent_buffer
*alloc_dummy_eb(u64 bytenr
, u32 size
);
/*
 * Recompute the checksum of a metadata/superblock buffer in place.
 *
 * crc32c's everything in @buf past the embedded csum area and writes the
 * finalized checksum back to the front of @buf.
 *
 * NOTE(review): the declaration/seed of 'crc' is not visible in this
 * fragment -- presumably a u32 initialized before the crc32c() call;
 * confirm against the full source.
 */
157 static void csum_block(u8
*buf
, size_t len
)
159 char result
[BTRFS_CRC32_SIZE
];
/* checksum covers the block contents after the csum field itself */
161 crc
= crc32c(crc
, buf
+ BTRFS_CSUM_SIZE
, len
- BTRFS_CSUM_SIZE
);
162 btrfs_csum_final(crc
, result
);
/* store the finalized csum at the start of the block */
163 memcpy(buf
, result
, BTRFS_CRC32_SIZE
);
/*
 * Report whether a key's item type embeds a file/xattr name that the
 * sanitizer must scrub (dir items/indexes, inode refs/extrefs, xattrs).
 *
 * NOTE(review): the enclosing switch statement and its return values are
 * not visible in this fragment -- presumably the listed cases return
 * nonzero and everything else returns 0; confirm against the full source.
 */
166 static int has_name(struct btrfs_key
*key
)
169 case BTRFS_DIR_ITEM_KEY
:
170 case BTRFS_DIR_INDEX_KEY
:
171 case BTRFS_INODE_REF_KEY
:
172 case BTRFS_INODE_EXTREF_KEY
:
173 case BTRFS_XATTR_ITEM_KEY
:
/*
 * Allocate a buffer of @name_len random printable characters
 * (rand() % 94 + 33 yields ASCII '!'..'~') used to replace a real name
 * when no crc-colliding substitute is wanted/found.  Caller owns and
 * frees the returned buffer.
 *
 * NOTE(review): the malloc-failure check and the return statement are not
 * visible in this fragment -- confirm NULL handling against the full source.
 */
182 static char *generate_garbage(u32 name_len
)
184 char *buf
= malloc(name_len
);
190 for (i
= 0; i
< name_len
; i
++) {
191 char c
= rand() % 94 + 33;
/*
 * rb-tree comparator for sanitized-name entries (struct name, linked via
 * member 'n').  Compares the two stored names with memcmp() over the
 * shorter of the two lengths; @fuzz appears unused here (kept so the
 * signature matches the cmp callback type used by tree_insert/tree_search).
 *
 * NOTE(review): the declaration of 'len' is not visible in this fragment.
 */
201 static int name_cmp(struct rb_node
*a
, struct rb_node
*b
, int fuzz
)
203 struct name
*entry
= rb_entry(a
, struct name
, n
);
204 struct name
*ins
= rb_entry(b
, struct name
, n
);
/* only compare the common prefix of the two names */
207 len
= min(ins
->len
, entry
->len
);
208 return memcmp(ins
->val
, entry
->val
, len
);
/*
 * rb-tree comparator for fs_chunk entries ordered by logical address
 * (linked via member 'l').
 *
 * With @fuzz set, a search key whose logical address falls anywhere
 * inside [entry->logical, entry->logical + entry->bytes) counts as a
 * match -- used when mapping an arbitrary logical offset to its chunk.
 *
 * NOTE(review): the return statements (match / -1 / 1 / 0) are not
 * visible in this fragment -- confirm against the full source.
 */
211 static int chunk_cmp(struct rb_node
*a
, struct rb_node
*b
, int fuzz
)
213 struct fs_chunk
*entry
= rb_entry(a
, struct fs_chunk
, l
);
214 struct fs_chunk
*ins
= rb_entry(b
, struct fs_chunk
, l
);
/* fuzzy match: search key lies within this chunk's logical range */
216 if (fuzz
&& ins
->logical
>= entry
->logical
&&
217 ins
->logical
< entry
->logical
+ entry
->bytes
)
/* strict ordering by logical start address */
220 if (ins
->logical
< entry
->logical
)
222 else if (ins
->logical
> entry
->logical
)
/*
 * rb-tree comparator for fs_chunk entries ordered by physical address
 * (linked via member 'p').
 *
 * With @fuzz set, overlap in EITHER direction counts as a match: the
 * search key starting inside the tree entry, or the tree entry starting
 * inside the search key's range.  This lets overlapping chunks be
 * detected on restore.
 *
 * NOTE(review): the return statements are not visible in this fragment --
 * confirm against the full source.
 */
227 static int physical_cmp(struct rb_node
*a
, struct rb_node
*b
, int fuzz
)
229 struct fs_chunk
*entry
= rb_entry(a
, struct fs_chunk
, p
);
230 struct fs_chunk
*ins
= rb_entry(b
, struct fs_chunk
, p
);
/* fuzzy match: search key starts inside the tree entry's range */
232 if (fuzz
&& ins
->physical
>= entry
->physical
&&
233 ins
->physical
< entry
->physical
+ entry
->bytes
)
/* fuzzy match: tree entry starts inside the search key's range */
236 if (fuzz
&& entry
->physical
>= ins
->physical
&&
237 entry
->physical
< ins
->physical
+ ins
->bytes
)
/* strict ordering by physical start address */
240 if (ins
->physical
< entry
->physical
)
242 else if (ins
->physical
> entry
->physical
)
/*
 * Standard rb-tree insertion using caller-supplied comparator @cmp.
 * Walks from the root to a leaf position, then links and rebalances
 * with rb_link_node()/rb_insert_color().
 *
 * The comparator is invoked with fuzz=1; the code that descends left or
 * right on the comparison result (and the duplicate-key handling, if any)
 * is not visible in this fragment -- confirm against the full source.
 */
247 static void tree_insert(struct rb_root
*root
, struct rb_node
*ins
,
248 int (*cmp
)(struct rb_node
*a
, struct rb_node
*b
,
251 struct rb_node
** p
= &root
->rb_node
;
252 struct rb_node
* parent
= NULL
;
258 dir
= cmp(*p
, ins
, 1);
/* attach the new node at the found leaf slot and rebalance */
267 rb_link_node(ins
, parent
, p
);
268 rb_insert_color(ins
, root
);
/*
 * Standard iterative rb-tree lookup with a caller-supplied comparator.
 * @fuzz is forwarded to @cmp so callers can request range ("contains")
 * matching instead of exact equality.
 *
 * Returns the matching rb_node, or presumably NULL when the walk falls
 * off the tree -- the descend/return logic is not visible in this
 * fragment; confirm against the full source.
 */
271 static struct rb_node
*tree_search(struct rb_root
*root
,
272 struct rb_node
*search
,
273 int (*cmp
)(struct rb_node
*a
,
274 struct rb_node
*b
, int fuzz
),
277 struct rb_node
*n
= root
->rb_node
;
281 dir
= cmp(n
, search
, fuzz
);
/*
 * Translate a filesystem logical address to the physical offset in the
 * restore target, using the chunk mapping tree built from the image.
 *
 * The superblock offset is identity-mapped (the early-return body after
 * the BTRFS_SUPER_INFO_OFFSET check is not visible in this fragment).
 * On lookup failure a diagnostic is printed -- but only when reading
 * from a seekable file rather than stdin -- and the logical address is
 * presumably used as-is.
 *
 * *@size is clamped so the caller never reads/writes past the end of
 * the containing chunk.
 */
293 static u64
logical_to_physical(struct mdrestore_struct
*mdres
, u64 logical
, u64
*size
)
295 struct fs_chunk
*fs_chunk
;
296 struct rb_node
*entry
;
297 struct fs_chunk search
;
/* the superblock lives at a fixed, identity-mapped location */
300 if (logical
== BTRFS_SUPER_INFO_OFFSET
)
303 search
.logical
= logical
;
/* fuzz=1: match the chunk whose logical range contains @logical */
304 entry
= tree_search(&mdres
->chunk_tree
, &search
.l
, chunk_cmp
, 1);
306 if (mdres
->in
!= stdin
)
307 printf("Couldn't find a chunk, using logical\n");
310 fs_chunk
= rb_entry(entry
, struct fs_chunk
, l
);
/* sanity: the fuzzy match must actually contain @logical */
311 if (fs_chunk
->logical
> logical
|| fs_chunk
->logical
+ fs_chunk
->bytes
< logical
)
313 offset
= search
.logical
- fs_chunk
->logical
;
/* clamp the caller's size to what remains of this chunk */
315 *size
= min(*size
, fs_chunk
->bytes
+ fs_chunk
->logical
- logical
);
316 return fs_chunk
->physical
+ offset
;
/*
 * Produce a sanitized replacement for @name that still crc32c-collides
 * with the original, so on-disk dir-index hashes remain consistent.
 *
 * Previously-sanitized names are cached in md->name_tree and reused.
 * Otherwise a new struct name is allocated, a substitute buffer is
 * searched for (starting from all-spaces and stepping characters up to
 * DEL, skipping '/' which is illegal in names), and on failure the code
 * falls back to plain random garbage with a warning.  The result is
 * inserted into the cache before returning.
 *
 * NOTE(review): large parts of the search loop, the error-path frees and
 * the return statements are not visible in this fragment -- the comments
 * above describe only what the visible lines establish; confirm details
 * against the full source.
 */
320 static char *find_collision(struct metadump_struct
*md
, char *name
,
324 struct rb_node
*entry
;
326 unsigned long checksum
;
/* exact lookup (fuzz=0): have we sanitized this name before? */
332 entry
= tree_search(&md
->name_tree
, &tmp
.n
, name_cmp
, 0);
334 val
= rb_entry(entry
, struct name
, n
);
339 val
= malloc(sizeof(struct name
));
341 fprintf(stderr
, "Couldn't sanitize name, enomem\n");
346 memset(val
, 0, sizeof(*val
));
350 val
->sub
= malloc(name_len
);
352 fprintf(stderr
, "Couldn't sanitize name, enomem\n");
/* target checksum of the original name; seed matches the fs hash use */
358 checksum
= crc32c(~1, val
->val
, name_len
);
/* start the candidate from all spaces and mutate until it collides */
359 memset(val
->sub
, ' ', name_len
);
362 if (crc32c(~1, val
->sub
, name_len
) == checksum
&&
363 memcmp(val
->sub
, val
->val
, val
->len
)) {
/* 127 == DEL: past the printable range, carry into the next position */
368 if (val
->sub
[i
] == 127) {
373 } while (val
->sub
[i
] == 127);
/* '/' is not allowed inside a file name; skip such candidates */
378 if (val
->sub
[i
] == '/')
380 memset(val
->sub
, ' ', i
);
385 if (val
->sub
[i
] == '/')
391 fprintf(stderr
, "Couldn't find a collision for '%.*s', "
392 "generating normal garbage, it won't match indexes\n",
/* fallback: random printable characters (no crc collision) */
394 for (i
= 0; i
< name_len
; i
++) {
395 char c
= rand() % 94 + 33;
/* cache the mapping so the same name sanitizes identically later */
403 tree_insert(&md
->name_tree
, &val
->n
, name_cmp
);
407 static void sanitize_dir_item(struct metadump_struct
*md
, struct extent_buffer
*eb
,
410 struct btrfs_dir_item
*dir_item
;
413 unsigned long name_ptr
;
418 int free_garbage
= (md
->sanitize_names
== 1);
420 dir_item
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
421 total_len
= btrfs_item_size_nr(eb
, slot
);
422 while (cur
< total_len
) {
423 this_len
= sizeof(*dir_item
) +
424 btrfs_dir_name_len(eb
, dir_item
) +
425 btrfs_dir_data_len(eb
, dir_item
);
426 name_ptr
= (unsigned long)(dir_item
+ 1);
427 name_len
= btrfs_dir_name_len(eb
, dir_item
);
429 if (md
->sanitize_names
> 1) {
430 buf
= malloc(name_len
);
432 fprintf(stderr
, "Couldn't sanitize name, "
436 read_extent_buffer(eb
, buf
, name_ptr
, name_len
);
437 garbage
= find_collision(md
, buf
, name_len
);
439 garbage
= generate_garbage(name_len
);
442 fprintf(stderr
, "Couldn't sanitize name, enomem\n");
445 write_extent_buffer(eb
, garbage
, name_ptr
, name_len
);
447 dir_item
= (struct btrfs_dir_item
*)((char *)dir_item
+
454 static void sanitize_inode_ref(struct metadump_struct
*md
,
455 struct extent_buffer
*eb
, int slot
, int ext
)
457 struct btrfs_inode_extref
*extref
;
458 struct btrfs_inode_ref
*ref
;
461 unsigned long name_ptr
;
465 int free_garbage
= (md
->sanitize_names
== 1);
467 item_size
= btrfs_item_size_nr(eb
, slot
);
468 ptr
= btrfs_item_ptr_offset(eb
, slot
);
469 while (cur_offset
< item_size
) {
471 extref
= (struct btrfs_inode_extref
*)(ptr
+
473 name_ptr
= (unsigned long)(&extref
->name
);
474 len
= btrfs_inode_extref_name_len(eb
, extref
);
475 cur_offset
+= sizeof(*extref
);
477 ref
= (struct btrfs_inode_ref
*)(ptr
+ cur_offset
);
478 len
= btrfs_inode_ref_name_len(eb
, ref
);
479 name_ptr
= (unsigned long)(ref
+ 1);
480 cur_offset
+= sizeof(*ref
);
484 if (md
->sanitize_names
> 1) {
487 fprintf(stderr
, "Couldn't sanitize name, "
491 read_extent_buffer(eb
, buf
, name_ptr
, len
);
492 garbage
= find_collision(md
, buf
, len
);
494 garbage
= generate_garbage(len
);
498 fprintf(stderr
, "Couldn't sanitize name, enomem\n");
501 write_extent_buffer(eb
, garbage
, name_ptr
, len
);
507 static void sanitize_xattr(struct metadump_struct
*md
,
508 struct extent_buffer
*eb
, int slot
)
510 struct btrfs_dir_item
*dir_item
;
511 unsigned long data_ptr
;
514 dir_item
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
515 data_len
= btrfs_dir_data_len(eb
, dir_item
);
517 data_ptr
= (unsigned long)((char *)(dir_item
+ 1) +
518 btrfs_dir_name_len(eb
, dir_item
));
519 memset_extent_buffer(eb
, 0, data_ptr
, data_len
);
522 static void sanitize_name(struct metadump_struct
*md
, u8
*dst
,
523 struct extent_buffer
*src
, struct btrfs_key
*key
,
526 struct extent_buffer
*eb
;
528 eb
= alloc_dummy_eb(src
->start
, src
->len
);
530 fprintf(stderr
, "Couldn't sanitize name, no memory\n");
534 memcpy(eb
->data
, dst
, eb
->len
);
537 case BTRFS_DIR_ITEM_KEY
:
538 case BTRFS_DIR_INDEX_KEY
:
539 sanitize_dir_item(md
, eb
, slot
);
541 case BTRFS_INODE_REF_KEY
:
542 sanitize_inode_ref(md
, eb
, slot
, 0);
544 case BTRFS_INODE_EXTREF_KEY
:
545 sanitize_inode_ref(md
, eb
, slot
, 1);
547 case BTRFS_XATTR_ITEM_KEY
:
548 sanitize_xattr(md
, eb
, slot
);
554 memcpy(dst
, eb
->data
, eb
->len
);
559 * zero inline extents and csum items
561 static void zero_items(struct metadump_struct
*md
, u8
*dst
,
562 struct extent_buffer
*src
)
564 struct btrfs_file_extent_item
*fi
;
565 struct btrfs_item
*item
;
566 struct btrfs_key key
;
567 u32 nritems
= btrfs_header_nritems(src
);
572 for (i
= 0; i
< nritems
; i
++) {
573 item
= btrfs_item_nr(i
);
574 btrfs_item_key_to_cpu(src
, &key
, i
);
575 if (key
.type
== BTRFS_CSUM_ITEM_KEY
) {
576 size
= btrfs_item_size_nr(src
, i
);
577 memset(dst
+ btrfs_leaf_data(src
) +
578 btrfs_item_offset_nr(src
, i
), 0, size
);
582 if (md
->sanitize_names
&& has_name(&key
)) {
583 sanitize_name(md
, dst
, src
, &key
, i
);
587 if (key
.type
!= BTRFS_EXTENT_DATA_KEY
)
590 fi
= btrfs_item_ptr(src
, i
, struct btrfs_file_extent_item
);
591 extent_type
= btrfs_file_extent_type(src
, fi
);
592 if (extent_type
!= BTRFS_FILE_EXTENT_INLINE
)
595 ptr
= btrfs_file_extent_inline_start(fi
);
596 size
= btrfs_file_extent_inline_item_len(src
, item
);
597 memset(dst
+ ptr
, 0, size
);
602 * copy buffer and zero useless data in the buffer
604 static void copy_buffer(struct metadump_struct
*md
, u8
*dst
,
605 struct extent_buffer
*src
)
611 memcpy(dst
, src
->data
, src
->len
);
612 if (src
->start
== BTRFS_SUPER_INFO_OFFSET
)
615 level
= btrfs_header_level(src
);
616 nritems
= btrfs_header_nritems(src
);
619 size
= sizeof(struct btrfs_header
);
620 memset(dst
+ size
, 0, src
->len
- size
);
621 } else if (level
== 0) {
622 size
= btrfs_leaf_data(src
) +
623 btrfs_item_offset_nr(src
, nritems
- 1) -
624 btrfs_item_nr_offset(nritems
);
625 memset(dst
+ btrfs_item_nr_offset(nritems
), 0, size
);
626 zero_items(md
, dst
, src
);
628 size
= offsetof(struct btrfs_node
, ptrs
) +
629 sizeof(struct btrfs_key_ptr
) * nritems
;
630 memset(dst
+ size
, 0, src
->len
- size
);
632 csum_block(dst
, src
->len
);
635 static void *dump_worker(void *data
)
637 struct metadump_struct
*md
= (struct metadump_struct
*)data
;
638 struct async_work
*async
;
642 pthread_mutex_lock(&md
->mutex
);
643 while (list_empty(&md
->list
)) {
645 pthread_mutex_unlock(&md
->mutex
);
648 pthread_cond_wait(&md
->cond
, &md
->mutex
);
650 async
= list_entry(md
->list
.next
, struct async_work
, list
);
651 list_del_init(&async
->list
);
652 pthread_mutex_unlock(&md
->mutex
);
654 if (md
->compress_level
> 0) {
655 u8
*orig
= async
->buffer
;
657 async
->bufsize
= compressBound(async
->size
);
658 async
->buffer
= malloc(async
->bufsize
);
659 if (!async
->buffer
) {
660 fprintf(stderr
, "Error allocing buffer\n");
661 pthread_mutex_lock(&md
->mutex
);
664 pthread_mutex_unlock(&md
->mutex
);
668 ret
= compress2(async
->buffer
,
669 (unsigned long *)&async
->bufsize
,
670 orig
, async
->size
, md
->compress_level
);
678 pthread_mutex_lock(&md
->mutex
);
680 pthread_mutex_unlock(&md
->mutex
);
/*
 * Reset md->cluster to an empty cluster starting at image offset @start:
 * stamp the magic, record the byte offset, zero the item count and pick
 * the compression method from the configured compression level.  All
 * on-disk header fields are stored little-endian.
 *
 * NOTE(review): any memset/reset of the remaining cluster bytes is not
 * visible in this fragment -- confirm against the full source.
 */
686 static void meta_cluster_init(struct metadump_struct
*md
, u64 start
)
688 struct meta_cluster_header
*header
;
692 header
= &md
->cluster
->header
;
693 header
->magic
= cpu_to_le64(HEADER_MAGIC
);
694 header
->bytenr
= cpu_to_le64(start
);
695 header
->nritems
= cpu_to_le32(0);
/* compression method is fixed for the whole image by compress_level */
696 header
->compress
= md
->compress_level
> 0 ?
697 COMPRESS_ZLIB
: COMPRESS_NONE
;
700 static void metadump_destroy(struct metadump_struct
*md
, int num_threads
)
705 pthread_mutex_lock(&md
->mutex
);
707 pthread_cond_broadcast(&md
->cond
);
708 pthread_mutex_unlock(&md
->mutex
);
710 for (i
= 0; i
< num_threads
; i
++)
711 pthread_join(md
->threads
[i
], NULL
);
713 pthread_cond_destroy(&md
->cond
);
714 pthread_mutex_destroy(&md
->mutex
);
716 while ((n
= rb_first(&md
->name_tree
))) {
719 name
= rb_entry(n
, struct name
, n
);
720 rb_erase(n
, &md
->name_tree
);
729 static int metadump_init(struct metadump_struct
*md
, struct btrfs_root
*root
,
730 FILE *out
, int num_threads
, int compress_level
,
735 memset(md
, 0, sizeof(*md
));
736 pthread_cond_init(&md
->cond
, NULL
);
737 pthread_mutex_init(&md
->mutex
, NULL
);
738 INIT_LIST_HEAD(&md
->list
);
739 INIT_LIST_HEAD(&md
->ordered
);
742 md
->pending_start
= (u64
)-1;
743 md
->compress_level
= compress_level
;
744 md
->cluster
= calloc(1, BLOCK_SIZE
);
745 md
->sanitize_names
= sanitize_names
;
746 if (sanitize_names
> 1)
747 crc32c_optimization_init();
750 pthread_cond_destroy(&md
->cond
);
751 pthread_mutex_destroy(&md
->mutex
);
755 meta_cluster_init(md
, 0);
759 md
->name_tree
.rb_node
= NULL
;
760 md
->num_threads
= num_threads
;
761 md
->threads
= calloc(num_threads
, sizeof(pthread_t
));
764 pthread_cond_destroy(&md
->cond
);
765 pthread_mutex_destroy(&md
->mutex
);
769 for (i
= 0; i
< num_threads
; i
++) {
770 ret
= pthread_create(md
->threads
+ i
, NULL
, dump_worker
, md
);
776 metadump_destroy(md
, i
+ 1);
/*
 * Write @size zero bytes to @out (used to pad the final block of a
 * cluster up to BLOCK_SIZE).  The zero source is a static buffer, so
 * @size must not exceed BLOCK_SIZE -- callers only ever pass the
 * remainder of a block.
 *
 * Returns fwrite()'s item count: 1 on success, 0 on failure (callers
 * check for != 1).
 */
781 static int write_zero(FILE *out
, size_t size
)
783 static char zero
[BLOCK_SIZE
];
784 return fwrite(zero
, size
, 1, out
);
787 static int write_buffers(struct metadump_struct
*md
, u64
*next
)
789 struct meta_cluster_header
*header
= &md
->cluster
->header
;
790 struct meta_cluster_item
*item
;
791 struct async_work
*async
;
797 if (list_empty(&md
->ordered
))
800 /* wait until all buffers are compressed */
801 while (!err
&& md
->num_items
> md
->num_ready
) {
802 struct timespec ts
= {
806 pthread_mutex_unlock(&md
->mutex
);
807 nanosleep(&ts
, NULL
);
808 pthread_mutex_lock(&md
->mutex
);
813 fprintf(stderr
, "One of the threads errored out %s\n",
818 /* setup and write index block */
819 list_for_each_entry(async
, &md
->ordered
, ordered
) {
820 item
= md
->cluster
->items
+ nritems
;
821 item
->bytenr
= cpu_to_le64(async
->start
);
822 item
->size
= cpu_to_le32(async
->bufsize
);
825 header
->nritems
= cpu_to_le32(nritems
);
827 ret
= fwrite(md
->cluster
, BLOCK_SIZE
, 1, md
->out
);
829 fprintf(stderr
, "Error writing out cluster: %d\n", errno
);
834 bytenr
+= le64_to_cpu(header
->bytenr
) + BLOCK_SIZE
;
835 while (!list_empty(&md
->ordered
)) {
836 async
= list_entry(md
->ordered
.next
, struct async_work
,
838 list_del_init(&async
->ordered
);
840 bytenr
+= async
->bufsize
;
842 ret
= fwrite(async
->buffer
, async
->bufsize
, 1,
847 fprintf(stderr
, "Error writing out cluster: %d\n",
855 /* zero unused space in the last block */
856 if (!err
&& bytenr
& BLOCK_MASK
) {
857 size_t size
= BLOCK_SIZE
- (bytenr
& BLOCK_MASK
);
860 ret
= write_zero(md
->out
, size
);
862 fprintf(stderr
, "Error zeroing out buffer: %d\n",
872 static int read_data_extent(struct metadump_struct
*md
,
873 struct async_work
*async
)
875 struct btrfs_root
*root
= md
->root
;
876 u64 bytes_left
= async
->size
;
877 u64 logical
= async
->start
;
884 num_copies
= btrfs_num_copies(&root
->fs_info
->mapping_tree
, logical
,
887 /* Try our best to read data, just like read_tree_block() */
888 for (cur_mirror
= 0; cur_mirror
< num_copies
; cur_mirror
++) {
890 read_len
= bytes_left
;
891 ret
= read_extent_data(root
,
892 (char *)(async
->buffer
+ offset
),
893 logical
, &read_len
, cur_mirror
);
898 bytes_left
-= read_len
;
906 static int get_dev_fd(struct btrfs_root
*root
)
908 struct btrfs_device
*dev
;
910 dev
= list_first_entry(&root
->fs_info
->fs_devices
->devices
,
911 struct btrfs_device
, dev_list
);
915 static int flush_pending(struct metadump_struct
*md
, int done
)
917 struct async_work
*async
= NULL
;
918 struct extent_buffer
*eb
;
919 u64 blocksize
= md
->root
->nodesize
;
925 if (md
->pending_size
) {
926 async
= calloc(1, sizeof(*async
));
930 async
->start
= md
->pending_start
;
931 async
->size
= md
->pending_size
;
932 async
->bufsize
= async
->size
;
933 async
->buffer
= malloc(async
->bufsize
);
934 if (!async
->buffer
) {
939 start
= async
->start
;
943 ret
= read_data_extent(md
, async
);
952 * Balance can make the mapping not cover the super block, so
953 * just copy directly from one of the devices.
955 if (start
== BTRFS_SUPER_INFO_OFFSET
) {
956 int fd
= get_dev_fd(md
->root
);
958 ret
= pread64(fd
, async
->buffer
, size
, start
);
962 fprintf(stderr
, "Error reading superblock\n");
969 while (!md
->data
&& size
> 0) {
970 u64 this_read
= min(blocksize
, size
);
971 eb
= read_tree_block(md
->root
, start
, this_read
, 0);
972 if (!extent_buffer_uptodate(eb
)) {
976 "Error reading metadata block\n");
979 copy_buffer(md
, async
->buffer
+ offset
, eb
);
980 free_extent_buffer(eb
);
986 md
->pending_start
= (u64
)-1;
987 md
->pending_size
= 0;
992 pthread_mutex_lock(&md
->mutex
);
994 list_add_tail(&async
->ordered
, &md
->ordered
);
996 if (md
->compress_level
> 0) {
997 list_add_tail(&async
->list
, &md
->list
);
998 pthread_cond_signal(&md
->cond
);
1003 if (md
->num_items
>= ITEMS_PER_CLUSTER
|| done
) {
1004 ret
= write_buffers(md
, &start
);
1006 fprintf(stderr
, "Error writing buffers %d\n",
1009 meta_cluster_init(md
, start
);
1011 pthread_mutex_unlock(&md
->mutex
);
1015 static int add_extent(u64 start
, u64 size
, struct metadump_struct
*md
,
1019 if (md
->data
!= data
||
1020 md
->pending_size
+ size
> MAX_PENDING_SIZE
||
1021 md
->pending_start
+ md
->pending_size
!= start
) {
1022 ret
= flush_pending(md
, 0);
1025 md
->pending_start
= start
;
1027 readahead_tree_block(md
->root
, start
, size
, 0);
1028 md
->pending_size
+= size
;
1033 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1034 static int is_tree_block(struct btrfs_root
*extent_root
,
1035 struct btrfs_path
*path
, u64 bytenr
)
1037 struct extent_buffer
*leaf
;
1038 struct btrfs_key key
;
1042 leaf
= path
->nodes
[0];
1044 struct btrfs_extent_ref_v0
*ref_item
;
1046 if (path
->slots
[0] >= btrfs_header_nritems(leaf
)) {
1047 ret
= btrfs_next_leaf(extent_root
, path
);
1052 leaf
= path
->nodes
[0];
1054 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
1055 if (key
.objectid
!= bytenr
)
1057 if (key
.type
!= BTRFS_EXTENT_REF_V0_KEY
)
1059 ref_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
1060 struct btrfs_extent_ref_v0
);
1061 ref_objectid
= btrfs_ref_objectid_v0(leaf
, ref_item
);
1062 if (ref_objectid
< BTRFS_FIRST_FREE_OBJECTID
)
1070 static int copy_tree_blocks(struct btrfs_root
*root
, struct extent_buffer
*eb
,
1071 struct metadump_struct
*metadump
, int root_tree
)
1073 struct extent_buffer
*tmp
;
1074 struct btrfs_root_item
*ri
;
1075 struct btrfs_key key
;
1082 ret
= add_extent(btrfs_header_bytenr(eb
), root
->leafsize
, metadump
, 0);
1084 fprintf(stderr
, "Error adding metadata block\n");
1088 if (btrfs_header_level(eb
) == 0 && !root_tree
)
1091 level
= btrfs_header_level(eb
);
1092 nritems
= btrfs_header_nritems(eb
);
1093 for (i
= 0; i
< nritems
; i
++) {
1095 btrfs_item_key_to_cpu(eb
, &key
, i
);
1096 if (key
.type
!= BTRFS_ROOT_ITEM_KEY
)
1098 ri
= btrfs_item_ptr(eb
, i
, struct btrfs_root_item
);
1099 bytenr
= btrfs_disk_root_bytenr(eb
, ri
);
1100 tmp
= read_tree_block(root
, bytenr
, root
->leafsize
, 0);
1101 if (!extent_buffer_uptodate(tmp
)) {
1103 "Error reading log root block\n");
1106 ret
= copy_tree_blocks(root
, tmp
, metadump
, 0);
1107 free_extent_buffer(tmp
);
1111 bytenr
= btrfs_node_blockptr(eb
, i
);
1112 tmp
= read_tree_block(root
, bytenr
, root
->leafsize
, 0);
1113 if (!extent_buffer_uptodate(tmp
)) {
1114 fprintf(stderr
, "Error reading log block\n");
1117 ret
= copy_tree_blocks(root
, tmp
, metadump
, root_tree
);
1118 free_extent_buffer(tmp
);
1127 static int copy_log_trees(struct btrfs_root
*root
,
1128 struct metadump_struct
*metadump
,
1129 struct btrfs_path
*path
)
1131 u64 blocknr
= btrfs_super_log_root(root
->fs_info
->super_copy
);
1136 if (!root
->fs_info
->log_root_tree
||
1137 !root
->fs_info
->log_root_tree
->node
) {
1138 fprintf(stderr
, "Error copying tree log, it wasn't setup\n");
1142 return copy_tree_blocks(root
, root
->fs_info
->log_root_tree
->node
,
1146 static int copy_space_cache(struct btrfs_root
*root
,
1147 struct metadump_struct
*metadump
,
1148 struct btrfs_path
*path
)
1150 struct extent_buffer
*leaf
;
1151 struct btrfs_file_extent_item
*fi
;
1152 struct btrfs_key key
;
1153 u64 bytenr
, num_bytes
;
1156 root
= root
->fs_info
->tree_root
;
1159 key
.type
= BTRFS_EXTENT_DATA_KEY
;
1162 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
1164 fprintf(stderr
, "Error searching for free space inode %d\n",
1169 leaf
= path
->nodes
[0];
1172 if (path
->slots
[0] >= btrfs_header_nritems(leaf
)) {
1173 ret
= btrfs_next_leaf(root
, path
);
1175 fprintf(stderr
, "Error going to next leaf "
1181 leaf
= path
->nodes
[0];
1184 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
1185 if (key
.type
!= BTRFS_EXTENT_DATA_KEY
) {
1190 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
1191 struct btrfs_file_extent_item
);
1192 if (btrfs_file_extent_type(leaf
, fi
) !=
1193 BTRFS_FILE_EXTENT_REG
) {
1198 bytenr
= btrfs_file_extent_disk_bytenr(leaf
, fi
);
1199 num_bytes
= btrfs_file_extent_disk_num_bytes(leaf
, fi
);
1200 ret
= add_extent(bytenr
, num_bytes
, metadump
, 1);
1202 fprintf(stderr
, "Error adding space cache blocks %d\n",
1204 btrfs_release_path(path
);
1213 static int copy_from_extent_tree(struct metadump_struct
*metadump
,
1214 struct btrfs_path
*path
)
1216 struct btrfs_root
*extent_root
;
1217 struct extent_buffer
*leaf
;
1218 struct btrfs_extent_item
*ei
;
1219 struct btrfs_key key
;
1224 extent_root
= metadump
->root
->fs_info
->extent_root
;
1225 bytenr
= BTRFS_SUPER_INFO_OFFSET
+ BTRFS_SUPER_INFO_SIZE
;
1226 key
.objectid
= bytenr
;
1227 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
1230 ret
= btrfs_search_slot(NULL
, extent_root
, &key
, path
, 0, 0);
1232 fprintf(stderr
, "Error searching extent root %d\n", ret
);
1237 leaf
= path
->nodes
[0];
1240 if (path
->slots
[0] >= btrfs_header_nritems(leaf
)) {
1241 ret
= btrfs_next_leaf(extent_root
, path
);
1243 fprintf(stderr
, "Error going to next leaf %d"
1251 leaf
= path
->nodes
[0];
1254 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
1255 if (key
.objectid
< bytenr
||
1256 (key
.type
!= BTRFS_EXTENT_ITEM_KEY
&&
1257 key
.type
!= BTRFS_METADATA_ITEM_KEY
)) {
1262 bytenr
= key
.objectid
;
1263 if (key
.type
== BTRFS_METADATA_ITEM_KEY
)
1264 num_bytes
= extent_root
->leafsize
;
1266 num_bytes
= key
.offset
;
1268 if (btrfs_item_size_nr(leaf
, path
->slots
[0]) > sizeof(*ei
)) {
1269 ei
= btrfs_item_ptr(leaf
, path
->slots
[0],
1270 struct btrfs_extent_item
);
1271 if (btrfs_extent_flags(leaf
, ei
) &
1272 BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
1273 ret
= add_extent(bytenr
, num_bytes
, metadump
,
1276 fprintf(stderr
, "Error adding block "
1282 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1283 ret
= is_tree_block(extent_root
, path
, bytenr
);
1285 fprintf(stderr
, "Error checking tree block "
1291 ret
= add_extent(bytenr
, num_bytes
, metadump
,
1294 fprintf(stderr
, "Error adding block "
1301 fprintf(stderr
, "Either extent tree corruption or "
1302 "you haven't built with V0 support\n");
1307 bytenr
+= num_bytes
;
1310 btrfs_release_path(path
);
1315 static int create_metadump(const char *input
, FILE *out
, int num_threads
,
1316 int compress_level
, int sanitize
, int walk_trees
)
1318 struct btrfs_root
*root
;
1319 struct btrfs_path
*path
= NULL
;
1320 struct metadump_struct metadump
;
1324 root
= open_ctree(input
, 0, 0);
1326 fprintf(stderr
, "Open ctree failed\n");
1330 BUG_ON(root
->nodesize
!= root
->leafsize
);
1332 ret
= metadump_init(&metadump
, root
, out
, num_threads
,
1333 compress_level
, sanitize
);
1335 fprintf(stderr
, "Error initing metadump %d\n", ret
);
1340 ret
= add_extent(BTRFS_SUPER_INFO_OFFSET
, BTRFS_SUPER_INFO_SIZE
,
1343 fprintf(stderr
, "Error adding metadata %d\n", ret
);
1348 path
= btrfs_alloc_path();
1350 fprintf(stderr
, "Out of memory allocing path\n");
1356 ret
= copy_tree_blocks(root
, root
->fs_info
->chunk_root
->node
,
1363 ret
= copy_tree_blocks(root
, root
->fs_info
->tree_root
->node
,
1370 ret
= copy_from_extent_tree(&metadump
, path
);
1377 ret
= copy_log_trees(root
, &metadump
, path
);
1383 ret
= copy_space_cache(root
, &metadump
, path
);
1385 ret
= flush_pending(&metadump
, 1);
1389 fprintf(stderr
, "Error flushing pending %d\n", ret
);
1392 metadump_destroy(&metadump
, num_threads
);
1394 btrfs_free_path(path
);
1395 ret
= close_ctree(root
);
1396 return err
? err
: ret
;
1399 static void update_super_old(u8
*buffer
)
1401 struct btrfs_super_block
*super
= (struct btrfs_super_block
*)buffer
;
1402 struct btrfs_chunk
*chunk
;
1403 struct btrfs_disk_key
*key
;
1404 u32 sectorsize
= btrfs_super_sectorsize(super
);
1405 u64 flags
= btrfs_super_flags(super
);
1407 flags
|= BTRFS_SUPER_FLAG_METADUMP
;
1408 btrfs_set_super_flags(super
, flags
);
1410 key
= (struct btrfs_disk_key
*)(super
->sys_chunk_array
);
1411 chunk
= (struct btrfs_chunk
*)(super
->sys_chunk_array
+
1412 sizeof(struct btrfs_disk_key
));
1414 btrfs_set_disk_key_objectid(key
, BTRFS_FIRST_CHUNK_TREE_OBJECTID
);
1415 btrfs_set_disk_key_type(key
, BTRFS_CHUNK_ITEM_KEY
);
1416 btrfs_set_disk_key_offset(key
, 0);
1418 btrfs_set_stack_chunk_length(chunk
, (u64
)-1);
1419 btrfs_set_stack_chunk_owner(chunk
, BTRFS_EXTENT_TREE_OBJECTID
);
1420 btrfs_set_stack_chunk_stripe_len(chunk
, BTRFS_STRIPE_LEN
);
1421 btrfs_set_stack_chunk_type(chunk
, BTRFS_BLOCK_GROUP_SYSTEM
);
1422 btrfs_set_stack_chunk_io_align(chunk
, sectorsize
);
1423 btrfs_set_stack_chunk_io_width(chunk
, sectorsize
);
1424 btrfs_set_stack_chunk_sector_size(chunk
, sectorsize
);
1425 btrfs_set_stack_chunk_num_stripes(chunk
, 1);
1426 btrfs_set_stack_chunk_sub_stripes(chunk
, 0);
1427 chunk
->stripe
.devid
= super
->dev_item
.devid
;
1428 btrfs_set_stack_stripe_offset(&chunk
->stripe
, 0);
1429 memcpy(chunk
->stripe
.dev_uuid
, super
->dev_item
.uuid
, BTRFS_UUID_SIZE
);
1430 btrfs_set_super_sys_array_size(super
, sizeof(*key
) + sizeof(*chunk
));
1431 csum_block(buffer
, BTRFS_SUPER_INFO_SIZE
);
1434 static int update_super(struct mdrestore_struct
*mdres
, u8
*buffer
)
1436 struct btrfs_super_block
*super
= (struct btrfs_super_block
*)buffer
;
1437 struct btrfs_chunk
*chunk
;
1438 struct btrfs_disk_key
*disk_key
;
1439 struct btrfs_key key
;
1440 u64 flags
= btrfs_super_flags(super
);
1441 u32 new_array_size
= 0;
1444 u8
*ptr
, *write_ptr
;
1445 int old_num_stripes
;
1447 write_ptr
= ptr
= super
->sys_chunk_array
;
1448 array_size
= btrfs_super_sys_array_size(super
);
1450 while (cur
< array_size
) {
1451 disk_key
= (struct btrfs_disk_key
*)ptr
;
1452 btrfs_disk_key_to_cpu(&key
, disk_key
);
1454 new_array_size
+= sizeof(*disk_key
);
1455 memmove(write_ptr
, ptr
, sizeof(*disk_key
));
1457 write_ptr
+= sizeof(*disk_key
);
1458 ptr
+= sizeof(*disk_key
);
1459 cur
+= sizeof(*disk_key
);
1461 if (key
.type
== BTRFS_CHUNK_ITEM_KEY
) {
1462 u64 physical
, size
= 0;
1464 chunk
= (struct btrfs_chunk
*)ptr
;
1465 old_num_stripes
= btrfs_stack_chunk_num_stripes(chunk
);
1466 chunk
= (struct btrfs_chunk
*)write_ptr
;
1468 memmove(write_ptr
, ptr
, sizeof(*chunk
));
1469 btrfs_set_stack_chunk_num_stripes(chunk
, 1);
1470 btrfs_set_stack_chunk_sub_stripes(chunk
, 0);
1471 btrfs_set_stack_chunk_type(chunk
,
1472 BTRFS_BLOCK_GROUP_SYSTEM
);
1473 btrfs_set_stack_stripe_devid(&chunk
->stripe
,
1474 super
->dev_item
.devid
);
1475 physical
= logical_to_physical(mdres
, key
.offset
,
1477 if (size
!= (u64
)-1)
1478 btrfs_set_stack_stripe_offset(&chunk
->stripe
,
1480 memcpy(chunk
->stripe
.dev_uuid
, super
->dev_item
.uuid
,
1482 new_array_size
+= sizeof(*chunk
);
1484 fprintf(stderr
, "Bogus key in the sys chunk array "
1488 write_ptr
+= sizeof(*chunk
);
1489 ptr
+= btrfs_chunk_item_size(old_num_stripes
);
1490 cur
+= btrfs_chunk_item_size(old_num_stripes
);
1493 if (mdres
->clear_space_cache
)
1494 btrfs_set_super_cache_generation(super
, 0);
1496 flags
|= BTRFS_SUPER_FLAG_METADUMP_V2
;
1497 btrfs_set_super_flags(super
, flags
);
1498 btrfs_set_super_sys_array_size(super
, new_array_size
);
1499 csum_block(buffer
, BTRFS_SUPER_INFO_SIZE
);
1504 static struct extent_buffer
*alloc_dummy_eb(u64 bytenr
, u32 size
)
1506 struct extent_buffer
*eb
;
1508 eb
= calloc(1, sizeof(struct extent_buffer
) + size
);
1517 static void truncate_item(struct extent_buffer
*eb
, int slot
, u32 new_size
)
1519 struct btrfs_item
*item
;
1527 old_size
= btrfs_item_size_nr(eb
, slot
);
1528 if (old_size
== new_size
)
1531 nritems
= btrfs_header_nritems(eb
);
1532 data_end
= btrfs_item_offset_nr(eb
, nritems
- 1);
1534 old_data_start
= btrfs_item_offset_nr(eb
, slot
);
1535 size_diff
= old_size
- new_size
;
1537 for (i
= slot
; i
< nritems
; i
++) {
1539 item
= btrfs_item_nr(i
);
1540 ioff
= btrfs_item_offset(eb
, item
);
1541 btrfs_set_item_offset(eb
, item
, ioff
+ size_diff
);
1544 memmove_extent_buffer(eb
, btrfs_leaf_data(eb
) + data_end
+ size_diff
,
1545 btrfs_leaf_data(eb
) + data_end
,
1546 old_data_start
+ new_size
- data_end
);
1547 item
= btrfs_item_nr(slot
);
1548 btrfs_set_item_size(eb
, item
, new_size
);
1551 static int fixup_chunk_tree_block(struct mdrestore_struct
*mdres
,
1552 struct async_work
*async
, u8
*buffer
,
1555 struct extent_buffer
*eb
;
1556 size_t size_left
= size
;
1557 u64 bytenr
= async
->start
;
1560 if (size_left
% mdres
->leafsize
)
1563 eb
= alloc_dummy_eb(bytenr
, mdres
->leafsize
);
1569 memcpy(eb
->data
, buffer
, mdres
->leafsize
);
1571 if (btrfs_header_bytenr(eb
) != bytenr
)
1573 if (memcmp(mdres
->fsid
,
1574 eb
->data
+ offsetof(struct btrfs_header
, fsid
),
1578 if (btrfs_header_owner(eb
) != BTRFS_CHUNK_TREE_OBJECTID
)
1581 if (btrfs_header_level(eb
) != 0)
1584 for (i
= 0; i
< btrfs_header_nritems(eb
); i
++) {
1585 struct btrfs_chunk chunk
;
1586 struct btrfs_key key
;
1587 u64 type
, physical
, size
= (u64
)-1;
1589 btrfs_item_key_to_cpu(eb
, &key
, i
);
1590 if (key
.type
!= BTRFS_CHUNK_ITEM_KEY
)
1592 truncate_item(eb
, i
, sizeof(chunk
));
1593 read_extent_buffer(eb
, &chunk
,
1594 btrfs_item_ptr_offset(eb
, i
),
1598 physical
= logical_to_physical(mdres
, key
.offset
,
1601 /* Zero out the RAID profile */
1602 type
= btrfs_stack_chunk_type(&chunk
);
1603 type
&= (BTRFS_BLOCK_GROUP_DATA
|
1604 BTRFS_BLOCK_GROUP_SYSTEM
|
1605 BTRFS_BLOCK_GROUP_METADATA
|
1606 BTRFS_BLOCK_GROUP_DUP
);
1607 btrfs_set_stack_chunk_type(&chunk
, type
);
1609 btrfs_set_stack_chunk_num_stripes(&chunk
, 1);
1610 btrfs_set_stack_chunk_sub_stripes(&chunk
, 0);
1611 btrfs_set_stack_stripe_devid(&chunk
.stripe
, mdres
->devid
);
1612 if (size
!= (u64
)-1)
1613 btrfs_set_stack_stripe_offset(&chunk
.stripe
,
1615 memcpy(chunk
.stripe
.dev_uuid
, mdres
->uuid
,
1617 write_extent_buffer(eb
, &chunk
,
1618 btrfs_item_ptr_offset(eb
, i
),
1621 memcpy(buffer
, eb
->data
, eb
->len
);
1622 csum_block(buffer
, eb
->len
);
1624 size_left
-= mdres
->leafsize
;
1625 buffer
+= mdres
->leafsize
;
1626 bytenr
+= mdres
->leafsize
;
1633 static void write_backup_supers(int fd
, u8
*buf
)
1635 struct btrfs_super_block
*super
= (struct btrfs_super_block
*)buf
;
1642 if (fstat(fd
, &st
)) {
1643 fprintf(stderr
, "Couldn't stat restore point, won't be able "
1644 "to write backup supers: %d\n", errno
);
1648 size
= btrfs_device_size(fd
, &st
);
1650 for (i
= 1; i
< BTRFS_SUPER_MIRROR_MAX
; i
++) {
1651 bytenr
= btrfs_sb_offset(i
);
1652 if (bytenr
+ BTRFS_SUPER_INFO_SIZE
> size
)
1654 btrfs_set_super_bytenr(super
, bytenr
);
1655 csum_block(buf
, BTRFS_SUPER_INFO_SIZE
);
1656 ret
= pwrite64(fd
, buf
, BTRFS_SUPER_INFO_SIZE
, bytenr
);
1657 if (ret
< BTRFS_SUPER_INFO_SIZE
) {
1659 fprintf(stderr
, "Problem writing out backup "
1660 "super block %d, err %d\n", i
, errno
);
1662 fprintf(stderr
, "Short write writing out "
1663 "backup super block\n");
1669 static void *restore_worker(void *data
)
1671 struct mdrestore_struct
*mdres
= (struct mdrestore_struct
*)data
;
1672 struct async_work
*async
;
1678 int compress_size
= MAX_PENDING_SIZE
* 4;
1680 outfd
= fileno(mdres
->out
);
1681 buffer
= malloc(compress_size
);
1683 fprintf(stderr
, "Error allocing buffer\n");
1684 pthread_mutex_lock(&mdres
->mutex
);
1686 mdres
->error
= -ENOMEM
;
1687 pthread_mutex_unlock(&mdres
->mutex
);
1696 pthread_mutex_lock(&mdres
->mutex
);
1697 while (!mdres
->leafsize
|| list_empty(&mdres
->list
)) {
1699 pthread_mutex_unlock(&mdres
->mutex
);
1702 pthread_cond_wait(&mdres
->cond
, &mdres
->mutex
);
1704 async
= list_entry(mdres
->list
.next
, struct async_work
, list
);
1705 list_del_init(&async
->list
);
1706 pthread_mutex_unlock(&mdres
->mutex
);
1708 if (mdres
->compress_method
== COMPRESS_ZLIB
) {
1709 size
= compress_size
;
1710 ret
= uncompress(buffer
, (unsigned long *)&size
,
1711 async
->buffer
, async
->bufsize
);
1713 fprintf(stderr
, "Error decompressing %d\n",
1719 outbuf
= async
->buffer
;
1720 size
= async
->bufsize
;
1723 if (!mdres
->multi_devices
) {
1724 if (async
->start
== BTRFS_SUPER_INFO_OFFSET
) {
1725 if (mdres
->old_restore
) {
1726 update_super_old(outbuf
);
1728 ret
= update_super(mdres
, outbuf
);
1732 } else if (!mdres
->old_restore
) {
1733 ret
= fixup_chunk_tree_block(mdres
, async
, outbuf
, size
);
1739 if (!mdres
->fixup_offset
) {
1741 u64 chunk_size
= size
;
1742 if (!mdres
->multi_devices
&& !mdres
->old_restore
)
1743 bytenr
= logical_to_physical(mdres
,
1744 async
->start
+ offset
,
1747 bytenr
= async
->start
+ offset
;
1749 ret
= pwrite64(outfd
, outbuf
+offset
, chunk_size
,
1751 if (ret
!= chunk_size
) {
1753 fprintf(stderr
, "Error writing to "
1754 "device %d\n", errno
);
1758 fprintf(stderr
, "Short write\n");
1764 offset
+= chunk_size
;
1766 } else if (async
->start
!= BTRFS_SUPER_INFO_OFFSET
) {
1767 ret
= write_data_to_disk(mdres
->info
, outbuf
, async
->start
, size
, 0);
1769 printk("Error write data\n");
1775 /* backup super blocks are already there at fixup_offset stage */
1776 if (!mdres
->multi_devices
&& async
->start
== BTRFS_SUPER_INFO_OFFSET
)
1777 write_backup_supers(outfd
, outbuf
);
1779 pthread_mutex_lock(&mdres
->mutex
);
1780 if (err
&& !mdres
->error
)
1783 pthread_mutex_unlock(&mdres
->mutex
);
1785 free(async
->buffer
);
1793 static void mdrestore_destroy(struct mdrestore_struct
*mdres
, int num_threads
)
1798 while ((n
= rb_first(&mdres
->chunk_tree
))) {
1799 struct fs_chunk
*entry
;
1801 entry
= rb_entry(n
, struct fs_chunk
, l
);
1802 rb_erase(n
, &mdres
->chunk_tree
);
1803 rb_erase(&entry
->p
, &mdres
->physical_tree
);
1806 pthread_mutex_lock(&mdres
->mutex
);
1808 pthread_cond_broadcast(&mdres
->cond
);
1809 pthread_mutex_unlock(&mdres
->mutex
);
1811 for (i
= 0; i
< num_threads
; i
++)
1812 pthread_join(mdres
->threads
[i
], NULL
);
1814 pthread_cond_destroy(&mdres
->cond
);
1815 pthread_mutex_destroy(&mdres
->mutex
);
1816 free(mdres
->threads
);
1819 static int mdrestore_init(struct mdrestore_struct
*mdres
,
1820 FILE *in
, FILE *out
, int old_restore
,
1821 int num_threads
, int fixup_offset
,
1822 struct btrfs_fs_info
*info
, int multi_devices
)
1826 memset(mdres
, 0, sizeof(*mdres
));
1827 pthread_cond_init(&mdres
->cond
, NULL
);
1828 pthread_mutex_init(&mdres
->mutex
, NULL
);
1829 INIT_LIST_HEAD(&mdres
->list
);
1830 INIT_LIST_HEAD(&mdres
->overlapping_chunks
);
1833 mdres
->old_restore
= old_restore
;
1834 mdres
->chunk_tree
.rb_node
= NULL
;
1835 mdres
->fixup_offset
= fixup_offset
;
1837 mdres
->multi_devices
= multi_devices
;
1838 mdres
->clear_space_cache
= 0;
1839 mdres
->last_physical_offset
= 0;
1840 mdres
->alloced_chunks
= 0;
1845 mdres
->num_threads
= num_threads
;
1846 mdres
->threads
= calloc(num_threads
, sizeof(pthread_t
));
1847 if (!mdres
->threads
)
1849 for (i
= 0; i
< num_threads
; i
++) {
1850 ret
= pthread_create(mdres
->threads
+ i
, NULL
, restore_worker
,
1856 mdrestore_destroy(mdres
, i
+ 1);
1860 static int fill_mdres_info(struct mdrestore_struct
*mdres
,
1861 struct async_work
*async
)
1863 struct btrfs_super_block
*super
;
1868 /* We've already been initialized */
1869 if (mdres
->leafsize
)
1872 if (mdres
->compress_method
== COMPRESS_ZLIB
) {
1873 size_t size
= MAX_PENDING_SIZE
* 2;
1875 buffer
= malloc(MAX_PENDING_SIZE
* 2);
1878 ret
= uncompress(buffer
, (unsigned long *)&size
,
1879 async
->buffer
, async
->bufsize
);
1881 fprintf(stderr
, "Error decompressing %d\n", ret
);
1887 outbuf
= async
->buffer
;
1890 super
= (struct btrfs_super_block
*)outbuf
;
1891 mdres
->leafsize
= btrfs_super_leafsize(super
);
1892 memcpy(mdres
->fsid
, super
->fsid
, BTRFS_FSID_SIZE
);
1893 memcpy(mdres
->uuid
, super
->dev_item
.uuid
,
1895 mdres
->devid
= le64_to_cpu(super
->dev_item
.devid
);
1900 static int add_cluster(struct meta_cluster
*cluster
,
1901 struct mdrestore_struct
*mdres
, u64
*next
)
1903 struct meta_cluster_item
*item
;
1904 struct meta_cluster_header
*header
= &cluster
->header
;
1905 struct async_work
*async
;
1910 mdres
->compress_method
= header
->compress
;
1912 bytenr
= le64_to_cpu(header
->bytenr
) + BLOCK_SIZE
;
1913 nritems
= le32_to_cpu(header
->nritems
);
1914 for (i
= 0; i
< nritems
; i
++) {
1915 item
= &cluster
->items
[i
];
1916 async
= calloc(1, sizeof(*async
));
1918 fprintf(stderr
, "Error allocating async\n");
1921 async
->start
= le64_to_cpu(item
->bytenr
);
1922 async
->bufsize
= le32_to_cpu(item
->size
);
1923 async
->buffer
= malloc(async
->bufsize
);
1924 if (!async
->buffer
) {
1925 fprintf(stderr
, "Error allocing async buffer\n");
1929 ret
= fread(async
->buffer
, async
->bufsize
, 1, mdres
->in
);
1931 fprintf(stderr
, "Error reading buffer %d\n", errno
);
1932 free(async
->buffer
);
1936 bytenr
+= async
->bufsize
;
1938 pthread_mutex_lock(&mdres
->mutex
);
1939 if (async
->start
== BTRFS_SUPER_INFO_OFFSET
) {
1940 ret
= fill_mdres_info(mdres
, async
);
1942 fprintf(stderr
, "Error setting up restore\n");
1943 pthread_mutex_unlock(&mdres
->mutex
);
1944 free(async
->buffer
);
1949 list_add_tail(&async
->list
, &mdres
->list
);
1951 pthread_cond_signal(&mdres
->cond
);
1952 pthread_mutex_unlock(&mdres
->mutex
);
1954 if (bytenr
& BLOCK_MASK
) {
1955 char buffer
[BLOCK_MASK
];
1956 size_t size
= BLOCK_SIZE
- (bytenr
& BLOCK_MASK
);
1959 ret
= fread(buffer
, size
, 1, mdres
->in
);
1961 fprintf(stderr
, "Error reading in buffer %d\n", errno
);
1969 static int wait_for_worker(struct mdrestore_struct
*mdres
)
1973 pthread_mutex_lock(&mdres
->mutex
);
1975 while (!ret
&& mdres
->num_items
> 0) {
1976 struct timespec ts
= {
1978 .tv_nsec
= 10000000,
1980 pthread_mutex_unlock(&mdres
->mutex
);
1981 nanosleep(&ts
, NULL
);
1982 pthread_mutex_lock(&mdres
->mutex
);
1985 pthread_mutex_unlock(&mdres
->mutex
);
1989 static int read_chunk_block(struct mdrestore_struct
*mdres
, u8
*buffer
,
1990 u64 bytenr
, u64 item_bytenr
, u32 bufsize
,
1993 struct extent_buffer
*eb
;
1997 eb
= alloc_dummy_eb(bytenr
, mdres
->leafsize
);
2003 while (item_bytenr
!= bytenr
) {
2004 buffer
+= mdres
->leafsize
;
2005 item_bytenr
+= mdres
->leafsize
;
2008 memcpy(eb
->data
, buffer
, mdres
->leafsize
);
2009 if (btrfs_header_bytenr(eb
) != bytenr
) {
2010 fprintf(stderr
, "Eb bytenr doesn't match found bytenr\n");
2015 if (memcmp(mdres
->fsid
, eb
->data
+ offsetof(struct btrfs_header
, fsid
),
2017 fprintf(stderr
, "Fsid doesn't match\n");
2022 if (btrfs_header_owner(eb
) != BTRFS_CHUNK_TREE_OBJECTID
) {
2023 fprintf(stderr
, "Does not belong to the chunk tree\n");
2028 for (i
= 0; i
< btrfs_header_nritems(eb
); i
++) {
2029 struct btrfs_chunk chunk
;
2030 struct fs_chunk
*fs_chunk
;
2031 struct btrfs_key key
;
2033 if (btrfs_header_level(eb
)) {
2034 u64 blockptr
= btrfs_node_blockptr(eb
, i
);
2036 ret
= search_for_chunk_blocks(mdres
, blockptr
,
2043 /* Yay a leaf! We loves leafs! */
2044 btrfs_item_key_to_cpu(eb
, &key
, i
);
2045 if (key
.type
!= BTRFS_CHUNK_ITEM_KEY
)
2048 fs_chunk
= malloc(sizeof(struct fs_chunk
));
2050 fprintf(stderr
, "Erorr allocating chunk\n");
2054 memset(fs_chunk
, 0, sizeof(*fs_chunk
));
2055 read_extent_buffer(eb
, &chunk
, btrfs_item_ptr_offset(eb
, i
),
2058 fs_chunk
->logical
= key
.offset
;
2059 fs_chunk
->physical
= btrfs_stack_stripe_offset(&chunk
.stripe
);
2060 fs_chunk
->bytes
= btrfs_stack_chunk_length(&chunk
);
2061 INIT_LIST_HEAD(&fs_chunk
->list
);
2062 if (tree_search(&mdres
->physical_tree
, &fs_chunk
->p
,
2063 physical_cmp
, 1) != NULL
)
2064 list_add(&fs_chunk
->list
, &mdres
->overlapping_chunks
);
2066 tree_insert(&mdres
->physical_tree
, &fs_chunk
->p
,
2068 if (fs_chunk
->physical
+ fs_chunk
->bytes
>
2069 mdres
->last_physical_offset
)
2070 mdres
->last_physical_offset
= fs_chunk
->physical
+
2072 mdres
->alloced_chunks
+= fs_chunk
->bytes
;
2073 tree_insert(&mdres
->chunk_tree
, &fs_chunk
->l
, chunk_cmp
);
2080 /* If you have to ask you aren't worthy */
2081 static int search_for_chunk_blocks(struct mdrestore_struct
*mdres
,
2082 u64 search
, u64 cluster_bytenr
)
2084 struct meta_cluster
*cluster
;
2085 struct meta_cluster_header
*header
;
2086 struct meta_cluster_item
*item
;
2087 u64 current_cluster
= cluster_bytenr
, bytenr
;
2089 u32 bufsize
, nritems
, i
;
2090 u32 max_size
= MAX_PENDING_SIZE
* 2;
2091 u8
*buffer
, *tmp
= NULL
;
2094 cluster
= malloc(BLOCK_SIZE
);
2096 fprintf(stderr
, "Error allocating cluster\n");
2100 buffer
= malloc(max_size
);
2102 fprintf(stderr
, "Error allocing buffer\n");
2107 if (mdres
->compress_method
== COMPRESS_ZLIB
) {
2108 tmp
= malloc(max_size
);
2110 fprintf(stderr
, "Error allocing tmp buffer\n");
2117 bytenr
= current_cluster
;
2119 if (fseek(mdres
->in
, current_cluster
, SEEK_SET
)) {
2120 fprintf(stderr
, "Error seeking: %d\n", errno
);
2125 ret
= fread(cluster
, BLOCK_SIZE
, 1, mdres
->in
);
2127 if (cluster_bytenr
!= 0) {
2129 current_cluster
= 0;
2133 printf("ok this is where we screwed up?\n");
2136 } else if (ret
< 0) {
2137 fprintf(stderr
, "Error reading image\n");
2142 header
= &cluster
->header
;
2143 if (le64_to_cpu(header
->magic
) != HEADER_MAGIC
||
2144 le64_to_cpu(header
->bytenr
) != current_cluster
) {
2145 fprintf(stderr
, "bad header in metadump image\n");
2150 bytenr
+= BLOCK_SIZE
;
2151 nritems
= le32_to_cpu(header
->nritems
);
2152 for (i
= 0; i
< nritems
; i
++) {
2155 item
= &cluster
->items
[i
];
2156 bufsize
= le32_to_cpu(item
->size
);
2157 item_bytenr
= le64_to_cpu(item
->bytenr
);
2159 if (bufsize
> max_size
) {
2160 fprintf(stderr
, "item %u size %u too big\n",
2166 if (mdres
->compress_method
== COMPRESS_ZLIB
) {
2167 ret
= fread(tmp
, bufsize
, 1, mdres
->in
);
2169 fprintf(stderr
, "Error reading: %d\n",
2176 ret
= uncompress(buffer
,
2177 (unsigned long *)&size
, tmp
,
2180 fprintf(stderr
, "Error decompressing "
2186 ret
= fread(buffer
, bufsize
, 1, mdres
->in
);
2188 fprintf(stderr
, "Error reading: %d\n",
2197 if (item_bytenr
<= search
&&
2198 item_bytenr
+ size
> search
) {
2199 ret
= read_chunk_block(mdres
, buffer
, search
,
2213 if (bytenr
& BLOCK_MASK
)
2214 bytenr
+= BLOCK_SIZE
- (bytenr
& BLOCK_MASK
);
2215 current_cluster
= bytenr
;
2224 static int build_chunk_tree(struct mdrestore_struct
*mdres
,
2225 struct meta_cluster
*cluster
)
2227 struct btrfs_super_block
*super
;
2228 struct meta_cluster_header
*header
;
2229 struct meta_cluster_item
*item
= NULL
;
2230 u64 chunk_root_bytenr
= 0;
2236 /* We can't seek with stdin so don't bother doing this */
2237 if (mdres
->in
== stdin
)
2240 ret
= fread(cluster
, BLOCK_SIZE
, 1, mdres
->in
);
2242 fprintf(stderr
, "Error reading in cluster: %d\n", errno
);
2247 header
= &cluster
->header
;
2248 if (le64_to_cpu(header
->magic
) != HEADER_MAGIC
||
2249 le64_to_cpu(header
->bytenr
) != 0) {
2250 fprintf(stderr
, "bad header in metadump image\n");
2254 bytenr
+= BLOCK_SIZE
;
2255 mdres
->compress_method
= header
->compress
;
2256 nritems
= le32_to_cpu(header
->nritems
);
2257 for (i
= 0; i
< nritems
; i
++) {
2258 item
= &cluster
->items
[i
];
2260 if (le64_to_cpu(item
->bytenr
) == BTRFS_SUPER_INFO_OFFSET
)
2262 bytenr
+= le32_to_cpu(item
->size
);
2263 if (fseek(mdres
->in
, le32_to_cpu(item
->size
), SEEK_CUR
)) {
2264 fprintf(stderr
, "Error seeking: %d\n", errno
);
2269 if (!item
|| le64_to_cpu(item
->bytenr
) != BTRFS_SUPER_INFO_OFFSET
) {
2270 fprintf(stderr
, "Huh, didn't find the super?\n");
2274 buffer
= malloc(le32_to_cpu(item
->size
));
2276 fprintf(stderr
, "Error allocing buffer\n");
2280 ret
= fread(buffer
, le32_to_cpu(item
->size
), 1, mdres
->in
);
2282 fprintf(stderr
, "Error reading buffer: %d\n", errno
);
2287 if (mdres
->compress_method
== COMPRESS_ZLIB
) {
2288 size_t size
= MAX_PENDING_SIZE
* 2;
2291 tmp
= malloc(MAX_PENDING_SIZE
* 2);
2296 ret
= uncompress(tmp
, (unsigned long *)&size
,
2297 buffer
, le32_to_cpu(item
->size
));
2299 fprintf(stderr
, "Error decompressing %d\n", ret
);
2308 pthread_mutex_lock(&mdres
->mutex
);
2309 super
= (struct btrfs_super_block
*)buffer
;
2310 chunk_root_bytenr
= btrfs_super_chunk_root(super
);
2311 mdres
->leafsize
= btrfs_super_leafsize(super
);
2312 memcpy(mdres
->fsid
, super
->fsid
, BTRFS_FSID_SIZE
);
2313 memcpy(mdres
->uuid
, super
->dev_item
.uuid
,
2315 mdres
->devid
= le64_to_cpu(super
->dev_item
.devid
);
2317 pthread_mutex_unlock(&mdres
->mutex
);
2319 return search_for_chunk_blocks(mdres
, chunk_root_bytenr
, 0);
2322 static int range_contains_super(u64 physical
, u64 bytes
)
2327 for (i
= 0; i
< BTRFS_SUPER_MIRROR_MAX
; i
++) {
2328 super_bytenr
= btrfs_sb_offset(i
);
2329 if (super_bytenr
>= physical
&&
2330 super_bytenr
< physical
+ bytes
)
2337 static void remap_overlapping_chunks(struct mdrestore_struct
*mdres
)
2339 struct fs_chunk
*fs_chunk
;
2341 while (!list_empty(&mdres
->overlapping_chunks
)) {
2342 fs_chunk
= list_first_entry(&mdres
->overlapping_chunks
,
2343 struct fs_chunk
, list
);
2344 list_del_init(&fs_chunk
->list
);
2345 if (range_contains_super(fs_chunk
->physical
,
2347 fprintf(stderr
, "Remapping a chunk that had a super "
2348 "mirror inside of it, clearing space cache "
2349 "so we don't end up with corruption\n");
2350 mdres
->clear_space_cache
= 1;
2352 fs_chunk
->physical
= mdres
->last_physical_offset
;
2353 tree_insert(&mdres
->physical_tree
, &fs_chunk
->p
, physical_cmp
);
2354 mdres
->last_physical_offset
+= fs_chunk
->bytes
;
2358 static int fixup_devices(struct btrfs_fs_info
*fs_info
,
2359 struct mdrestore_struct
*mdres
, off_t dev_size
)
2361 struct btrfs_trans_handle
*trans
;
2362 struct btrfs_dev_item
*dev_item
;
2363 struct btrfs_path
*path
;
2364 struct extent_buffer
*leaf
;
2365 struct btrfs_root
*root
= fs_info
->chunk_root
;
2366 struct btrfs_key key
;
2367 u64 devid
, cur_devid
;
2370 path
= btrfs_alloc_path();
2372 fprintf(stderr
, "Error alloc'ing path\n");
2376 trans
= btrfs_start_transaction(fs_info
->tree_root
, 1);
2377 if (IS_ERR(trans
)) {
2378 fprintf(stderr
, "Error starting transaction %ld\n",
2380 btrfs_free_path(path
);
2381 return PTR_ERR(trans
);
2384 dev_item
= &fs_info
->super_copy
->dev_item
;
2386 devid
= btrfs_stack_device_id(dev_item
);
2388 btrfs_set_stack_device_total_bytes(dev_item
, dev_size
);
2389 btrfs_set_stack_device_bytes_used(dev_item
, mdres
->alloced_chunks
);
2391 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2392 key
.type
= BTRFS_DEV_ITEM_KEY
;
2396 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
2398 fprintf(stderr
, "search failed %d\n", ret
);
2403 leaf
= path
->nodes
[0];
2404 if (path
->slots
[0] >= btrfs_header_nritems(leaf
)) {
2405 ret
= btrfs_next_leaf(root
, path
);
2407 fprintf(stderr
, "Error going to next leaf "
2415 leaf
= path
->nodes
[0];
2418 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2419 if (key
.type
> BTRFS_DEV_ITEM_KEY
)
2421 if (key
.type
!= BTRFS_DEV_ITEM_KEY
) {
2426 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
2427 struct btrfs_dev_item
);
2428 cur_devid
= btrfs_device_id(leaf
, dev_item
);
2429 if (devid
!= cur_devid
) {
2430 ret
= btrfs_del_item(trans
, root
, path
);
2432 fprintf(stderr
, "Error deleting item %d\n",
2436 btrfs_release_path(path
);
2440 btrfs_set_device_total_bytes(leaf
, dev_item
, dev_size
);
2441 btrfs_set_device_bytes_used(leaf
, dev_item
,
2442 mdres
->alloced_chunks
);
2443 btrfs_mark_buffer_dirty(leaf
);
2447 btrfs_free_path(path
);
2448 ret
= btrfs_commit_transaction(trans
, fs_info
->tree_root
);
2450 fprintf(stderr
, "Commit failed %d\n", ret
);
2456 static int restore_metadump(const char *input
, FILE *out
, int old_restore
,
2457 int num_threads
, int fixup_offset
,
2458 const char *target
, int multi_devices
)
2460 struct meta_cluster
*cluster
= NULL
;
2461 struct meta_cluster_header
*header
;
2462 struct mdrestore_struct mdrestore
;
2463 struct btrfs_fs_info
*info
= NULL
;
2468 if (!strcmp(input
, "-")) {
2471 in
= fopen(input
, "r");
2473 perror("unable to open metadump image");
2478 /* NOTE: open with write mode */
2481 info
= open_ctree_fs_info(target
, 0, 0,
2483 OPEN_CTREE_RESTORE
|
2484 OPEN_CTREE_PARTIAL
);
2486 fprintf(stderr
, "%s: open ctree failed\n", __func__
);
2492 cluster
= malloc(BLOCK_SIZE
);
2494 fprintf(stderr
, "Error allocating cluster\n");
2499 ret
= mdrestore_init(&mdrestore
, in
, out
, old_restore
, num_threads
,
2500 fixup_offset
, info
, multi_devices
);
2502 fprintf(stderr
, "Error initing mdrestore %d\n", ret
);
2503 goto failed_cluster
;
2506 if (!multi_devices
&& !old_restore
) {
2507 ret
= build_chunk_tree(&mdrestore
, cluster
);
2510 if (!list_empty(&mdrestore
.overlapping_chunks
))
2511 remap_overlapping_chunks(&mdrestore
);
2514 if (in
!= stdin
&& fseek(in
, 0, SEEK_SET
)) {
2515 fprintf(stderr
, "Error seeking %d\n", errno
);
2519 while (!mdrestore
.error
) {
2520 ret
= fread(cluster
, BLOCK_SIZE
, 1, in
);
2524 header
= &cluster
->header
;
2525 if (le64_to_cpu(header
->magic
) != HEADER_MAGIC
||
2526 le64_to_cpu(header
->bytenr
) != bytenr
) {
2527 fprintf(stderr
, "bad header in metadump image\n");
2531 ret
= add_cluster(cluster
, &mdrestore
, &bytenr
);
2533 fprintf(stderr
, "Error adding cluster\n");
2537 ret
= wait_for_worker(&mdrestore
);
2539 if (!ret
&& !multi_devices
&& !old_restore
) {
2540 struct btrfs_root
*root
;
2543 root
= open_ctree_fd(fileno(out
), target
, 0,
2544 OPEN_CTREE_PARTIAL
|
2546 OPEN_CTREE_NO_DEVICES
);
2548 fprintf(stderr
, "unable to open %s\n", target
);
2552 info
= root
->fs_info
;
2554 if (stat(target
, &st
)) {
2555 fprintf(stderr
, "statting %s failed\n", target
);
2556 close_ctree(info
->chunk_root
);
2560 ret
= fixup_devices(info
, &mdrestore
, st
.st_size
);
2561 close_ctree(info
->chunk_root
);
2566 mdrestore_destroy(&mdrestore
, num_threads
);
2570 if (fixup_offset
&& info
)
2571 close_ctree(info
->chunk_root
);
2578 static int update_disk_super_on_device(struct btrfs_fs_info
*info
,
2579 const char *other_dev
, u64 cur_devid
)
2581 struct btrfs_key key
;
2582 struct extent_buffer
*leaf
;
2583 struct btrfs_path path
;
2584 struct btrfs_dev_item
*dev_item
;
2585 struct btrfs_super_block
*disk_super
;
2586 char dev_uuid
[BTRFS_UUID_SIZE
];
2587 char fs_uuid
[BTRFS_UUID_SIZE
];
2588 u64 devid
, type
, io_align
, io_width
;
2589 u64 sector_size
, total_bytes
, bytes_used
;
2590 char buf
[BTRFS_SUPER_INFO_SIZE
];
2594 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2595 key
.type
= BTRFS_DEV_ITEM_KEY
;
2596 key
.offset
= cur_devid
;
2598 btrfs_init_path(&path
);
2599 ret
= btrfs_search_slot(NULL
, info
->chunk_root
, &key
, &path
, 0, 0);
2601 fprintf(stderr
, "ERROR: search key failed\n");
2606 leaf
= path
.nodes
[0];
2607 dev_item
= btrfs_item_ptr(leaf
, path
.slots
[0],
2608 struct btrfs_dev_item
);
2610 devid
= btrfs_device_id(leaf
, dev_item
);
2611 if (devid
!= cur_devid
) {
2612 printk("ERROR: devid %llu mismatch with %llu\n", devid
, cur_devid
);
2617 type
= btrfs_device_type(leaf
, dev_item
);
2618 io_align
= btrfs_device_io_align(leaf
, dev_item
);
2619 io_width
= btrfs_device_io_width(leaf
, dev_item
);
2620 sector_size
= btrfs_device_sector_size(leaf
, dev_item
);
2621 total_bytes
= btrfs_device_total_bytes(leaf
, dev_item
);
2622 bytes_used
= btrfs_device_bytes_used(leaf
, dev_item
);
2623 read_extent_buffer(leaf
, dev_uuid
, (unsigned long)btrfs_device_uuid(dev_item
), BTRFS_UUID_SIZE
);
2624 read_extent_buffer(leaf
, fs_uuid
, (unsigned long)btrfs_device_fsid(dev_item
), BTRFS_UUID_SIZE
);
2626 btrfs_release_path(&path
);
2628 printk("update disk super on %s devid=%llu\n", other_dev
, devid
);
2630 /* update other devices' super block */
2631 fp
= open(other_dev
, O_CREAT
| O_RDWR
, 0600);
2633 fprintf(stderr
, "ERROR: could not open %s\n", other_dev
);
2638 memcpy(buf
, info
->super_copy
, BTRFS_SUPER_INFO_SIZE
);
2640 disk_super
= (struct btrfs_super_block
*)buf
;
2641 dev_item
= &disk_super
->dev_item
;
2643 btrfs_set_stack_device_type(dev_item
, type
);
2644 btrfs_set_stack_device_id(dev_item
, devid
);
2645 btrfs_set_stack_device_total_bytes(dev_item
, total_bytes
);
2646 btrfs_set_stack_device_bytes_used(dev_item
, bytes_used
);
2647 btrfs_set_stack_device_io_align(dev_item
, io_align
);
2648 btrfs_set_stack_device_io_width(dev_item
, io_width
);
2649 btrfs_set_stack_device_sector_size(dev_item
, sector_size
);
2650 memcpy(dev_item
->uuid
, dev_uuid
, BTRFS_UUID_SIZE
);
2651 memcpy(dev_item
->fsid
, fs_uuid
, BTRFS_UUID_SIZE
);
2652 csum_block((u8
*)buf
, BTRFS_SUPER_INFO_SIZE
);
2654 ret
= pwrite64(fp
, buf
, BTRFS_SUPER_INFO_SIZE
, BTRFS_SUPER_INFO_OFFSET
);
2655 if (ret
!= BTRFS_SUPER_INFO_SIZE
) {
2657 fprintf(stderr
, "ERROR: cannot write superblock: %s\n", strerror(ret
));
2659 fprintf(stderr
, "ERROR: cannot write superblock\n");
2664 write_backup_supers(fp
, (u8
*)buf
);
/*
 * Print btrfs-image usage to stderr and exit with the given status.
 */
static void print_usage(int ret)
{
	fprintf(stderr, "usage: btrfs-image [options] source target\n");
	fprintf(stderr, "\t-r \trestore metadump image\n");
	fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
	fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
	fprintf(stderr, "\t-o \tdon't mess with the chunk tree when restoring\n");
	fprintf(stderr, "\t-s \tsanitize file names, use once to just use garbage, use twice if you want crc collisions\n");
	fprintf(stderr, "\t-w \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
	fprintf(stderr, "\t-m \trestore for multiple devices\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "\tIn the dump mode, source is the btrfs device and target is the output file (use '-' for stdout).\n");
	fprintf(stderr, "\tIn the restore mode, source is the dumped image and target is the btrfs device/file.\n");
	exit(ret);
}
2688 int main(int argc
, char *argv
[])
2692 u64 num_threads
= 0;
2693 u64 compress_level
= 0;
2695 int old_restore
= 0;
2697 int multi_devices
= 0;
2701 int usage_error
= 0;
2705 static const struct option long_options
[] = {
2706 { "help", no_argument
, NULL
, GETOPT_VAL_HELP
},
2707 { NULL
, 0, NULL
, 0 }
2709 int c
= getopt_long(argc
, argv
, "rc:t:oswm", long_options
, NULL
);
2717 num_threads
= arg_strtou64(optarg
);
2718 if (num_threads
> 32)
2722 compress_level
= arg_strtou64(optarg
);
2723 if (compress_level
> 9)
2739 case GETOPT_VAL_HELP
:
2741 print_usage(c
!= GETOPT_VAL_HELP
);
2745 argc
= argc
- optind
;
2747 if (check_argc_min(argc
, 2))
2754 fprintf(stderr
, "Usage error: create and restore cannot be used at the same time\n");
2758 if (walk_trees
|| sanitize
|| compress_level
) {
2759 fprintf(stderr
, "Usage error: use -w, -s, -c options for restore makes no sense\n");
2762 if (multi_devices
&& dev_cnt
< 2) {
2763 fprintf(stderr
, "Usage error: not enough devices specified for -m option\n");
2766 if (!multi_devices
&& dev_cnt
!= 1) {
2767 fprintf(stderr
, "Usage error: accepts only 1 device without -m option\n");
2775 source
= argv
[optind
];
2776 target
= argv
[optind
+ 1];
2778 if (create
&& !strcmp(target
, "-")) {
2781 out
= fopen(target
, "w+");
2783 perror("unable to create target file");
2788 if (compress_level
> 0 || create
== 0) {
2789 if (num_threads
== 0) {
2790 num_threads
= sysconf(_SC_NPROCESSORS_ONLN
);
2791 if (num_threads
<= 0)
2799 ret
= check_mounted(source
);
2801 fprintf(stderr
, "Could not check mount status: %s\n",
2806 "WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");
2808 ret
= create_metadump(source
, out
, num_threads
,
2809 compress_level
, sanitize
, walk_trees
);
2811 ret
= restore_metadump(source
, out
, old_restore
, num_threads
,
2812 0, target
, multi_devices
);
2815 printk("%s failed (%s)\n", (create
) ? "create" : "restore",
2820 /* extended support for multiple devices */
2821 if (!create
&& multi_devices
) {
2822 struct btrfs_fs_info
*info
;
2826 info
= open_ctree_fs_info(target
, 0, 0,
2827 OPEN_CTREE_PARTIAL
|
2828 OPEN_CTREE_RESTORE
);
2831 fprintf(stderr
, "unable to open %s error = %s\n",
2832 target
, strerror(e
));
2836 total_devs
= btrfs_super_num_devices(info
->super_copy
);
2837 if (total_devs
!= dev_cnt
) {
2838 printk("it needs %llu devices but has only %d\n",
2839 total_devs
, dev_cnt
);
2840 close_ctree(info
->chunk_root
);
2844 /* update super block on other disks */
2845 for (i
= 2; i
<= dev_cnt
; i
++) {
2846 ret
= update_disk_super_on_device(info
,
2847 argv
[optind
+ i
], (u64
)i
);
2849 printk("update disk super failed devid=%d (error=%d)\n",
2851 close_ctree(info
->chunk_root
);
2856 close_ctree(info
->chunk_root
);
2858 /* fix metadata block to map correct chunk */
2859 ret
= restore_metadump(source
, out
, 0, num_threads
, 1,
2862 fprintf(stderr
, "fix metadump failed (error=%d)\n",
2868 if (out
== stdout
) {
2872 if (ret
&& create
) {
2875 unlink_ret
= unlink(target
);
2878 "unlink output file failed : %s\n",
2883 btrfs_close_all_devices();