/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <dirent.h>
#include <zlib.h>
#include <getopt.h>

#include "kerncompat.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "utils.h"
#include "volumes.h"
#include "extent_io.h"

#define HEADER_MAGIC		0xbd5c25e27295668bULL
#define MAX_PENDING_SIZE	(256 * 1024)
#define BLOCK_SIZE		1024
#define BLOCK_MASK		(BLOCK_SIZE - 1)

#define COMPRESS_NONE		0
#define COMPRESS_ZLIB		1
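/*
 * Image format: a sequence of clusters.  Each cluster starts with a
 * BLOCK_SIZE block holding a meta_cluster_header plus an array of
 * meta_cluster_item index entries, followed by the item buffers
 * (zlib compressed when header.compress says so), padded out to the
 * next BLOCK_SIZE boundary.
 */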
struct meta_cluster_item {
	__le64 bytenr;
	__le32 size;
} __attribute__ ((__packed__));

struct meta_cluster_header {
	__le64 magic;
	__le64 bytenr;
	__le32 nritems;
	u8 compress;
} __attribute__ ((__packed__));

/* cluster header + index items + buffers */
struct meta_cluster {
	struct meta_cluster_header header;
	struct meta_cluster_item items[];
} __attribute__ ((__packed__));

#define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
			   sizeof(struct meta_cluster_item))

struct fs_chunk {
	u64 logical;
	u64 physical;
	u64 bytes;
	struct rb_node l;
	struct rb_node p;
	struct list_head list;
};

struct async_work {
	struct list_head list;
	struct list_head ordered;
	u64 start;
	u64 size;
	u8 *buffer;
	size_t bufsize;
	int error;
};

struct metadump_struct {
	struct btrfs_root *root;
	FILE *out;

	struct meta_cluster *cluster;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	struct rb_root name_tree;

	struct list_head list;
	struct list_head ordered;
	size_t num_items;
	size_t num_ready;

	u64 pending_start;
	u64 pending_size;

	int compress_level;
	int done;
	int data;
	int sanitize_names;

	int error;
};

struct name {
	struct rb_node n;
	char *val;
	char *sub;
	u32 len;
};

struct mdrestore_struct {
	FILE *in;
	FILE *out;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;

	struct rb_root chunk_tree;
	struct rb_root physical_tree;
	struct list_head list;
	struct list_head overlapping_chunks;
	size_t num_items;
	u32 leafsize;
	u64 devid;
	u64 alloced_chunks;
	u64 last_physical_offset;
	u8 uuid[BTRFS_UUID_SIZE];
	u8 fsid[BTRFS_FSID_SIZE];

	int compress_method;
	int done;
	int error;
	int old_restore;
	int fixup_offset;
	int multi_devices;
	int clear_space_cache;
	struct btrfs_fs_info *info;
};

static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
				   u64 search, u64 cluster_bytenr);
static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);
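/*
 * Recompute the crc32c of a block in place; the checksum is written
 * over the first BTRFS_CRC32_SIZE bytes and covers everything past the
 * csum area.
 */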
static void csum_block(u8 *buf, size_t len)
{
	char result[BTRFS_CRC32_SIZE];
	u32 crc = ~(u32)0;
	crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, result);
	memcpy(buf, result, BTRFS_CRC32_SIZE);
}

static int has_name(struct btrfs_key *key)
{
	switch (key->type) {
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_INODE_REF_KEY:
	case BTRFS_INODE_EXTREF_KEY:
	case BTRFS_XATTR_ITEM_KEY:
		return 1;
	default:
		break;
	}

	return 0;
}

static char *generate_garbage(u32 name_len)
{
	char *buf = malloc(name_len);
	int i;

	if (!buf)
		return NULL;

	for (i = 0; i < name_len; i++) {
		char c = rand() % 94 + 33;

		if (c == '/')
			c++;
		buf[i] = c;
	}

	return buf;
}

static int name_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
	struct name *entry = rb_entry(a, struct name, n);
	struct name *ins = rb_entry(b, struct name, n);
	u32 len;

	len = min(ins->len, entry->len);
	return memcmp(ins->val, entry->val, len);
}

static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
	struct fs_chunk *entry = rb_entry(a, struct fs_chunk, l);
	struct fs_chunk *ins = rb_entry(b, struct fs_chunk, l);

	if (fuzz && ins->logical >= entry->logical &&
	    ins->logical < entry->logical + entry->bytes)
		return 0;

	if (ins->logical < entry->logical)
		return -1;
	else if (ins->logical > entry->logical)
		return 1;
	return 0;
}

static int physical_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
	struct fs_chunk *entry = rb_entry(a, struct fs_chunk, p);
	struct fs_chunk *ins = rb_entry(b, struct fs_chunk, p);

	if (fuzz && ins->physical >= entry->physical &&
	    ins->physical < entry->physical + entry->bytes)
		return 0;

	if (fuzz && entry->physical >= ins->physical &&
	    entry->physical < ins->physical + ins->bytes)
		return 0;

	if (ins->physical < entry->physical)
		return -1;
	else if (ins->physical > entry->physical)
		return 1;
	return 0;
}

static void tree_insert(struct rb_root *root, struct rb_node *ins,
			int (*cmp)(struct rb_node *a, struct rb_node *b,
				   int fuzz))
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	int dir;

	while (*p) {
		parent = *p;

		dir = cmp(*p, ins, 1);
		if (dir < 0)
			p = &(*p)->rb_left;
		else if (dir > 0)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(ins, parent, p);
	rb_insert_color(ins, root);
}

static struct rb_node *tree_search(struct rb_root *root,
				   struct rb_node *search,
				   int (*cmp)(struct rb_node *a,
					      struct rb_node *b, int fuzz),
				   int fuzz)
{
	struct rb_node *n = root->rb_node;
	int dir;

	while (n) {
		dir = cmp(n, search, fuzz);
		if (dir < 0)
			n = n->rb_left;
		else if (dir > 0)
			n = n->rb_right;
		else
			return n;
	}

	return NULL;
}

static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical, u64 *size)
{
	struct fs_chunk *fs_chunk;
	struct rb_node *entry;
	struct fs_chunk search;
	u64 offset;

	if (logical == BTRFS_SUPER_INFO_OFFSET)
		return logical;

	search.logical = logical;
	entry = tree_search(&mdres->chunk_tree, &search.l, chunk_cmp, 1);
	if (!entry) {
		if (mdres->in != stdin)
			printf("Couldn't find a chunk, using logical\n");
		return logical;
	}
	fs_chunk = rb_entry(entry, struct fs_chunk, l);
	if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
		BUG();
	offset = search.logical - fs_chunk->logical;

	*size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
	return fs_chunk->physical + offset;
}
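/*
 * Find (or invent) a same-length replacement name with the same crc32c
 * as the original so that sanitized dir items still line up with their
 * hashed index keys.  Results are cached in md->name_tree.
 */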
static char *find_collision(struct metadump_struct *md, char *name,
			    u32 name_len)
{
	struct name *val;
	struct rb_node *entry;
	struct name tmp;
	unsigned long checksum;
	int found = 0;
	int i;

	tmp.val = name;
	tmp.len = name_len;
	entry = tree_search(&md->name_tree, &tmp.n, name_cmp, 0);
	if (entry) {
		val = rb_entry(entry, struct name, n);
		free(name);
		return val->sub;
	}

	val = malloc(sizeof(struct name));
	if (!val) {
		fprintf(stderr, "Couldn't sanitize name, enomem\n");
		free(name);
		return NULL;
	}

	memset(val, 0, sizeof(*val));

	val->val = name;
	val->len = name_len;
	val->sub = malloc(name_len);
	if (!val->sub) {
		fprintf(stderr, "Couldn't sanitize name, enomem\n");
		free(val);
		free(name);
		return NULL;
	}

	checksum = crc32c(~1, val->val, name_len);
	memset(val->sub, ' ', name_len);
	i = 0;
	while (1) {
		if (crc32c(~1, val->sub, name_len) == checksum &&
		    memcmp(val->sub, val->val, val->len)) {
			found = 1;
			break;
		}

		if (val->sub[i] == 127) {
			do {
				i++;
				if (i >= name_len)
					break;
			} while (val->sub[i] == 127);

			if (i >= name_len)
				break;
			val->sub[i]++;
			if (val->sub[i] == '/')
				val->sub[i]++;
			memset(val->sub, ' ', i);
			i = 0;
			continue;
		} else {
			val->sub[i]++;
			if (val->sub[i] == '/')
				val->sub[i]++;
		}
	}

	if (!found) {
		fprintf(stderr, "Couldn't find a collision for '%.*s', "
			"generating normal garbage, it won't match indexes\n",
			val->len, val->val);
		for (i = 0; i < name_len; i++) {
			char c = rand() % 94 + 33;

			if (c == '/')
				c++;
			val->sub[i] = c;
		}
	}

	tree_insert(&md->name_tree, &val->n, name_cmp);
	return val->sub;
}

static void sanitize_dir_item(struct metadump_struct *md, struct extent_buffer *eb,
			      int slot)
{
	struct btrfs_dir_item *dir_item;
	char *buf;
	char *garbage;
	unsigned long name_ptr;
	u32 total_len;
	u32 cur = 0;
	u32 this_len;
	u32 name_len;
	int free_garbage = (md->sanitize_names == 1);

	dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	total_len = btrfs_item_size_nr(eb, slot);
	while (cur < total_len) {
		this_len = sizeof(*dir_item) +
			btrfs_dir_name_len(eb, dir_item) +
			btrfs_dir_data_len(eb, dir_item);
		name_ptr = (unsigned long)(dir_item + 1);
		name_len = btrfs_dir_name_len(eb, dir_item);

		if (md->sanitize_names > 1) {
			buf = malloc(name_len);
			if (!buf) {
				fprintf(stderr, "Couldn't sanitize name, "
					"enomem\n");
				return;
			}
			read_extent_buffer(eb, buf, name_ptr, name_len);
			garbage = find_collision(md, buf, name_len);
		} else {
			garbage = generate_garbage(name_len);
		}
		if (!garbage) {
			fprintf(stderr, "Couldn't sanitize name, enomem\n");
			return;
		}
		write_extent_buffer(eb, garbage, name_ptr, name_len);
		cur += this_len;
		dir_item = (struct btrfs_dir_item *)((char *)dir_item +
						     this_len);
		if (free_garbage)
			free(garbage);
	}
}

static void sanitize_inode_ref(struct metadump_struct *md,
			       struct extent_buffer *eb, int slot, int ext)
{
	struct btrfs_inode_extref *extref;
	struct btrfs_inode_ref *ref;
	char *garbage, *buf;
	unsigned long ptr;
	unsigned long name_ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int len;
	int free_garbage = (md->sanitize_names == 1);

	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	while (cur_offset < item_size) {
		if (ext) {
			extref = (struct btrfs_inode_extref *)(ptr +
							       cur_offset);
			name_ptr = (unsigned long)(&extref->name);
			len = btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		} else {
			ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
			len = btrfs_inode_ref_name_len(eb, ref);
			name_ptr = (unsigned long)(ref + 1);
			cur_offset += sizeof(*ref);
		}
		cur_offset += len;

		if (md->sanitize_names > 1) {
			buf = malloc(len);
			if (!buf) {
				fprintf(stderr, "Couldn't sanitize name, "
					"enomem\n");
				return;
			}
			read_extent_buffer(eb, buf, name_ptr, len);
			garbage = find_collision(md, buf, len);
		} else {
			garbage = generate_garbage(len);
		}

		if (!garbage) {
			fprintf(stderr, "Couldn't sanitize name, enomem\n");
			return;
		}
		write_extent_buffer(eb, garbage, name_ptr, len);
		if (free_garbage)
			free(garbage);
	}
}

static void sanitize_xattr(struct metadump_struct *md,
			   struct extent_buffer *eb, int slot)
{
	struct btrfs_dir_item *dir_item;
	unsigned long data_ptr;
	u32 data_len;

	dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	data_len = btrfs_dir_data_len(eb, dir_item);

	data_ptr = (unsigned long)((char *)(dir_item + 1) +
				   btrfs_dir_name_len(eb, dir_item));
	memset_extent_buffer(eb, 0, data_ptr, data_len);
}

static void sanitize_name(struct metadump_struct *md, u8 *dst,
			  struct extent_buffer *src, struct btrfs_key *key,
			  int slot)
{
	struct extent_buffer *eb;

	eb = alloc_dummy_eb(src->start, src->len);
	if (!eb) {
		fprintf(stderr, "Couldn't sanitize name, no memory\n");
		return;
	}

	memcpy(eb->data, dst, eb->len);

	switch (key->type) {
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
		sanitize_dir_item(md, eb, slot);
		break;
	case BTRFS_INODE_REF_KEY:
		sanitize_inode_ref(md, eb, slot, 0);
		break;
	case BTRFS_INODE_EXTREF_KEY:
		sanitize_inode_ref(md, eb, slot, 1);
		break;
	case BTRFS_XATTR_ITEM_KEY:
		sanitize_xattr(md, eb, slot);
		break;
	default:
		break;
	}

	memcpy(dst, eb->data, eb->len);
	free(eb);
}

/*
 * zero inline extents and csum items
 */
static void zero_items(struct metadump_struct *md, u8 *dst,
		       struct extent_buffer *src)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_item *item;
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(src);
	size_t size;
	unsigned long ptr;
	int i, extent_type;

	for (i = 0; i < nritems; i++) {
		item = btrfs_item_nr(i);
		btrfs_item_key_to_cpu(src, &key, i);
		if (key.type == BTRFS_CSUM_ITEM_KEY) {
			size = btrfs_item_size_nr(src, i);
			memset(dst + btrfs_leaf_data(src) +
			       btrfs_item_offset_nr(src, i), 0, size);
			continue;
		}

		if (md->sanitize_names && has_name(&key)) {
			sanitize_name(md, dst, src, &key, i);
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(src, fi);
		if (extent_type != BTRFS_FILE_EXTENT_INLINE)
			continue;

		ptr = btrfs_file_extent_inline_start(fi);
		size = btrfs_file_extent_inline_item_len(src, item);
		memset(dst + ptr, 0, size);
	}
}

/*
 * copy buffer and zero useless data in the buffer
 */
static void copy_buffer(struct metadump_struct *md, u8 *dst,
			struct extent_buffer *src)
{
	int level;
	size_t size;
	u32 nritems;

	memcpy(dst, src->data, src->len);
	if (src->start == BTRFS_SUPER_INFO_OFFSET)
		return;

	level = btrfs_header_level(src);
	nritems = btrfs_header_nritems(src);

	if (nritems == 0) {
		size = sizeof(struct btrfs_header);
		memset(dst + size, 0, src->len - size);
	} else if (level == 0) {
		size = btrfs_leaf_data(src) +
			btrfs_item_offset_nr(src, nritems - 1) -
			btrfs_item_nr_offset(nritems);
		memset(dst + btrfs_item_nr_offset(nritems), 0, size);
		zero_items(md, dst, src);
	} else {
		size = offsetof(struct btrfs_node, ptrs) +
			sizeof(struct btrfs_key_ptr) * nritems;
		memset(dst + size, 0, src->len - size);
	}
	csum_block(dst, src->len);
}
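/*
 * Dump-side worker thread: pull queued extents off md->list and zlib
 * compress them when a compression level was requested.
 */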
static void *dump_worker(void *data)
{
	struct metadump_struct *md = (struct metadump_struct *)data;
	struct async_work *async;
	int ret;

	while (1) {
		pthread_mutex_lock(&md->mutex);
		while (list_empty(&md->list)) {
			if (md->done) {
				pthread_mutex_unlock(&md->mutex);
				goto out;
			}
			pthread_cond_wait(&md->cond, &md->mutex);
		}
		async = list_entry(md->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&md->mutex);

		if (md->compress_level > 0) {
			u8 *orig = async->buffer;

			async->bufsize = compressBound(async->size);
			async->buffer = malloc(async->bufsize);
			if (!async->buffer) {
				fprintf(stderr, "Error allocing buffer\n");
				pthread_mutex_lock(&md->mutex);
				if (!md->error)
					md->error = -ENOMEM;
				pthread_mutex_unlock(&md->mutex);
				pthread_exit(NULL);
			}

			ret = compress2(async->buffer,
					(unsigned long *)&async->bufsize,
					orig, async->size, md->compress_level);

			if (ret != Z_OK)
				async->error = 1;

			free(orig);
		}

		pthread_mutex_lock(&md->mutex);
		md->num_ready++;
		pthread_mutex_unlock(&md->mutex);
	}
out:
	pthread_exit(NULL);
}

static void meta_cluster_init(struct metadump_struct *md, u64 start)
{
	struct meta_cluster_header *header;

	md->num_items = 0;
	md->num_ready = 0;
	header = &md->cluster->header;
	header->magic = cpu_to_le64(HEADER_MAGIC);
	header->bytenr = cpu_to_le64(start);
	header->nritems = cpu_to_le32(0);
	header->compress = md->compress_level > 0 ?
			   COMPRESS_ZLIB : COMPRESS_NONE;
}

static void metadump_destroy(struct metadump_struct *md, int num_threads)
{
	int i;
	struct rb_node *n;

	pthread_mutex_lock(&md->mutex);
	md->done = 1;
	pthread_cond_broadcast(&md->cond);
	pthread_mutex_unlock(&md->mutex);

	for (i = 0; i < num_threads; i++)
		pthread_join(md->threads[i], NULL);

	pthread_cond_destroy(&md->cond);
	pthread_mutex_destroy(&md->mutex);

	while ((n = rb_first(&md->name_tree))) {
		struct name *name;

		name = rb_entry(n, struct name, n);
		rb_erase(n, &md->name_tree);
		free(name->val);
		free(name->sub);
		free(name);
	}
	free(md->threads);
	free(md->cluster);
}

static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
			 FILE *out, int num_threads, int compress_level,
			 int sanitize_names)
{
	int i, ret = 0;

	memset(md, 0, sizeof(*md));
	pthread_cond_init(&md->cond, NULL);
	pthread_mutex_init(&md->mutex, NULL);
	INIT_LIST_HEAD(&md->list);
	INIT_LIST_HEAD(&md->ordered);
	md->root = root;
	md->out = out;
	md->pending_start = (u64)-1;
	md->compress_level = compress_level;
	md->cluster = calloc(1, BLOCK_SIZE);
	md->sanitize_names = sanitize_names;
	if (sanitize_names > 1)
		crc32c_optimization_init();

	if (!md->cluster) {
		pthread_cond_destroy(&md->cond);
		pthread_mutex_destroy(&md->mutex);
		return -ENOMEM;
	}

	meta_cluster_init(md, 0);
	if (!num_threads)
		return 0;

	md->name_tree.rb_node = NULL;
	md->num_threads = num_threads;
	md->threads = calloc(num_threads, sizeof(pthread_t));
	if (!md->threads) {
		free(md->cluster);
		pthread_cond_destroy(&md->cond);
		pthread_mutex_destroy(&md->mutex);
		return -ENOMEM;
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(md->threads + i, NULL, dump_worker, md);
		if (ret)
			break;
	}

	if (ret)
		metadump_destroy(md, i + 1);

	return ret;
}

static int write_zero(FILE *out, size_t size)
{
	static char zero[BLOCK_SIZE];
	return fwrite(zero, size, 1, out);
}
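/*
 * Called with md->mutex held: wait for the workers to finish with the
 * current cluster, write out the index block and then each buffer in
 * order, padding the cluster to a BLOCK_SIZE boundary.
 */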
static int write_buffers(struct metadump_struct *md, u64 *next)
{
	struct meta_cluster_header *header = &md->cluster->header;
	struct meta_cluster_item *item;
	struct async_work *async;
	u64 bytenr = 0;
	u32 nritems = 0;
	int ret;
	int err = 0;

	if (list_empty(&md->ordered))
		goto out;

	/* wait until all buffers are compressed */
	while (!err && md->num_items > md->num_ready) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&md->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&md->mutex);
		err = md->error;
	}

	if (err) {
		fprintf(stderr, "One of the threads errored out %s\n",
			strerror(err));
		goto out;
	}

	/* setup and write index block */
	list_for_each_entry(async, &md->ordered, ordered) {
		item = md->cluster->items + nritems;
		item->bytenr = cpu_to_le64(async->start);
		item->size = cpu_to_le32(async->bufsize);
		nritems++;
	}
	header->nritems = cpu_to_le32(nritems);

	ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
	if (ret != 1) {
		fprintf(stderr, "Error writing out cluster: %d\n", errno);
		return -EIO;
	}

	/* write buffers */
	bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	while (!list_empty(&md->ordered)) {
		async = list_entry(md->ordered.next, struct async_work,
				   ordered);
		list_del_init(&async->ordered);

		bytenr += async->bufsize;
		if (!err)
			ret = fwrite(async->buffer, async->bufsize, 1,
				     md->out);
		if (ret != 1) {
			err = -EIO;
			ret = 0;
			fprintf(stderr, "Error writing out cluster: %d\n",
				errno);
		}

		free(async->buffer);
		free(async);
	}

	/* zero unused space in the last block */
	if (!err && bytenr & BLOCK_MASK) {
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = write_zero(md->out, size);
		if (ret != 1) {
			fprintf(stderr, "Error zeroing out buffer: %d\n",
				errno);
			err = -EIO;
		}
	}
out:
	*next = bytenr;
	return err;
}

static int read_data_extent(struct metadump_struct *md,
			    struct async_work *async)
{
	struct btrfs_root *root = md->root;
	u64 bytes_left = async->size;
	u64 logical = async->start;
	u64 offset = 0;
	u64 read_len;
	int num_copies;
	int cur_mirror;
	int ret;

	num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, logical,
				      bytes_left);

	/* Try our best to read data, just like read_tree_block() */
	for (cur_mirror = 0; cur_mirror < num_copies; cur_mirror++) {
		while (bytes_left) {
			read_len = bytes_left;
			ret = read_extent_data(root,
					(char *)(async->buffer + offset),
					logical, &read_len, cur_mirror);
			if (ret < 0)
				break;
			offset += read_len;
			logical += read_len;
			bytes_left -= read_len;
		}
	}
	if (bytes_left)
		return -EIO;
	return 0;
}

static int get_dev_fd(struct btrfs_root *root)
{
	struct btrfs_device *dev;

	dev = list_first_entry(&root->fs_info->fs_devices->devices,
			       struct btrfs_device, dev_list);
	return dev->fd;
}
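/*
 * Read the currently pending run of extents (data via read_data_extent,
 * the super block straight from a device, metadata through
 * read_tree_block + copy_buffer) and queue it for the workers; once a
 * cluster's worth of items is ready, or on the final flush, write the
 * cluster out.
 */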
static int flush_pending(struct metadump_struct *md, int done)
{
	struct async_work *async = NULL;
	struct extent_buffer *eb;
	u64 blocksize = md->root->nodesize;
	u64 start;
	u64 size;
	size_t offset;
	int ret = 0;

	if (md->pending_size) {
		async = calloc(1, sizeof(*async));
		if (!async)
			return -ENOMEM;

		async->start = md->pending_start;
		async->size = md->pending_size;
		async->bufsize = async->size;
		async->buffer = malloc(async->bufsize);
		if (!async->buffer) {
			free(async);
			return -ENOMEM;
		}
		offset = 0;
		start = async->start;
		size = async->size;

		if (md->data) {
			ret = read_data_extent(md, async);
			if (ret) {
				free(async->buffer);
				free(async);
				return ret;
			}
		}

		/*
		 * Balance can make the mapping not cover the super block, so
		 * just copy directly from one of the devices.
		 */
		if (start == BTRFS_SUPER_INFO_OFFSET) {
			int fd = get_dev_fd(md->root);

			ret = pread64(fd, async->buffer, size, start);
			if (ret < size) {
				free(async->buffer);
				free(async);
				fprintf(stderr, "Error reading superblock\n");
				return -EIO;
			}
			size = 0;
			ret = 0;
		}

		while (!md->data && size > 0) {
			u64 this_read = min(blocksize, size);
			eb = read_tree_block(md->root, start, this_read, 0);
			if (!extent_buffer_uptodate(eb)) {
				free(async->buffer);
				free(async);
				fprintf(stderr,
					"Error reading metadata block\n");
				return -EIO;
			}
			copy_buffer(md, async->buffer + offset, eb);
			free_extent_buffer(eb);
			start += this_read;
			offset += this_read;
			size -= this_read;
		}

		md->pending_start = (u64)-1;
		md->pending_size = 0;
	} else if (!done) {
		return 0;
	}

	pthread_mutex_lock(&md->mutex);
	if (async) {
		list_add_tail(&async->ordered, &md->ordered);
		md->num_items++;
		if (md->compress_level > 0) {
			list_add_tail(&async->list, &md->list);
			pthread_cond_signal(&md->cond);
		} else {
			md->num_ready++;
		}
	}
	if (md->num_items >= ITEMS_PER_CLUSTER || done) {
		ret = write_buffers(md, &start);
		if (ret)
			fprintf(stderr, "Error writing buffers %d\n",
				errno);
		else
			meta_cluster_init(md, start);
	}
	pthread_mutex_unlock(&md->mutex);
	return ret;
}

static int add_extent(u64 start, u64 size, struct metadump_struct *md,
		      int data)
{
	int ret;
	if (md->data != data ||
	    md->pending_size + size > MAX_PENDING_SIZE ||
	    md->pending_start + md->pending_size != start) {
		ret = flush_pending(md, 0);
		if (ret)
			return ret;
		md->pending_start = start;
	}
	readahead_tree_block(md->root, start, size, 0);
	md->pending_size += size;
	md->data = data;
	return 0;
}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int is_tree_block(struct btrfs_root *extent_root,
			 struct btrfs_path *path, u64 bytenr)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 ref_objectid;
	int ret;

	leaf = path->nodes[0];
	while (1) {
		struct btrfs_extent_ref_v0 *ref_item;
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				return ret;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr)
			break;
		if (key.type != BTRFS_EXTENT_REF_V0_KEY)
			continue;
		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref_v0);
		ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
			return 1;
		break;
	}
	return 0;
}
#endif

static int copy_tree_blocks(struct btrfs_root *root, struct extent_buffer *eb,
			    struct metadump_struct *metadump, int root_tree)
{
	struct extent_buffer *tmp;
	struct btrfs_root_item *ri;
	struct btrfs_key key;
	u64 bytenr;
	int level;
	int nritems = 0;
	int i = 0;
	int ret;

	ret = add_extent(btrfs_header_bytenr(eb), root->leafsize, metadump, 0);
	if (ret) {
		fprintf(stderr, "Error adding metadata block\n");
		return ret;
	}

	if (btrfs_header_level(eb) == 0 && !root_tree)
		return 0;

	level = btrfs_header_level(eb);
	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(eb, &key, i);
			if (key.type != BTRFS_ROOT_ITEM_KEY)
				continue;
			ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
			bytenr = btrfs_disk_root_bytenr(eb, ri);
			tmp = read_tree_block(root, bytenr, root->leafsize, 0);
			if (!extent_buffer_uptodate(tmp)) {
				fprintf(stderr,
					"Error reading log root block\n");
				return -EIO;
			}
			ret = copy_tree_blocks(root, tmp, metadump, 0);
			free_extent_buffer(tmp);
			if (ret)
				return ret;
		} else {
			bytenr = btrfs_node_blockptr(eb, i);
			tmp = read_tree_block(root, bytenr, root->leafsize, 0);
			if (!extent_buffer_uptodate(tmp)) {
				fprintf(stderr, "Error reading log block\n");
				return -EIO;
			}
			ret = copy_tree_blocks(root, tmp, metadump, root_tree);
			free_extent_buffer(tmp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int copy_log_trees(struct btrfs_root *root,
			  struct metadump_struct *metadump,
			  struct btrfs_path *path)
{
	u64 blocknr = btrfs_super_log_root(root->fs_info->super_copy);

	if (blocknr == 0)
		return 0;

	if (!root->fs_info->log_root_tree ||
	    !root->fs_info->log_root_tree->node) {
		fprintf(stderr, "Error copying tree log, it wasn't setup\n");
		return -EIO;
	}

	return copy_tree_blocks(root, root->fs_info->log_root_tree->node,
				metadump, 1);
}

static int copy_space_cache(struct btrfs_root *root,
			    struct metadump_struct *metadump,
			    struct btrfs_path *path)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr, num_bytes;
	int ret;

	root = root->fs_info->tree_root;

	key.objectid = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Error searching for free space inode %d\n",
			ret);
		return ret;
	}

	leaf = path->nodes[0];

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				fprintf(stderr, "Error going to next leaf "
					"%d\n", ret);
				return ret;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			continue;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) !=
		    BTRFS_FILE_EXTENT_REG) {
			path->slots[0]++;
			continue;
		}

		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		ret = add_extent(bytenr, num_bytes, metadump, 1);
		if (ret) {
			fprintf(stderr, "Error adding space cache blocks %d\n",
				ret);
			btrfs_release_path(path);
			return ret;
		}
		path->slots[0]++;
	}

	return 0;
}
static int copy_from_extent_tree(struct metadump_struct *metadump,
				 struct btrfs_path *path)
{
	struct btrfs_root *extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	int ret;

	extent_root = metadump->root->fs_info->extent_root;
	bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Error searching extent root %d\n", ret);
		return ret;
	}
	ret = 0;

	leaf = path->nodes[0];

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0) {
				fprintf(stderr, "Error going to next leaf "
					"%d\n", ret);
				break;
			}
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid < bytenr ||
		    (key.type != BTRFS_EXTENT_ITEM_KEY &&
		     key.type != BTRFS_METADATA_ITEM_KEY)) {
			path->slots[0]++;
			continue;
		}

		bytenr = key.objectid;
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = extent_root->leafsize;
		else
			num_bytes = key.offset;

		if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			if (btrfs_extent_flags(leaf, ei) &
			    BTRFS_EXTENT_FLAG_TREE_BLOCK) {
				ret = add_extent(bytenr, num_bytes, metadump,
						 0);
				if (ret) {
					fprintf(stderr, "Error adding block "
						"%d\n", ret);
					break;
				}
			}
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			ret = is_tree_block(extent_root, path, bytenr);
			if (ret < 0) {
				fprintf(stderr, "Error checking tree block "
					"%d\n", ret);
				break;
			}

			if (ret) {
				ret = add_extent(bytenr, num_bytes, metadump,
						 0);
				if (ret) {
					fprintf(stderr, "Error adding block "
						"%d\n", ret);
					break;
				}
			}
			ret = 0;
#else
			fprintf(stderr, "Either extent tree corruption or "
				"you haven't built with V0 support\n");
			ret = -EIO;
			break;
#endif
		}
		bytenr += num_bytes;
	}

	btrfs_release_path(path);

	return ret;
}
static int create_metadump(const char *input, FILE *out, int num_threads,
			   int compress_level, int sanitize, int walk_trees)
{
	struct btrfs_root *root;
	struct btrfs_path *path = NULL;
	struct metadump_struct metadump;
	int ret;
	int err = 0;

	root = open_ctree(input, 0, 0);
	if (!root) {
		fprintf(stderr, "Open ctree failed\n");
		return -EIO;
	}

	BUG_ON(root->nodesize != root->leafsize);

	ret = metadump_init(&metadump, root, out, num_threads,
			    compress_level, sanitize);
	if (ret) {
		fprintf(stderr, "Error initing metadump %d\n", ret);
		close_ctree(root);
		return ret;
	}

	ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
			 &metadump, 0);
	if (ret) {
		fprintf(stderr, "Error adding metadata %d\n", ret);
		err = ret;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		fprintf(stderr, "Out of memory allocing path\n");
		err = -ENOMEM;
		goto out;
	}

	if (walk_trees) {
		ret = copy_tree_blocks(root, root->fs_info->chunk_root->node,
				       &metadump, 1);
		if (ret) {
			err = ret;
			goto out;
		}

		ret = copy_tree_blocks(root, root->fs_info->tree_root->node,
				       &metadump, 1);
		if (ret) {
			err = ret;
			goto out;
		}
	} else {
		ret = copy_from_extent_tree(&metadump, path);
		if (ret) {
			err = ret;
			goto out;
		}
	}

	ret = copy_log_trees(root, &metadump, path);
	if (ret) {
		err = ret;
		goto out;
	}

	ret = copy_space_cache(root, &metadump, path);
out:
	ret = flush_pending(&metadump, 1);
	if (ret) {
		if (!err)
			err = ret;
		fprintf(stderr, "Error flushing pending %d\n", ret);
	}

	metadump_destroy(&metadump, num_threads);

	btrfs_free_path(path);
	ret = close_ctree(root);
	return err ? err : ret;
}
static void update_super_old(u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *key;
	u32 sectorsize = btrfs_super_sectorsize(super);
	u64 flags = btrfs_super_flags(super);

	flags |= BTRFS_SUPER_FLAG_METADUMP;
	btrfs_set_super_flags(super, flags);

	key = (struct btrfs_disk_key *)(super->sys_chunk_array);
	chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
				       sizeof(struct btrfs_disk_key));

	btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
	btrfs_set_disk_key_offset(key, 0);

	btrfs_set_stack_chunk_length(chunk, (u64)-1);
	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
	btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
	btrfs_set_stack_chunk_io_align(chunk, sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
	btrfs_set_stack_chunk_num_stripes(chunk, 1);
	btrfs_set_stack_chunk_sub_stripes(chunk, 0);
	chunk->stripe.devid = super->dev_item.devid;
	btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
	memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
	btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
	csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
}
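/*
 * Rewrite the super block's sys_chunk_array for restore: every chunk is
 * collapsed to a single stripe on the target device, with its stripe
 * offset remapped through the chunk tree built from the image.
 */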
static int update_super(struct mdrestore_struct *mdres, u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *disk_key;
	struct btrfs_key key;
	u64 flags = btrfs_super_flags(super);
	u32 new_array_size = 0;
	u32 array_size;
	u32 cur = 0;
	u8 *ptr, *write_ptr;
	int old_num_stripes;

	write_ptr = ptr = super->sys_chunk_array;
	array_size = btrfs_super_sys_array_size(super);

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		new_array_size += sizeof(*disk_key);
		memmove(write_ptr, ptr, sizeof(*disk_key));

		write_ptr += sizeof(*disk_key);
		ptr += sizeof(*disk_key);
		cur += sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			u64 physical, size = 0;

			chunk = (struct btrfs_chunk *)ptr;
			old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			chunk = (struct btrfs_chunk *)write_ptr;

			memmove(write_ptr, ptr, sizeof(*chunk));
			btrfs_set_stack_chunk_num_stripes(chunk, 1);
			btrfs_set_stack_chunk_sub_stripes(chunk, 0);
			btrfs_set_stack_chunk_type(chunk,
						   BTRFS_BLOCK_GROUP_SYSTEM);
			btrfs_set_stack_stripe_devid(&chunk->stripe,
						     super->dev_item.devid);
			physical = logical_to_physical(mdres, key.offset,
						       &size);
			if (size != (u64)-1)
				btrfs_set_stack_stripe_offset(&chunk->stripe,
							      physical);
			memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
			       BTRFS_UUID_SIZE);
			new_array_size += sizeof(*chunk);
		} else {
			fprintf(stderr, "Bogus key in the sys chunk array "
				"%d\n", key.type);
			return -EIO;
		}
		write_ptr += sizeof(*chunk);
		ptr += btrfs_chunk_item_size(old_num_stripes);
		cur += btrfs_chunk_item_size(old_num_stripes);
	}

	if (mdres->clear_space_cache)
		btrfs_set_super_cache_generation(super, 0);

	flags |= BTRFS_SUPER_FLAG_METADUMP_V2;
	btrfs_set_super_flags(super, flags);
	btrfs_set_super_sys_array_size(super, new_array_size);
	csum_block(buffer, BTRFS_SUPER_INFO_SIZE);

	return 0;
}

static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size)
{
	struct extent_buffer *eb;

	eb = calloc(1, sizeof(struct extent_buffer) + size);
	if (!eb)
		return NULL;

	eb->start = bytenr;
	eb->len = size;
	return eb;
}
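/*
 * Shrink the item at 'slot' to new_size, shifting the packed item data
 * and bumping the data offsets of the items from 'slot' onwards.
 */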
static void truncate_item(struct extent_buffer *eb, int slot, u32 new_size)
{
	struct btrfs_item *item;
	u32 nritems;
	u32 old_size;
	u32 old_data_start;
	u32 size_diff;
	u32 data_end;
	int i;

	old_size = btrfs_item_size_nr(eb, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(eb);
	data_end = btrfs_item_offset_nr(eb, nritems - 1);

	old_data_start = btrfs_item_offset_nr(eb, slot);
	size_diff = old_size - new_size;

	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(i);
		ioff = btrfs_item_offset(eb, item);
		btrfs_set_item_offset(eb, item, ioff + size_diff);
	}

	memmove_extent_buffer(eb, btrfs_leaf_data(eb) + data_end + size_diff,
			      btrfs_leaf_data(eb) + data_end,
			      old_data_start + new_size - data_end);
	item = btrfs_item_nr(slot);
	btrfs_set_item_size(eb, item, new_size);
}
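/*
 * Walk a run of restored chunk tree leaves, truncate each chunk item to
 * a single stripe on the restore device, zero the RAID profile bits and
 * re-checksum the block.
 */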
static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
				  struct async_work *async, u8 *buffer,
				  size_t size)
{
	struct extent_buffer *eb;
	size_t size_left = size;
	u64 bytenr = async->start;
	int i;

	if (size_left % mdres->leafsize)
		return 0;

	eb = alloc_dummy_eb(bytenr, mdres->leafsize);
	if (!eb)
		return -ENOMEM;

	while (size_left) {
		eb->start = bytenr;
		memcpy(eb->data, buffer, mdres->leafsize);

		if (btrfs_header_bytenr(eb) != bytenr)
			break;
		if (memcmp(mdres->fsid,
			   eb->data + offsetof(struct btrfs_header, fsid),
			   BTRFS_FSID_SIZE))
			break;

		if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID)
			goto next;

		if (btrfs_header_level(eb) != 0)
			goto next;

		for (i = 0; i < btrfs_header_nritems(eb); i++) {
			struct btrfs_chunk chunk;
			struct btrfs_key key;
			u64 type, physical, size = (u64)-1;

			btrfs_item_key_to_cpu(eb, &key, i);
			if (key.type != BTRFS_CHUNK_ITEM_KEY)
				continue;
			truncate_item(eb, i, sizeof(chunk));
			read_extent_buffer(eb, &chunk,
					   btrfs_item_ptr_offset(eb, i),
					   sizeof(chunk));

			size = 0;
			physical = logical_to_physical(mdres, key.offset,
						       &size);

			/* Zero out the RAID profile */
			type = btrfs_stack_chunk_type(&chunk);
			type &= (BTRFS_BLOCK_GROUP_DATA |
				 BTRFS_BLOCK_GROUP_SYSTEM |
				 BTRFS_BLOCK_GROUP_METADATA |
				 BTRFS_BLOCK_GROUP_DUP);
			btrfs_set_stack_chunk_type(&chunk, type);

			btrfs_set_stack_chunk_num_stripes(&chunk, 1);
			btrfs_set_stack_chunk_sub_stripes(&chunk, 0);
			btrfs_set_stack_stripe_devid(&chunk.stripe, mdres->devid);
			if (size != (u64)-1)
				btrfs_set_stack_stripe_offset(&chunk.stripe,
							      physical);
			memcpy(chunk.stripe.dev_uuid, mdres->uuid,
			       BTRFS_UUID_SIZE);
			write_extent_buffer(eb, &chunk,
					    btrfs_item_ptr_offset(eb, i),
					    sizeof(chunk));
		}
		memcpy(buffer, eb->data, eb->len);
		csum_block(buffer, eb->len);
next:
		size_left -= mdres->leafsize;
		buffer += mdres->leafsize;
		bytenr += mdres->leafsize;
	}

	free(eb);
	return 0;
}

static void write_backup_supers(int fd, u8 *buf)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buf;
	struct stat st;
	u64 size;
	u64 bytenr;
	int i;
	int ret;

	if (fstat(fd, &st)) {
		fprintf(stderr, "Couldn't stat restore point, won't be able "
			"to write backup supers: %d\n", errno);
		return;
	}

	size = btrfs_device_size(fd, &st);

	for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
			break;
		btrfs_set_super_bytenr(super, bytenr);
		csum_block(buf, BTRFS_SUPER_INFO_SIZE);
		ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
		if (ret < BTRFS_SUPER_INFO_SIZE) {
			if (ret < 0)
				fprintf(stderr, "Problem writing out backup "
					"super block %d, err %d\n", i, errno);
			else
				fprintf(stderr, "Short write writing out "
					"backup super block\n");
			break;
		}
	}
}
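/*
 * Restore-side worker thread: decompress queued items if needed, fix up
 * super and chunk tree blocks, and write each item to its (possibly
 * remapped) physical offset on the target.
 */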
static void *restore_worker(void *data)
{
	struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
	struct async_work *async;
	size_t size;
	u8 *buffer;
	u8 *outbuf;
	int outfd;
	int ret;
	int compress_size = MAX_PENDING_SIZE * 4;

	outfd = fileno(mdres->out);
	buffer = malloc(compress_size);
	if (!buffer) {
		fprintf(stderr, "Error allocing buffer\n");
		pthread_mutex_lock(&mdres->mutex);
		if (!mdres->error)
			mdres->error = -ENOMEM;
		pthread_mutex_unlock(&mdres->mutex);
		pthread_exit(NULL);
	}

	while (1) {
		u64 bytenr;
		off_t offset = 0;
		int err = 0;

		pthread_mutex_lock(&mdres->mutex);
		while (!mdres->leafsize || list_empty(&mdres->list)) {
			if (mdres->done) {
				pthread_mutex_unlock(&mdres->mutex);
				goto out;
			}
			pthread_cond_wait(&mdres->cond, &mdres->mutex);
		}
		async = list_entry(mdres->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&mdres->mutex);

		if (mdres->compress_method == COMPRESS_ZLIB) {
			size = compress_size;
			ret = uncompress(buffer, (unsigned long *)&size,
					 async->buffer, async->bufsize);
			if (ret != Z_OK) {
				fprintf(stderr, "Error decompressing %d\n",
					ret);
				err = -EIO;
			}
			outbuf = buffer;
		} else {
			outbuf = async->buffer;
			size = async->bufsize;
		}

		if (!mdres->multi_devices) {
			if (async->start == BTRFS_SUPER_INFO_OFFSET) {
				if (mdres->old_restore) {
					update_super_old(outbuf);
				} else {
					ret = update_super(mdres, outbuf);
					if (ret)
						err = ret;
				}
			} else if (!mdres->old_restore) {
				ret = fixup_chunk_tree_block(mdres, async, outbuf, size);
				if (ret)
					err = ret;
			}
		}

		if (!mdres->fixup_offset) {
			while (size) {
				u64 chunk_size = size;
				if (!mdres->multi_devices && !mdres->old_restore)
					bytenr = logical_to_physical(mdres,
								     async->start + offset,
								     &chunk_size);
				else
					bytenr = async->start + offset;

				ret = pwrite64(outfd, outbuf+offset, chunk_size,
					       bytenr);
				if (ret != chunk_size) {
					if (ret < 0) {
						fprintf(stderr, "Error writing to "
							"device %d\n", errno);
						err = errno;
						break;
					} else {
						fprintf(stderr, "Short write\n");
						err = -EIO;
						break;
					}
				}
				size -= chunk_size;
				offset += chunk_size;
			}
		} else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
			ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
			if (ret) {
				printk("Error writing data\n");
				exit(1);
			}
		}

		/* backup super blocks are already there at fixup_offset stage */
		if (!mdres->multi_devices && async->start == BTRFS_SUPER_INFO_OFFSET)
			write_backup_supers(outfd, outbuf);

		pthread_mutex_lock(&mdres->mutex);
		if (err && !mdres->error)
			mdres->error = err;
		mdres->num_items--;
		pthread_mutex_unlock(&mdres->mutex);

		free(async->buffer);
		free(async);
	}
out:
	free(buffer);
	pthread_exit(NULL);
}
static void mdrestore_destroy(struct mdrestore_struct *mdres, int num_threads)
{
	struct rb_node *n;
	int i;

	while ((n = rb_first(&mdres->chunk_tree))) {
		struct fs_chunk *entry;

		entry = rb_entry(n, struct fs_chunk, l);
		rb_erase(n, &mdres->chunk_tree);
		rb_erase(&entry->p, &mdres->physical_tree);
		free(entry);
	}
	pthread_mutex_lock(&mdres->mutex);
	mdres->done = 1;
	pthread_cond_broadcast(&mdres->cond);
	pthread_mutex_unlock(&mdres->mutex);

	for (i = 0; i < num_threads; i++)
		pthread_join(mdres->threads[i], NULL);

	pthread_cond_destroy(&mdres->cond);
	pthread_mutex_destroy(&mdres->mutex);
	free(mdres->threads);
}

static int mdrestore_init(struct mdrestore_struct *mdres,
			  FILE *in, FILE *out, int old_restore,
			  int num_threads, int fixup_offset,
			  struct btrfs_fs_info *info, int multi_devices)
{
	int i, ret = 0;

	memset(mdres, 0, sizeof(*mdres));
	pthread_cond_init(&mdres->cond, NULL);
	pthread_mutex_init(&mdres->mutex, NULL);
	INIT_LIST_HEAD(&mdres->list);
	INIT_LIST_HEAD(&mdres->overlapping_chunks);
	mdres->in = in;
	mdres->out = out;
	mdres->old_restore = old_restore;
	mdres->chunk_tree.rb_node = NULL;
	mdres->fixup_offset = fixup_offset;
	mdres->info = info;
	mdres->multi_devices = multi_devices;
	mdres->clear_space_cache = 0;
	mdres->last_physical_offset = 0;
	mdres->alloced_chunks = 0;

	if (!num_threads)
		return 0;

	mdres->num_threads = num_threads;
	mdres->threads = calloc(num_threads, sizeof(pthread_t));
	if (!mdres->threads)
		return -ENOMEM;
	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(mdres->threads + i, NULL, restore_worker,
				     mdres);
		if (ret)
			break;
	}
	if (ret)
		mdrestore_destroy(mdres, i + 1);
	return ret;
}

static int fill_mdres_info(struct mdrestore_struct *mdres,
			   struct async_work *async)
{
	struct btrfs_super_block *super;
	u8 *buffer = NULL;
	u8 *outbuf;
	int ret;

	/* We've already been initialized */
	if (mdres->leafsize)
		return 0;

	if (mdres->compress_method == COMPRESS_ZLIB) {
		size_t size = MAX_PENDING_SIZE * 2;

		buffer = malloc(MAX_PENDING_SIZE * 2);
		if (!buffer)
			return -ENOMEM;
		ret = uncompress(buffer, (unsigned long *)&size,
				 async->buffer, async->bufsize);
		if (ret != Z_OK) {
			fprintf(stderr, "Error decompressing %d\n", ret);
			free(buffer);
			return -EIO;
		}
		outbuf = buffer;
	} else {
		outbuf = async->buffer;
	}

	super = (struct btrfs_super_block *)outbuf;
	mdres->leafsize = btrfs_super_leafsize(super);
	memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
	memcpy(mdres->uuid, super->dev_item.uuid,
	       BTRFS_UUID_SIZE);
	mdres->devid = le64_to_cpu(super->dev_item.devid);
	free(buffer);
	return 0;
}
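/*
 * Parse one cluster: read each item's buffer, pick up the fs info from
 * the super block item the first time it is seen, hand everything to
 * the restore workers and advance *next past the cluster's padding.
 */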
static int add_cluster(struct meta_cluster *cluster,
		       struct mdrestore_struct *mdres, u64 *next)
{
	struct meta_cluster_item *item;
	struct meta_cluster_header *header = &cluster->header;
	struct async_work *async;
	u64 bytenr;
	u32 i, nritems;
	int ret;

	mdres->compress_method = header->compress;

	bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	nritems = le32_to_cpu(header->nritems);
	for (i = 0; i < nritems; i++) {
		item = &cluster->items[i];
		async = calloc(1, sizeof(*async));
		if (!async) {
			fprintf(stderr, "Error allocating async\n");
			return -ENOMEM;
		}
		async->start = le64_to_cpu(item->bytenr);
		async->bufsize = le32_to_cpu(item->size);
		async->buffer = malloc(async->bufsize);
		if (!async->buffer) {
			fprintf(stderr, "Error allocing async buffer\n");
			free(async);
			return -ENOMEM;
		}
		ret = fread(async->buffer, async->bufsize, 1, mdres->in);
		if (ret != 1) {
			fprintf(stderr, "Error reading buffer %d\n", errno);
			free(async->buffer);
			free(async);
			return -EIO;
		}
		bytenr += async->bufsize;

		pthread_mutex_lock(&mdres->mutex);
		if (async->start == BTRFS_SUPER_INFO_OFFSET) {
			ret = fill_mdres_info(mdres, async);
			if (ret) {
				fprintf(stderr, "Error setting up restore\n");
				pthread_mutex_unlock(&mdres->mutex);
				free(async->buffer);
				free(async);
				return ret;
			}
		}
		list_add_tail(&async->list, &mdres->list);
		mdres->num_items++;
		pthread_cond_signal(&mdres->cond);
		pthread_mutex_unlock(&mdres->mutex);
	}
	if (bytenr & BLOCK_MASK) {
		char buffer[BLOCK_MASK];
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = fread(buffer, size, 1, mdres->in);
		if (ret != 1) {
			fprintf(stderr, "Error reading in buffer %d\n", errno);
			return -EIO;
		}
	}
	*next = bytenr;
	return 0;
}
static int wait_for_worker(struct mdrestore_struct *mdres)
{
	int ret = 0;

	pthread_mutex_lock(&mdres->mutex);
	ret = mdres->error;
	while (!ret && mdres->num_items > 0) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&mdres->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&mdres->mutex);
		ret = mdres->error;
	}
	pthread_mutex_unlock(&mdres->mutex);
	return ret;
}

static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
			    u64 bytenr, u64 item_bytenr, u32 bufsize,
			    u64 cluster_bytenr)
{
	struct extent_buffer *eb;
	int ret = 0;
	int i;

	eb = alloc_dummy_eb(bytenr, mdres->leafsize);
	if (!eb) {
		ret = -ENOMEM;
		goto out;
	}

	while (item_bytenr != bytenr) {
		buffer += mdres->leafsize;
		item_bytenr += mdres->leafsize;
	}

	memcpy(eb->data, buffer, mdres->leafsize);
	if (btrfs_header_bytenr(eb) != bytenr) {
		fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
		ret = -EIO;
		goto out;
	}

	if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
		   BTRFS_FSID_SIZE)) {
		fprintf(stderr, "Fsid doesn't match\n");
		ret = -EIO;
		goto out;
	}

	if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
		fprintf(stderr, "Does not belong to the chunk tree\n");
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		struct btrfs_chunk chunk;
		struct fs_chunk *fs_chunk;
		struct btrfs_key key;

		if (btrfs_header_level(eb)) {
			u64 blockptr = btrfs_node_blockptr(eb, i);

			ret = search_for_chunk_blocks(mdres, blockptr,
						      cluster_bytenr);
			if (ret)
				break;
			continue;
		}

		/* Yay a leaf!  We loves leafs! */
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_CHUNK_ITEM_KEY)
			continue;

		fs_chunk = malloc(sizeof(struct fs_chunk));
		if (!fs_chunk) {
			fprintf(stderr, "Error allocating chunk\n");
			ret = -ENOMEM;
			break;
		}
		memset(fs_chunk, 0, sizeof(*fs_chunk));
		read_extent_buffer(eb, &chunk, btrfs_item_ptr_offset(eb, i),
				   sizeof(chunk));

		fs_chunk->logical = key.offset;
		fs_chunk->physical = btrfs_stack_stripe_offset(&chunk.stripe);
		fs_chunk->bytes = btrfs_stack_chunk_length(&chunk);
		INIT_LIST_HEAD(&fs_chunk->list);
		if (tree_search(&mdres->physical_tree, &fs_chunk->p,
				physical_cmp, 1) != NULL)
			list_add(&fs_chunk->list, &mdres->overlapping_chunks);
		else
			tree_insert(&mdres->physical_tree, &fs_chunk->p,
				    physical_cmp);
		if (fs_chunk->physical + fs_chunk->bytes >
		    mdres->last_physical_offset)
			mdres->last_physical_offset = fs_chunk->physical +
				fs_chunk->bytes;
		mdres->alloced_chunks += fs_chunk->bytes;
		tree_insert(&mdres->chunk_tree, &fs_chunk->l, chunk_cmp);
	}
out:
	free(eb);
	return ret;
}
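/*
 * Scan the image cluster by cluster for the item containing the chunk
 * tree block at 'search' and parse it with read_chunk_block(), starting
 * at cluster_bytenr and wrapping around to the start of the image.
 */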
/* If you have to ask you aren't worthy */
static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
				   u64 search, u64 cluster_bytenr)
{
	struct meta_cluster *cluster;
	struct meta_cluster_header *header;
	struct meta_cluster_item *item;
	u64 current_cluster = cluster_bytenr, bytenr;
	u64 item_bytenr;
	u32 bufsize, nritems, i;
	u32 max_size = MAX_PENDING_SIZE * 2;
	u8 *buffer, *tmp = NULL;
	int ret = 0;

	cluster = malloc(BLOCK_SIZE);
	if (!cluster) {
		fprintf(stderr, "Error allocating cluster\n");
		return -ENOMEM;
	}

	buffer = malloc(max_size);
	if (!buffer) {
		fprintf(stderr, "Error allocing buffer\n");
		free(cluster);
		return -ENOMEM;
	}

	if (mdres->compress_method == COMPRESS_ZLIB) {
		tmp = malloc(max_size);
		if (!tmp) {
			fprintf(stderr, "Error allocing tmp buffer\n");
			free(cluster);
			free(buffer);
			return -ENOMEM;
		}
	}

	bytenr = current_cluster;
	while (1) {
		if (fseek(mdres->in, current_cluster, SEEK_SET)) {
			fprintf(stderr, "Error seeking: %d\n", errno);
			ret = -EIO;
			break;
		}

		ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
		if (ret == 0) {
			if (cluster_bytenr != 0) {
				cluster_bytenr = 0;
				current_cluster = 0;
				bytenr = 0;
				continue;
			}
			fprintf(stderr,
				"Couldn't find the requested chunk block in the image\n");
			ret = -EIO;
			break;
		} else if (ret < 0) {
			fprintf(stderr, "Error reading image\n");
			break;
		}
		ret = 0;

		header = &cluster->header;
		if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
		    le64_to_cpu(header->bytenr) != current_cluster) {
			fprintf(stderr, "bad header in metadump image\n");
			ret = -EIO;
			break;
		}

		bytenr += BLOCK_SIZE;
		nritems = le32_to_cpu(header->nritems);
		for (i = 0; i < nritems; i++) {
			size_t size;

			item = &cluster->items[i];
			bufsize = le32_to_cpu(item->size);
			item_bytenr = le64_to_cpu(item->bytenr);

			if (bufsize > max_size) {
				fprintf(stderr, "item %u size %u too big\n",
					i, bufsize);
				ret = -EIO;
				break;
			}

			if (mdres->compress_method == COMPRESS_ZLIB) {
				ret = fread(tmp, bufsize, 1, mdres->in);
				if (ret != 1) {
					fprintf(stderr, "Error reading: %d\n",
						errno);
					ret = -EIO;
					break;
				}

				size = max_size;
				ret = uncompress(buffer,
						 (unsigned long *)&size, tmp,
						 bufsize);
				if (ret != Z_OK) {
					fprintf(stderr, "Error decompressing "
						"%d\n", ret);
					ret = -EIO;
					break;
				}
			} else {
				ret = fread(buffer, bufsize, 1, mdres->in);
				if (ret != 1) {
					fprintf(stderr, "Error reading: %d\n",
						errno);
					ret = -EIO;
					break;
				}
				size = bufsize;
			}
			ret = 0;

			if (item_bytenr <= search &&
			    item_bytenr + size > search) {
				ret = read_chunk_block(mdres, buffer, search,
						       item_bytenr, size,
						       current_cluster);
				if (!ret)
					ret = 1;
				break;
			}
			bytenr += bufsize;
		}
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}
		if (bytenr & BLOCK_MASK)
			bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
		current_cluster = bytenr;
	}

	free(tmp);
	free(buffer);
	free(cluster);
	return ret;
}
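/*
 * Find the super block item in the first cluster, pull the chunk root
 * bytenr, fsid, device uuid and devid out of it, then walk the chunk
 * tree stored in the image to build the logical->physical mapping.
 */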
static int build_chunk_tree(struct mdrestore_struct *mdres,
			    struct meta_cluster *cluster)
{
	struct btrfs_super_block *super;
	struct meta_cluster_header *header;
	struct meta_cluster_item *item = NULL;
	u64 chunk_root_bytenr = 0;
	u32 i, nritems;
	u64 bytenr = 0;
	u8 *buffer;
	int ret;

	/* We can't seek with stdin so don't bother doing this */
	if (mdres->in == stdin)
		return 0;

	ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
	if (ret <= 0) {
		fprintf(stderr, "Error reading in cluster: %d\n", errno);
		return -EIO;
	}
	ret = 0;

	header = &cluster->header;
	if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
	    le64_to_cpu(header->bytenr) != 0) {
		fprintf(stderr, "bad header in metadump image\n");
		return -EIO;
	}

	bytenr += BLOCK_SIZE;
	mdres->compress_method = header->compress;
	nritems = le32_to_cpu(header->nritems);
	for (i = 0; i < nritems; i++) {
		item = &cluster->items[i];

		if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
			break;
		bytenr += le32_to_cpu(item->size);
		if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
			fprintf(stderr, "Error seeking: %d\n", errno);
			return -EIO;
		}
	}

	if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
		fprintf(stderr, "Error: superblock not found in metadump image\n");
		return -EINVAL;
	}

	buffer = malloc(le32_to_cpu(item->size));
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		return -ENOMEM;
	}

	ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
	if (ret != 1) {
		fprintf(stderr, "Error reading buffer: %d\n", errno);
		free(buffer);
		return -EIO;
	}

	if (mdres->compress_method == COMPRESS_ZLIB) {
		size_t size = MAX_PENDING_SIZE * 2;
		u8 *tmp;

		tmp = malloc(MAX_PENDING_SIZE * 2);
		if (!tmp) {
			free(buffer);
			return -ENOMEM;
		}
		ret = uncompress(tmp, (unsigned long *)&size,
				 buffer, le32_to_cpu(item->size));
		if (ret != Z_OK) {
			fprintf(stderr, "Error decompressing %d\n", ret);
			free(buffer);
			free(tmp);
			return -EIO;
		}
		free(buffer);
		buffer = tmp;
	}

	pthread_mutex_lock(&mdres->mutex);
	super = (struct btrfs_super_block *)buffer;
	chunk_root_bytenr = btrfs_super_chunk_root(super);
	mdres->leafsize = btrfs_super_leafsize(super);
	memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
	memcpy(mdres->uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
	mdres->devid = le64_to_cpu(super->dev_item.devid);
	free(buffer);
	pthread_mutex_unlock(&mdres->mutex);

	return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
}
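/*
 * Return 1 if any btrfs superblock mirror offset lies within
 * [physical, physical + bytes), 0 otherwise.
 */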
static int range_contains_super(u64 physical, u64 bytes)
{
	u64 super_bytenr;
	int i;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		super_bytenr = btrfs_sb_offset(i);
		if (super_bytenr >= physical &&
		    super_bytenr < physical + bytes)
			return 1;
	}

	return 0;
}
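/*
 * Relocate every chunk on the overlapping list to the area past
 * last_physical_offset and reinsert it into the physical tree.  If a
 * remapped range used to contain a superblock mirror, the space cache
 * may point at stale data, so schedule it to be cleared.
 */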
static void remap_overlapping_chunks(struct mdrestore_struct *mdres)
{
	struct fs_chunk *fs_chunk;

	while (!list_empty(&mdres->overlapping_chunks)) {
		fs_chunk = list_first_entry(&mdres->overlapping_chunks,
					    struct fs_chunk, list);
		list_del_init(&fs_chunk->list);
		if (range_contains_super(fs_chunk->physical,
					 fs_chunk->bytes)) {
			fprintf(stderr,
				"Remapping a chunk that had a super mirror inside of it, clearing space cache so we don't end up with corruption\n");
			mdres->clear_space_cache = 1;
		}
		fs_chunk->physical = mdres->last_physical_offset;
		tree_insert(&mdres->physical_tree, &fs_chunk->p, physical_cmp);
		mdres->last_physical_offset += fs_chunk->bytes;
	}
}
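/*
 * Make the restored filesystem consistent with the single target device:
 * update total_bytes/bytes_used in the superblock's dev item and in the
 * matching DEV_ITEM in the chunk root, and delete DEV_ITEMs for any
 * other devid left over from the original filesystem.
 */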
static int fixup_devices(struct btrfs_fs_info *fs_info,
			 struct mdrestore_struct *mdres, off_t dev_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_item *dev_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_key key;
	u64 devid, cur_devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path) {
		fprintf(stderr, "Error allocating path\n");
		return -ENOMEM;
	}

	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		fprintf(stderr, "Error starting transaction %ld\n",
			PTR_ERR(trans));
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	dev_item = &fs_info->super_copy->dev_item;

	devid = btrfs_stack_device_id(dev_item);

	btrfs_set_stack_device_total_bytes(dev_item, dev_size);
	btrfs_set_stack_device_bytes_used(dev_item, mdres->alloced_chunks);

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = 0;

again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		fprintf(stderr, "search failed %d\n", ret);
		exit(1);
	}

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				fprintf(stderr,
					"Error going to next leaf %d\n", ret);
				exit(1);
			}
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type > BTRFS_DEV_ITEM_KEY)
			break;
		if (key.type != BTRFS_DEV_ITEM_KEY) {
			path->slots[0]++;
			continue;
		}

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		cur_devid = btrfs_device_id(leaf, dev_item);
		if (devid != cur_devid) {
			/* Drop dev items that don't belong to our device */
			ret = btrfs_del_item(trans, root, path);
			if (ret) {
				fprintf(stderr, "Error deleting item %d\n",
					ret);
				exit(1);
			}
			btrfs_release_path(path);
			goto again;
		}

		btrfs_set_device_total_bytes(leaf, dev_item, dev_size);
		btrfs_set_device_bytes_used(leaf, dev_item,
					    mdres->alloced_chunks);
		btrfs_mark_buffer_dirty(leaf);
		path->slots[0]++;
	}

	btrfs_free_path(path);
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
	if (ret) {
		fprintf(stderr, "Commit failed %d\n", ret);
		return ret;
	}

	return 0;
}
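/*
 * Top-level restore: read every cluster from the image and feed it to
 * the worker threads, then, for a plain single-device restore, reopen
 * the result and fix up the device items to match the size of the
 * target file or device.
 */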
static int restore_metadump(const char *input, FILE *out, int old_restore,
			    int num_threads, int fixup_offset,
			    const char *target, int multi_devices)
{
	struct meta_cluster *cluster = NULL;
	struct meta_cluster_header *header;
	struct mdrestore_struct mdrestore;
	struct btrfs_fs_info *info = NULL;
	u64 bytenr = 0;
	FILE *in = NULL;
	int ret = 0;

	if (!strcmp(input, "-")) {
		in = stdin;
	} else {
		in = fopen(input, "r");
		if (!in) {
			perror("unable to open metadump image");
			return 1;
		}
	}

	/* NOTE: open with write mode */
	if (fixup_offset) {
		BUG_ON(!target);
		info = open_ctree_fs_info(target, 0, 0,
					  OPEN_CTREE_WRITES |
					  OPEN_CTREE_RESTORE |
					  OPEN_CTREE_PARTIAL);
		if (!info) {
			fprintf(stderr, "%s: open ctree failed\n", __func__);
			ret = -EIO;
			goto failed_open;
		}
	}

	cluster = malloc(BLOCK_SIZE);
	if (!cluster) {
		fprintf(stderr, "Error allocating cluster\n");
		ret = -ENOMEM;
		goto failed_info;
	}

	ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
			     fixup_offset, info, multi_devices);
	if (ret) {
		fprintf(stderr, "Error initializing mdrestore %d\n", ret);
		goto failed_cluster;
	}

	if (!multi_devices && !old_restore) {
		ret = build_chunk_tree(&mdrestore, cluster);
		if (ret)
			goto out;
		if (!list_empty(&mdrestore.overlapping_chunks))
			remap_overlapping_chunks(&mdrestore);
	}

	if (in != stdin && fseek(in, 0, SEEK_SET)) {
		fprintf(stderr, "Error seeking %d\n", errno);
		ret = -EIO;
		goto out;
	}

	while (!mdrestore.error) {
		ret = fread(cluster, BLOCK_SIZE, 1, in);
		if (!ret)
			break;

		header = &cluster->header;
		if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
		    le64_to_cpu(header->bytenr) != bytenr) {
			fprintf(stderr, "bad header in metadump image\n");
			ret = -EIO;
			break;
		}
		ret = add_cluster(cluster, &mdrestore, &bytenr);
		if (ret) {
			fprintf(stderr, "Error adding cluster\n");
			break;
		}
	}
	ret = wait_for_worker(&mdrestore);

	if (!ret && !multi_devices && !old_restore) {
		struct btrfs_root *root;
		struct stat st;

		root = open_ctree_fd(fileno(out), target, 0,
				     OPEN_CTREE_PARTIAL |
				     OPEN_CTREE_WRITES |
				     OPEN_CTREE_NO_DEVICES);
		if (!root) {
			fprintf(stderr, "unable to open %s\n", target);
			ret = -EIO;
			goto out;
		}
		info = root->fs_info;

		if (stat(target, &st)) {
			fprintf(stderr, "statting %s failed\n", target);
			close_ctree(info->chunk_root);
			ret = -EIO;
			goto out;
		}

		ret = fixup_devices(info, &mdrestore, st.st_size);
		close_ctree(info->chunk_root);
		if (ret)
			goto out;
	}
out:
	mdrestore_destroy(&mdrestore, num_threads);
failed_cluster:
	free(cluster);
failed_info:
	if (fixup_offset && info)
		close_ctree(info->chunk_root);
failed_open:
	if (in != stdin)
		fclose(in);
	return ret;
}
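/*
 * Multi-device restore helper: copy the restored superblock to another
 * device, substituting that device's own dev item fields (devid, uuid,
 * sizes) as recorded in the chunk root, then recompute the checksum and
 * write the backup superblocks as well.
 */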
static int update_disk_super_on_device(struct btrfs_fs_info *info,
				       const char *other_dev, u64 cur_devid)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_path path;
	struct btrfs_dev_item *dev_item;
	struct btrfs_super_block *disk_super;
	char dev_uuid[BTRFS_UUID_SIZE];
	char fs_uuid[BTRFS_UUID_SIZE];
	u64 devid, type, io_align, io_width;
	u64 sector_size, total_bytes, bytes_used;
	char buf[BTRFS_SUPER_INFO_SIZE];
	int fp = -1;
	int ret;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = cur_devid;

	btrfs_init_path(&path);
	ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
	if (ret) {
		fprintf(stderr, "ERROR: search key failed\n");
		ret = -EIO;
		goto out;
	}

	leaf = path.nodes[0];
	dev_item = btrfs_item_ptr(leaf, path.slots[0],
				  struct btrfs_dev_item);

	devid = btrfs_device_id(leaf, dev_item);
	if (devid != cur_devid) {
		printk("ERROR: devid %llu mismatch with %llu\n",
		       devid, cur_devid);
		ret = -EIO;
		goto out;
	}

	type = btrfs_device_type(leaf, dev_item);
	io_align = btrfs_device_io_align(leaf, dev_item);
	io_width = btrfs_device_io_width(leaf, dev_item);
	sector_size = btrfs_device_sector_size(leaf, dev_item);
	total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	btrfs_release_path(&path);

	printk("update disk super on %s devid=%llu\n", other_dev, devid);

	/* update other devices' super block */
	fp = open(other_dev, O_CREAT | O_RDWR, 0600);
	if (fp < 0) {
		fprintf(stderr, "ERROR: could not open %s\n", other_dev);
		ret = -EIO;
		goto out;
	}

	memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);

	disk_super = (struct btrfs_super_block *)buf;
	dev_item = &disk_super->dev_item;

	btrfs_set_stack_device_type(dev_item, type);
	btrfs_set_stack_device_id(dev_item, devid);
	btrfs_set_stack_device_total_bytes(dev_item, total_bytes);
	btrfs_set_stack_device_bytes_used(dev_item, bytes_used);
	btrfs_set_stack_device_io_align(dev_item, io_align);
	btrfs_set_stack_device_io_width(dev_item, io_width);
	btrfs_set_stack_device_sector_size(dev_item, sector_size);
	memcpy(dev_item->uuid, dev_uuid, BTRFS_UUID_SIZE);
	memcpy(dev_item->fsid, fs_uuid, BTRFS_UUID_SIZE);
	csum_block((u8 *)buf, BTRFS_SUPER_INFO_SIZE);

	ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
	if (ret != BTRFS_SUPER_INFO_SIZE) {
		if (ret < 0)
			fprintf(stderr,
				"ERROR: cannot write superblock: %s\n",
				strerror(errno));
		else
			fprintf(stderr, "ERROR: cannot write superblock\n");
		ret = -EIO;
		goto out;
	}

	/*
	 * pwrite64() returned the byte count on success; reset ret so a
	 * successful update does not look like an error to the caller.
	 */
	ret = 0;
	write_backup_supers(fp, (u8 *)buf);

out:
	if (fp != -1)
		close(fp);
	return ret;
}
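/*
 * Illustrative invocations (device and file names are examples only):
 *
 *	btrfs-image /dev/sda1 fs.img		dump metadata to fs.img
 *	btrfs-image -c9 -t4 /dev/sda1 fs.img	compressed dump, 4 threads
 *	btrfs-image -r fs.img /dev/sdb1		restore fs.img onto a device
 */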
static void print_usage(int ret)
{
	fprintf(stderr, "usage: btrfs-image [options] source target\n");
	fprintf(stderr, "\t-r      \trestore metadump image\n");
	fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
	fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
	fprintf(stderr, "\t-o      \tdon't mess with the chunk tree when restoring\n");
	fprintf(stderr, "\t-s      \tsanitize file names, use once to just use garbage, use twice if you want crc collisions\n");
	fprintf(stderr, "\t-w      \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
	fprintf(stderr, "\t-m      \trestore for multiple devices\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "\tIn the dump mode, source is the btrfs device and target is the output file (use '-' for stdout).\n");
	fprintf(stderr, "\tIn the restore mode, source is the dumped image and target is the btrfs device/file.\n");
	exit(ret);
}
int main(int argc, char *argv[])
{
	char *source;
	char *target;
	u64 num_threads = 0;
	u64 compress_level = 0;
	int create = 1;
	int old_restore = 0;
	int walk_trees = 0;
	int multi_devices = 0;
	int ret;
	int sanitize = 0;
	int dev_cnt = 0;
	int usage_error = 0;
	FILE *out;

	while (1) {
		static const struct option long_options[] = {
			{ "help", no_argument, NULL, GETOPT_VAL_HELP },
			{ NULL, 0, NULL, 0 }
		};
		int c = getopt_long(argc, argv, "rc:t:oswm", long_options, NULL);
		if (c < 0)
			break;
		switch (c) {
		case 'r':
			create = 0;
			break;
		case 't':
			num_threads = arg_strtou64(optarg);
			if (num_threads > 32)
				print_usage(1);
			break;
		case 'c':
			compress_level = arg_strtou64(optarg);
			if (compress_level > 9)
				print_usage(1);
			break;
		case 'o':
			old_restore = 1;
			break;
		case 's':
			sanitize++;
			break;
		case 'w':
			walk_trees = 1;
			break;
		case 'm':
			create = 0;
			multi_devices = 1;
			break;
		case GETOPT_VAL_HELP:
		default:
			print_usage(c != GETOPT_VAL_HELP);
		}
	}

	argc = argc - optind;
	set_argv0(argv);
	if (check_argc_min(argc, 2))
		print_usage(1);

	dev_cnt = argc - 1;

	if (create) {
		if (old_restore) {
			fprintf(stderr,
				"Usage error: create and restore cannot be used at the same time\n");
			usage_error++;
		}
	} else {
		if (walk_trees || sanitize || compress_level) {
			fprintf(stderr,
				"Usage error: -w, -s and -c options make no sense for restore\n");
			usage_error++;
		}
		if (multi_devices && dev_cnt < 2) {
			fprintf(stderr,
				"Usage error: not enough devices specified for -m option\n");
			usage_error++;
		}
		if (!multi_devices && dev_cnt != 1) {
			fprintf(stderr,
				"Usage error: accepts only 1 device without -m option\n");
			usage_error++;
		}
	}

	if (usage_error)
		print_usage(1);

	source = argv[optind];
	target = argv[optind + 1];

	if (create && !strcmp(target, "-")) {
		out = stdout;
	} else {
		out = fopen(target, "w+");
		if (!out) {
			perror("unable to create target file");
			exit(1);
		}
	}

	/* Worker threads are only needed when compressing or restoring */
	if (compress_level > 0 || create == 0) {
		if (num_threads == 0) {
			long nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);

			/* sysconf() returns -1 on failure; fall back to 1 */
			if (nr_cpus <= 0)
				nr_cpus = 1;
			num_threads = nr_cpus;
		}
	} else {
		num_threads = 0;
	}

	if (create) {
		ret = check_mounted(source);
		if (ret < 0) {
			fprintf(stderr, "Could not check mount status: %s\n",
				strerror(-ret));
			exit(1);
		} else if (ret)
			fprintf(stderr,
				"WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");

		ret = create_metadump(source, out, num_threads,
				      compress_level, sanitize, walk_trees);
	} else {
		ret = restore_metadump(source, out, old_restore, num_threads,
				       0, target, multi_devices);
	}
	if (ret) {
		printk("%s failed (%s)\n", (create) ? "create" : "restore",
		       strerror(errno));
		goto out;
	}

	/* extended support for multiple devices */
	if (!create && multi_devices) {
		struct btrfs_fs_info *info;
		u64 total_devs;
		int i;

		info = open_ctree_fs_info(target, 0, 0,
					  OPEN_CTREE_PARTIAL |
					  OPEN_CTREE_RESTORE);
		if (!info) {
			int e = errno;
			fprintf(stderr, "unable to open %s: %s\n",
				target, strerror(e));
			return 1;
		}

		total_devs = btrfs_super_num_devices(info->super_copy);
		if (total_devs != dev_cnt) {
			printk("the image needs %llu devices, but only %d were given\n",
			       total_devs, dev_cnt);
			close_ctree(info->chunk_root);
			goto out;
		}

		/* update super block on other disks */
		for (i = 2; i <= dev_cnt; i++) {
			ret = update_disk_super_on_device(info,
					argv[optind + i], (u64)i);
			if (ret) {
				printk("update disk super failed devid=%d (error=%d)\n",
				       i, ret);
				close_ctree(info->chunk_root);
				exit(1);
			}
		}

		close_ctree(info->chunk_root);

		/* fix metadata block to map correct chunk */
		ret = restore_metadump(source, out, 0, num_threads, 1,
				       target, 1);
		if (ret) {
			fprintf(stderr, "fix metadump failed (error=%d)\n",
				ret);
			exit(1);
		}
	}
out:
	if (out == stdout) {
		fflush(out);
	} else {
		fclose(out);
		if (ret && create) {
			/* Remove a partially written dump on failure */
			int unlink_ret;

			unlink_ret = unlink(target);
			if (unlink_ret)
				fprintf(stderr,
					"unlink output file failed: %s\n",
					strerror(errno));
		}
	}

	btrfs_close_all_devices();

	return !!ret;
}