/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>
#include <fcntl.h>
#include <unistd.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "utils.h"
#include "kernel-lib/raid56.h"

struct stripe {
	struct btrfs_device *dev;
	u64 physical;
};

static inline int nr_parity_stripes(struct map_lookup *map)
{
	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 1;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;
	else
		return 0;
}

static inline int nr_data_stripes(struct map_lookup *map)
{
	return map->num_stripes - nr_parity_stripes(map);
}

#define is_parity_stripe(x) ( ((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE) )

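/*
 * All filesystems found by device scanning, one btrfs_fs_devices entry per
 * fsid.  Entries are added by device_list_add() below and torn down again
 * by btrfs_close_devices().
 */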
static LIST_HEAD(fs_uuids);

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

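/*
 * Record a scanned device in the in-memory device list.
 *
 * Creates the btrfs_fs_devices entry for this fsid on first sight, then
 * either adds a new btrfs_device or refreshes the path of an already known
 * one, preferring the copy with the higher generation.
 */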
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->fd = -1;
		device->devid = devid;
		device->generation = found_transid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		if (!device->label) {
			kfree(device->name);
			kfree(device);
			return -ENOMEM;
		}
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
		device->bytes_used =
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name, path)) {
		char *name;

		/*
		 * The existing device has newer generation, so this one could
		 * be a stale one, don't add it.
		 */
		if (found_transid < device->generation) {
			warning(
		"adding device %s gen %llu but found an existing device %s gen %llu",
				path, found_transid, device->name,
				device->generation);
			return -EEXIST;
		}

		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

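/*
 * Sync, close and free every device of @fs_devices, including any seed
 * device lists chained behind it.
 */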
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_device *device;
	int ret = 0;

again:
	if (!fs_devices)
		return 0;
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		if (device->fd != -1) {
			if (fsync(device->fd) == -1) {
				warning("fsync on device %llu failed: %s",
					device->devid, strerror(errno));
				ret = -errno;
			}
			if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
				fprintf(stderr, "Warning, could not drop caches\n");
			close(device->fd);
			device->fd = -1;
		}
		device->writeable = 0;
		list_del(&device->dev_list);
		/* free the memory */
		free(device->name);
		free(device->label);
		free(device);
	}

	seed_devices = fs_devices->seed;
	fs_devices->seed = NULL;
	if (seed_devices) {
		struct btrfs_fs_devices *orig;

		orig = fs_devices;
		fs_devices = seed_devices;
		list_del(&orig->list);
		free(orig);
		goto again;
	} else {
		list_del(&fs_devices->list);
		free(fs_devices);
	}

	return ret;
}

void btrfs_close_all_devices(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
					list);
		btrfs_close_devices(fs_devices);
	}
}

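/*
 * Open every scanned device of @fs_devices with the given open(2) flags,
 * remembering the file descriptors that belong to the latest and lowest
 * devid for later use.
 */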
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	int fd;
	int ret;
	struct btrfs_device *device;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->name) {
			printk("no name for device %llu, skip it now\n", device->devid);
			continue;
		}

		fd = open(device->name, flags);
		if (fd < 0) {
			ret = -errno;
			error("cannot open device '%s': %s", device->name,
					strerror(errno));
			goto fail;
		}

		if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
			fprintf(stderr, "Warning, could not drop caches\n");

		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		device->fd = fd;
		if (flags & O_RDWR)
			device->writeable = 1;
	}
	return 0;
fail:
	btrfs_close_devices(fs_devices);
	return ret;
}

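/*
 * Read the superblock at @super_offset from @fd and register the device in
 * the global list of scanned devices via device_list_add().
 */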
int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset, unsigned sbflags)
{
	struct btrfs_super_block *disk_super;
	char buf[BTRFS_SUPER_INFO_SIZE];
	int ret;
	u64 devid;

	disk_super = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, disk_super, super_offset, sbflags);
	if (ret < 0)
		return -EIO;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
		*total_devs = 1;
	else
		*total_devs = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	return ret;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
static int find_free_dev_extent_start(struct btrfs_trans_handle *trans,
				      struct btrfs_device *device, u64 num_bytes,
				      u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, (u64)SZ_1M);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto out;
	}

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device, u64 num_bytes,
				u64 *start)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans, device,
					  num_bytes, 0, start, NULL);
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset,
				  u64 num_bytes, u64 *start, int convert)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * For the convert case, skip searching for a free dev_extent, as the
	 * caller is responsible for making sure it's free.
	 */
	if (!convert) {
		ret = find_free_dev_extent(trans, device, num_bytes,
					   start);
		if (ret)
			goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

static int find_next_chunk(struct btrfs_fs_info *fs_info, u64 *offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != BTRFS_FIRST_CHUNK_TREE_OBJECTID)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_fs_info *fs_info,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->chunk_root;
	unsigned long ptr;
	u64 free_devid = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

chunk_bytes_by_type(u64 type
, u64 calc_size
, int num_stripes
,
726 if (type
& (BTRFS_BLOCK_GROUP_RAID1
| BTRFS_BLOCK_GROUP_DUP
))
728 else if (type
& BTRFS_BLOCK_GROUP_RAID10
)
729 return calc_size
* (num_stripes
/ sub_stripes
);
730 else if (type
& BTRFS_BLOCK_GROUP_RAID5
)
731 return calc_size
* (num_stripes
- 1);
732 else if (type
& BTRFS_BLOCK_GROUP_RAID6
)
733 return calc_size
* (num_stripes
- 2);
735 return calc_size
* num_stripes
;
static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO, add a way to store the preferred stripe size */
	return BTRFS_STRIPE_LEN;
}

/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * No chunk may be allocated in the first 1M of the device, nor before
 * alloc_start if it is specified.
 * So search for holes from max(1M, alloc_start) to device->total_bytes.
 */
static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
				    struct btrfs_device *device,
				    u64 *avail_bytes)
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct extent_buffer *l;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;
	u64 extent_end = 0;
	u64 free_bytes = 0;
	int ret;
	int slot = 0;

	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = root->fs_info->alloc_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;
		if (key.objectid > device->devid)
			break;
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;
		if (key.offset > search_end)
			break;
		if (key.offset > search_start)
			free_bytes += key.offset - search_start;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
		if (search_start > search_end)
			break;
next:
		path->slots[0]++;
		cond_resched();
	}

	if (search_start < search_end)
		free_bytes += search_end - search_start;

	*avail_bytes = free_bytes;
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

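/*
 * Allocate a chunk of the given @type: pick the stripe count and target
 * sizes from the profile, build a private list of devices that have enough
 * free space, reserve a dev extent on each of them, and finally insert the
 * chunk item (mirroring it into the superblock's sys_chunk_array for SYSTEM
 * chunks).
 */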
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *info, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = SZ_1M;
	u64 calc_size = SZ_8M;
	u64 min_free;
	u64 max_chunk_size = 4 * calc_size;
	u64 avail = 0;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int max_stripes = 0;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;
	u64 offset;

	if (list_empty(dev_list)) {
		return -ENOSPC;
	}

	if (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			calc_size = SZ_8M;
			max_chunk_size = calc_size * 2;
			min_stripe_size = SZ_1M;
			max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			calc_size = SZ_1G;
			max_chunk_size = 10 * calc_size;
			min_stripe_size = SZ_64M;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			calc_size = SZ_1G;
			max_chunk_size = 4 * calc_size;
			min_stripe_size = SZ_32M;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		}
	}
	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				  btrfs_super_num_devices(info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & BTRFS_BLOCK_GROUP_DUP) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
		stripe_len = find_raid56_stripe_len(num_stripes - 1,
				    btrfs_super_stripesize(info->super_copy));
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 3)
			return -ENOSPC;
		min_stripes = 3;
		stripe_len = find_raid56_stripe_len(num_stripes - 2,
				    btrfs_super_stripesize(info->super_copy));
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
	    max_chunk_size) {
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	calc_size /= stripe_len;
	calc_size *= stripe_len;
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		ret = btrfs_device_avail_bytes(trans, device, &avail);
		if (ret)
			return ret;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}
	ret = find_next_chunk(info, &offset);
	if (ret)
		return ret;
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = offset;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset, 0);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = info->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(info, &key,
			     chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}

	kfree(chunk);
	return ret;
}

/*
 * Alloc a DATA chunk with SINGLE profile.
 *
 * If 'convert' is set, it will alloc a chunk with 1:1 mapping
 * (btrfs logical bytenr == on-disk bytenr)
 * For that case, caller must make sure the chunk and dev_extent are not
 * occupied.
 */
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *info, u64 *start,
			   u64 num_bytes, u64 type, int convert)
{
	u64 dev_offset;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	u64 calc_size = SZ_8M;
	int num_stripes = 1;
	int sub_stripes = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	if (convert) {
		if (*start != round_down(*start, info->sectorsize)) {
			error("DATA chunk start not sectorsize aligned: %llu",
					(unsigned long long)*start);
			return -EINVAL;
		}
		key.offset = *start;
		dev_offset = *start;
	} else {
		u64 tmp;

		ret = find_next_chunk(info, &tmp);
		key.offset = tmp;
		if (ret)
			return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	calc_size = num_bytes;

	index = 0;
	cur = dev_list->next;
	device = list_entry(cur, struct btrfs_device, dev_list);

	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset, convert);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = info->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	kfree(chunk);
	return ret;
}

int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	int ret;

	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		fprintf(stderr, "No mapping for %llu-%llu\n",
			(unsigned long long)logical,
			(unsigned long long)logical+len);
		return 1;
	}
	if (ce->start > logical || ce->start + ce->size < logical) {
		fprintf(stderr, "Invalid mapping for %llu-%llu, got "
			"%llu-%llu\n", (unsigned long long)logical,
			(unsigned long long)logical+len,
			(unsigned long long)ce->start,
			(unsigned long long)ce->start + ce->size);
		return 1;
	}
	map = container_of(ce, struct map_lookup, ce);

	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	return ret;
}

int btrfs_next_bg(struct btrfs_fs_info *fs_info, u64 *logical,
		  u64 *size, u64 type)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 cur = *logical;

	ce = search_cache_extent(&map_tree->cache_tree, cur);

	while (ce) {
		/*
		 * only jump to next bg if our cur is not 0
		 * As the initial logical for btrfs_next_bg() is 0, and
		 * if we jump to next bg, we skipped a valid bg.
		 */
		if (cur) {
			ce = next_cache_extent(ce);
			if (!ce)
				return -ENOENT;
		}

		cur = ce->start;
		map = container_of(ce, struct map_lookup, ce);
		if (map->type & type) {
			*logical = ce->start;
			*size = ce->size;
			return 0;
		}
		if (!cur)
			ce = next_cache_extent(ce);
	}

	return -ENOENT;
}

int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	ce = search_cache_extent(&map_tree->cache_tree, chunk_start);
	BUG_ON(!ce);
	map = container_of(ce, struct map_lookup, ce);

	length = ce->size;
	rmap_len = map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = ce->size / (map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = ce->size / map->num_stripes;
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			      BTRFS_BLOCK_GROUP_RAID6)) {
		length = ce->size / nr_data_stripes(map);
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = (physical - map->stripes[i].physical) /
			    map->stripe_len;

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = (stripe_nr * map->num_stripes + i) /
				    map->sub_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = ce->start + stripe_nr * rmap_len;
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	return 0;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < bbio->num_stripes - 1; i++) {
			if (parity_smaller(raid_map[i], raid_map[i+1])) {
				s = bbio->stripes[i];
				l = raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				raid_map[i] = raid_map[i+1];
				bbio->stripes[i+1] = s;
				raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}

int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num,
		    u64 **raid_map_ret)
{
	return __btrfs_map_block(fs_info, rw, logical, length, NULL,
				 multi_ret, mirror_num, raid_map_ret);
}

int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		      u64 logical, u64 *length, u64 *type,
		      struct btrfs_multi_bio **multi_ret, int mirror_num,
		      u64 **raid_map_ret)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 *raid_map = NULL;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && rw == READ) {
		stripes_allocated = 1;
	}
again:
	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		kfree(multi);
		*length = (u64)-1;
		return -ENOENT;
	}
	if (ce->start > logical) {
		kfree(multi);
		*length = ce->start - logical;
		return -ENOENT;
	}

	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}
	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;

	if (rw == WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
	    && multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
		/* RAID[56] write or recovery. Return all stripes */
		stripes_required = map->num_stripes;

		/* Only allocate the map if we've already got a large enough multi_ret */
		if (stripes_allocated >= stripes_required) {
			raid_map = kmalloc(sizeof(u64) * map->num_stripes,
					   GFP_NOFS);
			if (!raid_map) {
				kfree(multi);
				return -ENOMEM;
			}
		}
	}

	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && stripes_allocated < stripes_required) {
		stripes_allocated = stripes_required;
		kfree(multi);
		multi = NULL;
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = stripe_nr / map->stripe_len;

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, ce->size - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = ce->size - offset;
	}

	if (!multi_ret)
		goto out;

	multi->num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = stripe_nr % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = stripe_nr % factor;
		stripe_index *= map->sub_stripes;

		if (rw == WRITE)
			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;

		stripe_nr = stripe_nr / factor;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {
		if (raid_map) {
			int rot;
			u64 tmp;
			u64 raid56_full_stripe_start;
			u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len;

			/*
			 * align the start of our data stripe in the logical
			 * address space
			 */
			raid56_full_stripe_start = offset / full_stripe_len;
			raid56_full_stripe_start *= full_stripe_len;

			/* get the data stripe number */
			stripe_nr = raid56_full_stripe_start / map->stripe_len;
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/* Work out the disk rotation on this stripe-set */
			rot = stripe_nr % map->num_stripes;

			/* Fill in the logical address of each stripe */
			tmp = stripe_nr * nr_data_stripes(map);

			for (i = 0; i < nr_data_stripes(map); i++)
				raid_map[(i+rot) % map->num_stripes] =
					ce->start + (tmp + i) * map->stripe_len;

			raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE;
			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
				raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE;

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
			multi->num_stripes = map->num_stripes;
		} else {
			stripe_index = stripe_nr % nr_data_stripes(map);
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			stripe_index = (stripe_nr + stripe_index) % map->num_stripes;
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;

	if (type)
		*type = map->type;

	if (raid_map) {
		sort_parity_stripes(multi, raid_map);
		*raid_map_ret = raid_map;
	}
out:
	return 0;
}

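/*
 * Find a device by devid, uuid and (optionally) fsid, following the chain
 * of seed filesystems if there is one.
 */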
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    (!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE) ||
		     fs_info->ignore_fsid_mismatch)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

struct btrfs_device *
btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
			   u64 devid, int instance)
{
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *dev;
	int num_found = 0;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid && num_found++ == instance)
			return dev;
	}
	return NULL;
}

int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	int readonly = 0;
	int i;

	/*
	 * During chunk recovering, we may fail to find block group's
	 * corresponding chunk, we will rebuild it later
	 */
	ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
	if (!fs_info->is_chunk_recover)
		BUG_ON(!ce);
	else
		return 0;

	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}

	return readonly;
}

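/* Create a placeholder btrfs_device for a devid we failed to look up */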
static struct btrfs_device *fill_missing_device(u64 devid)
{
	struct btrfs_device *device;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	device->devid = devid;
	device->fd = -1;
	return device;
}

/*
 * slot == -1: SYSTEM chunk
 * return -EIO on error, otherwise return 0
 */
int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *leaf,
			    struct btrfs_chunk *chunk,
			    int slot, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u32 chunk_ondisk_size;
	u32 sectorsize = fs_info->sectorsize;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	/*
	 * These validity checks may be insufficient to cover every corner
	 * case.
	 */
	if (!IS_ALIGNED(logical, sectorsize)) {
		error("invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != sectorsize) {
		error("invalid chunk sectorsize %llu",
		      (unsigned long long)btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, sectorsize)) {
		error("invalid chunk length %llu", length);
		return -EIO;
	}
	if (stripe_len != BTRFS_STRIPE_LEN) {
		error("invalid chunk stripe length: %llu", stripe_len);
		return -EIO;
	}
	/* Check on chunk item type */
	if (slot == -1 && (type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
		error("invalid chunk type %llu", type);
		return -EIO;
	}
	if (type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
		     BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		error("unrecognized chunk type: %llu",
		      ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
		return -EIO;
	}
	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		error("missing chunk type flag: %llu", type);
		return -EIO;
	}
	if (!(is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) ||
	      (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)) {
		error("conflicting chunk type detected: %llu", type);
		return -EIO;
	}
	if ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
	    !is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		error("conflicting chunk profile detected: %llu", type);
		return -EIO;
	}

	chunk_ondisk_size = btrfs_chunk_item_size(num_stripes);
	/*
	 * Btrfs_chunk contains at least one stripe, and for sys_chunk
	 * it can't exceed the system chunk array size.
	 * For normal chunk, it should match its chunk item size.
	 */
	if (num_stripes < 1 ||
	    (slot == -1 && chunk_ondisk_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) ||
	    (slot >= 0 && chunk_ondisk_size > btrfs_item_size_nr(leaf, slot))) {
		error("invalid num_stripes: %u", num_stripes);
		return -EIO;
	}
	/*
	 * Device number check against profile
	 */
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && (sub_stripes != 2 ||
		  !IS_ALIGNED(num_stripes, sub_stripes))) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		error("Invalid num_stripes:sub_stripes %u:%u for profile %llu",
		      num_stripes, sub_stripes,
		      type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EIO;
	}

	return 0;
}

/*
 * Slot is used to verify the chunk item is valid
 *
 * For sys chunk in superblock, pass -1 to indicate sys chunk.
 */
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk, int slot)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Validation check */
	ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, slot, logical);
	if (ret) {
		error("%s checksums match, but it has an invalid chunk, %s",
		      (slot == -1) ? "Superblock" : "Metadata",
		      (slot == -1) ? "try btrfsck --repair -s <superblock> ie, 0,1,2" : "");
		return ret;
	}

	ce = search_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info, devid, uuid,
							NULL);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = fill_missing_device(devid);
			printf("warning, device %llu is missing\n",
			       (unsigned long long)devid);
			list_add(&map->stripes[i].dev->dev_list,
				 &fs_info->fs_devices->devices);
		}
	}
	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	BUG_ON(ret);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		/* missing all seed devices */
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices) {
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, fsid, BTRFS_FSID_SIZE);
	}

	ret = btrfs_open_devices(fs_devices, O_RDONLY);
	if (ret)
		goto out;

	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}

static int read_one_dev(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret = 0;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(fs_info, fs_uuid);
		if (ret)
			return ret;
	}

	device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		device->fd = -1;
		list_add(&device->dev_list,
			 &fs_info->fs_devices->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = fs_info->dev_root;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	struct btrfs_key key;

	if (fs_info->nodesize < BTRFS_SUPER_INFO_SIZE) {
		printf("ERROR: nodesize %u too small to read superblock\n",
				fs_info->nodesize);
		return -EINVAL;
	}
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				printk(
		"ERROR: invalid number of stripes %u in sys_array at offset %u\n",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(fs_info, &key, sb, chunk, -1);
			if (ret)
				break;
		} else {
			printk(
		"ERROR: unexpected item type %u in sys_array at offset %u\n",
				(u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	free_extent_buffer(sb);
	return ret;

out_short_read:
	printk("ERROR: sys_array too short to read %u bytes at offset %u\n",
			len, cur_offset);
	free_extent_buffer(sb);
	return -EIO;
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(fs_info, leaf, dev_item);
			BUG_ON(ret);
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(fs_info, &found_key, leaf, chunk,
					     slot);
			BUG_ON(ret);
		}
		path->slots[0]++;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

*btrfs_scanned_uuids(void)
static int rmw_eb(struct btrfs_fs_info *info,
		  struct extent_buffer *eb, struct extent_buffer *orig_eb)
{
	int ret;
	unsigned long orig_off = 0;
	unsigned long dest_off = 0;
	unsigned long copy_len = eb->len;

	ret = read_whole_eb(info, eb, 0);
	if (ret)
		return ret;

	if (eb->start + eb->len <= orig_eb->start ||
	    eb->start >= orig_eb->start + orig_eb->len)
		return 0;
	/*
	 * | ----- orig_eb ------- |
	 *         | ----- stripe -------  |
	 *         | ----- orig_eb ------- |
	 *              | ----- orig_eb ------- |
	 */
	if (eb->start > orig_eb->start)
		orig_off = eb->start - orig_eb->start;
	if (orig_eb->start > eb->start)
		dest_off = orig_eb->start - eb->start;

	if (copy_len > orig_eb->len - orig_off)
		copy_len = orig_eb->len - orig_off;
	if (copy_len > eb->len - dest_off)
		copy_len = eb->len - dest_off;

	memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);
	return 0;
}

static int split_eb_for_raid56(struct btrfs_fs_info *info,
			       struct extent_buffer *orig_eb,
			       struct extent_buffer **ebs,
			       u64 stripe_len, u64 *raid_map,
			       int num_stripes)
{
	struct extent_buffer **tmp_ebs;
	u64 start = orig_eb->start;
	u64 this_eb_start;
	int i;
	int ret = 0;

	tmp_ebs = calloc(num_stripes, sizeof(*tmp_ebs));
	if (!tmp_ebs)
		return -ENOMEM;

	/* Alloc memory in a row for data stripes */
	for (i = 0; i < num_stripes; i++) {
		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		tmp_ebs[i] = calloc(1, sizeof(**tmp_ebs) + stripe_len);
		if (!tmp_ebs[i]) {
			ret = -ENOMEM;
			goto clean;
		}
	}
	for (i = 0; i < num_stripes; i++) {
		struct extent_buffer *eb = tmp_ebs[i];

		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		eb->start = raid_map[i];
		eb->len = stripe_len;
		eb->refs = 1;
		eb->flags = 0;
		eb->fd = -1;
		eb->dev_bytenr = (u64)-1;

		this_eb_start = raid_map[i];

		if (start > this_eb_start ||
		    start + orig_eb->len < this_eb_start + stripe_len) {
			ret = rmw_eb(info, eb, orig_eb);
			if (ret)
				goto clean;
		} else {
			memcpy(eb->data, orig_eb->data + eb->start - start,
			       stripe_len);
		}
		ebs[i] = eb;
	}
	free(tmp_ebs);
	return ret;
clean:
	for (i = 0; i < num_stripes; i++)
		free(tmp_ebs[i]);
	free(tmp_ebs);
	return ret;
}

int write_raid56_with_parity(struct btrfs_fs_info *info,
			     struct extent_buffer *eb,
			     struct btrfs_multi_bio *multi,
			     u64 stripe_len, u64 *raid_map)
{
	struct extent_buffer **ebs, *p_eb = NULL, *q_eb = NULL;
	int i;
	int ret;
	int alloc_size = eb->len;
	void **pointers;

	ebs = malloc(sizeof(*ebs) * multi->num_stripes);
	pointers = malloc(sizeof(*pointers) * multi->num_stripes);
	if (!ebs || !pointers) {
		free(ebs);
		free(pointers);
		return -ENOMEM;
	}

	if (stripe_len > alloc_size)
		alloc_size = stripe_len;

	ret = split_eb_for_raid56(info, eb, ebs, stripe_len, raid_map,
				  multi->num_stripes);
	if (ret)
		goto out;

	for (i = 0; i < multi->num_stripes; i++) {
		struct extent_buffer *new_eb;
		if (raid_map[i] < BTRFS_RAID5_P_STRIPE) {
			ebs[i]->dev_bytenr = multi->stripes[i].physical;
			ebs[i]->fd = multi->stripes[i].dev->fd;
			multi->stripes[i].dev->total_ios++;
			if (ebs[i]->start != raid_map[i]) {
				ret = -EINVAL;
				goto out_free_split;
			}
			continue;
		}
		new_eb = malloc(sizeof(*eb) + alloc_size);
		if (!new_eb) {
			ret = -ENOMEM;
			goto out_free_split;
		}
		new_eb->dev_bytenr = multi->stripes[i].physical;
		new_eb->fd = multi->stripes[i].dev->fd;
		multi->stripes[i].dev->total_ios++;
		new_eb->len = stripe_len;

		if (raid_map[i] == BTRFS_RAID5_P_STRIPE)
			p_eb = new_eb;
		else if (raid_map[i] == BTRFS_RAID6_Q_STRIPE)
			q_eb = new_eb;
	}
	if (q_eb) {
		ebs[multi->num_stripes - 2] = p_eb;
		ebs[multi->num_stripes - 1] = q_eb;

		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;

		raid6_gen_syndrome(multi->num_stripes, stripe_len, pointers);
	} else {
		ebs[multi->num_stripes - 1] = p_eb;
		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;
		ret = raid5_gen_result(multi->num_stripes, stripe_len,
				       multi->num_stripes - 1, pointers);
		if (ret < 0)
			goto out_free_split;
	}

	for (i = 0; i < multi->num_stripes; i++) {
		ret = write_extent_to_disk(ebs[i]);
		if (ret < 0)
			goto out_free_split;
	}

out_free_split:
	for (i = 0; i < multi->num_stripes; i++) {
		if (ebs[i] != eb)
			free(ebs[i]);
	}
out:
	free(ebs);
	free(pointers);

	return ret;
}

/*
 * Get stripe length from chunk item and its stripe items
 *
 * Caller should only call this function after validating the chunk item
 * by using btrfs_check_chunk_valid().
 */
u64 btrfs_stripe_length(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_chunk *chunk)
{
	u64 stripe_len;
	u64 chunk_len;
	u32 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 profile = btrfs_chunk_type(leaf, chunk) &
		      BTRFS_BLOCK_GROUP_PROFILE_MASK;

	chunk_len = btrfs_chunk_length(leaf, chunk);

	switch (profile) {
	case 0: /* Single profile */
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_DUP:
		stripe_len = chunk_len;
		break;
	case BTRFS_BLOCK_GROUP_RAID0:
		stripe_len = chunk_len / num_stripes;
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
		stripe_len = chunk_len / (num_stripes - 1);
		break;
	case BTRFS_BLOCK_GROUP_RAID6:
		stripe_len = chunk_len / (num_stripes - 2);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		stripe_len = chunk_len / (num_stripes /
				btrfs_chunk_sub_stripes(leaf, chunk));
		break;
	default:
		/* Invalid chunk profile found */
		BUG_ON(1);
	}
	return stripe_len;
}

/*
 * Return 0 if size of @device is already good
 * Return >0 if size of @device is not aligned but fixed without problems
 * Return <0 if something wrong happened when aligning the size of @device
 */
int btrfs_fix_device_size(struct btrfs_fs_info *fs_info,
			  struct btrfs_device *device)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_path path;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_dev_item *di;
	u64 old_bytes = device->total_bytes;
	int ret;

	if (IS_ALIGNED(old_bytes, fs_info->sectorsize))
		return 0;

	/* Align the in-memory total_bytes first, and use it as correct size */
	device->total_bytes = round_down(device->total_bytes,
					 fs_info->sectorsize);

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	trans = btrfs_start_transaction(chunk_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		error("error starting transaction: %d (%s)",
		      ret, strerror(-ret));
		return ret;
	}

	btrfs_init_path(&path);
	ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 1);
	if (ret > 0) {
		error("failed to find DEV_ITEM for devid %llu", device->devid);
		ret = -ENOENT;
		goto err;
	}
	if (ret < 0) {
		error("failed to search chunk root: %d (%s)",
		      ret, strerror(-ret));
		goto err;
	}
	di = btrfs_item_ptr(path.nodes[0], path.slots[0], struct btrfs_dev_item);
	btrfs_set_device_total_bytes(path.nodes[0], di, device->total_bytes);
	btrfs_mark_buffer_dirty(path.nodes[0]);
	ret = btrfs_commit_transaction(trans, chunk_root);
	if (ret < 0) {
		error("failed to commit current transaction: %d (%s)",
		      ret, strerror(-ret));
		btrfs_release_path(&path);
		return ret;
	}
	btrfs_release_path(&path);
	printf("Fixed device size for devid %llu, old size: %llu new size: %llu\n",
	       device->devid, old_bytes, device->total_bytes);
	return 1;

err:
	/* We haven't modified anything, it's OK to commit current trans */
	btrfs_commit_transaction(trans, chunk_root);
	btrfs_release_path(&path);
	return ret;
}

/*
 * Return 0 if super block total_bytes matches all devices' total_bytes
 * Return >0 if super block total_bytes mismatch but fixed without problem
 * Return <0 if we failed to fix super block total_bytes
 */
int btrfs_fix_super_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct list_head *dev_list = &fs_info->fs_devices->devices;
	u64 total_bytes = 0;
	u64 old_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret;

	list_for_each_entry(device, dev_list, dev_list) {
		/*
		 * Caller should ensure this function is called after aligning
		 * all devices' total_bytes.
		 */
		if (!IS_ALIGNED(device->total_bytes, fs_info->sectorsize)) {
			error("device %llu total_bytes %llu not aligned to %u",
			      device->devid, device->total_bytes,
			      fs_info->sectorsize);
			return -EUCLEAN;
		}
		total_bytes += device->total_bytes;
	}

	if (total_bytes == old_bytes)
		return 0;

	btrfs_set_super_total_bytes(fs_info->super_copy, total_bytes);

	/* Commit transaction to update all super blocks */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		error("error starting transaction: %d (%s)",
		      ret, strerror(-ret));
		return ret;
	}
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
	if (ret < 0) {
		error("failed to commit current transaction: %d (%s)",
		      ret, strerror(-ret));
		return ret;
	}
	printf("Fixed super total bytes, old size: %llu new size: %llu\n",
	       old_bytes, total_bytes);
	return 1;
}

/*
 * Return 0 if all devices and super block sizes are good
 * Return >0 if any device/super size problem was found, but fixed
 * Return <0 if something wrong happened during fixing
 */
int btrfs_fix_device_and_super_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct list_head *dev_list = &fs_info->fs_devices->devices;
	bool have_bad_value = false;
	int ret;

	/* Seed device is not supported yet */
	if (fs_info->fs_devices->seed) {
		error("fixing device size with seed device is not supported yet");
		return -EOPNOTSUPP;
	}

	/* All devices must be set up before repairing */
	if (list_empty(dev_list)) {
		error("no device found");
		return -ENODEV;
	}
	list_for_each_entry(device, dev_list, dev_list) {
		if (device->fd == -1 || !device->writeable) {
			error("devid %llu is missing or not writeable",
			      device->devid);
			error(
	"fixing device size needs all device(s) to be present and writeable");
			return -ENODEV;
		}
	}

	/* Repair total_bytes of each device */
	list_for_each_entry(device, dev_list, dev_list) {
		ret = btrfs_fix_device_size(fs_info, device);
		if (ret < 0)
			return ret;
		if (ret > 0)
			have_bad_value = true;
	}

	/* Repair super total_bytes */
	ret = btrfs_fix_super_size(fs_info);
	if (ret > 0)
		have_bad_value = true;
	if (have_bad_value) {
		printf(
	"Fixed unaligned/mismatched total_bytes for super block and device items\n");
		ret = 1;
	} else {
		printf("No device size related problem found\n");
		ret = 0;
	}
	return ret;
}