/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <stdio.h>
#include <sys/types.h>
#include <uuid/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "utils.h"
#include "kernel-lib/raid56.h"
struct stripe {
        struct btrfs_device *dev;
        u64 physical;
};

static inline int nr_parity_stripes(struct map_lookup *map)
{
        if (map->type & BTRFS_BLOCK_GROUP_RAID5)
                return 1;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
                return 2;
        else
                return 0;
}

static inline int nr_data_stripes(struct map_lookup *map)
{
        return map->num_stripes - nr_parity_stripes(map);
}

#define is_parity_stripe(x) ( ((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE) )
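/*
 * For example, a RAID6 chunk striped across 6 devices has
 * nr_parity_stripes() == 2 and nr_data_stripes() == 4, while a RAID5 chunk
 * across the same devices has 1 parity stripe and 5 data stripes.
 */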
static LIST_HEAD(fs_uuids);
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
                                          u8 *uuid)
{
        struct btrfs_device *dev;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
                        return dev;
                }
        }
        return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}
static int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                fs_devices->lowest_devid = (u64)-1;
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->fd = -1;
                device->devid = devid;
                device->generation = found_transid;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        return -ENOMEM;
                }
                device->label = kstrdup(disk_super->label, GFP_NOFS);
                if (!device->label) {
                        kfree(device->name);
                        kfree(device);
                        return -ENOMEM;
                }
                device->total_devs = btrfs_super_num_devices(disk_super);
                device->super_bytes_used = btrfs_super_bytes_used(disk_super);
                device->total_bytes =
                        btrfs_stack_device_total_bytes(&disk_super->dev_item);
                device->bytes_used =
                        btrfs_stack_device_bytes_used(&disk_super->dev_item);
                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
        } else if (!device->name || strcmp(device->name, path)) {
                char *name;

                /*
                 * The existing device has newer generation, so this one could
                 * be a stale one, don't add it.
                 */
                if (found_transid < device->generation) {
                        warning(
                "adding device %s gen %llu but found an existing device %s gen %llu",
                                path, found_transid, device->name,
                                device->generation);
                        return -EEXIST;
                }

                name = strdup(path);
                if (!name)
                        return -ENOMEM;
                kfree(device->name);
                device->name = name;
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        if (fs_devices->lowest_devid > devid) {
                fs_devices->lowest_devid = devid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices;
        struct btrfs_device *device;
        int ret = 0;

again:
        if (!fs_devices)
                return 0;
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                if (device->fd != -1) {
                        if (fsync(device->fd) == -1) {
                                warning("fsync on device %llu failed: %m",
                                        device->devid);
                                ret = -errno;
                        }
                        if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
                                fprintf(stderr, "Warning, could not drop caches\n");
                        close(device->fd);
                        device->fd = -1;
                }
                device->writeable = 0;
                list_del(&device->dev_list);
                /* free the memory */
                kfree(device->name);
                kfree(device->label);
                kfree(device);
        }

        seed_devices = fs_devices->seed;
        fs_devices->seed = NULL;
        if (seed_devices) {
                struct btrfs_fs_devices *orig;

                orig = fs_devices;
                fs_devices = seed_devices;
                list_del(&orig->list);
                kfree(orig);
                goto again;
        } else {
                list_del(&fs_devices->list);
                kfree(fs_devices);
        }

        return ret;
}
void btrfs_close_all_devices(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
                                        list);
                btrfs_close_devices(fs_devices);
        }
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
        int fd;
        struct btrfs_device *device;
        int ret;

        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (!device->name) {
                        printk("no name for device %llu, skip it now\n", device->devid);
                        continue;
                }

                fd = open(device->name, flags);
                if (fd < 0) {
                        ret = -errno;
                        error("cannot open device '%s': %m", device->name);
                        goto fail;
                }

                if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
                        fprintf(stderr, "Warning, could not drop caches\n");

                if (device->devid == fs_devices->latest_devid)
                        fs_devices->latest_bdev = fd;
                if (device->devid == fs_devices->lowest_devid)
                        fs_devices->lowest_bdev = fd;
                device->fd = fd;
                if (flags & O_RDWR)
                        device->writeable = 1;
        }
        return 0;
fail:
        btrfs_close_devices(fs_devices);
        return ret;
}
int btrfs_scan_one_device(int fd, const char *path,
                          struct btrfs_fs_devices **fs_devices_ret,
                          u64 *total_devs, u64 super_offset, unsigned sbflags)
{
        struct btrfs_super_block *disk_super;
        char buf[BTRFS_SUPER_INFO_SIZE];
        int ret;
        u64 devid;

        disk_super = (struct btrfs_super_block *)buf;
        ret = btrfs_read_dev_super(fd, disk_super, super_offset, sbflags);
        if (ret < 0)
                return -EIO;
        devid = btrfs_stack_device_id(&disk_super->dev_item);
        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
                *total_devs = 1;
        else
                *total_devs = btrfs_super_num_devices(disk_super);

        ret = device_list_add(path, disk_super, devid, fs_devices_ret);

        return ret;
}
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:       the device which we search the free space in
 * @num_bytes:    the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:        store the start of the free space.
 * @len:          the size of the free space that we find, or the size
 *                of the max free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
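/*
 * For example, asking for 1GiB on a device whose largest remaining hole is
 * only 512MiB fails with -ENOSPC, while *start and *len still describe that
 * 512MiB hole so the caller can see the best it could have gotten.
 */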
311 static int find_free_dev_extent_start(struct btrfs_device
*device
,
312 u64 num_bytes
, u64 search_start
,
313 u64
*start
, u64
*len
)
315 struct btrfs_key key
;
316 struct btrfs_root
*root
= device
->dev_root
;
317 struct btrfs_dev_extent
*dev_extent
;
318 struct btrfs_path
*path
;
323 u64 search_end
= device
->total_bytes
;
326 struct extent_buffer
*l
;
327 u64 min_search_start
;
        /*
         * We don't want to overwrite the superblock on the drive nor any area
         * used by the boot loader (grub for example), so we make sure to start
         * at an offset of at least 1MB.
         */
334 min_search_start
= max(root
->fs_info
->alloc_start
, (u64
)SZ_1M
);
335 search_start
= max(search_start
, min_search_start
);
337 path
= btrfs_alloc_path();
341 max_hole_start
= search_start
;
344 if (search_start
>= search_end
) {
351 key
.objectid
= device
->devid
;
352 key
.offset
= search_start
;
353 key
.type
= BTRFS_DEV_EXTENT_KEY
;
355 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
359 ret
= btrfs_previous_item(root
, path
, key
.objectid
, key
.type
);
366 slot
= path
->slots
[0];
367 if (slot
>= btrfs_header_nritems(l
)) {
368 ret
= btrfs_next_leaf(root
, path
);
376 btrfs_item_key_to_cpu(l
, &key
, slot
);
378 if (key
.objectid
< device
->devid
)
381 if (key
.objectid
> device
->devid
)
384 if (key
.type
!= BTRFS_DEV_EXTENT_KEY
)
387 if (key
.offset
> search_start
) {
388 hole_size
= key
.offset
- search_start
;
                        /*
                         * Have to check before we set max_hole_start, otherwise
                         * we could end up sending back this offset anyway.
                         */
394 if (hole_size
> max_hole_size
) {
395 max_hole_start
= search_start
;
396 max_hole_size
= hole_size
;
                        /*
                         * If this free space is greater than what we need,
                         * it must be the max free space that we have found
                         * until now, so max_hole_start must point to the start
                         * of this free space and the length of this free space
                         * is stored in max_hole_size.  Thus, we return
                         * max_hole_start and max_hole_size and go back to the
                         * caller.
                         */
408 if (hole_size
>= num_bytes
) {
414 dev_extent
= btrfs_item_ptr(l
, slot
, struct btrfs_dev_extent
);
415 extent_end
= key
.offset
+ btrfs_dev_extent_length(l
,
417 if (extent_end
> search_start
)
418 search_start
= extent_end
;
        /*
         * At this point, search_start should be the end of allocated dev
         * extents, and when shrinking the device, search_end may be smaller
         * than search_start.
         */
429 if (search_end
> search_start
) {
430 hole_size
= search_end
- search_start
;
432 if (hole_size
> max_hole_size
) {
433 max_hole_start
= search_start
;
434 max_hole_size
= hole_size
;
439 if (max_hole_size
< num_bytes
)
445 btrfs_free_path(path
);
446 *start
= max_hole_start
;
448 *len
= max_hole_size
;
452 static int find_free_dev_extent(struct btrfs_device
*device
, u64 num_bytes
,
455 /* FIXME use last free of some kind */
456 return find_free_dev_extent_start(device
, num_bytes
, 0, start
, NULL
);
459 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle
*trans
,
460 struct btrfs_device
*device
,
461 u64 chunk_offset
, u64 num_bytes
, u64
*start
,
465 struct btrfs_path
*path
;
466 struct btrfs_root
*root
= device
->dev_root
;
467 struct btrfs_dev_extent
*extent
;
468 struct extent_buffer
*leaf
;
469 struct btrfs_key key
;
471 path
= btrfs_alloc_path();
        /*
         * For the convert case, just skip searching for a free dev_extent,
         * as the caller is responsible for making sure it's free.
         */
480 ret
= find_free_dev_extent(device
, num_bytes
, start
);
485 key
.objectid
= device
->devid
;
487 key
.type
= BTRFS_DEV_EXTENT_KEY
;
488 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
492 leaf
= path
->nodes
[0];
493 extent
= btrfs_item_ptr(leaf
, path
->slots
[0],
494 struct btrfs_dev_extent
);
495 btrfs_set_dev_extent_chunk_tree(leaf
, extent
, BTRFS_CHUNK_TREE_OBJECTID
);
496 btrfs_set_dev_extent_chunk_objectid(leaf
, extent
,
497 BTRFS_FIRST_CHUNK_TREE_OBJECTID
);
498 btrfs_set_dev_extent_chunk_offset(leaf
, extent
, chunk_offset
);
500 write_extent_buffer(leaf
, root
->fs_info
->chunk_tree_uuid
,
501 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent
),
504 btrfs_set_dev_extent_length(leaf
, extent
, num_bytes
);
505 btrfs_mark_buffer_dirty(leaf
);
507 btrfs_free_path(path
);
511 static int find_next_chunk(struct btrfs_fs_info
*fs_info
, u64
*offset
)
513 struct btrfs_root
*root
= fs_info
->chunk_root
;
514 struct btrfs_path
*path
;
516 struct btrfs_key key
;
517 struct btrfs_chunk
*chunk
;
518 struct btrfs_key found_key
;
520 path
= btrfs_alloc_path();
524 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
525 key
.offset
= (u64
)-1;
526 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
528 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
534 ret
= btrfs_previous_item(root
, path
, 0, BTRFS_CHUNK_ITEM_KEY
);
538 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
540 if (found_key
.objectid
!= BTRFS_FIRST_CHUNK_TREE_OBJECTID
)
543 chunk
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
545 *offset
= found_key
.offset
+
546 btrfs_chunk_length(path
->nodes
[0], chunk
);
551 btrfs_free_path(path
);
555 static int find_next_devid(struct btrfs_root
*root
, struct btrfs_path
*path
,
559 struct btrfs_key key
;
560 struct btrfs_key found_key
;
562 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
563 key
.type
= BTRFS_DEV_ITEM_KEY
;
564 key
.offset
= (u64
)-1;
566 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
572 ret
= btrfs_previous_item(root
, path
, BTRFS_DEV_ITEMS_OBJECTID
,
577 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
579 *objectid
= found_key
.offset
+ 1;
583 btrfs_release_path(path
);
/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
591 int btrfs_add_device(struct btrfs_trans_handle
*trans
,
592 struct btrfs_fs_info
*fs_info
,
593 struct btrfs_device
*device
)
596 struct btrfs_path
*path
;
597 struct btrfs_dev_item
*dev_item
;
598 struct extent_buffer
*leaf
;
599 struct btrfs_key key
;
600 struct btrfs_root
*root
= fs_info
->chunk_root
;
604 path
= btrfs_alloc_path();
608 ret
= find_next_devid(root
, path
, &free_devid
);
612 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
613 key
.type
= BTRFS_DEV_ITEM_KEY
;
614 key
.offset
= free_devid
;
616 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
621 leaf
= path
->nodes
[0];
622 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_dev_item
);
624 device
->devid
= free_devid
;
625 btrfs_set_device_id(leaf
, dev_item
, device
->devid
);
626 btrfs_set_device_generation(leaf
, dev_item
, 0);
627 btrfs_set_device_type(leaf
, dev_item
, device
->type
);
628 btrfs_set_device_io_align(leaf
, dev_item
, device
->io_align
);
629 btrfs_set_device_io_width(leaf
, dev_item
, device
->io_width
);
630 btrfs_set_device_sector_size(leaf
, dev_item
, device
->sector_size
);
631 btrfs_set_device_total_bytes(leaf
, dev_item
, device
->total_bytes
);
632 btrfs_set_device_bytes_used(leaf
, dev_item
, device
->bytes_used
);
633 btrfs_set_device_group(leaf
, dev_item
, 0);
634 btrfs_set_device_seek_speed(leaf
, dev_item
, 0);
635 btrfs_set_device_bandwidth(leaf
, dev_item
, 0);
636 btrfs_set_device_start_offset(leaf
, dev_item
, 0);
638 ptr
= (unsigned long)btrfs_device_uuid(dev_item
);
639 write_extent_buffer(leaf
, device
->uuid
, ptr
, BTRFS_UUID_SIZE
);
640 ptr
= (unsigned long)btrfs_device_fsid(dev_item
);
641 write_extent_buffer(leaf
, fs_info
->fsid
, ptr
, BTRFS_UUID_SIZE
);
642 btrfs_mark_buffer_dirty(leaf
);
646 btrfs_free_path(path
);
650 int btrfs_update_device(struct btrfs_trans_handle
*trans
,
651 struct btrfs_device
*device
)
654 struct btrfs_path
*path
;
655 struct btrfs_root
*root
;
656 struct btrfs_dev_item
*dev_item
;
657 struct extent_buffer
*leaf
;
658 struct btrfs_key key
;
660 root
= device
->dev_root
->fs_info
->chunk_root
;
662 path
= btrfs_alloc_path();
666 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
667 key
.type
= BTRFS_DEV_ITEM_KEY
;
668 key
.offset
= device
->devid
;
670 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
679 leaf
= path
->nodes
[0];
680 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_dev_item
);
682 btrfs_set_device_id(leaf
, dev_item
, device
->devid
);
683 btrfs_set_device_type(leaf
, dev_item
, device
->type
);
684 btrfs_set_device_io_align(leaf
, dev_item
, device
->io_align
);
685 btrfs_set_device_io_width(leaf
, dev_item
, device
->io_width
);
686 btrfs_set_device_sector_size(leaf
, dev_item
, device
->sector_size
);
687 btrfs_set_device_total_bytes(leaf
, dev_item
, device
->total_bytes
);
688 btrfs_set_device_bytes_used(leaf
, dev_item
, device
->bytes_used
);
689 btrfs_mark_buffer_dirty(leaf
);
692 btrfs_free_path(path
);
696 int btrfs_add_system_chunk(struct btrfs_fs_info
*fs_info
, struct btrfs_key
*key
,
697 struct btrfs_chunk
*chunk
, int item_size
)
699 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
700 struct btrfs_disk_key disk_key
;
704 array_size
= btrfs_super_sys_array_size(super_copy
);
705 if (array_size
+ item_size
+ sizeof(disk_key
)
706 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE
)
709 ptr
= super_copy
->sys_chunk_array
+ array_size
;
710 btrfs_cpu_key_to_disk(&disk_key
, key
);
711 memcpy(ptr
, &disk_key
, sizeof(disk_key
));
712 ptr
+= sizeof(disk_key
);
713 memcpy(ptr
, chunk
, item_size
);
714 item_size
+= sizeof(disk_key
);
715 btrfs_set_super_sys_array_size(super_copy
, array_size
+ item_size
);
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
                               int sub_stripes)
{
        if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
                return calc_size;
        else if (type & BTRFS_BLOCK_GROUP_RAID10)
                return calc_size * (num_stripes / sub_stripes);
        else if (type & BTRFS_BLOCK_GROUP_RAID5)
                return calc_size * (num_stripes - 1);
        else if (type & BTRFS_BLOCK_GROUP_RAID6)
                return calc_size * (num_stripes - 2);
        else
                return calc_size * num_stripes;
}
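/*
 * For example, with calc_size == 8MiB per stripe: a 4-stripe RAID10 chunk
 * (sub_stripes == 2) provides 8MiB * (4 / 2) = 16MiB of usable space, a
 * 4-stripe RAID5 chunk provides 8MiB * 3 = 24MiB, and a 4-stripe RAID6
 * chunk provides 8MiB * 2 = 16MiB.
 */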
static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
        /* TODO, add a way to store the preferred stripe size */
        return BTRFS_STRIPE_LEN;
}
/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in the first 1M of the device, and we are not
 * allowed to allocate any chunk before alloc_start if it is specified.
 * So search holes from max(1M, alloc_start) to device->total_bytes.
 */
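/*
 * For example, on a 10GiB device whose only dev extent is 1GiB long and
 * starts at 2GiB (and with the default alloc_start of 0), the available
 * bytes are (2GiB - 1MiB) + (10GiB - 3GiB), i.e. roughly 9GiB, since the
 * first 1MiB stays reserved.
 */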
749 static int btrfs_device_avail_bytes(struct btrfs_trans_handle
*trans
,
750 struct btrfs_device
*device
,
753 struct btrfs_path
*path
;
754 struct btrfs_root
*root
= device
->dev_root
;
755 struct btrfs_key key
;
756 struct btrfs_dev_extent
*dev_extent
= NULL
;
757 struct extent_buffer
*l
;
758 u64 search_start
= root
->fs_info
->alloc_start
;
759 u64 search_end
= device
->total_bytes
;
765 search_start
= max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER
, search_start
);
767 path
= btrfs_alloc_path();
771 key
.objectid
= device
->devid
;
772 key
.offset
= root
->fs_info
->alloc_start
;
773 key
.type
= BTRFS_DEV_EXTENT_KEY
;
776 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 0);
779 ret
= btrfs_previous_item(root
, path
, 0, key
.type
);
785 slot
= path
->slots
[0];
786 if (slot
>= btrfs_header_nritems(l
)) {
787 ret
= btrfs_next_leaf(root
, path
);
794 btrfs_item_key_to_cpu(l
, &key
, slot
);
796 if (key
.objectid
< device
->devid
)
798 if (key
.objectid
> device
->devid
)
800 if (key
.type
!= BTRFS_DEV_EXTENT_KEY
)
802 if (key
.offset
> search_end
)
804 if (key
.offset
> search_start
)
805 free_bytes
+= key
.offset
- search_start
;
807 dev_extent
= btrfs_item_ptr(l
, slot
, struct btrfs_dev_extent
);
808 extent_end
= key
.offset
+ btrfs_dev_extent_length(l
,
810 if (extent_end
> search_start
)
811 search_start
= extent_end
;
812 if (search_start
> search_end
)
819 if (search_start
< search_end
)
820 free_bytes
+= search_end
- search_start
;
822 *avail_bytes
= free_bytes
;
825 btrfs_free_path(path
);
#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info)           \
                        - sizeof(struct btrfs_item)                     \
                        - sizeof(struct btrfs_chunk))                   \
                        / sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE        \
                                - 2 * sizeof(struct btrfs_disk_key)     \
                                - 2 * sizeof(struct btrfs_chunk))       \
                                / sizeof(struct btrfs_stripe) + 1)
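/*
 * Rough worked example, assuming the usual on-disk sizes (disk key 17 bytes,
 * chunk header 48 bytes, stripe 32 bytes) and a 2KiB system chunk array:
 * BTRFS_MAX_DEVS_SYS_CHUNK comes out to about
 * (2048 - 2*17 - 2*48) / 32 + 1 = 60 stripes per system chunk.
 */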
839 int btrfs_alloc_chunk(struct btrfs_trans_handle
*trans
,
840 struct btrfs_fs_info
*info
, u64
*start
,
841 u64
*num_bytes
, u64 type
)
844 struct btrfs_root
*extent_root
= info
->extent_root
;
845 struct btrfs_root
*chunk_root
= info
->chunk_root
;
846 struct btrfs_stripe
*stripes
;
847 struct btrfs_device
*device
= NULL
;
848 struct btrfs_chunk
*chunk
;
849 struct list_head private_devs
;
850 struct list_head
*dev_list
= &info
->fs_devices
->devices
;
851 struct list_head
*cur
;
852 struct map_lookup
*map
;
853 int min_stripe_size
= SZ_1M
;
854 u64 calc_size
= SZ_8M
;
856 u64 max_chunk_size
= 4 * calc_size
;
867 int stripe_len
= BTRFS_STRIPE_LEN
;
868 struct btrfs_key key
;
871 if (list_empty(dev_list
)) {
875 if (type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
) {
876 if (type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
878 max_chunk_size
= calc_size
* 2;
879 min_stripe_size
= SZ_1M
;
880 max_stripes
= BTRFS_MAX_DEVS_SYS_CHUNK
;
881 } else if (type
& BTRFS_BLOCK_GROUP_DATA
) {
883 max_chunk_size
= 10 * calc_size
;
884 min_stripe_size
= SZ_64M
;
885 max_stripes
= BTRFS_MAX_DEVS(chunk_root
);
886 } else if (type
& BTRFS_BLOCK_GROUP_METADATA
) {
888 max_chunk_size
= 4 * calc_size
;
889 min_stripe_size
= SZ_32M
;
890 max_stripes
= BTRFS_MAX_DEVS(chunk_root
);
893 if (type
& BTRFS_BLOCK_GROUP_RAID1
) {
894 num_stripes
= min_t(u64
, 2,
895 btrfs_super_num_devices(info
->super_copy
));
900 if (type
& BTRFS_BLOCK_GROUP_DUP
) {
904 if (type
& (BTRFS_BLOCK_GROUP_RAID0
)) {
905 num_stripes
= btrfs_super_num_devices(info
->super_copy
);
906 if (num_stripes
> max_stripes
)
907 num_stripes
= max_stripes
;
910 if (type
& (BTRFS_BLOCK_GROUP_RAID10
)) {
911 num_stripes
= btrfs_super_num_devices(info
->super_copy
);
912 if (num_stripes
> max_stripes
)
913 num_stripes
= max_stripes
;
916 num_stripes
&= ~(u32
)1;
920 if (type
& (BTRFS_BLOCK_GROUP_RAID5
)) {
921 num_stripes
= btrfs_super_num_devices(info
->super_copy
);
922 if (num_stripes
> max_stripes
)
923 num_stripes
= max_stripes
;
927 stripe_len
= find_raid56_stripe_len(num_stripes
- 1,
928 btrfs_super_stripesize(info
->super_copy
));
930 if (type
& (BTRFS_BLOCK_GROUP_RAID6
)) {
931 num_stripes
= btrfs_super_num_devices(info
->super_copy
);
932 if (num_stripes
> max_stripes
)
933 num_stripes
= max_stripes
;
937 stripe_len
= find_raid56_stripe_len(num_stripes
- 2,
938 btrfs_super_stripesize(info
->super_copy
));
941 /* we don't want a chunk larger than 10% of the FS */
942 percent_max
= div_factor(btrfs_super_total_bytes(info
->super_copy
), 1);
943 max_chunk_size
= min(percent_max
, max_chunk_size
);
946 if (chunk_bytes_by_type(type
, calc_size
, num_stripes
, sub_stripes
) >
948 calc_size
= max_chunk_size
;
949 calc_size
/= num_stripes
;
950 calc_size
/= stripe_len
;
951 calc_size
*= stripe_len
;
953 /* we don't want tiny stripes */
954 calc_size
= max_t(u64
, calc_size
, min_stripe_size
);
956 calc_size
/= stripe_len
;
957 calc_size
*= stripe_len
;
958 INIT_LIST_HEAD(&private_devs
);
959 cur
= dev_list
->next
;
962 if (type
& BTRFS_BLOCK_GROUP_DUP
)
963 min_free
= calc_size
* 2;
965 min_free
= calc_size
;
967 /* build a private list of devices we will allocate from */
968 while(index
< num_stripes
) {
969 device
= list_entry(cur
, struct btrfs_device
, dev_list
);
970 ret
= btrfs_device_avail_bytes(trans
, device
, &avail
);
974 if (avail
>= min_free
) {
975 list_move_tail(&device
->dev_list
, &private_devs
);
977 if (type
& BTRFS_BLOCK_GROUP_DUP
)
979 } else if (avail
> max_avail
)
984 if (index
< num_stripes
) {
985 list_splice(&private_devs
, dev_list
);
986 if (index
>= min_stripes
) {
988 if (type
& (BTRFS_BLOCK_GROUP_RAID10
)) {
989 num_stripes
/= sub_stripes
;
990 num_stripes
*= sub_stripes
;
995 if (!looped
&& max_avail
> 0) {
997 calc_size
= max_avail
;
1002 ret
= find_next_chunk(info
, &offset
);
1005 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
1006 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
1007 key
.offset
= offset
;
1009 chunk
= kmalloc(btrfs_chunk_item_size(num_stripes
), GFP_NOFS
);
1013 map
= kmalloc(btrfs_map_lookup_size(num_stripes
), GFP_NOFS
);
1019 stripes
= &chunk
->stripe
;
1020 *num_bytes
= chunk_bytes_by_type(type
, calc_size
,
1021 num_stripes
, sub_stripes
);
1023 while(index
< num_stripes
) {
1024 struct btrfs_stripe
*stripe
;
1025 BUG_ON(list_empty(&private_devs
));
1026 cur
= private_devs
.next
;
1027 device
= list_entry(cur
, struct btrfs_device
, dev_list
);
1029 /* loop over this device again if we're doing a dup group */
1030 if (!(type
& BTRFS_BLOCK_GROUP_DUP
) ||
1031 (index
== num_stripes
- 1))
1032 list_move_tail(&device
->dev_list
, dev_list
);
1034 ret
= btrfs_alloc_dev_extent(trans
, device
, key
.offset
,
1035 calc_size
, &dev_offset
, 0);
1039 device
->bytes_used
+= calc_size
;
1040 ret
= btrfs_update_device(trans
, device
);
1044 map
->stripes
[index
].dev
= device
;
1045 map
->stripes
[index
].physical
= dev_offset
;
1046 stripe
= stripes
+ index
;
1047 btrfs_set_stack_stripe_devid(stripe
, device
->devid
);
1048 btrfs_set_stack_stripe_offset(stripe
, dev_offset
);
1049 memcpy(stripe
->dev_uuid
, device
->uuid
, BTRFS_UUID_SIZE
);
1052 BUG_ON(!list_empty(&private_devs
));
1054 /* key was set above */
1055 btrfs_set_stack_chunk_length(chunk
, *num_bytes
);
1056 btrfs_set_stack_chunk_owner(chunk
, extent_root
->root_key
.objectid
);
1057 btrfs_set_stack_chunk_stripe_len(chunk
, stripe_len
);
1058 btrfs_set_stack_chunk_type(chunk
, type
);
1059 btrfs_set_stack_chunk_num_stripes(chunk
, num_stripes
);
1060 btrfs_set_stack_chunk_io_align(chunk
, stripe_len
);
1061 btrfs_set_stack_chunk_io_width(chunk
, stripe_len
);
1062 btrfs_set_stack_chunk_sector_size(chunk
, info
->sectorsize
);
1063 btrfs_set_stack_chunk_sub_stripes(chunk
, sub_stripes
);
1064 map
->sector_size
= info
->sectorsize
;
1065 map
->stripe_len
= stripe_len
;
1066 map
->io_align
= stripe_len
;
1067 map
->io_width
= stripe_len
;
1069 map
->num_stripes
= num_stripes
;
1070 map
->sub_stripes
= sub_stripes
;
1072 ret
= btrfs_insert_item(trans
, chunk_root
, &key
, chunk
,
1073 btrfs_chunk_item_size(num_stripes
));
1075 *start
= key
.offset
;;
1077 map
->ce
.start
= key
.offset
;
1078 map
->ce
.size
= *num_bytes
;
1080 ret
= insert_cache_extent(&info
->mapping_tree
.cache_tree
, &map
->ce
);
1084 if (type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
1085 ret
= btrfs_add_system_chunk(info
, &key
,
1086 chunk
, btrfs_chunk_item_size(num_stripes
));
/*
 * Alloc a DATA chunk with SINGLE profile.
 *
 * If 'convert' is set, it will alloc a chunk with 1:1 mapping
 * (btrfs logical bytenr == on-disk bytenr)
 * For that case, the caller must make sure the chunk and dev_extent are not
 * occupied.
 */
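/*
 * With 'convert' set, the caller passes in *start and the chunk is placed so
 * that the logical address equals the physical device offset: e.g. a chunk
 * requested at logical 1GiB is backed by a dev extent at 1GiB on the single
 * underlying device.
 */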
1109 int btrfs_alloc_data_chunk(struct btrfs_trans_handle
*trans
,
1110 struct btrfs_fs_info
*info
, u64
*start
,
1111 u64 num_bytes
, u64 type
, int convert
)
1114 struct btrfs_root
*extent_root
= info
->extent_root
;
1115 struct btrfs_root
*chunk_root
= info
->chunk_root
;
1116 struct btrfs_stripe
*stripes
;
1117 struct btrfs_device
*device
= NULL
;
1118 struct btrfs_chunk
*chunk
;
1119 struct list_head
*dev_list
= &info
->fs_devices
->devices
;
1120 struct list_head
*cur
;
1121 struct map_lookup
*map
;
1122 u64 calc_size
= SZ_8M
;
1123 int num_stripes
= 1;
1124 int sub_stripes
= 0;
1127 int stripe_len
= BTRFS_STRIPE_LEN
;
1128 struct btrfs_key key
;
1130 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
1131 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
1133 if (*start
!= round_down(*start
, info
->sectorsize
)) {
1134 error("DATA chunk start not sectorsize aligned: %llu",
1135 (unsigned long long)*start
);
1138 key
.offset
= *start
;
1139 dev_offset
= *start
;
1143 ret
= find_next_chunk(info
, &tmp
);
1149 chunk
= kmalloc(btrfs_chunk_item_size(num_stripes
), GFP_NOFS
);
1153 map
= kmalloc(btrfs_map_lookup_size(num_stripes
), GFP_NOFS
);
1159 stripes
= &chunk
->stripe
;
1160 calc_size
= num_bytes
;
1163 cur
= dev_list
->next
;
1164 device
= list_entry(cur
, struct btrfs_device
, dev_list
);
1166 while (index
< num_stripes
) {
1167 struct btrfs_stripe
*stripe
;
1169 ret
= btrfs_alloc_dev_extent(trans
, device
, key
.offset
,
1170 calc_size
, &dev_offset
, convert
);
1173 device
->bytes_used
+= calc_size
;
1174 ret
= btrfs_update_device(trans
, device
);
1177 map
->stripes
[index
].dev
= device
;
1178 map
->stripes
[index
].physical
= dev_offset
;
1179 stripe
= stripes
+ index
;
1180 btrfs_set_stack_stripe_devid(stripe
, device
->devid
);
1181 btrfs_set_stack_stripe_offset(stripe
, dev_offset
);
1182 memcpy(stripe
->dev_uuid
, device
->uuid
, BTRFS_UUID_SIZE
);
1186 /* key was set above */
1187 btrfs_set_stack_chunk_length(chunk
, num_bytes
);
1188 btrfs_set_stack_chunk_owner(chunk
, extent_root
->root_key
.objectid
);
1189 btrfs_set_stack_chunk_stripe_len(chunk
, stripe_len
);
1190 btrfs_set_stack_chunk_type(chunk
, type
);
1191 btrfs_set_stack_chunk_num_stripes(chunk
, num_stripes
);
1192 btrfs_set_stack_chunk_io_align(chunk
, stripe_len
);
1193 btrfs_set_stack_chunk_io_width(chunk
, stripe_len
);
1194 btrfs_set_stack_chunk_sector_size(chunk
, info
->sectorsize
);
1195 btrfs_set_stack_chunk_sub_stripes(chunk
, sub_stripes
);
1196 map
->sector_size
= info
->sectorsize
;
1197 map
->stripe_len
= stripe_len
;
1198 map
->io_align
= stripe_len
;
1199 map
->io_width
= stripe_len
;
1201 map
->num_stripes
= num_stripes
;
1202 map
->sub_stripes
= sub_stripes
;
1204 ret
= btrfs_insert_item(trans
, chunk_root
, &key
, chunk
,
1205 btrfs_chunk_item_size(num_stripes
));
1208 *start
= key
.offset
;
1210 map
->ce
.start
= key
.offset
;
1211 map
->ce
.size
= num_bytes
;
1213 ret
= insert_cache_extent(&info
->mapping_tree
.cache_tree
, &map
->ce
);
1220 int btrfs_num_copies(struct btrfs_fs_info
*fs_info
, u64 logical
, u64 len
)
1222 struct btrfs_mapping_tree
*map_tree
= &fs_info
->mapping_tree
;
1223 struct cache_extent
*ce
;
1224 struct map_lookup
*map
;
1227 ce
= search_cache_extent(&map_tree
->cache_tree
, logical
);
1229 fprintf(stderr
, "No mapping for %llu-%llu\n",
1230 (unsigned long long)logical
,
1231 (unsigned long long)logical
+len
);
1234 if (ce
->start
> logical
|| ce
->start
+ ce
->size
< logical
) {
1235 fprintf(stderr
, "Invalid mapping for %llu-%llu, got "
1236 "%llu-%llu\n", (unsigned long long)logical
,
1237 (unsigned long long)logical
+len
,
1238 (unsigned long long)ce
->start
,
1239 (unsigned long long)ce
->start
+ ce
->size
);
1242 map
= container_of(ce
, struct map_lookup
, ce
);
1244 if (map
->type
& (BTRFS_BLOCK_GROUP_DUP
| BTRFS_BLOCK_GROUP_RAID1
))
1245 ret
= map
->num_stripes
;
1246 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
)
1247 ret
= map
->sub_stripes
;
1248 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID5
)
1250 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID6
)
1257 int btrfs_next_bg(struct btrfs_fs_info
*fs_info
, u64
*logical
,
1258 u64
*size
, u64 type
)
1260 struct btrfs_mapping_tree
*map_tree
= &fs_info
->mapping_tree
;
1261 struct cache_extent
*ce
;
1262 struct map_lookup
*map
;
1265 ce
= search_cache_extent(&map_tree
->cache_tree
, cur
);
        /*
         * Only jump to the next bg if our cur is not 0.
         * As the initial logical for btrfs_next_bg() is 0, if we jumped to
         * the next bg here we would skip a valid bg.
         */
1274 ce
= next_cache_extent(ce
);
1280 map
= container_of(ce
, struct map_lookup
, ce
);
1281 if (map
->type
& type
) {
1282 *logical
= ce
->start
;
1287 ce
= next_cache_extent(ce
);
1293 int btrfs_rmap_block(struct btrfs_fs_info
*fs_info
,
1294 u64 chunk_start
, u64 physical
, u64 devid
,
1295 u64
**logical
, int *naddrs
, int *stripe_len
)
1297 struct btrfs_mapping_tree
*map_tree
= &fs_info
->mapping_tree
;
1298 struct cache_extent
*ce
;
1299 struct map_lookup
*map
;
1307 ce
= search_cache_extent(&map_tree
->cache_tree
, chunk_start
);
1309 map
= container_of(ce
, struct map_lookup
, ce
);
1312 rmap_len
= map
->stripe_len
;
1313 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
)
1314 length
= ce
->size
/ (map
->num_stripes
/ map
->sub_stripes
);
1315 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
)
1316 length
= ce
->size
/ map
->num_stripes
;
1317 else if (map
->type
& (BTRFS_BLOCK_GROUP_RAID5
|
1318 BTRFS_BLOCK_GROUP_RAID6
)) {
1319 length
= ce
->size
/ nr_data_stripes(map
);
1320 rmap_len
= map
->stripe_len
* nr_data_stripes(map
);
1323 buf
= kzalloc(sizeof(u64
) * map
->num_stripes
, GFP_NOFS
);
1325 for (i
= 0; i
< map
->num_stripes
; i
++) {
1326 if (devid
&& map
->stripes
[i
].dev
->devid
!= devid
)
1328 if (map
->stripes
[i
].physical
> physical
||
1329 map
->stripes
[i
].physical
+ length
<= physical
)
1332 stripe_nr
= (physical
- map
->stripes
[i
].physical
) /
1335 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
) {
1336 stripe_nr
= (stripe_nr
* map
->num_stripes
+ i
) /
1338 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
) {
1339 stripe_nr
= stripe_nr
* map
->num_stripes
+ i
;
1340 } /* else if RAID[56], multiply by nr_data_stripes().
1341 * Alternatively, just use rmap_len below instead of
1342 * map->stripe_len */
1344 bytenr
= ce
->start
+ stripe_nr
* rmap_len
;
1345 for (j
= 0; j
< nr
; j
++) {
1346 if (buf
[j
] == bytenr
)
1355 *stripe_len
= rmap_len
;
static inline int parity_smaller(u64 a, u64 b)
{
        return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
        struct btrfs_bio_stripe s;
        int i;
        u64 l;
        int again = 1;

        while (again) {
                again = 0;
                for (i = 0; i < bbio->num_stripes - 1; i++) {
                        if (parity_smaller(raid_map[i], raid_map[i+1])) {
                                s = bbio->stripes[i];
                                l = raid_map[i];
                                bbio->stripes[i] = bbio->stripes[i+1];
                                raid_map[i] = raid_map[i+1];
                                bbio->stripes[i+1] = s;
                                raid_map[i+1] = l;
                                again = 1;
                        }
                }
        }
}
1389 int btrfs_map_block(struct btrfs_fs_info
*fs_info
, int rw
,
1390 u64 logical
, u64
*length
,
1391 struct btrfs_multi_bio
**multi_ret
, int mirror_num
,
1394 return __btrfs_map_block(fs_info
, rw
, logical
, length
, NULL
,
1395 multi_ret
, mirror_num
, raid_map_ret
);
1398 int __btrfs_map_block(struct btrfs_fs_info
*fs_info
, int rw
,
1399 u64 logical
, u64
*length
, u64
*type
,
1400 struct btrfs_multi_bio
**multi_ret
, int mirror_num
,
1403 struct btrfs_mapping_tree
*map_tree
= &fs_info
->mapping_tree
;
1404 struct cache_extent
*ce
;
1405 struct map_lookup
*map
;
1409 u64
*raid_map
= NULL
;
1410 int stripes_allocated
= 8;
1411 int stripes_required
= 1;
1414 struct btrfs_multi_bio
*multi
= NULL
;
1416 if (multi_ret
&& rw
== READ
) {
1417 stripes_allocated
= 1;
1420 ce
= search_cache_extent(&map_tree
->cache_tree
, logical
);
1426 if (ce
->start
> logical
) {
1428 *length
= ce
->start
- logical
;
1433 multi
= kzalloc(btrfs_multi_bio_size(stripes_allocated
),
1438 map
= container_of(ce
, struct map_lookup
, ce
);
1439 offset
= logical
- ce
->start
;
1442 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID1
|
1443 BTRFS_BLOCK_GROUP_DUP
)) {
1444 stripes_required
= map
->num_stripes
;
1445 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
) {
1446 stripes_required
= map
->sub_stripes
;
1449 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID5
| BTRFS_BLOCK_GROUP_RAID6
)
1450 && multi_ret
&& ((rw
& WRITE
) || mirror_num
> 1) && raid_map_ret
) {
1451 /* RAID[56] write or recovery. Return all stripes */
1452 stripes_required
= map
->num_stripes
;
1454 /* Only allocate the map if we've already got a large enough multi_ret */
1455 if (stripes_allocated
>= stripes_required
) {
1456 raid_map
= kmalloc(sizeof(u64
) * map
->num_stripes
, GFP_NOFS
);
1464 /* if our multi bio struct is too small, back off and try again */
1465 if (multi_ret
&& stripes_allocated
< stripes_required
) {
1466 stripes_allocated
= stripes_required
;
1473 * stripe_nr counts the total number of stripes we have to stride
1474 * to get to this block
1476 stripe_nr
= stripe_nr
/ map
->stripe_len
;
1478 stripe_offset
= stripe_nr
* map
->stripe_len
;
1479 BUG_ON(offset
< stripe_offset
);
1481 /* stripe_offset is the offset of this block in its stripe*/
1482 stripe_offset
= offset
- stripe_offset
;
1484 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID0
| BTRFS_BLOCK_GROUP_RAID1
|
1485 BTRFS_BLOCK_GROUP_RAID5
| BTRFS_BLOCK_GROUP_RAID6
|
1486 BTRFS_BLOCK_GROUP_RAID10
|
1487 BTRFS_BLOCK_GROUP_DUP
)) {
1488 /* we limit the length of each bio to what fits in a stripe */
1489 *length
= min_t(u64
, ce
->size
- offset
,
1490 map
->stripe_len
- stripe_offset
);
1492 *length
= ce
->size
- offset
;
1498 multi
->num_stripes
= 1;
1500 if (map
->type
& BTRFS_BLOCK_GROUP_RAID1
) {
1502 multi
->num_stripes
= map
->num_stripes
;
1503 else if (mirror_num
)
1504 stripe_index
= mirror_num
- 1;
1506 stripe_index
= stripe_nr
% map
->num_stripes
;
1507 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
) {
1508 int factor
= map
->num_stripes
/ map
->sub_stripes
;
1510 stripe_index
= stripe_nr
% factor
;
1511 stripe_index
*= map
->sub_stripes
;
1514 multi
->num_stripes
= map
->sub_stripes
;
1515 else if (mirror_num
)
1516 stripe_index
+= mirror_num
- 1;
1518 stripe_nr
= stripe_nr
/ factor
;
1519 } else if (map
->type
& BTRFS_BLOCK_GROUP_DUP
) {
1521 multi
->num_stripes
= map
->num_stripes
;
1522 else if (mirror_num
)
1523 stripe_index
= mirror_num
- 1;
1524 } else if (map
->type
& (BTRFS_BLOCK_GROUP_RAID5
|
1525 BTRFS_BLOCK_GROUP_RAID6
)) {
1530 u64 raid56_full_stripe_start
;
1531 u64 full_stripe_len
= nr_data_stripes(map
) * map
->stripe_len
;
1534 * align the start of our data stripe in the logical
1537 raid56_full_stripe_start
= offset
/ full_stripe_len
;
1538 raid56_full_stripe_start
*= full_stripe_len
;
1540 /* get the data stripe number */
1541 stripe_nr
= raid56_full_stripe_start
/ map
->stripe_len
;
1542 stripe_nr
= stripe_nr
/ nr_data_stripes(map
);
1544 /* Work out the disk rotation on this stripe-set */
1545 rot
= stripe_nr
% map
->num_stripes
;
1547 /* Fill in the logical address of each stripe */
1548 tmp
= stripe_nr
* nr_data_stripes(map
);
1550 for (i
= 0; i
< nr_data_stripes(map
); i
++)
1551 raid_map
[(i
+rot
) % map
->num_stripes
] =
1552 ce
->start
+ (tmp
+ i
) * map
->stripe_len
;
1554 raid_map
[(i
+rot
) % map
->num_stripes
] = BTRFS_RAID5_P_STRIPE
;
1555 if (map
->type
& BTRFS_BLOCK_GROUP_RAID6
)
1556 raid_map
[(i
+rot
+1) % map
->num_stripes
] = BTRFS_RAID6_Q_STRIPE
;
1558 *length
= map
->stripe_len
;
1561 multi
->num_stripes
= map
->num_stripes
;
1563 stripe_index
= stripe_nr
% nr_data_stripes(map
);
1564 stripe_nr
= stripe_nr
/ nr_data_stripes(map
);
                        /*
                         * Mirror #0 or #1 means the original data block.
                         * Mirror #2 is RAID5 parity block.
                         * Mirror #3 is RAID6 Q block.
                         */
1572 stripe_index
= nr_data_stripes(map
) + mirror_num
- 2;
1574 /* We distribute the parity blocks across stripes */
1575 stripe_index
= (stripe_nr
+ stripe_index
) % map
->num_stripes
;
1579 * after this do_div call, stripe_nr is the number of stripes
1580 * on this device we have to walk to find the data, and
1581 * stripe_index is the number of our device in the stripe array
1583 stripe_index
= stripe_nr
% map
->num_stripes
;
1584 stripe_nr
= stripe_nr
/ map
->num_stripes
;
1586 BUG_ON(stripe_index
>= map
->num_stripes
);
1588 for (i
= 0; i
< multi
->num_stripes
; i
++) {
1589 multi
->stripes
[i
].physical
=
1590 map
->stripes
[stripe_index
].physical
+ stripe_offset
+
1591 stripe_nr
* map
->stripe_len
;
1592 multi
->stripes
[i
].dev
= map
->stripes
[stripe_index
].dev
;
1601 sort_parity_stripes(multi
, raid_map
);
1602 *raid_map_ret
= raid_map
;
1608 struct btrfs_device
*btrfs_find_device(struct btrfs_fs_info
*fs_info
, u64 devid
,
1611 struct btrfs_device
*device
;
1612 struct btrfs_fs_devices
*cur_devices
;
1614 cur_devices
= fs_info
->fs_devices
;
1615 while (cur_devices
) {
1617 (!memcmp(cur_devices
->fsid
, fsid
, BTRFS_UUID_SIZE
) ||
1618 fs_info
->ignore_fsid_mismatch
)) {
1619 device
= __find_device(&cur_devices
->devices
,
1624 cur_devices
= cur_devices
->seed
;
1629 struct btrfs_device
*
1630 btrfs_find_device_by_devid(struct btrfs_fs_devices
*fs_devices
,
1631 u64 devid
, int instance
)
1633 struct list_head
*head
= &fs_devices
->devices
;
1634 struct btrfs_device
*dev
;
1637 list_for_each_entry(dev
, head
, dev_list
) {
1638 if (dev
->devid
== devid
&& num_found
++ == instance
)
1644 int btrfs_chunk_readonly(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
)
1646 struct cache_extent
*ce
;
1647 struct map_lookup
*map
;
1648 struct btrfs_mapping_tree
*map_tree
= &fs_info
->mapping_tree
;
        /*
         * During chunk recovery, we may fail to find a block group's
         * corresponding chunk; we will rebuild it later.
         */
1656 ce
= search_cache_extent(&map_tree
->cache_tree
, chunk_offset
);
1657 if (!fs_info
->is_chunk_recover
)
1662 map
= container_of(ce
, struct map_lookup
, ce
);
1663 for (i
= 0; i
< map
->num_stripes
; i
++) {
1664 if (!map
->stripes
[i
].dev
->writeable
) {
static struct btrfs_device *fill_missing_device(u64 devid)
{
        struct btrfs_device *device;

        device = kzalloc(sizeof(*device), GFP_NOFS);
        device->devid = devid;
        device->fd = -1;
        return device;
}
/*
 * slot == -1: SYSTEM chunk
 * return -EIO on error, otherwise return 0
 */
1687 int btrfs_check_chunk_valid(struct btrfs_fs_info
*fs_info
,
1688 struct extent_buffer
*leaf
,
1689 struct btrfs_chunk
*chunk
,
1690 int slot
, u64 logical
)
1697 u32 chunk_ondisk_size
;
1698 u32 sectorsize
= fs_info
->sectorsize
;
1700 length
= btrfs_chunk_length(leaf
, chunk
);
1701 stripe_len
= btrfs_chunk_stripe_len(leaf
, chunk
);
1702 num_stripes
= btrfs_chunk_num_stripes(leaf
, chunk
);
1703 sub_stripes
= btrfs_chunk_sub_stripes(leaf
, chunk
);
1704 type
= btrfs_chunk_type(leaf
, chunk
);
        /*
         * These validity checks may be insufficient to cover every corner
         * case.
         */
1709 if (!IS_ALIGNED(logical
, sectorsize
)) {
1710 error("invalid chunk logical %llu", logical
);
1713 if (btrfs_chunk_sector_size(leaf
, chunk
) != sectorsize
) {
1714 error("invalid chunk sectorsize %llu",
1715 (unsigned long long)btrfs_chunk_sector_size(leaf
, chunk
));
1718 if (!length
|| !IS_ALIGNED(length
, sectorsize
)) {
1719 error("invalid chunk length %llu", length
);
1722 if (stripe_len
!= BTRFS_STRIPE_LEN
) {
1723 error("invalid chunk stripe length: %llu", stripe_len
);
1726 /* Check on chunk item type */
1727 if (slot
== -1 && (type
& BTRFS_BLOCK_GROUP_SYSTEM
) == 0) {
1728 error("invalid chunk type %llu", type
);
1731 if (type
& ~(BTRFS_BLOCK_GROUP_TYPE_MASK
|
1732 BTRFS_BLOCK_GROUP_PROFILE_MASK
)) {
1733 error("unrecognized chunk type: %llu",
1734 ~(BTRFS_BLOCK_GROUP_TYPE_MASK
|
1735 BTRFS_BLOCK_GROUP_PROFILE_MASK
) & type
);
1738 if (!(type
& BTRFS_BLOCK_GROUP_TYPE_MASK
)) {
1739 error("missing chunk type flag: %llu", type
);
1742 if (!(is_power_of_2(type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
) ||
1743 (type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
) == 0)) {
1744 error("conflicting chunk type detected: %llu", type
);
1747 if ((type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
) &&
1748 !is_power_of_2(type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
)) {
1749 error("conflicting chunk profile detected: %llu", type
);
1753 chunk_ondisk_size
= btrfs_chunk_item_size(num_stripes
);
        /*
         * A btrfs_chunk contains at least one stripe, and for a sys_chunk
         * it can't exceed the system chunk array size.
         * For a normal chunk, it should match its chunk item size.
         */
1759 if (num_stripes
< 1 ||
1760 (slot
== -1 && chunk_ondisk_size
> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE
) ||
1761 (slot
>= 0 && chunk_ondisk_size
> btrfs_item_size_nr(leaf
, slot
))) {
1762 error("invalid num_stripes: %u", num_stripes
);
1766 * Device number check against profile
1768 if ((type
& BTRFS_BLOCK_GROUP_RAID10
&& (sub_stripes
!= 2 ||
1769 !IS_ALIGNED(num_stripes
, sub_stripes
))) ||
1770 (type
& BTRFS_BLOCK_GROUP_RAID1
&& num_stripes
< 1) ||
1771 (type
& BTRFS_BLOCK_GROUP_RAID5
&& num_stripes
< 2) ||
1772 (type
& BTRFS_BLOCK_GROUP_RAID6
&& num_stripes
< 3) ||
1773 (type
& BTRFS_BLOCK_GROUP_DUP
&& num_stripes
> 2) ||
1774 ((type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
) == 0 &&
1775 num_stripes
!= 1)) {
1776 error("Invalid num_stripes:sub_stripes %u:%u for profile %llu",
1777 num_stripes
, sub_stripes
,
1778 type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
);
/*
 * Slot is used to verify the chunk item is valid.
 *
 * For a sys chunk in the superblock, pass -1 to indicate a sys chunk.
 */
1790 static int read_one_chunk(struct btrfs_fs_info
*fs_info
, struct btrfs_key
*key
,
1791 struct extent_buffer
*leaf
,
1792 struct btrfs_chunk
*chunk
, int slot
)
1794 struct btrfs_mapping_tree
*map_tree
= &fs_info
->mapping_tree
;
1795 struct map_lookup
*map
;
1796 struct cache_extent
*ce
;
1800 u8 uuid
[BTRFS_UUID_SIZE
];
1805 logical
= key
->offset
;
1806 length
= btrfs_chunk_length(leaf
, chunk
);
1807 num_stripes
= btrfs_chunk_num_stripes(leaf
, chunk
);
1808 /* Validation check */
1809 ret
= btrfs_check_chunk_valid(fs_info
, leaf
, chunk
, slot
, logical
);
1811 error("%s checksums match, but it has an invalid chunk, %s",
1812 (slot
== -1) ? "Superblock" : "Metadata",
1813 (slot
== -1) ? "try btrfsck --repair -s <superblock> ie, 0,1,2" : "");
1817 ce
= search_cache_extent(&map_tree
->cache_tree
, logical
);
1819 /* already mapped? */
1820 if (ce
&& ce
->start
<= logical
&& ce
->start
+ ce
->size
> logical
) {
1824 map
= kmalloc(btrfs_map_lookup_size(num_stripes
), GFP_NOFS
);
1828 map
->ce
.start
= logical
;
1829 map
->ce
.size
= length
;
1830 map
->num_stripes
= num_stripes
;
1831 map
->io_width
= btrfs_chunk_io_width(leaf
, chunk
);
1832 map
->io_align
= btrfs_chunk_io_align(leaf
, chunk
);
1833 map
->sector_size
= btrfs_chunk_sector_size(leaf
, chunk
);
1834 map
->stripe_len
= btrfs_chunk_stripe_len(leaf
, chunk
);
1835 map
->type
= btrfs_chunk_type(leaf
, chunk
);
1836 map
->sub_stripes
= btrfs_chunk_sub_stripes(leaf
, chunk
);
1838 for (i
= 0; i
< num_stripes
; i
++) {
1839 map
->stripes
[i
].physical
=
1840 btrfs_stripe_offset_nr(leaf
, chunk
, i
);
1841 devid
= btrfs_stripe_devid_nr(leaf
, chunk
, i
);
1842 read_extent_buffer(leaf
, uuid
, (unsigned long)
1843 btrfs_stripe_dev_uuid_nr(chunk
, i
),
1845 map
->stripes
[i
].dev
= btrfs_find_device(fs_info
, devid
, uuid
,
1847 if (!map
->stripes
[i
].dev
) {
1848 map
->stripes
[i
].dev
= fill_missing_device(devid
);
1849 printf("warning, device %llu is missing\n",
1850 (unsigned long long)devid
);
1851 list_add(&map
->stripes
[i
].dev
->dev_list
,
1852 &fs_info
->fs_devices
->devices
);
1856 ret
= insert_cache_extent(&map_tree
->cache_tree
, &map
->ce
);
static int fill_device_from_item(struct extent_buffer *leaf,
                                 struct btrfs_dev_item *dev_item,
                                 struct btrfs_device *device)
{
        unsigned long ptr;

        device->devid = btrfs_device_id(leaf, dev_item);
        device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
        device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
        device->type = btrfs_device_type(leaf, dev_item);
        device->io_align = btrfs_device_io_align(leaf, dev_item);
        device->io_width = btrfs_device_io_width(leaf, dev_item);
        device->sector_size = btrfs_device_sector_size(leaf, dev_item);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

        return 0;
}
1882 static int open_seed_devices(struct btrfs_fs_info
*fs_info
, u8
*fsid
)
1884 struct btrfs_fs_devices
*fs_devices
;
1887 fs_devices
= fs_info
->fs_devices
->seed
;
1888 while (fs_devices
) {
1889 if (!memcmp(fs_devices
->fsid
, fsid
, BTRFS_UUID_SIZE
)) {
1893 fs_devices
= fs_devices
->seed
;
1896 fs_devices
= find_fsid(fsid
);
1898 /* missing all seed devices */
1899 fs_devices
= kzalloc(sizeof(*fs_devices
), GFP_NOFS
);
1904 INIT_LIST_HEAD(&fs_devices
->devices
);
1905 list_add(&fs_devices
->list
, &fs_uuids
);
1906 memcpy(fs_devices
->fsid
, fsid
, BTRFS_FSID_SIZE
);
1909 ret
= btrfs_open_devices(fs_devices
, O_RDONLY
);
1913 fs_devices
->seed
= fs_info
->fs_devices
->seed
;
1914 fs_info
->fs_devices
->seed
= fs_devices
;
1919 static int read_one_dev(struct btrfs_fs_info
*fs_info
,
1920 struct extent_buffer
*leaf
,
1921 struct btrfs_dev_item
*dev_item
)
1923 struct btrfs_device
*device
;
1926 u8 fs_uuid
[BTRFS_UUID_SIZE
];
1927 u8 dev_uuid
[BTRFS_UUID_SIZE
];
1929 devid
= btrfs_device_id(leaf
, dev_item
);
1930 read_extent_buffer(leaf
, dev_uuid
,
1931 (unsigned long)btrfs_device_uuid(dev_item
),
1933 read_extent_buffer(leaf
, fs_uuid
,
1934 (unsigned long)btrfs_device_fsid(dev_item
),
1937 if (memcmp(fs_uuid
, fs_info
->fsid
, BTRFS_UUID_SIZE
)) {
1938 ret
= open_seed_devices(fs_info
, fs_uuid
);
1943 device
= btrfs_find_device(fs_info
, devid
, dev_uuid
, fs_uuid
);
1945 device
= kzalloc(sizeof(*device
), GFP_NOFS
);
1949 list_add(&device
->dev_list
,
1950 &fs_info
->fs_devices
->devices
);
1953 fill_device_from_item(leaf
, dev_item
, device
);
1954 device
->dev_root
= fs_info
->dev_root
;
1958 int btrfs_read_sys_array(struct btrfs_fs_info
*fs_info
)
1960 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
1961 struct extent_buffer
*sb
;
1962 struct btrfs_disk_key
*disk_key
;
1963 struct btrfs_chunk
*chunk
;
1965 unsigned long sb_array_offset
;
1971 struct btrfs_key key
;
1973 if (fs_info
->nodesize
< BTRFS_SUPER_INFO_SIZE
) {
1974 printf("ERROR: nodesize %u too small to read superblock\n",
1978 sb
= btrfs_find_create_tree_block(fs_info
, BTRFS_SUPER_INFO_OFFSET
);
1981 btrfs_set_buffer_uptodate(sb
);
1982 write_extent_buffer(sb
, super_copy
, 0, sizeof(*super_copy
));
1983 array_size
= btrfs_super_sys_array_size(super_copy
);
1985 array_ptr
= super_copy
->sys_chunk_array
;
1986 sb_array_offset
= offsetof(struct btrfs_super_block
, sys_chunk_array
);
1989 while (cur_offset
< array_size
) {
1990 disk_key
= (struct btrfs_disk_key
*)array_ptr
;
1991 len
= sizeof(*disk_key
);
1992 if (cur_offset
+ len
> array_size
)
1993 goto out_short_read
;
1995 btrfs_disk_key_to_cpu(&key
, disk_key
);
1998 sb_array_offset
+= len
;
2001 if (key
.type
== BTRFS_CHUNK_ITEM_KEY
) {
2002 chunk
= (struct btrfs_chunk
*)sb_array_offset
;
2004 * At least one btrfs_chunk with one stripe must be
2005 * present, exact stripe count check comes afterwards
2007 len
= btrfs_chunk_item_size(1);
2008 if (cur_offset
+ len
> array_size
)
2009 goto out_short_read
;
2011 num_stripes
= btrfs_chunk_num_stripes(sb
, chunk
);
2014 "ERROR: invalid number of stripes %u in sys_array at offset %u\n",
2015 num_stripes
, cur_offset
);
2020 len
= btrfs_chunk_item_size(num_stripes
);
2021 if (cur_offset
+ len
> array_size
)
2022 goto out_short_read
;
2024 ret
= read_one_chunk(fs_info
, &key
, sb
, chunk
, -1);
2029 "ERROR: unexpected item type %u in sys_array at offset %u\n",
2030 (u32
)key
.type
, cur_offset
);
2035 sb_array_offset
+= len
;
2038 free_extent_buffer(sb
);
2042 printk("ERROR: sys_array too short to read %u bytes at offset %u\n",
2044 free_extent_buffer(sb
);
2048 int btrfs_read_chunk_tree(struct btrfs_fs_info
*fs_info
)
2050 struct btrfs_path
*path
;
2051 struct extent_buffer
*leaf
;
2052 struct btrfs_key key
;
2053 struct btrfs_key found_key
;
2054 struct btrfs_root
*root
= fs_info
->chunk_root
;
2058 path
= btrfs_alloc_path();
        /*
         * Read all device items, and then all the chunk items. All
         * device items are found before any chunk item (their object id
         * is smaller than the lowest possible object id for a chunk
         * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
         */
2068 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2071 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2075 leaf
= path
->nodes
[0];
2076 slot
= path
->slots
[0];
2077 if (slot
>= btrfs_header_nritems(leaf
)) {
2078 ret
= btrfs_next_leaf(root
, path
);
2085 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
2086 if (found_key
.type
== BTRFS_DEV_ITEM_KEY
) {
2087 struct btrfs_dev_item
*dev_item
;
2088 dev_item
= btrfs_item_ptr(leaf
, slot
,
2089 struct btrfs_dev_item
);
2090 ret
= read_one_dev(fs_info
, leaf
, dev_item
);
2092 } else if (found_key
.type
== BTRFS_CHUNK_ITEM_KEY
) {
2093 struct btrfs_chunk
*chunk
;
2094 chunk
= btrfs_item_ptr(leaf
, slot
, struct btrfs_chunk
);
2095 ret
= read_one_chunk(fs_info
, &found_key
, leaf
, chunk
,
2104 btrfs_free_path(path
);
2108 struct list_head
*btrfs_scanned_uuids(void)
2113 static int rmw_eb(struct btrfs_fs_info
*info
,
2114 struct extent_buffer
*eb
, struct extent_buffer
*orig_eb
)
2117 unsigned long orig_off
= 0;
2118 unsigned long dest_off
= 0;
2119 unsigned long copy_len
= eb
->len
;
2121 ret
= read_whole_eb(info
, eb
, 0);
2125 if (eb
->start
+ eb
->len
<= orig_eb
->start
||
2126 eb
->start
>= orig_eb
->start
+ orig_eb
->len
)
2129 * | ----- orig_eb ------- |
2130 * | ----- stripe ------- |
2131 * | ----- orig_eb ------- |
2132 * | ----- orig_eb ------- |
2134 if (eb
->start
> orig_eb
->start
)
2135 orig_off
= eb
->start
- orig_eb
->start
;
2136 if (orig_eb
->start
> eb
->start
)
2137 dest_off
= orig_eb
->start
- eb
->start
;
2139 if (copy_len
> orig_eb
->len
- orig_off
)
2140 copy_len
= orig_eb
->len
- orig_off
;
2141 if (copy_len
> eb
->len
- dest_off
)
2142 copy_len
= eb
->len
- dest_off
;
2144 memcpy(eb
->data
+ dest_off
, orig_eb
->data
+ orig_off
, copy_len
);
2148 static int split_eb_for_raid56(struct btrfs_fs_info
*info
,
2149 struct extent_buffer
*orig_eb
,
2150 struct extent_buffer
**ebs
,
2151 u64 stripe_len
, u64
*raid_map
,
2154 struct extent_buffer
**tmp_ebs
;
2155 u64 start
= orig_eb
->start
;
2160 tmp_ebs
= calloc(num_stripes
, sizeof(*tmp_ebs
));
2164 /* Alloc memory in a row for data stripes */
2165 for (i
= 0; i
< num_stripes
; i
++) {
2166 if (raid_map
[i
] >= BTRFS_RAID5_P_STRIPE
)
2169 tmp_ebs
[i
] = calloc(1, sizeof(**tmp_ebs
) + stripe_len
);
2176 for (i
= 0; i
< num_stripes
; i
++) {
2177 struct extent_buffer
*eb
= tmp_ebs
[i
];
2179 if (raid_map
[i
] >= BTRFS_RAID5_P_STRIPE
)
2182 eb
->start
= raid_map
[i
];
2183 eb
->len
= stripe_len
;
2187 eb
->dev_bytenr
= (u64
)-1;
2189 this_eb_start
= raid_map
[i
];
2191 if (start
> this_eb_start
||
2192 start
+ orig_eb
->len
< this_eb_start
+ stripe_len
) {
2193 ret
= rmw_eb(info
, eb
, orig_eb
);
2197 memcpy(eb
->data
, orig_eb
->data
+ eb
->start
- start
,
2205 for (i
= 0; i
< num_stripes
; i
++)
2211 int write_raid56_with_parity(struct btrfs_fs_info
*info
,
2212 struct extent_buffer
*eb
,
2213 struct btrfs_multi_bio
*multi
,
2214 u64 stripe_len
, u64
*raid_map
)
2216 struct extent_buffer
**ebs
, *p_eb
= NULL
, *q_eb
= NULL
;
2219 int alloc_size
= eb
->len
;
2222 ebs
= malloc(sizeof(*ebs
) * multi
->num_stripes
);
2223 pointers
= malloc(sizeof(*pointers
) * multi
->num_stripes
);
2224 if (!ebs
|| !pointers
) {
2230 if (stripe_len
> alloc_size
)
2231 alloc_size
= stripe_len
;
2233 ret
= split_eb_for_raid56(info
, eb
, ebs
, stripe_len
, raid_map
,
2234 multi
->num_stripes
);
2238 for (i
= 0; i
< multi
->num_stripes
; i
++) {
2239 struct extent_buffer
*new_eb
;
2240 if (raid_map
[i
] < BTRFS_RAID5_P_STRIPE
) {
2241 ebs
[i
]->dev_bytenr
= multi
->stripes
[i
].physical
;
2242 ebs
[i
]->fd
= multi
->stripes
[i
].dev
->fd
;
2243 multi
->stripes
[i
].dev
->total_ios
++;
2244 if (ebs
[i
]->start
!= raid_map
[i
]) {
2246 goto out_free_split
;
2250 new_eb
= malloc(sizeof(*eb
) + alloc_size
);
2253 goto out_free_split
;
2255 new_eb
->dev_bytenr
= multi
->stripes
[i
].physical
;
2256 new_eb
->fd
= multi
->stripes
[i
].dev
->fd
;
2257 multi
->stripes
[i
].dev
->total_ios
++;
2258 new_eb
->len
= stripe_len
;
2260 if (raid_map
[i
] == BTRFS_RAID5_P_STRIPE
)
2262 else if (raid_map
[i
] == BTRFS_RAID6_Q_STRIPE
)
2266 ebs
[multi
->num_stripes
- 2] = p_eb
;
2267 ebs
[multi
->num_stripes
- 1] = q_eb
;
2269 for (i
= 0; i
< multi
->num_stripes
; i
++)
2270 pointers
[i
] = ebs
[i
]->data
;
2272 raid6_gen_syndrome(multi
->num_stripes
, stripe_len
, pointers
);
2274 ebs
[multi
->num_stripes
- 1] = p_eb
;
2275 for (i
= 0; i
< multi
->num_stripes
; i
++)
2276 pointers
[i
] = ebs
[i
]->data
;
2277 ret
= raid5_gen_result(multi
->num_stripes
, stripe_len
,
2278 multi
->num_stripes
- 1, pointers
);
2280 goto out_free_split
;
2283 for (i
= 0; i
< multi
->num_stripes
; i
++) {
2284 ret
= write_extent_to_disk(ebs
[i
]);
2286 goto out_free_split
;
2290 for (i
= 0; i
< multi
->num_stripes
; i
++) {
/*
 * Get stripe length from chunk item and its stripe items
 *
 * Caller should only call this function after validating the chunk item
 * by using btrfs_check_chunk_valid().
 */
u64 btrfs_stripe_length(struct btrfs_fs_info *fs_info,
                        struct extent_buffer *leaf,
                        struct btrfs_chunk *chunk)
{
        u64 stripe_len;
        u64 chunk_len;
        u32 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        u64 profile = btrfs_chunk_type(leaf, chunk) &
                      BTRFS_BLOCK_GROUP_PROFILE_MASK;

        chunk_len = btrfs_chunk_length(leaf, chunk);

        switch (profile) {
        case 0: /* Single profile */
        case BTRFS_BLOCK_GROUP_RAID1:
        case BTRFS_BLOCK_GROUP_DUP:
                stripe_len = chunk_len;
                break;
        case BTRFS_BLOCK_GROUP_RAID0:
                stripe_len = chunk_len / num_stripes;
                break;
        case BTRFS_BLOCK_GROUP_RAID5:
                stripe_len = chunk_len / (num_stripes - 1);
                break;
        case BTRFS_BLOCK_GROUP_RAID6:
                stripe_len = chunk_len / (num_stripes - 2);
                break;
        case BTRFS_BLOCK_GROUP_RAID10:
                stripe_len = chunk_len / (num_stripes /
                                btrfs_chunk_sub_stripes(leaf, chunk));
                break;
        default:
                /* Invalid chunk profile found */
                BUG_ON(1);
        }
        return stripe_len;
}
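/*
 * For example, a 4GiB RAID6 chunk over 6 devices yields a per-device stripe
 * length of 4GiB / (6 - 2) = 1GiB, while the same chunk as RAID10
 * (sub_stripes == 2) yields 4GiB / (6 / 2), roughly 1.33GiB per device.
 */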
/*
 * Return 0 if size of @device is already good
 * Return >0 if size of @device is not aligned but fixed without problems
 * Return <0 if something wrong happened when aligning the size of @device
 */
2350 int btrfs_fix_device_size(struct btrfs_fs_info
*fs_info
,
2351 struct btrfs_device
*device
)
2353 struct btrfs_trans_handle
*trans
;
2354 struct btrfs_key key
;
2355 struct btrfs_path path
;
2356 struct btrfs_root
*chunk_root
= fs_info
->chunk_root
;
2357 struct btrfs_dev_item
*di
;
2358 u64 old_bytes
= device
->total_bytes
;
2361 if (IS_ALIGNED(old_bytes
, fs_info
->sectorsize
))
2364 /* Align the in-memory total_bytes first, and use it as correct size */
2365 device
->total_bytes
= round_down(device
->total_bytes
,
2366 fs_info
->sectorsize
);
2368 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2369 key
.type
= BTRFS_DEV_ITEM_KEY
;
2370 key
.offset
= device
->devid
;
2372 trans
= btrfs_start_transaction(chunk_root
, 1);
2373 if (IS_ERR(trans
)) {
2374 ret
= PTR_ERR(trans
);
2375 error("error starting transaction: %d (%s)",
2376 ret
, strerror(-ret
));
2380 btrfs_init_path(&path
);
2381 ret
= btrfs_search_slot(trans
, chunk_root
, &key
, &path
, 0, 1);
2383 error("failed to find DEV_ITEM for devid %llu", device
->devid
);
2388 error("failed to search chunk root: %d (%s)",
2389 ret
, strerror(-ret
));
2392 di
= btrfs_item_ptr(path
.nodes
[0], path
.slots
[0], struct btrfs_dev_item
);
2393 btrfs_set_device_total_bytes(path
.nodes
[0], di
, device
->total_bytes
);
2394 btrfs_mark_buffer_dirty(path
.nodes
[0]);
2395 ret
= btrfs_commit_transaction(trans
, chunk_root
);
2397 error("failed to commit current transaction: %d (%s)",
2398 ret
, strerror(-ret
));
2399 btrfs_release_path(&path
);
2402 btrfs_release_path(&path
);
2403 printf("Fixed device size for devid %llu, old size: %llu new size: %llu\n",
2404 device
->devid
, old_bytes
, device
->total_bytes
);
2408 /* We haven't modified anything, it's OK to commit current trans */
2409 btrfs_commit_transaction(trans
, chunk_root
);
2410 btrfs_release_path(&path
);
/*
 * Return 0 if super block total_bytes matches all devices' total_bytes
 * Return >0 if super block total_bytes mismatched but was fixed without problem
 * Return <0 if we failed to fix super block total_bytes
 */
2419 int btrfs_fix_super_size(struct btrfs_fs_info
*fs_info
)
2421 struct btrfs_trans_handle
*trans
;
2422 struct btrfs_device
*device
;
2423 struct list_head
*dev_list
= &fs_info
->fs_devices
->devices
;
2424 u64 total_bytes
= 0;
2425 u64 old_bytes
= btrfs_super_total_bytes(fs_info
->super_copy
);
2428 list_for_each_entry(device
, dev_list
, dev_list
) {
2430 * Caller should ensure this function is called after aligning
2431 * all devices' total_bytes.
2433 if (!IS_ALIGNED(device
->total_bytes
, fs_info
->sectorsize
)) {
2434 error("device %llu total_bytes %llu not aligned to %u",
2435 device
->devid
, device
->total_bytes
,
2436 fs_info
->sectorsize
);
2439 total_bytes
+= device
->total_bytes
;
2442 if (total_bytes
== old_bytes
)
2445 btrfs_set_super_total_bytes(fs_info
->super_copy
, total_bytes
);
2447 /* Commit transaction to update all super blocks */
2448 trans
= btrfs_start_transaction(fs_info
->tree_root
, 1);
2449 if (IS_ERR(trans
)) {
2450 ret
= PTR_ERR(trans
);
2451 error("error starting transaction: %d (%s)",
2452 ret
, strerror(-ret
));
2455 ret
= btrfs_commit_transaction(trans
, fs_info
->tree_root
);
2457 error("failed to commit current transaction: %d (%s)",
2458 ret
, strerror(-ret
));
2461 printf("Fixed super total bytes, old size: %llu new size: %llu\n",
2462 old_bytes
, total_bytes
);
/*
 * Return 0 if all devices and super block sizes are good
 * Return >0 if any device/super size problem was found, but fixed
 * Return <0 if something wrong happened during fixing
 */
2471 int btrfs_fix_device_and_super_size(struct btrfs_fs_info
*fs_info
)
2473 struct btrfs_device
*device
;
2474 struct list_head
*dev_list
= &fs_info
->fs_devices
->devices
;
2475 bool have_bad_value
= false;
2478 /* Seed device is not supported yet */
2479 if (fs_info
->fs_devices
->seed
) {
2480 error("fixing device size with seed device is not supported yet");
2484 /* All devices must be set up before repairing */
2485 if (list_empty(dev_list
)) {
2486 error("no device found");
2489 list_for_each_entry(device
, dev_list
, dev_list
) {
2490 if (device
->fd
== -1 || !device
->writeable
) {
2491 error("devid %llu is missing or not writeable",
2494 "fixing device size needs all device(s) to be present and writeable");
2499 /* Repair total_bytes of each device */
2500 list_for_each_entry(device
, dev_list
, dev_list
) {
2501 ret
= btrfs_fix_device_size(fs_info
, device
);
2505 have_bad_value
= true;
2508 /* Repair super total_byte */
2509 ret
= btrfs_fix_super_size(fs_info
);
2511 have_bad_value
= true;
2512 if (have_bad_value
) {
2514 "Fixed unaligned/mismatched total_bytes for super block and device items\n");
2517 printf("No device size related problem found\n");