/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <sys/types.h>
#include <uuid/uuid.h>
#include "transaction.h"
#include "print-tree.h"
#include "kernel-lib/raid56.h"
struct stripe {
	struct btrfs_device *dev;
	u64 physical;
};
static inline int nr_parity_stripes(struct map_lookup *map)
{
	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 1;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;
	else
		return 0;
}
static inline int nr_data_stripes(struct map_lookup *map)
{
	return map->num_stripes - nr_parity_stripes(map);
}
#define is_parity_stripe(x) (((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE))
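/*
 * Illustrative note (editorial, not in the original source): for a RAID6
 * chunk striped across 6 devices, nr_parity_stripes() returns 2 and
 * nr_data_stripes() returns 4, so only 4 of the 6 per-device stripes hold
 * file system data; the remaining two hold the P and Q parity/syndrome.
 */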
static LIST_HEAD(fs_uuids);
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
			return dev;
		}
	}
	return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->fd = -1;
		device->devid = devid;
		device->generation = found_transid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		if (!device->label) {
			kfree(device->name);
			kfree(device);
			return -ENOMEM;
		}
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
		device->bytes_used =
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name, path)) {
		char *name;

		/*
		 * The existing device has newer generation, so this one could
		 * be a stale one, don't add it.
		 */
		if (found_transid < device->generation) {
			warning(
"adding device %s gen %llu but found an existing device %s gen %llu",
				path, found_transid, device->name,
				device->generation);
			return -EEXIST;
		}

		name = strdup(path);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
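/*
 * Editorial note: device_list_add() may see the same device several times,
 * e.g. when it is scanned through different paths under /dev.  The
 * generation comparison above keeps the copy with the newest superblock
 * generation, so a stale clone can neither replace the live device nor
 * sneak a second entry into the list.
 */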
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_device *device;
	int ret = 0;

again:
	if (!fs_devices)
		return 0;
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		if (device->fd != -1) {
			if (fsync(device->fd) == -1) {
				warning("fsync on device %llu failed: %s",
					device->devid, strerror(errno));
				ret = -errno;
			}
			if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
				fprintf(stderr, "Warning, could not drop caches\n");
			close(device->fd);
			device->fd = -1;
		}
		device->writeable = 0;
		list_del(&device->dev_list);
		/* free the memory */
		free(device->name);
		free(device->label);
		free(device);
	}

	seed_devices = fs_devices->seed;
	fs_devices->seed = NULL;
	if (seed_devices) {
		struct btrfs_fs_devices *orig;

		orig = fs_devices;
		fs_devices = seed_devices;
		list_del(&orig->list);
		free(orig);
		goto again;
	} else {
		list_del(&fs_devices->list);
		free(fs_devices);
	}

	return ret;
}
void btrfs_close_all_devices(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
					list);
		btrfs_close_devices(fs_devices);
	}
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	int fd;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->name) {
			printk("no name for device %llu, skip it now\n", device->devid);
			continue;
		}

		fd = open(device->name, flags);
		if (fd < 0) {
			ret = -errno;
			error("cannot open device '%s': %s", device->name,
			      strerror(errno));
			goto fail;
		}

		if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
			fprintf(stderr, "Warning, could not drop caches\n");

		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		device->fd = fd;
		if (flags & O_RDWR)
			device->writeable = 1;
	}
	return 0;
fail:
	btrfs_close_devices(fs_devices);
	return ret;
}
int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset, unsigned sbflags)
{
	struct btrfs_super_block *disk_super;
	char buf[BTRFS_SUPER_INFO_SIZE];
	int ret;
	u64 devid;

	disk_super = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, disk_super, super_offset, sbflags);
	if (ret < 0)
		return -EIO;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
		*total_devs = 1;
	else
		*total_devs = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	return ret;
}
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
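/*
 * Illustrative example (editorial, not in the original source): with dev
 * extents at [1M, 5M) and [9M, 12M) on a 20M device, searching for 3M
 * starting at 1M walks both extents, finds the 4M hole at [5M, 9M) and
 * returns *start = 5M.  If no hole had been >= 3M, *start and *len would
 * instead describe the largest hole seen, so the caller can tell how close
 * the device came to fitting the request.
 */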
static int find_free_dev_extent_start(struct btrfs_trans_handle *trans,
				      struct btrfs_device *device,
				      u64 num_bytes, u64 search_start,
				      u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, (u64)SZ_1M);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto out;
	}

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans, device,
					  num_bytes, 0, start, NULL);
}
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset,
				  u64 num_bytes, u64 *start, int convert)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * For convert case, just skip search free dev_extent, as caller
	 * is responsible to make sure it's free.
	 */
	if (!convert) {
		ret = find_free_dev_extent(trans, device, num_bytes,
					   start);
		if (ret)
			goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}
static int find_next_chunk(struct btrfs_fs_info *fs_info, u64 *offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != BTRFS_FIRST_CHUNK_TREE_OBJECTID)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
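/*
 * Editorial note: find_next_chunk() above and find_next_devid() below share
 * the same b-tree idiom: search for an impossible maximal key
 * (offset = (u64)-1), then step back with btrfs_previous_item() to land on
 * the highest existing item and derive the next free offset/devid from it.
 */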
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_fs_info *fs_info,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->chunk_root;
	unsigned long ptr;
	u64 free_devid = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
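/*
 * Editorial note: sys_chunk_array is a packed byte array of
 * (struct btrfs_disk_key, chunk item) pairs appended back to back with no
 * alignment padding, which is why the code above advances a raw byte
 * pointer by sizeof(disk_key) and then memcpy()s item_size bytes of
 * chunk item behind it.
 */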
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else if (type & BTRFS_BLOCK_GROUP_RAID5)
		return calc_size * (num_stripes - 1);
	else if (type & BTRFS_BLOCK_GROUP_RAID6)
		return calc_size * (num_stripes - 2);
	else
		return calc_size * num_stripes;
}
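/*
 * Illustrative example (editorial, not in the original source): with
 * calc_size = 1G per stripe, a 4-stripe RAID10 chunk (sub_stripes = 2)
 * provides 1G * (4 / 2) = 2G of usable bytes, a 4-stripe RAID5 chunk
 * provides 1G * (4 - 1) = 3G, and a 4-stripe RAID6 chunk provides
 * 1G * (4 - 2) = 2G.
 */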
static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO, add a way to store the preferred stripe size */
	return BTRFS_STRIPE_LEN;
}
/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in 1M at beginning of device, and not
 * allowed to allocate any chunk before alloc_start if it is specified.
 * So search holes from max(1M, alloc_start) to device->total_bytes.
 */
static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
				    struct btrfs_device *device,
				    u64 *avail_bytes)
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct extent_buffer *l;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;
	u64 extent_end = 0;
	u64 free_bytes = 0;
	int ret;
	int slot = 0;

	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = root->fs_info->alloc_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;
		if (key.objectid > device->devid)
			break;
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;
		if (key.offset > search_end)
			break;
		if (key.offset > search_start)
			free_bytes += key.offset - search_start;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
		if (search_start > search_end)
			break;
next:
		path->slots[0]++;
	}

	if (search_start < search_end)
		free_bytes += search_end - search_start;

	*avail_bytes = free_bytes;
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)
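/*
 * Editorial note: both limits answer "how many struct btrfs_stripe entries
 * fit".  BTRFS_MAX_DEVS bounds a chunk item by the space available in one
 * leaf; BTRFS_MAX_DEVS_SYS_CHUNK bounds a SYSTEM chunk by the superblock's
 * sys_chunk_array, apparently reserving headroom for two key/chunk headers.
 */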
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *info, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = SZ_1M;
	u64 calc_size = SZ_8M;
	u64 min_free;
	u64 max_chunk_size = 4 * calc_size;
	u64 avail = 0;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int max_stripes = 0;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;
	u64 offset;

	if (list_empty(dev_list)) {
		return -ENOSPC;
	}

	if (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			calc_size = SZ_8M;
			max_chunk_size = calc_size * 2;
			min_stripe_size = SZ_1M;
			max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			calc_size = SZ_1G;
			max_chunk_size = 10 * calc_size;
			min_stripe_size = SZ_64M;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			calc_size = SZ_1G;
			max_chunk_size = 4 * calc_size;
			min_stripe_size = SZ_32M;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		}
	}
	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				  btrfs_super_num_devices(info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & BTRFS_BLOCK_GROUP_DUP) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
		stripe_len = find_raid56_stripe_len(num_stripes - 1,
				    btrfs_super_stripesize(info->super_copy));
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 3)
			return -ENOSPC;
		min_stripes = 3;
		stripe_len = find_raid56_stripe_len(num_stripes - 2,
				    btrfs_super_stripesize(info->super_copy));
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
	    max_chunk_size) {
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	calc_size /= stripe_len;
	calc_size *= stripe_len;
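	/*
	 * Editorial note: the divide-then-multiply pairs above are integer
	 * arithmetic on purpose - they round calc_size down to a multiple of
	 * stripe_len, e.g. with a 64K stripe_len a calc_size of 100K
	 * becomes 64K.
	 */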
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		ret = btrfs_device_avail_bytes(trans, device, &avail);
		if (ret)
			return ret;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			max_avail = 0;
			goto again;
		}
		return -ENOSPC;
	}
	ret = find_next_chunk(info, &offset);
	if (ret)
		return ret;
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = offset;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset, 0);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = info->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(info, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}

	kfree(chunk);
	return ret;
}
/*
 * Alloc a DATA chunk with SINGLE profile.
 *
 * If 'convert' is set, it will alloc a chunk with 1:1 mapping
 * (btrfs logical bytenr == on-disk bytenr)
 * For that case, caller must make sure the chunk and dev_extent are not
 * occupied.
 */
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *info, u64 *start,
			   u64 num_bytes, u64 type, int convert)
{
	u64 dev_offset;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	u64 calc_size = SZ_8M;
	int num_stripes = 1;
	int sub_stripes = 0;
	int ret;
	int index = 0;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	if (convert) {
		if (*start != round_down(*start, info->sectorsize)) {
			error("DATA chunk start not sectorsize aligned: %llu",
					(unsigned long long)*start);
			return -EINVAL;
		}
		key.offset = *start;
		dev_offset = *start;
	} else {
		u64 tmp;

		ret = find_next_chunk(info, &tmp);
		if (ret)
			return ret;
		key.offset = tmp;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	calc_size = num_bytes;

	cur = dev_list->next;
	device = list_entry(cur, struct btrfs_device, dev_list);

	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset, convert);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = info->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	if (!convert)
		*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	kfree(chunk);
	return ret;
}
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	int ret;

	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		fprintf(stderr, "No mapping for %llu-%llu\n",
			(unsigned long long)logical,
			(unsigned long long)logical+len);
		return 1;
	}
	if (ce->start > logical || ce->start + ce->size < logical) {
		fprintf(stderr, "Invalid mapping for %llu-%llu, got "
			"%llu-%llu\n", (unsigned long long)logical,
			(unsigned long long)logical+len,
			(unsigned long long)ce->start,
			(unsigned long long)ce->start + ce->size);
		return 1;
	}
	map = container_of(ce, struct map_lookup, ce);

	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	return ret;
}
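/*
 * Editorial note: for the parity profiles the "number of copies" is really
 * the number of independent ways to produce the data: RAID5 data can be
 * read directly or rebuilt once from parity (2); RAID6 adds the Q syndrome
 * as a second reconstruction path (3).
 */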
int btrfs_next_bg(struct btrfs_fs_info *fs_info, u64 *logical,
		  u64 *size, u64 type)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 cur = *logical;

	ce = search_cache_extent(&map_tree->cache_tree, cur);

	while (ce) {
		/*
		 * only jump to next bg if our cur is not 0
		 * As the initial logical for btrfs_next_bg() is 0, and
		 * if we jump to next bg, we skipped a valid bg.
		 */
		if (cur) {
			ce = next_cache_extent(ce);
			if (!ce)
				return -ENOENT;
		}

		cur = ce->start;
		map = container_of(ce, struct map_lookup, ce);
		if (map->type & type) {
			*logical = ce->start;
			*size = ce->size;
			return 0;
		}
		if (!cur)
			ce = next_cache_extent(ce);
	}

	return -ENOENT;
}
int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	ce = search_cache_extent(&map_tree->cache_tree, chunk_start);
	BUG_ON(!ce);
	map = container_of(ce, struct map_lookup, ce);

	length = ce->size;
	rmap_len = map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = ce->size / (map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = ce->size / map->num_stripes;
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			      BTRFS_BLOCK_GROUP_RAID6)) {
		length = ce->size / nr_data_stripes(map);
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = (physical - map->stripes[i].physical) /
			    map->stripe_len;

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = (stripe_nr * map->num_stripes + i) /
				    map->sub_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = ce->start + stripe_nr * rmap_len;
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	return 0;
}
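/*
 * Editorial note: btrfs_rmap_block() is the reverse of btrfs_map_block() -
 * given a (devid, physical) position it collects every logical address in
 * the chunk that maps onto that physical stripe.  The buf[] deduplication
 * matters for mirrored profiles, where several stripes share one logical
 * range.
 */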
static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < bbio->num_stripes - 1; i++) {
			if (parity_smaller(raid_map[i], raid_map[i+1])) {
				s = bbio->stripes[i];
				l = raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				raid_map[i] = raid_map[i+1];
				bbio->stripes[i+1] = s;
				raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num,
		    u64 **raid_map_ret)
{
	return __btrfs_map_block(fs_info, rw, logical, length, NULL,
				 multi_ret, mirror_num, raid_map_ret);
}
int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		      u64 logical, u64 *length, u64 *type,
		      struct btrfs_multi_bio **multi_ret, int mirror_num,
		      u64 **raid_map_ret)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 *raid_map = NULL;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && rw == READ) {
		stripes_allocated = 1;
	}
again:
	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		kfree(multi);
		*length = (u64)-1;
		return -ENOENT;
	}
	if (ce->start > logical) {
		kfree(multi);
		*length = ce->start - logical;
		return -ENOENT;
	}

	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}
	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;

	if (rw == WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
	    && multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
		/* RAID[56] write or recovery. Return all stripes */
		stripes_required = map->num_stripes;

		/* Only allocate the map if we've already got a large enough multi_ret */
		if (stripes_allocated >= stripes_required) {
			raid_map = kmalloc(sizeof(u64) * map->num_stripes,
					   GFP_NOFS);
			if (!raid_map) {
				kfree(multi);
				return -ENOMEM;
			}
		}
	}

	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && stripes_allocated < stripes_required) {
		stripes_allocated = stripes_required;
		kfree(multi);
		multi = NULL;
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = stripe_nr / map->stripe_len;

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, ce->size - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = ce->size - offset;
	}

	if (!multi_ret)
		goto out;

	multi->num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = stripe_nr % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = stripe_nr % factor;
		stripe_index *= map->sub_stripes;

		if (rw == WRITE)
			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;

		stripe_nr = stripe_nr / factor;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {
		if (raid_map) {
			int rot;
			u64 tmp;
			u64 raid56_full_stripe_start;
			u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len;

			/*
			 * align the start of our data stripe in the logical
			 * address space
			 */
			raid56_full_stripe_start = offset / full_stripe_len;
			raid56_full_stripe_start *= full_stripe_len;
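			/*
			 * Illustrative example (editorial): on RAID5 with 4
			 * devices (3 data stripes, 64K stripe_len) the full
			 * stripe is 192K wide.  For offset = 200K the two
			 * lines above round down to 192K, from which
			 * stripe_nr = (192K / 64K) / 3 = 1, i.e. the second
			 * full-stripe row of the chunk.
			 */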
			/* get the data stripe number */
			stripe_nr = raid56_full_stripe_start / map->stripe_len;
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/* Work out the disk rotation on this stripe-set */
			rot = stripe_nr % map->num_stripes;

			/* Fill in the logical address of each stripe */
			tmp = stripe_nr * nr_data_stripes(map);

			for (i = 0; i < nr_data_stripes(map); i++)
				raid_map[(i+rot) % map->num_stripes] =
					ce->start + (tmp + i) * map->stripe_len;

			raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE;
			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
				raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE;

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
			multi->num_stripes = map->num_stripes;
		} else {
			stripe_index = stripe_nr % nr_data_stripes(map);
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
					mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			stripe_index = (stripe_nr + stripe_index) %
				map->num_stripes;
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;

	if (type)
		*type = map->type;

	if (raid_map) {
		sort_parity_stripes(multi, raid_map);
		*raid_map_ret = raid_map;
	}
out:
	return 0;
}
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    (!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE) ||
		     fs_info->ignore_fsid_mismatch)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
struct btrfs_device *
btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
			   u64 devid, int instance)
{
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *dev;
	int num_found = 0;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid && num_found++ == instance)
			return dev;
	}
	return NULL;
}
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	int readonly = 0;
	int i;

	/*
	 * During chunk recovering, we may fail to find block group's
	 * corresponding chunk, we will rebuild it later
	 */
	ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
	if (!fs_info->is_chunk_recover)
		BUG_ON(!ce);
	if (!ce)
		return 0;

	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}

	return readonly;
}
static struct btrfs_device *fill_missing_device(u64 devid)
{
	struct btrfs_device *device;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	device->devid = devid;
	device->fd = -1;
	return device;
}
/*
 * slot == -1: SYSTEM chunk
 * return -EIO on error, otherwise return 0
 */
int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *leaf,
			    struct btrfs_chunk *chunk,
			    int slot, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u32 chunk_ondisk_size;
	u32 sectorsize = fs_info->sectorsize;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	/*
	 * These valid checks may be insufficient to cover every corner case.
	 */
	if (!IS_ALIGNED(logical, sectorsize)) {
		error("invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != sectorsize) {
		error("invalid chunk sectorsize %llu",
		      (unsigned long long)btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, sectorsize)) {
		error("invalid chunk length %llu", length);
		return -EIO;
	}
	if (stripe_len != BTRFS_STRIPE_LEN) {
		error("invalid chunk stripe length: %llu", stripe_len);
		return -EIO;
	}
	/* Check on chunk item type */
	if (slot == -1 && (type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
		error("invalid chunk type %llu", type);
		return -EIO;
	}
	if (type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
		     BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		error("unrecognized chunk type: %llu",
		      ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
		return -EIO;
	}
	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		error("missing chunk type flag: %llu", type);
		return -EIO;
	}
	if (!(is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) ||
	      (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)) {
		error("conflicting chunk type detected: %llu", type);
		return -EIO;
	}
	if ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
	    !is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		error("conflicting chunk profile detected: %llu", type);
		return -EIO;
	}

	chunk_ondisk_size = btrfs_chunk_item_size(num_stripes);
	/*
	 * Btrfs_chunk contains at least one stripe, and for sys_chunk
	 * it can't exceed the system chunk array size.
	 * For normal chunk, it should match its chunk item size.
	 */
	if (num_stripes < 1 ||
	    (slot == -1 && chunk_ondisk_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) ||
	    (slot >= 0 && chunk_ondisk_size > btrfs_item_size_nr(leaf, slot))) {
		error("invalid num_stripes: %u", num_stripes);
		return -EIO;
	}
	/*
	 * Device number check against profile
	 */
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && (sub_stripes != 2 ||
		  !IS_ALIGNED(num_stripes, sub_stripes))) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		error("Invalid num_stripes:sub_stripes %u:%u for profile %llu",
		      num_stripes, sub_stripes,
		      type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EIO;
	}

	return 0;
}
/*
 * Slot is used to verify the chunk item is valid
 *
 * For sys chunk in superblock, pass -1 to indicate sys chunk.
 */
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk, int slot)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Validation check */
	ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, slot, logical);
	if (ret) {
		error("%s checksums match, but it has an invalid chunk, %s",
		      (slot == -1) ? "Superblock" : "Metadata",
		      (slot == -1) ? "try btrfsck --repair -s <superblock> ie, 0,1,2" : "");
		return ret;
	}

	ce = search_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info, devid, uuid,
							NULL);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = fill_missing_device(devid);
			printf("warning, device %llu is missing\n",
			       (unsigned long long)devid);
			list_add(&map->stripes[i].dev->dev_list,
				 &fs_info->fs_devices->devices);
		}
	}
	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	BUG_ON(ret);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		/* missing all seed devices */
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices) {
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, fsid, BTRFS_FSID_SIZE);
	}

	ret = btrfs_open_devices(fs_devices, O_RDONLY);
	if (ret)
		goto out;

	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}
static int read_one_dev(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret = 0;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(fs_info, fs_uuid);
		if (ret)
			return ret;
	}

	device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		device->fd = -1;
		list_add(&device->dev_list,
			 &fs_info->fs_devices->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = fs_info->dev_root;
	return ret;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	struct btrfs_key key;

	if (fs_info->nodesize < BTRFS_SUPER_INFO_SIZE) {
		printf("ERROR: nodesize %u too small to read superblock\n",
		       fs_info->nodesize);
		return -EINVAL;
	}
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				printk(
	"ERROR: invalid number of stripes %u in sys_array at offset %u\n",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(fs_info, &key, sb, chunk, -1);
			if (ret)
				break;
		} else {
			printk(
	"ERROR: unexpected item type %u in sys_array at offset %u\n",
				(u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	free_extent_buffer(sb);
	return ret;

out_short_read:
	printk("ERROR: sys_array too short to read %u bytes at offset %u\n",
			len, cur_offset);
	free_extent_buffer(sb);
	return -EIO;
}
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(fs_info, leaf, dev_item);
			BUG_ON(ret);
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(fs_info, &found_key, leaf, chunk,
					     slot);
			BUG_ON(ret);
		}
		path->slots[0]++;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
struct list_head *btrfs_scanned_uuids(void)
{
	return &fs_uuids;
}
static int rmw_eb(struct btrfs_fs_info *info,
		  struct extent_buffer *eb, struct extent_buffer *orig_eb)
{
	int ret;
	unsigned long orig_off = 0;
	unsigned long dest_off = 0;
	unsigned long copy_len = eb->len;

	ret = read_whole_eb(info, eb, 0);
	if (ret)
		return ret;

	if (eb->start + eb->len <= orig_eb->start ||
	    eb->start >= orig_eb->start + orig_eb->len)
		return 0;
	/*
	 * | ----- orig_eb ------- |
	 *         | ----- stripe -------  |
	 *         | ----- orig_eb ------- |
	 *              | ----- orig_eb ------- |
	 */
	if (eb->start > orig_eb->start)
		orig_off = eb->start - orig_eb->start;
	if (orig_eb->start > eb->start)
		dest_off = orig_eb->start - eb->start;

	if (copy_len > orig_eb->len - orig_off)
		copy_len = orig_eb->len - orig_off;
	if (copy_len > eb->len - dest_off)
		copy_len = eb->len - dest_off;

	memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);
	return 0;
}
static int split_eb_for_raid56(struct btrfs_fs_info *info,
			       struct extent_buffer *orig_eb,
			       struct extent_buffer **ebs,
			       u64 stripe_len, u64 *raid_map,
			       int num_stripes)
{
	struct extent_buffer **tmp_ebs;
	u64 start = orig_eb->start;
	u64 this_eb_start;
	int i;
	int ret = 0;

	tmp_ebs = calloc(num_stripes, sizeof(*tmp_ebs));
	if (!tmp_ebs)
		return -ENOMEM;

	/* Alloc memory in a row for data stripes */
	for (i = 0; i < num_stripes; i++) {
		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		tmp_ebs[i] = calloc(1, sizeof(**tmp_ebs) + stripe_len);
		if (!tmp_ebs[i]) {
			ret = -ENOMEM;
			goto clean_up;
		}
	}
	for (i = 0; i < num_stripes; i++) {
		struct extent_buffer *eb = tmp_ebs[i];

		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		eb->start = raid_map[i];
		eb->len = stripe_len;
		eb->refs = 1;
		eb->flags = 0;
		eb->fd = -1;
		eb->dev_bytenr = (u64)-1;

		this_eb_start = raid_map[i];

		if (start > this_eb_start ||
		    start + orig_eb->len < this_eb_start + stripe_len) {
			ret = rmw_eb(info, eb, orig_eb);
			if (ret)
				goto clean_up;
		} else {
			memcpy(eb->data, orig_eb->data + eb->start - start,
			       stripe_len);
		}
		ebs[i] = eb;
	}
	free(tmp_ebs);
	return ret;
clean_up:
	for (i = 0; i < num_stripes; i++)
		free(tmp_ebs[i]);
	free(tmp_ebs);
	return ret;
}
int write_raid56_with_parity(struct btrfs_fs_info *info,
			     struct extent_buffer *eb,
			     struct btrfs_multi_bio *multi,
			     u64 stripe_len, u64 *raid_map)
{
	struct extent_buffer **ebs, *p_eb = NULL, *q_eb = NULL;
	int i;
	int ret;
	int alloc_size = eb->len;
	void **pointers;

	ebs = malloc(sizeof(*ebs) * multi->num_stripes);
	pointers = malloc(sizeof(*pointers) * multi->num_stripes);
	if (!ebs || !pointers) {
		free(ebs);
		free(pointers);
		return -ENOMEM;
	}

	if (stripe_len > alloc_size)
		alloc_size = stripe_len;

	ret = split_eb_for_raid56(info, eb, ebs, stripe_len, raid_map,
				  multi->num_stripes);
	if (ret)
		goto out;

	for (i = 0; i < multi->num_stripes; i++) {
		struct extent_buffer *new_eb;
		if (raid_map[i] < BTRFS_RAID5_P_STRIPE) {
			ebs[i]->dev_bytenr = multi->stripes[i].physical;
			ebs[i]->fd = multi->stripes[i].dev->fd;
			multi->stripes[i].dev->total_ios++;
			if (ebs[i]->start != raid_map[i]) {
				ret = -EINVAL;
				goto out_free_split;
			}
			continue;
		}
		new_eb = malloc(sizeof(*eb) + alloc_size);
		if (!new_eb) {
			ret = -ENOMEM;
			goto out_free_split;
		}
		new_eb->dev_bytenr = multi->stripes[i].physical;
		new_eb->fd = multi->stripes[i].dev->fd;
		multi->stripes[i].dev->total_ios++;
		new_eb->len = stripe_len;

		if (raid_map[i] == BTRFS_RAID5_P_STRIPE)
			p_eb = new_eb;
		else if (raid_map[i] == BTRFS_RAID6_Q_STRIPE)
			q_eb = new_eb;
	}
	if (q_eb) {
		ebs[multi->num_stripes - 2] = p_eb;
		ebs[multi->num_stripes - 1] = q_eb;

		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;

		raid6_gen_syndrome(multi->num_stripes, stripe_len, pointers);
	} else {
		ebs[multi->num_stripes - 1] = p_eb;
		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;
		ret = raid5_gen_result(multi->num_stripes, stripe_len,
				       multi->num_stripes - 1, pointers);
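		/*
		 * Editorial note: for RAID6, raid6_gen_syndrome() above fills
		 * both the P (XOR parity) and Q (Reed-Solomon syndrome)
		 * buffers; for RAID5, raid5_gen_result() computes the single
		 * XOR parity stripe into the last pointer slot.
		 */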
		if (ret < 0)
			goto out_free_split;
	}

	for (i = 0; i < multi->num_stripes; i++) {
		ret = write_extent_to_disk(ebs[i]);
		if (ret < 0)
			goto out_free_split;
	}

out_free_split:
	for (i = 0; i < multi->num_stripes; i++) {
		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			free(ebs[i]);
	}
out:
	free(ebs);
	free(pointers);

	return ret;
}
/*
 * Get stripe length from chunk item and its stripe items
 *
 * Caller should only call this function after validating the chunk item
 * by using btrfs_check_chunk_valid().
 */
u64 btrfs_stripe_length(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_chunk *chunk)
{
	u64 stripe_len;
	u64 chunk_len;
	u32 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 profile = btrfs_chunk_type(leaf, chunk) &
		      BTRFS_BLOCK_GROUP_PROFILE_MASK;

	chunk_len = btrfs_chunk_length(leaf, chunk);

	switch (profile) {
	case 0: /* Single profile */
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_DUP:
		stripe_len = chunk_len;
		break;
	case BTRFS_BLOCK_GROUP_RAID0:
		stripe_len = chunk_len / num_stripes;
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
		stripe_len = chunk_len / (num_stripes - 1);
		break;
	case BTRFS_BLOCK_GROUP_RAID6:
		stripe_len = chunk_len / (num_stripes - 2);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		stripe_len = chunk_len / (num_stripes /
				btrfs_chunk_sub_stripes(leaf, chunk));
		break;
	default:
		/* Invalid chunk profile found */
		BUG_ON(1);
	}
	return stripe_len;
}
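/*
 * Illustrative example (editorial, not in the original source): a 6G RAID6
 * chunk over 5 devices occupies 6G / (5 - 2) = 2G on each device, and the
 * same 6G as RAID10 with sub_stripes = 2 over 6 devices occupies
 * 6G / (6 / 2) = 2G per device.
 */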