/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>
#include <fcntl.h>
#include <unistd.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "utils.h"
#include "kernel-lib/raid56.h"
struct stripe {
        struct btrfs_device *dev;
        u64 physical;
};
static inline int nr_parity_stripes(struct map_lookup *map)
{
        if (map->type & BTRFS_BLOCK_GROUP_RAID5)
                return 1;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
                return 2;
        else
                return 0;
}
static inline int nr_data_stripes(struct map_lookup *map)
{
        return map->num_stripes - nr_parity_stripes(map);
}
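
/*
 * Example (illustrative, not from the original source): a four-device RAID6
 * chunk has num_stripes == 4 and nr_parity_stripes() == 2, so
 * nr_data_stripes() == 2; the same four stripes in RAID5 would split into
 * 1 parity and 3 data stripes.
 */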
#define is_parity_stripe(x) (((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE))
static LIST_HEAD(fs_uuids);
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
                                          u8 *uuid)
{
        struct btrfs_device *dev;
        struct list_head *cur;

        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (dev->devid == devid &&
                    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
                        return dev;
                }
        }
        return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct list_head *cur;
        struct btrfs_fs_devices *fs_devices;

        list_for_each(cur, &fs_uuids) {
                fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}
static int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                fs_devices->lowest_devid = (u64)-1;
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->fd = -1;
                device->devid = devid;
                device->generation = found_transid;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        return -ENOMEM;
                }
                device->label = kstrdup(disk_super->label, GFP_NOFS);
                if (!device->label) {
                        kfree(device->name);
                        kfree(device);
                        return -ENOMEM;
                }
                device->total_devs = btrfs_super_num_devices(disk_super);
                device->super_bytes_used = btrfs_super_bytes_used(disk_super);
                device->total_bytes =
                        btrfs_stack_device_total_bytes(&disk_super->dev_item);
                device->bytes_used =
                        btrfs_stack_device_bytes_used(&disk_super->dev_item);
                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
        } else if (!device->name || strcmp(device->name, path)) {
                char *name = strdup(path);
                if (!name)
                        return -ENOMEM;
                kfree(device->name);
                device->name = name;
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        if (fs_devices->lowest_devid > devid) {
                fs_devices->lowest_devid = devid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}
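
/*
 * Note (added commentary): device_list_add() keys filesystems by fsid and
 * devices by (devid, dev_item uuid), so rescanning a known device only
 * refreshes its path, while a higher superblock generation promotes that
 * device to latest_devid/latest_trans.
 */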
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices;
        struct btrfs_device *device;
        int ret = 0;

again:
        if (!fs_devices)
                return 0;
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                if (device->fd != -1) {
                        if (fsync(device->fd) == -1) {
                                warning("fsync on device %llu failed: %s",
                                        device->devid, strerror(errno));
                                ret = -errno;
                        }
                        if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
                                fprintf(stderr, "Warning, could not drop caches\n");
                        close(device->fd);
                        device->fd = -1;
                }
                device->writeable = 0;
                list_del(&device->dev_list);
                /* free the memory */
                free(device->name);
                free(device->label);
                free(device);
        }

        seed_devices = fs_devices->seed;
        fs_devices->seed = NULL;
        if (seed_devices) {
                struct btrfs_fs_devices *orig;

                orig = fs_devices;
                fs_devices = seed_devices;
                list_del(&orig->list);
                free(orig);
                goto again;
        } else {
                list_del(&fs_devices->list);
                free(fs_devices);
        }

        return ret;
}
void btrfs_close_all_devices(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
                                        list);
                btrfs_close_devices(fs_devices);
        }
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
        int fd;
        int ret;
        struct list_head *head = &fs_devices->devices;
        struct list_head *cur;
        struct btrfs_device *device;

        list_for_each(cur, head) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (!device->name) {
                        printk("no name for device %llu, skip it now\n", device->devid);
                        continue;
                }

                fd = open(device->name, flags);
                if (fd < 0) {
                        ret = -errno;
                        error("cannot open device '%s': %s", device->name,
                              strerror(errno));
                        goto fail;
                }

                if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
                        fprintf(stderr, "Warning, could not drop caches\n");

                if (device->devid == fs_devices->latest_devid)
                        fs_devices->latest_bdev = fd;
                if (device->devid == fs_devices->lowest_devid)
                        fs_devices->lowest_bdev = fd;
                device->fd = fd;
                if (flags & O_RDWR)
                        device->writeable = 1;
        }
        return 0;
fail:
        btrfs_close_devices(fs_devices);
        return ret;
}
int btrfs_scan_one_device(int fd, const char *path,
                          struct btrfs_fs_devices **fs_devices_ret,
                          u64 *total_devs, u64 super_offset, unsigned sbflags)
{
        struct btrfs_super_block *disk_super;
        char buf[BTRFS_SUPER_INFO_SIZE];
        int ret;
        u64 devid;

        disk_super = (struct btrfs_super_block *)buf;
        ret = btrfs_read_dev_super(fd, disk_super, super_offset, sbflags);
        if (ret < 0)
                return -EIO;
        devid = btrfs_stack_device_id(&disk_super->dev_item);
        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
                *total_devs = 1;
        else
                *total_devs = btrfs_super_num_devices(disk_super);

        ret = device_list_add(path, disk_super, devid, fs_devices_ret);

        return ret;
}
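
/*
 * Typical use (illustrative sketch, not from the original source): callers
 * open each candidate block device and scan it, e.g.
 *
 *      fd = open(path, O_RDONLY);
 *      ret = btrfs_scan_one_device(fd, path, &fs_devices, &total_devs,
 *                                  BTRFS_SUPER_INFO_OFFSET, 0);
 *
 * so devices carrying the same fsid accumulate into one btrfs_fs_devices
 * via device_list_add() above.
 */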
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:       the device which we search the free space in
 * @num_bytes:    the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:        store the start of the free space.
 * @len:          the size of the free space that we find, or the size
 *                of the max free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
static int find_free_dev_extent_start(struct btrfs_trans_handle *trans,
                                      struct btrfs_device *device,
                                      u64 num_bytes, u64 search_start,
                                      u64 *start, u64 *len)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 hole_size;
        u64 max_hole_start;
        u64 max_hole_size = 0;
        u64 extent_end;
        u64 search_end = device->total_bytes;
        int ret;
        int slot;
        struct extent_buffer *l;
        u64 min_search_start;

        /*
         * We don't want to overwrite the superblock on the drive nor any area
         * used by the boot loader (grub for example), so we make sure to start
         * at an offset of at least 1MB.
         */
        min_search_start = max(root->fs_info->alloc_start, (u64)SZ_1M);
        search_start = max(search_start, min_search_start);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        max_hole_start = search_start;

        if (search_start >= search_end) {
                ret = -ENOSPC;
                goto out;
        }

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;
                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (key.type != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                if (key.offset > search_start) {
                        hole_size = key.offset - search_start;

                        /*
                         * Have to check before we set max_hole_start, otherwise
                         * we could end up sending back this offset anyway.
                         */
                        if (hole_size > max_hole_size) {
                                max_hole_start = search_start;
                                max_hole_size = hole_size;
                        }

                        /*
                         * If this free space is greater than which we need,
                         * it must be the max free space that we have found
                         * until now, so max_hole_start must point to the start
                         * of this free space and the length of this free space
                         * is stored in max_hole_size. Thus, we return
                         * max_hole_start and max_hole_size and go back to the
                         * caller.
                         */
                        if (hole_size >= num_bytes) {
                                ret = 0;
                                goto out;
                        }
                }

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
next:
                path->slots[0]++;
        }

        /*
         * At this point, search_start should be the end of
         * allocated dev extents, and when shrinking the device,
         * search_end may be smaller than search_start.
         */
        if (search_end > search_start) {
                hole_size = search_end - search_start;

                if (hole_size > max_hole_size) {
                        max_hole_start = search_start;
                        max_hole_size = hole_size;
                }
        }

        if (max_hole_size < num_bytes)
                ret = -ENOSPC;
        else
                ret = 0;

out:
        btrfs_free_path(path);
        *start = max_hole_start;
        if (len)
                *len = max_hole_size;
        return ret;
}
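
/*
 * Worked example (illustrative): with dev extents at [1M, 5M) and [9M, 12M)
 * on a 16M device, a search for 4M starting at 1M ends with *start == 5M,
 * since the hole [5M, 9M) is the first one with hole_size >= num_bytes; the
 * trailing hole [12M, 16M) would only be reported through *start/*len if no
 * earlier hole had been big enough.
 */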
int find_free_dev_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_device *device, u64 num_bytes,
                         u64 *start)
{
        /* FIXME use last free of some kind */
        return find_free_dev_extent_start(trans, device,
                                          num_bytes, 0, start, NULL);
}
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                                  struct btrfs_device *device,
                                  u64 chunk_tree, u64 chunk_objectid,
                                  u64 chunk_offset,
                                  u64 num_bytes, u64 *start, int convert)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /*
         * For convert case, just skip search free dev_extent, as caller
         * is responsible to make sure it's free.
         */
        if (!convert) {
                ret = find_free_dev_extent(trans, device, num_bytes,
                                           start);
                if (ret)
                        goto err;
        }

        key.objectid = device->devid;
        key.offset = *start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        BUG_ON(ret);

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
err:
        btrfs_free_path(path);
        return ret;
}
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_key found_key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
        if (ret) {
                *offset = 0;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != objectid)
                        *offset = 0;
                else {
                        chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                               struct btrfs_chunk);
                        *offset = found_key.offset +
                                btrfs_chunk_length(path->nodes[0], chunk);
                }
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
                           u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_release_path(path);
        return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
                     struct btrfs_fs_info *fs_info,
                     struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_root *root = fs_info->chunk_root;
        unsigned long ptr;
        u64 free_devid = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = find_next_devid(root, path, &free_devid);
        if (ret)
                goto out;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = free_devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        device->devid = free_devid;
        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = (unsigned long)btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;

out:
        btrfs_free_path(path);
        return ret;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
                        struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        root = device->dev_root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_mark_buffer_dirty(leaf);

out:
        btrfs_free_path(path);
        return ret;
}
int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
                           struct btrfs_chunk *chunk, int item_size)
{
        struct btrfs_super_block *super_copy = fs_info->super_copy;
        struct btrfs_disk_key disk_key;
        u32 array_size;
        u8 *ptr;

        array_size = btrfs_super_sys_array_size(super_copy);
        if (array_size + item_size + sizeof(disk_key)
                        > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
                return -EFBIG;

        ptr = super_copy->sys_chunk_array + array_size;
        btrfs_cpu_key_to_disk(&disk_key, key);
        memcpy(ptr, &disk_key, sizeof(disk_key));
        ptr += sizeof(disk_key);
        memcpy(ptr, chunk, item_size);
        item_size += sizeof(disk_key);
        btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
        return 0;
}
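
/*
 * Layout note (added commentary): sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk + stripes) pairs, so a chunk
 * item with two stripes consumes sizeof(disk_key) + btrfs_chunk_item_size(2)
 * bytes of the BTRFS_SYSTEM_CHUNK_ARRAY_SIZE (2048) byte array.
 */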
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
                               int sub_stripes)
{
        if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
                return calc_size;
        else if (type & BTRFS_BLOCK_GROUP_RAID10)
                return calc_size * (num_stripes / sub_stripes);
        else if (type & BTRFS_BLOCK_GROUP_RAID5)
                return calc_size * (num_stripes - 1);
        else if (type & BTRFS_BLOCK_GROUP_RAID6)
                return calc_size * (num_stripes - 2);
        else
                return calc_size * num_stripes;
}
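
/*
 * Worked example (illustrative): with calc_size == 8MiB per device, a
 * 4-stripe RAID10 chunk (sub_stripes == 2) yields 8MiB * (4 / 2) = 16MiB of
 * usable space, and a 4-stripe RAID6 chunk yields 8MiB * (4 - 2) = 16MiB.
 */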
static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
        /* TODO, add a way to store the preferred stripe size */
        return BTRFS_STRIPE_LEN;
}
/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in 1M at beginning of device, and not
 * allowed to allocate any chunk before alloc_start if it is specified.
 * So search holes from max(1M, alloc_start) to device->total_bytes.
 */
static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
                                    struct btrfs_device *device,
                                    u64 *avail_bytes)
{
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_dev_extent *dev_extent = NULL;
        struct extent_buffer *l;
        u64 search_start = root->fs_info->alloc_start;
        u64 search_end = device->total_bytes;
        u64 extent_end = 0;
        u64 free_bytes = 0;
        int ret;
        int slot = 0;

        search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = root->fs_info->alloc_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        ret = btrfs_previous_item(root, path, 0, key.type);
        if (ret < 0)
                goto error;

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;
                if (key.objectid > device->devid)
                        break;
                if (key.type != BTRFS_DEV_EXTENT_KEY)
                        goto next;
                if (key.offset > search_end)
                        break;
                if (key.offset > search_start)
                        free_bytes += key.offset - search_start;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
                if (search_start > search_end)
                        break;
next:
                path->slots[0]++;
        }

        if (search_start < search_end)
                free_bytes += search_end - search_start;

        *avail_bytes = free_bytes;
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)             \
                        - sizeof(struct btrfs_item)             \
                        - sizeof(struct btrfs_chunk))           \
                        / sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE       \
                                - 2 * sizeof(struct btrfs_disk_key)    \
                                - 2 * sizeof(struct btrfs_chunk))      \
                                / sizeof(struct btrfs_stripe) + 1)
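
/*
 * Rough arithmetic (illustrative, using the on-disk sizes: 17-byte disk
 * key, 48-byte chunk header plus one embedded 32-byte stripe, 32 bytes per
 * additional stripe): the 2048-byte system chunk array allows roughly
 * (2048 - 2*17 - 2*80) / 32 + 1 = 58 stripes per system chunk.
 */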
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                      struct btrfs_fs_info *info, u64 *start,
                      u64 *num_bytes, u64 type)
{
        u64 dev_offset;
        struct btrfs_root *extent_root = info->extent_root;
        struct btrfs_root *chunk_root = info->chunk_root;
        struct btrfs_stripe *stripes;
        struct btrfs_device *device = NULL;
        struct btrfs_chunk *chunk;
        struct list_head private_devs;
        struct list_head *dev_list = &info->fs_devices->devices;
        struct list_head *cur;
        struct map_lookup *map;
        int min_stripe_size = SZ_1M;
        u64 calc_size = SZ_8M;
        u64 min_free;
        u64 max_chunk_size = 4 * calc_size;
        u64 avail = 0;
        u64 max_avail = 0;
        u64 percent_max;
        u64 offset;
        int num_stripes = 1;
        int max_stripes = 0;
        int min_stripes = 1;
        int sub_stripes = 0;
        int looped = 0;
        int ret;
        int index;
        int stripe_len = BTRFS_STRIPE_LEN;
        struct btrfs_key key;

        if (list_empty(dev_list)) {
                return -ENOSPC;
        }

        if (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
                if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                        calc_size = SZ_8M;
                        max_chunk_size = calc_size * 2;
                        min_stripe_size = SZ_1M;
                        max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
                } else if (type & BTRFS_BLOCK_GROUP_DATA) {
                        calc_size = SZ_1G;
                        max_chunk_size = 10 * calc_size;
                        min_stripe_size = SZ_64M;
                        max_stripes = BTRFS_MAX_DEVS(chunk_root);
                } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
                        calc_size = SZ_1G;
                        max_chunk_size = 4 * calc_size;
                        min_stripe_size = SZ_32M;
                        max_stripes = BTRFS_MAX_DEVS(chunk_root);
                }
        }
        if (type & BTRFS_BLOCK_GROUP_RAID1) {
                num_stripes = min_t(u64, 2,
                                  btrfs_super_num_devices(info->super_copy));
                if (num_stripes < 2)
                        return -ENOSPC;
                min_stripes = 2;
        }
        if (type & BTRFS_BLOCK_GROUP_DUP) {
                num_stripes = 2;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
                num_stripes = btrfs_super_num_devices(info->super_copy);
                if (num_stripes > max_stripes)
                        num_stripes = max_stripes;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                num_stripes = btrfs_super_num_devices(info->super_copy);
                if (num_stripes > max_stripes)
                        num_stripes = max_stripes;
                if (num_stripes < 4)
                        return -ENOSPC;
                num_stripes &= ~(u32)1;
                sub_stripes = 2;
                min_stripes = 4;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
                num_stripes = btrfs_super_num_devices(info->super_copy);
                if (num_stripes > max_stripes)
                        num_stripes = max_stripes;
                if (num_stripes < 2)
                        return -ENOSPC;
                min_stripes = 2;
                stripe_len = find_raid56_stripe_len(num_stripes - 1,
                                btrfs_super_stripesize(info->super_copy));
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
                num_stripes = btrfs_super_num_devices(info->super_copy);
                if (num_stripes > max_stripes)
                        num_stripes = max_stripes;
                if (num_stripes < 3)
                        return -ENOSPC;
                min_stripes = 3;
                stripe_len = find_raid56_stripe_len(num_stripes - 2,
                                btrfs_super_stripesize(info->super_copy));
        }

        /* we don't want a chunk larger than 10% of the FS */
        percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
        max_chunk_size = min(percent_max, max_chunk_size);

again:
        if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
            max_chunk_size) {
                calc_size = max_chunk_size;
                calc_size /= num_stripes;
                calc_size /= stripe_len;
                calc_size *= stripe_len;
        }
        /* we don't want tiny stripes */
        calc_size = max_t(u64, calc_size, min_stripe_size);

        calc_size /= stripe_len;
        calc_size *= stripe_len;
        INIT_LIST_HEAD(&private_devs);
        cur = dev_list->next;
        index = 0;

        if (type & BTRFS_BLOCK_GROUP_DUP)
                min_free = calc_size * 2;
        else
                min_free = calc_size;

        /* build a private list of devices we will allocate from */
        while (index < num_stripes) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                ret = btrfs_device_avail_bytes(trans, device, &avail);
                if (ret)
                        return ret;
                cur = cur->next;
                if (avail >= min_free) {
                        list_move_tail(&device->dev_list, &private_devs);
                        index++;
                        if (type & BTRFS_BLOCK_GROUP_DUP)
                                index++;
                } else if (avail > max_avail)
                        max_avail = avail;
                if (cur == dev_list)
                        break;
        }
        if (index < num_stripes) {
                list_splice(&private_devs, dev_list);
                if (index >= min_stripes) {
                        num_stripes = index;
                        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                                num_stripes /= sub_stripes;
                                num_stripes *= sub_stripes;
                        }
                        looped = 1;
                        goto again;
                }
                if (!looped && max_avail > 0) {
                        looped = 1;
                        calc_size = max_avail;
                        goto again;
                }
                return -ENOSPC;
        }
        ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                              &offset);
        if (ret)
                return ret;
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.type = BTRFS_CHUNK_ITEM_KEY;
        key.offset = offset;

        chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
        if (!chunk)
                return -ENOMEM;

        map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
                kfree(chunk);
                return -ENOMEM;
        }

        stripes = &chunk->stripe;
        *num_bytes = chunk_bytes_by_type(type, calc_size,
                                         num_stripes, sub_stripes);
        index = 0;
        while (index < num_stripes) {
                struct btrfs_stripe *stripe;
                BUG_ON(list_empty(&private_devs));
                cur = private_devs.next;
                device = list_entry(cur, struct btrfs_device, dev_list);

                /* loop over this device again if we're doing a dup group */
                if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
                    (index == num_stripes - 1))
                        list_move_tail(&device->dev_list, dev_list);

                ret = btrfs_alloc_dev_extent(trans, device,
                             info->chunk_root->root_key.objectid,
                             BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
                             calc_size, &dev_offset, 0);
                BUG_ON(ret);

                device->bytes_used += calc_size;
                ret = btrfs_update_device(trans, device);
                BUG_ON(ret);

                map->stripes[index].dev = device;
                map->stripes[index].physical = dev_offset;
                stripe = stripes + index;
                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                index++;
        }
        BUG_ON(!list_empty(&private_devs));

        /* key was set above */
        btrfs_set_stack_chunk_length(chunk, *num_bytes);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
        btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
        btrfs_set_stack_chunk_type(chunk, type);
        btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
        btrfs_set_stack_chunk_io_align(chunk, stripe_len);
        btrfs_set_stack_chunk_io_width(chunk, stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
        btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
        map->sector_size = info->sectorsize;
        map->stripe_len = stripe_len;
        map->io_align = stripe_len;
        map->io_width = stripe_len;
        map->type = type;
        map->num_stripes = num_stripes;
        map->sub_stripes = sub_stripes;

        ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
                                btrfs_chunk_item_size(num_stripes));
        BUG_ON(ret);
        *start = key.offset;

        map->ce.start = key.offset;
        map->ce.size = *num_bytes;

        ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
        BUG_ON(ret);

        if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_add_system_chunk(info, &key,
                            chunk, btrfs_chunk_item_size(num_stripes));
                BUG_ON(ret);
        }

        kfree(chunk);
        return ret;
}
/*
 * Alloc a DATA chunk with SINGLE profile.
 *
 * If 'convert' is set, it will alloc a chunk with 1:1 mapping
 * (btrfs logical bytenr == on-disk bytenr)
 * For that case, caller must make sure the chunk and dev_extent are not
 * occupied.
 */
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *info, u64 *start,
                           u64 num_bytes, u64 type, int convert)
{
        u64 dev_offset;
        struct btrfs_root *extent_root = info->extent_root;
        struct btrfs_root *chunk_root = info->chunk_root;
        struct btrfs_stripe *stripes;
        struct btrfs_device *device = NULL;
        struct btrfs_chunk *chunk;
        struct list_head *dev_list = &info->fs_devices->devices;
        struct list_head *cur;
        struct map_lookup *map;
        u64 calc_size = SZ_8M;
        int num_stripes = 1;
        int sub_stripes = 0;
        int ret;
        int index;
        int stripe_len = BTRFS_STRIPE_LEN;
        struct btrfs_key key;

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.type = BTRFS_CHUNK_ITEM_KEY;
        if (convert) {
                if (*start != round_down(*start, info->sectorsize)) {
                        error("DATA chunk start not sectorsize aligned: %llu",
                                        (unsigned long long)*start);
                        return -EINVAL;
                }
                key.offset = *start;
                dev_offset = *start;
        } else {
                ret = find_next_chunk(chunk_root,
                                      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                                      &key.offset);
                if (ret)
                        return ret;
        }

        chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
        if (!chunk)
                return -ENOMEM;

        map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
                kfree(chunk);
                return -ENOMEM;
        }

        stripes = &chunk->stripe;
        calc_size = num_bytes;

        index = 0;
        cur = dev_list->next;
        device = list_entry(cur, struct btrfs_device, dev_list);

        while (index < num_stripes) {
                struct btrfs_stripe *stripe;

                ret = btrfs_alloc_dev_extent(trans, device,
                             info->chunk_root->root_key.objectid,
                             BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
                             calc_size, &dev_offset, convert);
                BUG_ON(ret);

                device->bytes_used += calc_size;
                ret = btrfs_update_device(trans, device);
                BUG_ON(ret);

                map->stripes[index].dev = device;
                map->stripes[index].physical = dev_offset;
                stripe = stripes + index;
                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                index++;
        }

        /* key was set above */
        btrfs_set_stack_chunk_length(chunk, num_bytes);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
        btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
        btrfs_set_stack_chunk_type(chunk, type);
        btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
        btrfs_set_stack_chunk_io_align(chunk, stripe_len);
        btrfs_set_stack_chunk_io_width(chunk, stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
        btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
        map->sector_size = info->sectorsize;
        map->stripe_len = stripe_len;
        map->io_align = stripe_len;
        map->io_width = stripe_len;
        map->type = type;
        map->num_stripes = num_stripes;
        map->sub_stripes = sub_stripes;

        ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
                                btrfs_chunk_item_size(num_stripes));
        BUG_ON(ret);
        *start = key.offset;

        map->ce.start = key.offset;
        map->ce.size = num_bytes;

        ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
        BUG_ON(ret);

        kfree(chunk);
        return ret;
}
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct cache_extent *ce;
        struct map_lookup *map;
        int ret;

        ce = search_cache_extent(&map_tree->cache_tree, logical);
        if (!ce) {
                fprintf(stderr, "No mapping for %llu-%llu\n",
                        (unsigned long long)logical,
                        (unsigned long long)logical+len);
                return 1;
        }
        if (ce->start > logical || ce->start + ce->size < logical) {
                fprintf(stderr, "Invalid mapping for %llu-%llu, got "
                        "%llu-%llu\n", (unsigned long long)logical,
                        (unsigned long long)logical+len,
                        (unsigned long long)ce->start,
                        (unsigned long long)ce->start + ce->size);
                return 1;
        }
        map = container_of(ce, struct map_lookup, ce);

        if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
                ret = map->num_stripes;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                ret = map->sub_stripes;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
                ret = 2;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
                ret = 3;
        else
                ret = 1;
        return ret;
}
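
/*
 * Example (illustrative): RAID1 and DUP report map->num_stripes copies (2),
 * RAID10 reports sub_stripes (2), RAID5 reports 2 (data plus rebuild from
 * parity) and RAID6 reports 3 (data, P and Q reconstruction paths).
 */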
int btrfs_next_bg(struct btrfs_fs_info *fs_info, u64 *logical,
                  u64 *size, u64 type)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct cache_extent *ce;
        struct map_lookup *map;
        u64 cur = *logical;

        ce = search_cache_extent(&map_tree->cache_tree, cur);

        while (ce) {
                /*
                 * only jump to next bg if our cur is not 0
                 * As the initial logical for btrfs_next_bg() is 0, and
                 * if we jump to next bg, we skipped a valid bg.
                 */
                if (cur) {
                        ce = next_cache_extent(ce);
                        if (!ce)
                                return -ENOENT;
                }

                cur = ce->start;
                map = container_of(ce, struct map_lookup, ce);
                if (map->type & type) {
                        *logical = ce->start;
                        *size = ce->size;
                        return 0;
                }
                if (!cur)
                        ce = next_cache_extent(ce);
        }

        return -ENOENT;
}
int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
                     u64 chunk_start, u64 physical, u64 devid,
                     u64 **logical, int *naddrs, int *stripe_len)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct cache_extent *ce;
        struct map_lookup *map;
        u64 *buf;
        u64 bytenr;
        u64 length;
        u64 stripe_nr;
        u64 rmap_len;
        int i, j, nr = 0;

        ce = search_cache_extent(&map_tree->cache_tree, chunk_start);
        BUG_ON(!ce);
        map = container_of(ce, struct map_lookup, ce);

        length = ce->size;
        rmap_len = map->stripe_len;
        if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                length = ce->size / (map->num_stripes / map->sub_stripes);
        else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
                length = ce->size / map->num_stripes;
        else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
                              BTRFS_BLOCK_GROUP_RAID6)) {
                length = ce->size / nr_data_stripes(map);
                rmap_len = map->stripe_len * nr_data_stripes(map);
        }

        buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

        for (i = 0; i < map->num_stripes; i++) {
                if (devid && map->stripes[i].dev->devid != devid)
                        continue;
                if (map->stripes[i].physical > physical ||
                    map->stripes[i].physical + length <= physical)
                        continue;

                stripe_nr = (physical - map->stripes[i].physical) /
                            map->stripe_len;

                if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripe_nr = (stripe_nr * map->num_stripes + i) /
                                    map->sub_stripes;
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
                        stripe_nr = stripe_nr * map->num_stripes + i;
                } /* else if RAID[56], multiply by nr_data_stripes().
                   * Alternatively, just use rmap_len below instead of
                   * map->stripe_len */

                bytenr = ce->start + stripe_nr * rmap_len;
                for (j = 0; j < nr; j++) {
                        if (buf[j] == bytenr)
                                break;
                }
                if (j == nr)
                        buf[nr++] = bytenr;
        }

        *logical = buf;
        *naddrs = nr;
        *stripe_len = rmap_len;

        return 0;
}
static inline int parity_smaller(u64 a, u64 b)
{
        return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
        struct btrfs_bio_stripe s;
        int i;
        u64 l;
        int again = 1;

        while (again) {
                again = 0;
                for (i = 0; i < bbio->num_stripes - 1; i++) {
                        if (parity_smaller(raid_map[i], raid_map[i+1])) {
                                s = bbio->stripes[i];
                                l = raid_map[i];
                                bbio->stripes[i] = bbio->stripes[i+1];
                                raid_map[i] = raid_map[i+1];
                                bbio->stripes[i+1] = s;
                                raid_map[i+1] = l;
                                again = 1;
                        }
                }
        }
}
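
/*
 * Example (illustrative): BTRFS_RAID5_P_STRIPE and BTRFS_RAID6_Q_STRIPE are
 * (u64)-2 and (u64)-1 sentinels, so they sort above every real logical
 * address and a stripe set {D0, P, D1} is reordered to {D0, D1, P}.
 */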
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                    u64 logical, u64 *length,
                    struct btrfs_multi_bio **multi_ret, int mirror_num,
                    u64 **raid_map_ret)
{
        return __btrfs_map_block(fs_info, rw, logical, length, NULL,
                                 multi_ret, mirror_num, raid_map_ret);
}
int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                      u64 logical, u64 *length, u64 *type,
                      struct btrfs_multi_bio **multi_ret, int mirror_num,
                      u64 **raid_map_ret)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct cache_extent *ce;
        struct map_lookup *map;
        u64 offset;
        u64 stripe_offset;
        u64 stripe_nr;
        u64 *raid_map = NULL;
        int stripes_allocated = 8;
        int stripes_required = 1;
        int stripe_index;
        int i;
        struct btrfs_multi_bio *multi = NULL;

        if (multi_ret && rw == READ) {
                stripes_allocated = 1;
        }
again:
        ce = search_cache_extent(&map_tree->cache_tree, logical);
        if (!ce) {
                kfree(multi);
                *length = (u64)-1;
                return -ENOENT;
        }
        if (ce->start > logical) {
                kfree(multi);
                *length = ce->start - logical;
                return -ENOENT;
        }

        if (multi_ret) {
                multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
                                GFP_NOFS);
                if (!multi)
                        return -ENOMEM;
        }
        map = container_of(ce, struct map_lookup, ce);
        offset = logical - ce->start;

        if (rw == WRITE) {
                if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                                 BTRFS_BLOCK_GROUP_DUP)) {
                        stripes_required = map->num_stripes;
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripes_required = map->sub_stripes;
                }
        }
        if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
            && multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
                /* RAID[56] write or recovery. Return all stripes */
                stripes_required = map->num_stripes;

                /* Only allocate the map if we've already got a large enough multi_ret */
                if (stripes_allocated >= stripes_required) {
                        raid_map = kmalloc(sizeof(u64) * map->num_stripes,
                                           GFP_NOFS);
                        if (!raid_map) {
                                kfree(multi);
                                return -ENOMEM;
                        }
                }
        }

        /* if our multi bio struct is too small, back off and try again */
        if (multi_ret && stripes_allocated < stripes_required) {
                stripes_allocated = stripes_required;
                kfree(multi);
                multi = NULL;
                goto again;
        }
        stripe_nr = offset;
        /*
         * stripe_nr counts the total number of stripes we have to stride
         * to get to this block
         */
        stripe_nr = stripe_nr / map->stripe_len;

        stripe_offset = stripe_nr * map->stripe_len;
        BUG_ON(offset < stripe_offset);

        /* stripe_offset is the offset of this block in its stripe*/
        stripe_offset = offset - stripe_offset;

        if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
                         BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
                         BTRFS_BLOCK_GROUP_RAID10 |
                         BTRFS_BLOCK_GROUP_DUP)) {
                /* we limit the length of each bio to what fits in a stripe */
                *length = min_t(u64, ce->size - offset,
                              map->stripe_len - stripe_offset);
        } else {
                *length = ce->size - offset;
        }

        if (!multi_ret)
                goto out;

        multi->num_stripes = 1;
        stripe_index = 0;
        if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
                if (rw == WRITE)
                        multi->num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
                else
                        stripe_index = stripe_nr % map->num_stripes;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                int factor = map->num_stripes / map->sub_stripes;

                stripe_index = stripe_nr % factor;
                stripe_index *= map->sub_stripes;

                if (rw == WRITE)
                        multi->num_stripes = map->sub_stripes;
                else if (mirror_num)
                        stripe_index += mirror_num - 1;

                stripe_nr = stripe_nr / factor;
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                if (rw == WRITE)
                        multi->num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
        } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
                                BTRFS_BLOCK_GROUP_RAID6)) {
                if (raid_map) {
                        int rot;
                        u64 tmp;
                        u64 raid56_full_stripe_start;
                        u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len;

                        /*
                         * align the start of our data stripe in the logical
                         * address space
                         */
                        raid56_full_stripe_start = offset / full_stripe_len;
                        raid56_full_stripe_start *= full_stripe_len;

                        /* get the data stripe number */
                        stripe_nr = raid56_full_stripe_start / map->stripe_len;
                        stripe_nr = stripe_nr / nr_data_stripes(map);

                        /* Work out the disk rotation on this stripe-set */
                        rot = stripe_nr % map->num_stripes;

                        /* Fill in the logical address of each stripe */
                        tmp = stripe_nr * nr_data_stripes(map);

                        for (i = 0; i < nr_data_stripes(map); i++)
                                raid_map[(i+rot) % map->num_stripes] =
                                        ce->start + (tmp + i) * map->stripe_len;

                        raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE;
                        if (map->type & BTRFS_BLOCK_GROUP_RAID6)
                                raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE;

                        *length = map->stripe_len;
                        stripe_index = 0;
                        stripe_offset = 0;
                        multi->num_stripes = map->num_stripes;
                } else {
                        stripe_index = stripe_nr % nr_data_stripes(map);
                        stripe_nr = stripe_nr / nr_data_stripes(map);

                        /*
                         * Mirror #0 or #1 means the original data block.
                         * Mirror #2 is RAID5 parity block.
                         * Mirror #3 is RAID6 Q block.
                         */
                        if (mirror_num > 1)
                                stripe_index = nr_data_stripes(map) + mirror_num - 2;

                        /* We distribute the parity blocks across stripes */
                        stripe_index = (stripe_nr + stripe_index) % map->num_stripes;
                }
        } else {
                /*
                 * after this do_div call, stripe_nr is the number of stripes
                 * on this device we have to walk to find the data, and
                 * stripe_index is the number of our device in the stripe array
                 */
                stripe_index = stripe_nr % map->num_stripes;
                stripe_nr = stripe_nr / map->num_stripes;
        }
        BUG_ON(stripe_index >= map->num_stripes);

        for (i = 0; i < multi->num_stripes; i++) {
                multi->stripes[i].physical =
                        map->stripes[stripe_index].physical + stripe_offset +
                        stripe_nr * map->stripe_len;
                multi->stripes[i].dev = map->stripes[stripe_index].dev;
                stripe_index++;
        }
        *multi_ret = multi;

        if (type)
                *type = map->type;

        if (raid_map) {
                sort_parity_stripes(multi, raid_map);
                *raid_map_ret = raid_map;
        }
out:
        return 0;
}
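
/*
 * Worked example (illustrative): on a 3-device RAID5 chunk with 64KiB
 * stripe_len (full_stripe_len == 2 * 64KiB), logical offset 200KiB gives
 * raid56_full_stripe_start == 128KiB, stripe_nr == 1 and rot == 1, so the
 * two data stripes land on devices 1 and 2 and parity rotates onto device 0.
 */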
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
                                       u8 *uuid, u8 *fsid)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *cur_devices;

        cur_devices = fs_info->fs_devices;
        while (cur_devices) {
                if (!fsid ||
                    (!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE) ||
                     fs_info->ignore_fsid_mismatch)) {
                        device = __find_device(&cur_devices->devices,
                                               devid, uuid);
                        if (device)
                                return device;
                }
                cur_devices = cur_devices->seed;
        }
        return NULL;
}
struct btrfs_device *
btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
                           u64 devid, int instance)
{
        struct list_head *head = &fs_devices->devices;
        struct btrfs_device *dev;
        int num_found = 0;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid && num_found++ == instance)
                        return dev;
        }
        return NULL;
}
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
        struct cache_extent *ce;
        struct map_lookup *map;
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        int readonly = 0;
        int i;

        /*
         * During chunk recovering, we may fail to find block group's
         * corresponding chunk, we will rebuild it later
         */
        ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
        if (!fs_info->is_chunk_recover)
                BUG_ON(!ce);
        else
                return 0;

        map = container_of(ce, struct map_lookup, ce);
        for (i = 0; i < map->num_stripes; i++) {
                if (!map->stripes[i].dev->writeable) {
                        readonly = 1;
                        break;
                }
        }

        return readonly;
}
static struct btrfs_device *fill_missing_device(u64 devid)
{
        struct btrfs_device *device;

        device = kzalloc(sizeof(*device), GFP_NOFS);
        device->devid = devid;
        device->fd = -1;

        return device;
}
/*
 * slot == -1: SYSTEM chunk
 * return -EIO on error, otherwise return 0
 */
int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
                            struct extent_buffer *leaf,
                            struct btrfs_chunk *chunk,
                            int slot, u64 logical)
{
        u64 length;
        u64 stripe_len;
        u16 num_stripes;
        u16 sub_stripes;
        u64 type;
        u32 chunk_ondisk_size;
        u32 sectorsize = fs_info->sectorsize;

        length = btrfs_chunk_length(leaf, chunk);
        stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
        type = btrfs_chunk_type(leaf, chunk);

        /*
         * These valid checks may be insufficient to cover every corner case.
         */
        if (!IS_ALIGNED(logical, sectorsize)) {
                error("invalid chunk logical %llu", logical);
                return -EIO;
        }
        if (btrfs_chunk_sector_size(leaf, chunk) != sectorsize) {
                error("invalid chunk sectorsize %llu",
                      (unsigned long long)btrfs_chunk_sector_size(leaf, chunk));
                return -EIO;
        }
        if (!length || !IS_ALIGNED(length, sectorsize)) {
                error("invalid chunk length %llu", length);
                return -EIO;
        }
        if (stripe_len != BTRFS_STRIPE_LEN) {
                error("invalid chunk stripe length: %llu", stripe_len);
                return -EIO;
        }
        /* Check on chunk item type */
        if (slot == -1 && (type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
                error("invalid chunk type %llu", type);
                return -EIO;
        }
        if (type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
                     BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
                error("unrecognized chunk type: %llu",
                      ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
                        BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
                return -EIO;
        }
        if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
                error("missing chunk type flag: %llu", type);
                return -EIO;
        }
        if (!(is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) ||
              (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)) {
                error("conflicting chunk type detected: %llu", type);
                return -EIO;
        }
        if ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
            !is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
                error("conflicting chunk profile detected: %llu", type);
                return -EIO;
        }

        chunk_ondisk_size = btrfs_chunk_item_size(num_stripes);
        /*
         * Btrfs_chunk contains at least one stripe, and for sys_chunk
         * it can't exceed the system chunk array size
         * For normal chunk, it should match its chunk item size.
         */
        if (num_stripes < 1 ||
            (slot == -1 && chunk_ondisk_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) ||
            (slot >= 0 && chunk_ondisk_size > btrfs_item_size_nr(leaf, slot))) {
                error("invalid num_stripes: %u", num_stripes);
                return -EIO;
        }
        /*
         * Device number check against profile
         */
        if ((type & BTRFS_BLOCK_GROUP_RAID10 && (sub_stripes != 2 ||
                  !IS_ALIGNED(num_stripes, sub_stripes))) ||
            (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
            (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
            (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
            (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
            ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
             num_stripes != 1)) {
                error("Invalid num_stripes:sub_stripes %u:%u for profile %llu",
                      num_stripes, sub_stripes,
                      type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
                return -EIO;
        }

        return 0;
}
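
/*
 * Example (illustrative): a RAID10 chunk item must carry sub_stripes == 2
 * and a num_stripes that is a multiple of 2; a DUP chunk with
 * num_stripes == 3, or a SINGLE chunk with num_stripes != 1, is rejected
 * above with -EIO.
 */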
/*
 * Slot is used to verify the chunk item is valid
 *
 * For sys chunk in superblock, pass -1 to indicate sys chunk.
 */
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
                          struct extent_buffer *leaf,
                          struct btrfs_chunk *chunk, int slot)
{
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct map_lookup *map;
        struct cache_extent *ce;
        u64 logical;
        u64 length;
        u64 devid;
        u8 uuid[BTRFS_UUID_SIZE];
        int num_stripes;
        int ret;
        int i;

        logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        /* Validation check */
        ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, slot, logical);
        if (ret) {
                error("%s checksums match, but it has an invalid chunk, %s",
                      (slot == -1) ? "Superblock" : "Metadata",
                      (slot == -1) ? "try btrfsck --repair -s <superblock> ie, 0,1,2" : "");
                return ret;
        }

        ce = search_cache_extent(&map_tree->cache_tree, logical);

        /* already mapped? */
        if (ce && ce->start <= logical && ce->start + ce->size > logical) {
                return 0;
        }

        map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
        if (!map)
                return -ENOMEM;

        map->ce.start = logical;
        map->ce.size = length;
        map->num_stripes = num_stripes;
        map->io_width = btrfs_chunk_io_width(leaf, chunk);
        map->io_align = btrfs_chunk_io_align(leaf, chunk);
        map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
        map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        map->type = btrfs_chunk_type(leaf, chunk);
        map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

        for (i = 0; i < num_stripes; i++) {
                map->stripes[i].physical =
                        btrfs_stripe_offset_nr(leaf, chunk, i);
                devid = btrfs_stripe_devid_nr(leaf, chunk, i);
                read_extent_buffer(leaf, uuid, (unsigned long)
                                   btrfs_stripe_dev_uuid_nr(chunk, i),
                                   BTRFS_UUID_SIZE);
                map->stripes[i].dev = btrfs_find_device(fs_info, devid, uuid,
                                                        NULL);
                if (!map->stripes[i].dev) {
                        map->stripes[i].dev = fill_missing_device(devid);
                        printf("warning, device %llu is missing\n",
                               (unsigned long long)devid);
                        list_add(&map->stripes[i].dev->dev_list,
                                 &fs_info->fs_devices->devices);
                }
        }
        ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
        BUG_ON(ret);

        return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
                                 struct btrfs_dev_item *dev_item,
                                 struct btrfs_device *device)
{
        unsigned long ptr;

        device->devid = btrfs_device_id(leaf, dev_item);
        device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
        device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
        device->type = btrfs_device_type(leaf, dev_item);
        device->io_align = btrfs_device_io_align(leaf, dev_item);
        device->io_width = btrfs_device_io_width(leaf, dev_item);
        device->sector_size = btrfs_device_sector_size(leaf, dev_item);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

        return 0;
}
static int open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;
        int ret;

        fs_devices = fs_info->fs_devices->seed;
        while (fs_devices) {
                if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
                        ret = 0;
                        goto out;
                }
                fs_devices = fs_devices->seed;
        }

        fs_devices = find_fsid(fsid);
        if (!fs_devices) {
                /* missing all seed devices */
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices) {
                        ret = -ENOMEM;
                        goto out;
                }
                INIT_LIST_HEAD(&fs_devices->devices);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, fsid, BTRFS_FSID_SIZE);
        }

        ret = btrfs_open_devices(fs_devices, O_RDONLY);
        if (ret)
                goto out;

        fs_devices->seed = fs_info->fs_devices->seed;
        fs_info->fs_devices->seed = fs_devices;
out:
        return ret;
}
static int read_one_dev(struct btrfs_fs_info *fs_info,
                        struct extent_buffer *leaf,
                        struct btrfs_dev_item *dev_item)
{
        struct btrfs_device *device;
        u64 devid;
        int ret = 0;
        u8 fs_uuid[BTRFS_UUID_SIZE];
        u8 dev_uuid[BTRFS_UUID_SIZE];

        devid = btrfs_device_id(leaf, dev_item);
        read_extent_buffer(leaf, dev_uuid,
                           (unsigned long)btrfs_device_uuid(dev_item),
                           BTRFS_UUID_SIZE);
        read_extent_buffer(leaf, fs_uuid,
                           (unsigned long)btrfs_device_fsid(dev_item),
                           BTRFS_UUID_SIZE);

        if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
                ret = open_seed_devices(fs_info, fs_uuid);
                if (ret)
                        return ret;
        }

        device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
        if (!device) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        return -ENOMEM;
                device->fd = -1;
                list_add(&device->dev_list,
                         &fs_info->fs_devices->devices);
        }

        fill_device_from_item(leaf, dev_item, device);
        device->dev_root = fs_info->dev_root;
        return ret;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
        struct btrfs_super_block *super_copy = fs_info->super_copy;
        struct extent_buffer *sb;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
        u8 *array_ptr;
        unsigned long sb_array_offset;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
        u32 cur_offset;
        struct btrfs_key key;

        sb = btrfs_find_create_tree_block(fs_info,
                                          BTRFS_SUPER_INFO_OFFSET,
                                          BTRFS_SUPER_INFO_SIZE);
        if (!sb)
                return -ENOMEM;
        btrfs_set_buffer_uptodate(sb);
        write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
        array_size = btrfs_super_sys_array_size(super_copy);

        array_ptr = super_copy->sys_chunk_array;
        sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
        cur_offset = 0;

        while (cur_offset < array_size) {
                disk_key = (struct btrfs_disk_key *)array_ptr;
                len = sizeof(*disk_key);
                if (cur_offset + len > array_size)
                        goto out_short_read;

                btrfs_disk_key_to_cpu(&key, disk_key);

                array_ptr += len;
                sb_array_offset += len;
                cur_offset += len;

                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
                        chunk = (struct btrfs_chunk *)sb_array_offset;
                        /*
                         * At least one btrfs_chunk with one stripe must be
                         * present, exact stripe count check comes afterwards
                         */
                        len = btrfs_chunk_item_size(1);
                        if (cur_offset + len > array_size)
                                goto out_short_read;

                        num_stripes = btrfs_chunk_num_stripes(sb, chunk);
                        if (!num_stripes) {
                                printk(
            "ERROR: invalid number of stripes %u in sys_array at offset %u\n",
                                        num_stripes, cur_offset);
                                ret = -EIO;
                                break;
                        }

                        len = btrfs_chunk_item_size(num_stripes);
                        if (cur_offset + len > array_size)
                                goto out_short_read;

                        ret = read_one_chunk(fs_info, &key, sb, chunk, -1);
                        if (ret)
                                break;
                } else {
                        printk(
                "ERROR: unexpected item type %u in sys_array at offset %u\n",
                                (u32)key.type, cur_offset);
                        ret = -EIO;
                        break;
                }
                array_ptr += len;
                sb_array_offset += len;
                cur_offset += len;
        }
        free_extent_buffer(sb);
        return ret;

out_short_read:
        printk("ERROR: sys_array too short to read %u bytes at offset %u\n",
                        len, cur_offset);
        free_extent_buffer(sb);
        return -EIO;
}
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *root = fs_info->chunk_root;
        int ret;
        int slot;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /*
         * Read all device items, and then all the chunk items. All
         * device items are found before any chunk item (their object id
         * is smaller than the lowest possible object id for a chunk
         * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
         */
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.offset = 0;
        key.type = 0;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
                if (found_key.type == BTRFS_DEV_ITEM_KEY) {
                        struct btrfs_dev_item *dev_item;
                        dev_item = btrfs_item_ptr(leaf, slot,
                                                  struct btrfs_dev_item);
                        ret = read_one_dev(fs_info, leaf, dev_item);
                        BUG_ON(ret);
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
                        ret = read_one_chunk(fs_info, &found_key, leaf, chunk,
                                             slot);
                        BUG_ON(ret);
                }
                path->slots[0]++;
        }

        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
struct list_head *btrfs_scanned_uuids(void)
{
        return &fs_uuids;
}
static int rmw_eb(struct btrfs_fs_info *info,
                  struct extent_buffer *eb, struct extent_buffer *orig_eb)
{
        int ret;
        unsigned long orig_off = 0;
        unsigned long dest_off = 0;
        unsigned long copy_len = eb->len;

        ret = read_whole_eb(info, eb, 0);
        if (ret)
                return ret;

        if (eb->start + eb->len <= orig_eb->start ||
            eb->start >= orig_eb->start + orig_eb->len)
                return 0;
        /*
         * | ----- orig_eb ------- |
         *         | ----- stripe -------  |
         *         | ----- orig_eb ------- |
         *              | ----- orig_eb ------- |
         */
        if (eb->start > orig_eb->start)
                orig_off = eb->start - orig_eb->start;
        if (orig_eb->start > eb->start)
                dest_off = orig_eb->start - eb->start;

        if (copy_len > orig_eb->len - orig_off)
                copy_len = orig_eb->len - orig_off;
        if (copy_len > eb->len - dest_off)
                copy_len = eb->len - dest_off;

        memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);
        return 0;
}
static int split_eb_for_raid56(struct btrfs_fs_info *info,
                               struct extent_buffer *orig_eb,
                               struct extent_buffer **ebs,
                               u64 stripe_len, u64 *raid_map,
                               int num_stripes)
{
        struct extent_buffer **tmp_ebs;
        u64 start = orig_eb->start;
        u64 this_eb_start;
        int i;
        int ret = 0;

        tmp_ebs = calloc(num_stripes, sizeof(*tmp_ebs));
        if (!tmp_ebs)
                return -ENOMEM;

        /* Alloc memory in a row for data stripes */
        for (i = 0; i < num_stripes; i++) {
                if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
                        break;

                tmp_ebs[i] = calloc(1, sizeof(**tmp_ebs) + stripe_len);
                if (!tmp_ebs[i]) {
                        ret = -ENOMEM;
                        goto clean_up;
                }
        }
        for (i = 0; i < num_stripes; i++) {
                struct extent_buffer *eb = tmp_ebs[i];

                if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
                        break;

                eb->start = raid_map[i];
                eb->len = stripe_len;
                eb->dev_bytenr = (u64)-1;

                this_eb_start = raid_map[i];

                if (start > this_eb_start ||
                    start + orig_eb->len < this_eb_start + stripe_len) {
                        ret = rmw_eb(info, eb, orig_eb);
                        if (ret)
                                goto clean_up;
                } else {
                        memcpy(eb->data, orig_eb->data + eb->start - start,
                               stripe_len);
                }
                ebs[i] = eb;
        }
        free(tmp_ebs);
        return ret;
clean_up:
        for (i = 0; i < num_stripes; i++)
                free(tmp_ebs[i]);
        free(tmp_ebs);
        return ret;
}
*info
,
2202 struct extent_buffer
*eb
,
2203 struct btrfs_multi_bio
*multi
,
2204 u64 stripe_len
, u64
*raid_map
)
2206 struct extent_buffer
**ebs
, *p_eb
= NULL
, *q_eb
= NULL
;
2209 int alloc_size
= eb
->len
;
2212 ebs
= malloc(sizeof(*ebs
) * multi
->num_stripes
);
2213 pointers
= malloc(sizeof(*pointers
) * multi
->num_stripes
);
2214 if (!ebs
|| !pointers
) {
2220 if (stripe_len
> alloc_size
)
2221 alloc_size
= stripe_len
;
2223 ret
= split_eb_for_raid56(info
, eb
, ebs
, stripe_len
, raid_map
,
2224 multi
->num_stripes
);
2228 for (i
= 0; i
< multi
->num_stripes
; i
++) {
2229 struct extent_buffer
*new_eb
;
2230 if (raid_map
[i
] < BTRFS_RAID5_P_STRIPE
) {
2231 ebs
[i
]->dev_bytenr
= multi
->stripes
[i
].physical
;
2232 ebs
[i
]->fd
= multi
->stripes
[i
].dev
->fd
;
2233 multi
->stripes
[i
].dev
->total_ios
++;
2234 if (ebs
[i
]->start
!= raid_map
[i
]) {
2236 goto out_free_split
;
2240 new_eb
= malloc(sizeof(*eb
) + alloc_size
);
2243 goto out_free_split
;
2245 new_eb
->dev_bytenr
= multi
->stripes
[i
].physical
;
2246 new_eb
->fd
= multi
->stripes
[i
].dev
->fd
;
2247 multi
->stripes
[i
].dev
->total_ios
++;
2248 new_eb
->len
= stripe_len
;
2250 if (raid_map
[i
] == BTRFS_RAID5_P_STRIPE
)
2252 else if (raid_map
[i
] == BTRFS_RAID6_Q_STRIPE
)
2256 ebs
[multi
->num_stripes
- 2] = p_eb
;
2257 ebs
[multi
->num_stripes
- 1] = q_eb
;
2259 for (i
= 0; i
< multi
->num_stripes
; i
++)
2260 pointers
[i
] = ebs
[i
]->data
;
2262 raid6_gen_syndrome(multi
->num_stripes
, stripe_len
, pointers
);
2264 ebs
[multi
->num_stripes
- 1] = p_eb
;
2265 for (i
= 0; i
< multi
->num_stripes
; i
++)
2266 pointers
[i
] = ebs
[i
]->data
;
2267 ret
= raid5_gen_result(multi
->num_stripes
, stripe_len
,
2268 multi
->num_stripes
- 1, pointers
);
2270 goto out_free_split
;
2273 for (i
= 0; i
< multi
->num_stripes
; i
++) {
2274 ret
= write_extent_to_disk(ebs
[i
]);
2276 goto out_free_split
;
2280 for (i
= 0; i
< multi
->num_stripes
; i
++) {
/*
 * Get stripe length from chunk item and its stripe items
 *
 * Caller should only call this function after validating the chunk item
 * by using btrfs_check_chunk_valid().
 */
u64 btrfs_stripe_length(struct btrfs_fs_info *fs_info,
                        struct extent_buffer *leaf,
                        struct btrfs_chunk *chunk)
{
        u64 stripe_len;
        u64 chunk_len;
        u32 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        u64 profile = btrfs_chunk_type(leaf, chunk) &
                      BTRFS_BLOCK_GROUP_PROFILE_MASK;

        chunk_len = btrfs_chunk_length(leaf, chunk);

        switch (profile) {
        case 0: /* Single profile */
        case BTRFS_BLOCK_GROUP_RAID1:
        case BTRFS_BLOCK_GROUP_DUP:
                stripe_len = chunk_len;
                break;
        case BTRFS_BLOCK_GROUP_RAID0:
                stripe_len = chunk_len / num_stripes;
                break;
        case BTRFS_BLOCK_GROUP_RAID5:
                stripe_len = chunk_len / (num_stripes - 1);
                break;
        case BTRFS_BLOCK_GROUP_RAID6:
                stripe_len = chunk_len / (num_stripes - 2);
                break;
        case BTRFS_BLOCK_GROUP_RAID10:
                stripe_len = chunk_len / (num_stripes /
                                btrfs_chunk_sub_stripes(leaf, chunk));
                break;
        default:
                /* Invalid chunk profile found */
                BUG_ON(1);
        }
        return stripe_len;
}
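
/*
 * Example (illustrative): a 1GiB RAID6 chunk striped over 6 devices keeps
 * 1GiB / (6 - 2) = 256MiB on each device, while the same chunk as RAID0
 * would keep 1GiB / 6 on each.
 */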