/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>
#include <fcntl.h>
#include <unistd.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "utils.h"
#include "kernel-lib/raid56.h"
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes		= 2,
		.dev_stripes		= 1,
		.devs_max		= 0,	/* 0 == as many as possible */
		.devs_min		= 4,
		.tolerated_failures	= 1,
		.devs_increment		= 2,
		.ncopies		= 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes		= 1,
		.dev_stripes		= 1,
		.devs_max		= 2,
		.devs_min		= 2,
		.tolerated_failures	= 1,
		.devs_increment		= 2,
		.ncopies		= 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes		= 1,
		.dev_stripes		= 2,
		.devs_max		= 1,
		.devs_min		= 1,
		.tolerated_failures	= 0,
		.devs_increment		= 1,
		.ncopies		= 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes		= 1,
		.dev_stripes		= 1,
		.devs_max		= 0,
		.devs_min		= 2,
		.tolerated_failures	= 0,
		.devs_increment		= 1,
		.ncopies		= 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes		= 1,
		.dev_stripes		= 1,
		.devs_max		= 1,
		.devs_min		= 1,
		.tolerated_failures	= 0,
		.devs_increment		= 1,
		.ncopies		= 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes		= 1,
		.dev_stripes		= 1,
		.devs_max		= 0,
		.devs_min		= 2,
		.tolerated_failures	= 1,
		.devs_increment		= 1,
		.ncopies		= 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes		= 1,
		.dev_stripes		= 1,
		.devs_max		= 0,
		.devs_min		= 3,
		.tolerated_failures	= 2,
		.devs_increment		= 1,
		.ncopies		= 3,
	},
};
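/*
 * The table above is keyed by the BTRFS_RAID_* enum, so profile attributes
 * can be looked up directly.  For example (illustrative only):
 *
 *	btrfs_raid_array[BTRFS_RAID_RAID6].tolerated_failures == 2
 *	btrfs_raid_array[BTRFS_RAID_RAID10].devs_max == 0 (no upper limit)
 */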
struct stripe {
	struct btrfs_device *dev;
	u64 physical;
};
static inline int nr_parity_stripes(struct map_lookup *map)
{
	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 1;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;
	else
		return 0;
}
static inline int nr_data_stripes(struct map_lookup *map)
{
	return map->num_stripes - nr_parity_stripes(map);
}
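/*
 * Worked example for the two helpers above: a RAID6 chunk striped over six
 * devices has map->num_stripes == 6, nr_parity_stripes() == 2 (the P and Q
 * stripes), and therefore nr_data_stripes() == 4 data stripes per full
 * stripe.
 */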
#define is_parity_stripe(x) ( ((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE) )
static LIST_HEAD(fs_uuids);
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 */
static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices,
					u64 devid, u8 *uuid)
{
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = NULL;
	} else {
		device = find_device(fs_devices, devid,
				     disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->fd = -1;
		device->devid = devid;
		device->generation = found_transid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		if (!device->label) {
			kfree(device->name);
			kfree(device);
			return -ENOMEM;
		}
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
		device->bytes_used =
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name, path)) {
		char *name;

		/*
		 * The existing device has newer generation, so this one could
		 * be a stale one, don't add it.
		 */
		if (found_transid < device->generation) {
			warning(
	"adding device %s gen %llu but found an existing device %s gen %llu",
				path, found_transid, device->name,
				device->generation);
			return -EEXIST;
		}

		name = strdup(path);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_device *device;
	int ret = 0;

again:
	if (!fs_devices)
		return 0;
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		if (device->fd != -1) {
			if (device->writeable && fsync(device->fd) == -1) {
				warning("fsync on device %llu failed: %m",
					device->devid);
				ret = -errno;
			}
			if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
				fprintf(stderr, "Warning, could not drop caches\n");
			close(device->fd);
			device->fd = -1;
		}
		device->writeable = 0;
		list_del(&device->dev_list);
		/* free the memory */
		free(device->name);
		free(device->label);
		free(device);
	}

	seed_devices = fs_devices->seed;
	fs_devices->seed = NULL;
	if (seed_devices) {
		struct btrfs_fs_devices *orig;

		orig = fs_devices;
		fs_devices = seed_devices;
		list_del(&orig->list);
		free(orig);
		goto again;
	} else {
		list_del(&fs_devices->list);
		free(fs_devices);
	}

	return ret;
}
void btrfs_close_all_devices(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
					list);
		btrfs_close_devices(fs_devices);
	}
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	int fd;
	struct btrfs_device *device;
	int ret = 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->name) {
			printk("no name for device %llu, skip it now\n", device->devid);
			continue;
		}

		fd = open(device->name, flags);
		if (fd < 0) {
			ret = -errno;
			error("cannot open device '%s': %m", device->name);
			goto fail;
		}

		if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
			fprintf(stderr, "Warning, could not drop caches\n");

		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		device->fd = fd;
		if (flags & O_RDWR)
			device->writeable = 1;
	}
	return 0;
fail:
	btrfs_close_devices(fs_devices);
	return ret;
}
int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset, unsigned sbflags)
{
	struct btrfs_super_block *disk_super;
	char buf[BTRFS_SUPER_INFO_SIZE];
	int ret;
	u64 devid;

	disk_super = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, disk_super, super_offset, sbflags);
	if (ret < 0)
		return -EIO;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
		*total_devs = 1;
	else
		*total_devs = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	return ret;
}
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start,
				      u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, (u64)SZ_1M);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;
		if (key.objectid > device->devid)
			break;
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 num_bytes, u64 *start,
				  int convert)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * For the convert case, just skip searching for a free dev_extent,
	 * as the caller is responsible for making sure it's free.
	 */
	if (!convert) {
		ret = find_free_dev_extent(device, num_bytes, start, NULL);
		if (ret)
			goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}
static int find_next_chunk(struct btrfs_fs_info *fs_info, u64 *offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != BTRFS_FIRST_CHUNK_TREE_OBJECTID)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_fs_info *fs_info,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->chunk_root;
	unsigned long ptr;
	u64 free_devid = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
	    > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
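/*
 * Layout sketch of sys_chunk_array after the append above: the array is a
 * packed sequence of (disk key, chunk item) pairs, capped at
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE bytes inside the super block:
 *
 *	[disk_key 0][chunk 0][disk_key 1][chunk 1] ... unused space
 *
 * btrfs_read_sys_array() below walks exactly the same layout at mount time.
 */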
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else if (type & BTRFS_BLOCK_GROUP_RAID5)
		return calc_size * (num_stripes - 1);
	else if (type & BTRFS_BLOCK_GROUP_RAID6)
		return calc_size * (num_stripes - 2);
	else
		return calc_size * num_stripes;
}
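/*
 * Worked examples for chunk_bytes_by_type(), with calc_size = 1GiB per
 * stripe: RAID1/DUP return 1GiB (every stripe is a copy); RAID0 with 4
 * stripes returns 4GiB; RAID5 with 4 stripes returns 3GiB (one stripe's
 * worth of parity); RAID6 with 4 stripes returns 2GiB; RAID10 with 4
 * stripes and sub_stripes = 2 returns 2GiB (two mirrored pairs).
 */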
static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO, add a way to store the preferred stripe size */
	return BTRFS_STRIPE_LEN;
}
/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in the first 1MiB of the device, and are not
 * allowed to allocate any chunk before alloc_start if it is specified.
 * So search for holes from max(1M, alloc_start) to device->total_bytes.
 */
static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
				    struct btrfs_device *device,
				    u64 *avail_bytes)
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct extent_buffer *l;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;
	u64 extent_end = 0;
	u64 free_bytes = 0;
	int ret;
	int slot = 0;

	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = root->fs_info->alloc_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;
		if (key.objectid > device->devid)
			break;
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;
		if (key.offset > search_end)
			break;
		if (key.offset > search_start)
			free_bytes += key.offset - search_start;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
		if (search_start > search_end)
			break;
next:
		path->slots[0]++;
		cond_resched();
	}

	if (search_start < search_end)
		free_bytes += search_end - search_start;

	*avail_bytes = free_bytes;
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
#define BTRFS_MAX_DEVS(info) ((BTRFS_LEAF_DATA_SIZE(info)	\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)
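/*
 * Rough arithmetic for BTRFS_MAX_DEVS_SYS_CHUNK, assuming the usual on-disk
 * sizes (disk key 17 bytes, chunk header 80 bytes including the embedded
 * first stripe, stripe 32 bytes) and a 2048 byte
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE:
 *
 *	(2048 - 2 * 17 - 2 * 80) / 32 + 1 = 1854 / 32 + 1 = 58 devices
 *
 * The "+ 1" accounts for the stripe already embedded in struct btrfs_chunk.
 */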
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *info, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = SZ_1M;
	u64 calc_size = SZ_8M;
	u64 min_free;
	u64 max_chunk_size = 4 * calc_size;
	u64 avail = 0;
	u64 max_avail = 0;
	u64 percent_max;
	u64 offset;
	int num_stripes = 1;
	int max_stripes = 0;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	if (list_empty(dev_list)) {
		return -ENOSPC;
	}

	if (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			calc_size = SZ_8M;
			max_chunk_size = calc_size * 2;
			min_stripe_size = SZ_1M;
			max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			calc_size = SZ_1G;
			max_chunk_size = 10 * calc_size;
			min_stripe_size = SZ_64M;
			max_stripes = BTRFS_MAX_DEVS(info);
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			calc_size = SZ_1G;
			max_chunk_size = 4 * calc_size;
			min_stripe_size = SZ_32M;
			max_stripes = BTRFS_MAX_DEVS(info);
		}
	}
	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				  btrfs_super_num_devices(info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & BTRFS_BLOCK_GROUP_DUP) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 4)
			return -ENOSPC;
		/* RAID10 needs an even stripe count */
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
		stripe_len = find_raid56_stripe_len(num_stripes - 1,
				    btrfs_super_stripesize(info->super_copy));
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 3)
			return -ENOSPC;
		min_stripes = 3;
		stripe_len = find_raid56_stripe_len(num_stripes - 2,
				    btrfs_super_stripesize(info->super_copy));
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
	    max_chunk_size) {
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	/* round down to a stripe_len boundary */
	calc_size /= stripe_len;
	calc_size *= stripe_len;
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		ret = btrfs_device_avail_bytes(trans, device, &avail);
		if (ret)
			return ret;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}
	ret = find_next_chunk(info, &offset);
	if (ret)
		return ret;
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = offset;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device, key.offset,
			     calc_size, &dev_offset, 0);
		if (ret < 0)
			goto out_chunk_map;

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		if (ret < 0)
			goto out_chunk_map;

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = info->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	if (ret < 0)
		goto out_chunk_map;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(info, &key,
			    chunk, btrfs_chunk_item_size(num_stripes));
		if (ret < 0)
			goto out_chunk;
	}

	kfree(chunk);
	return ret;

out_chunk_map:
	kfree(map);
out_chunk:
	kfree(chunk);
	return ret;
}
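/*
 * Typical call (sketch with a hypothetical caller): mkfs-style code asks for
 * a metadata chunk and receives the new logical range back:
 *
 *	u64 start, num_bytes;
 *	ret = btrfs_alloc_chunk(trans, info, &start, &num_bytes,
 *				BTRFS_BLOCK_GROUP_METADATA);
 *	if (!ret)
 *		... [start, start + num_bytes) is now a mapped logical range
 */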
/*
 * Alloc a DATA chunk with SINGLE profile.
 *
 * If 'convert' is set, it will alloc a chunk with 1:1 mapping
 * (btrfs logical bytenr == on-disk bytenr)
 * For that case, the caller must make sure the chunk and dev_extent are not
 * occupied.
 */
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *info, u64 *start,
			   u64 num_bytes, u64 type, int convert)
{
	u64 dev_offset;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	u64 calc_size = SZ_8M;
	int num_stripes = 1;
	int sub_stripes = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	if (convert) {
		if (*start != round_down(*start, info->sectorsize)) {
			error("DATA chunk start not sectorsize aligned: %llu",
					(unsigned long long)*start);
			return -EINVAL;
		}
		key.offset = *start;
		dev_offset = *start;
	} else {
		u64 tmp;

		ret = find_next_chunk(info, &tmp);
		if (ret)
			return ret;
		key.offset = tmp;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	calc_size = num_bytes;

	index = 0;
	cur = dev_list->next;
	device = list_entry(cur, struct btrfs_device, dev_list);

	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		ret = btrfs_alloc_dev_extent(trans, device, key.offset,
			     calc_size, &dev_offset, convert);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = info->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	kfree(chunk);
	return ret;
}
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	int ret;

	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		fprintf(stderr, "No mapping for %llu-%llu\n",
			(unsigned long long)logical,
			(unsigned long long)logical+len);
		return 1;
	}
	if (ce->start > logical || ce->start + ce->size < logical) {
		fprintf(stderr, "Invalid mapping for %llu-%llu, got "
			"%llu-%llu\n", (unsigned long long)logical,
			(unsigned long long)logical+len,
			(unsigned long long)ce->start,
			(unsigned long long)ce->start + ce->size);
		return 1;
	}
	map = container_of(ce, struct map_lookup, ce);

	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	return ret;
}
*fs_info
, u64
*logical
,
1332 u64
*size
, u64 type
)
1334 struct btrfs_mapping_tree
*map_tree
= &fs_info
->mapping_tree
;
1335 struct cache_extent
*ce
;
1336 struct map_lookup
*map
;
1339 ce
= search_cache_extent(&map_tree
->cache_tree
, cur
);
1343 * only jump to next bg if our cur is not 0
1344 * As the initial logical for btrfs_next_bg() is 0, and
1345 * if we jump to next bg, we skipped a valid bg.
1348 ce
= next_cache_extent(ce
);
1354 map
= container_of(ce
, struct map_lookup
, ce
);
1355 if (map
->type
& type
) {
1356 *logical
= ce
->start
;
1361 ce
= next_cache_extent(ce
);
1367 int btrfs_rmap_block(struct btrfs_fs_info
*fs_info
, u64 chunk_start
,
1368 u64 physical
, u64
**logical
, int *naddrs
, int *stripe_len
)
1370 struct btrfs_mapping_tree
*map_tree
= &fs_info
->mapping_tree
;
1371 struct cache_extent
*ce
;
1372 struct map_lookup
*map
;
1380 ce
= search_cache_extent(&map_tree
->cache_tree
, chunk_start
);
1382 map
= container_of(ce
, struct map_lookup
, ce
);
1385 rmap_len
= map
->stripe_len
;
1386 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
)
1387 length
= ce
->size
/ (map
->num_stripes
/ map
->sub_stripes
);
1388 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
)
1389 length
= ce
->size
/ map
->num_stripes
;
1390 else if (map
->type
& (BTRFS_BLOCK_GROUP_RAID5
|
1391 BTRFS_BLOCK_GROUP_RAID6
)) {
1392 length
= ce
->size
/ nr_data_stripes(map
);
1393 rmap_len
= map
->stripe_len
* nr_data_stripes(map
);
1396 buf
= kzalloc(sizeof(u64
) * map
->num_stripes
, GFP_NOFS
);
1398 for (i
= 0; i
< map
->num_stripes
; i
++) {
1399 if (map
->stripes
[i
].physical
> physical
||
1400 map
->stripes
[i
].physical
+ length
<= physical
)
1403 stripe_nr
= (physical
- map
->stripes
[i
].physical
) /
1406 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
) {
1407 stripe_nr
= (stripe_nr
* map
->num_stripes
+ i
) /
1409 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
) {
1410 stripe_nr
= stripe_nr
* map
->num_stripes
+ i
;
1411 } /* else if RAID[56], multiply by nr_data_stripes().
1412 * Alternatively, just use rmap_len below instead of
1413 * map->stripe_len */
1415 bytenr
= ce
->start
+ stripe_nr
* rmap_len
;
1416 for (j
= 0; j
< nr
; j
++) {
1417 if (buf
[j
] == bytenr
)
1426 *stripe_len
= rmap_len
;
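/*
 * Example of the per-device length computed above: a RAID10 chunk with
 * ce->size = 2GiB, num_stripes = 4 and sub_stripes = 2 covers
 * 2GiB / (4 / 2) = 1GiB of each device, while a RAID0 chunk of the same
 * size covers 2GiB / 4 = 512MiB of each device.
 */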
static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}
/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < bbio->num_stripes - 1; i++) {
			if (parity_smaller(raid_map[i], raid_map[i+1])) {
				s = bbio->stripes[i];
				l = raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				raid_map[i] = raid_map[i+1];
				bbio->stripes[i+1] = s;
				raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}
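/*
 * The sort works because BTRFS_RAID5_P_STRIPE ((u64)-2) and
 * BTRFS_RAID6_Q_STRIPE ((u64)-1) compare greater than any real logical
 * address, so ascending order yields [data stripes ..., P, Q].  For
 * example, for a logical address L:
 *
 *	raid_map before: [P, L, L+64K, Q]  ->  after: [L, L+64K, P, Q]
 */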
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num,
		    u64 **raid_map_ret)
{
	return __btrfs_map_block(fs_info, rw, logical, length, NULL,
				 multi_ret, mirror_num, raid_map_ret);
}
int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		      u64 logical, u64 *length, u64 *type,
		      struct btrfs_multi_bio **multi_ret, int mirror_num,
		      u64 **raid_map_ret)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 *raid_map = NULL;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && rw == READ) {
		stripes_allocated = 1;
	}
again:
	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		kfree(multi);
		*length = (u64)-1;
		return -ENOENT;
	}
	if (ce->start > logical) {
		kfree(multi);
		*length = ce->start - logical;
		return -ENOENT;
	}

	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}
	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;

	if (rw == WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
	    && multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
		/* RAID[56] write or recovery. Return all stripes */
		stripes_required = map->num_stripes;

		/* Only allocate the map if we've already got a large enough multi_ret */
		if (stripes_allocated >= stripes_required) {
			raid_map = kmalloc(sizeof(u64) * map->num_stripes,
					   GFP_NOFS);
			if (!raid_map) {
				kfree(multi);
				return -ENOMEM;
			}
		}
	}

	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && stripes_allocated < stripes_required) {
		stripes_allocated = stripes_required;
		kfree(multi);
		multi = NULL;
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = stripe_nr / map->stripe_len;

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, ce->size - offset,
			      map->stripe_len - stripe_offset);
	} else {
		*length = ce->size - offset;
	}

	if (!multi_ret)
		goto out;

	multi->num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = stripe_nr % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = stripe_nr % factor;
		stripe_index *= map->sub_stripes;

		if (rw == WRITE)
			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;

		stripe_nr = stripe_nr / factor;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {
		if (raid_map) {
			int rot;
			u64 tmp;
			u64 raid56_full_stripe_start;
			u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len;

			/*
			 * align the start of our data stripe in the logical
			 * address space
			 */
			raid56_full_stripe_start = offset / full_stripe_len;
			raid56_full_stripe_start *= full_stripe_len;

			/* get the data stripe number */
			stripe_nr = raid56_full_stripe_start / map->stripe_len;
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/* Work out the disk rotation on this stripe-set */
			rot = stripe_nr % map->num_stripes;

			/* Fill in the logical address of each stripe */
			tmp = stripe_nr * nr_data_stripes(map);

			for (i = 0; i < nr_data_stripes(map); i++)
				raid_map[(i+rot) % map->num_stripes] =
					ce->start + (tmp + i) * map->stripe_len;

			raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE;
			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
				raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE;

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
			multi->num_stripes = map->num_stripes;
		} else {
			stripe_index = stripe_nr % nr_data_stripes(map);
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			stripe_index = (stripe_nr + stripe_index) % map->num_stripes;
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;
out:
	if (type)
		*type = map->type;
	if (raid_map) {
		sort_parity_stripes(multi, raid_map);
		*raid_map_ret = raid_map;
	}

	return 0;
}
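/*
 * Worked RAID0 example for the mapping math above, assuming the default
 * 64KiB stripe_len and num_stripes = 2: for an offset of 192KiB into the
 * chunk, stripe_nr = 192K / 64K = 3 and stripe_offset = 0; the
 * plain-striping branch then gives stripe_index = 3 % 2 = 1 and
 * stripe_nr = 3 / 2 = 1, so the block lives at
 * map->stripes[1].physical + 1 * 64KiB.
 */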
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    (!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE) ||
		     fs_info->ignore_fsid_mismatch)) {
			device = find_device(cur_devices, devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
struct btrfs_device *
btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
			   u64 devid, int instance)
{
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *dev;
	int num_found = 0;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid && num_found++ == instance)
			return dev;
	}
	return NULL;
}
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	int readonly = 0;
	int i;

	/*
	 * During chunk recovering, we may fail to find a block group's
	 * corresponding chunk, we will rebuild it later
	 */
	ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
	if (!fs_info->is_chunk_recover)
		BUG_ON(!ce);
	else
		return 0;

	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}

	return readonly;
}
static struct btrfs_device *fill_missing_device(u64 devid)
{
	struct btrfs_device *device;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	device->devid = devid;
	device->fd = -1;

	return device;
}
/*
 * slot == -1: SYSTEM chunk
 * return -EIO on error, otherwise return 0
 */
int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *leaf,
			    struct btrfs_chunk *chunk,
			    int slot, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u32 chunk_ondisk_size;
	u32 sectorsize = fs_info->sectorsize;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	/*
	 * These checks may be insufficient to cover every corner case.
	 */
	if (!IS_ALIGNED(logical, sectorsize)) {
		error("invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != sectorsize) {
		error("invalid chunk sectorsize %llu",
		      (unsigned long long)btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, sectorsize)) {
		error("invalid chunk length %llu", length);
		return -EIO;
	}
	if (stripe_len != BTRFS_STRIPE_LEN) {
		error("invalid chunk stripe length: %llu", stripe_len);
		return -EIO;
	}
	/* Check on chunk item type */
	if (slot == -1 && (type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
		error("invalid chunk type %llu", type);
		return -EIO;
	}
	if (type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
		     BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		error("unrecognized chunk type: %llu",
		      ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
		return -EIO;
	}
	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		error("missing chunk type flag: %llu", type);
		return -EIO;
	}
	if (!(is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) ||
	      (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)) {
		error("conflicting chunk type detected: %llu", type);
		return -EIO;
	}
	if ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
	    !is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		error("conflicting chunk profile detected: %llu", type);
		return -EIO;
	}

	chunk_ondisk_size = btrfs_chunk_item_size(num_stripes);
	/*
	 * A btrfs_chunk contains at least one stripe; for a sys_chunk it
	 * can't exceed the system chunk array size, and for a normal chunk
	 * it should match its chunk item size.
	 */
	if (num_stripes < 1 ||
	    (slot == -1 && chunk_ondisk_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) ||
	    (slot >= 0 && chunk_ondisk_size > btrfs_item_size_nr(leaf, slot))) {
		error("invalid num_stripes: %u", num_stripes);
		return -EIO;
	}
	/*
	 * Device number check against profile
	 */
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && (sub_stripes != 2 ||
		  !IS_ALIGNED(num_stripes, sub_stripes))) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		error("Invalid num_stripes:sub_stripes %u:%u for profile %llu",
		      num_stripes, sub_stripes,
		      type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EIO;
	}

	return 0;
}
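/*
 * Example of the profile check above: a RAID10 chunk with num_stripes = 4
 * and sub_stripes = 2 passes (4 is a multiple of 2), while sub_stripes = 3
 * or num_stripes = 5 would be rejected with -EIO.
 */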
/*
 * Slot is used to verify the chunk item is valid
 *
 * For a sys chunk in the superblock, pass -1 to indicate a sys chunk.
 */
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk, int slot)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Validation check */
	ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, slot, logical);
	if (ret) {
		error("%s checksums match, but it has an invalid chunk, %s",
		      (slot == -1) ? "Superblock" : "Metadata",
		      (slot == -1) ? "try btrfsck --repair -s <superblock> i.e., 0,1,2" : "");
		return ret;
	}

	ce = search_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info, devid, uuid,
							NULL);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = fill_missing_device(devid);
			printf("warning, device %llu is missing\n",
			       (unsigned long long)devid);
			list_add(&map->stripes[i].dev->dev_list,
				 &fs_info->fs_devices->devices);
		}
	}
	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	if (ret < 0) {
		error("failed to add chunk map start=%llu len=%llu: %d (%s)",
		      map->ce.start, map->ce.size, ret, strerror(-ret));
	}

	return ret;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		/* missing all seed devices */
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices) {
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, fsid, BTRFS_FSID_SIZE);
	}

	ret = btrfs_open_devices(fs_devices, O_RDONLY);
	if (ret)
		goto out;

	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}
static int read_one_dev(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret = 0;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(fs_info, fs_uuid);
		if (ret)
			return ret;
	}

	device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		device->fd = -1;
		list_add(&device->dev_list,
			 &fs_info->fs_devices->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = fs_info->dev_root;
	return ret;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	struct btrfs_key key;

	if (fs_info->nodesize < BTRFS_SUPER_INFO_SIZE) {
		printf("ERROR: nodesize %u too small to read superblock\n",
				fs_info->nodesize);
		return -EINVAL;
	}
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				printk(
	"ERROR: invalid number of stripes %u in sys_array at offset %u\n",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(fs_info, &key, sb, chunk, -1);
			if (ret)
				break;
		} else {
			printk(
	"ERROR: unexpected item type %u in sys_array at offset %u\n",
				(u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	free_extent_buffer(sb);
	return ret;

out_short_read:
	printk("ERROR: sys_array too short to read %u bytes at offset %u\n",
			len, cur_offset);
	free_extent_buffer(sb);
	return -EIO;
}
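/*
 * Parsing sketch for the loop above: each iteration consumes one 17 byte
 * disk key and, when it announces a BTRFS_CHUNK_ITEM_KEY, one chunk item
 * whose size depends on its stripe count.  For example, a 2-stripe chunk
 * consumes 17 + 80 + 32 = 129 bytes of the array (the usual on-disk sizes,
 * see the format definitions in ctree.h).
 */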
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(fs_info, leaf, dev_item);
			if (ret < 0)
				goto error;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(fs_info, &found_key, leaf, chunk,
					     slot);
			if (ret < 0)
				goto error;
		}
		path->slots[0]++;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
struct list_head *btrfs_scanned_uuids(void)
{
	return &fs_uuids;
}
static int rmw_eb(struct btrfs_fs_info *info,
		  struct extent_buffer *eb, struct extent_buffer *orig_eb)
{
	int ret;
	unsigned long orig_off = 0;
	unsigned long dest_off = 0;
	unsigned long copy_len = eb->len;

	ret = read_whole_eb(info, eb, 0);
	if (ret)
		return ret;

	if (eb->start + eb->len <= orig_eb->start ||
	    eb->start >= orig_eb->start + orig_eb->len)
		return 0;
	/*
	 * | ----- orig_eb ------- |
	 *         | ----- stripe -------  |
	 *         | ----- orig_eb ------- |
	 *              | ----- orig_eb ------- |
	 */
	if (eb->start > orig_eb->start)
		orig_off = eb->start - orig_eb->start;
	if (orig_eb->start > eb->start)
		dest_off = orig_eb->start - eb->start;

	if (copy_len > orig_eb->len - orig_off)
		copy_len = orig_eb->len - orig_off;
	if (copy_len > eb->len - dest_off)
		copy_len = eb->len - dest_off;

	memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);
	return 0;
}
static int split_eb_for_raid56(struct btrfs_fs_info *info,
			       struct extent_buffer *orig_eb,
			       struct extent_buffer **ebs,
			       u64 stripe_len, u64 *raid_map,
			       int num_stripes)
{
	struct extent_buffer **tmp_ebs;
	u64 start = orig_eb->start;
	u64 this_eb_start;
	int i;
	int ret = 0;

	tmp_ebs = calloc(num_stripes, sizeof(*tmp_ebs));
	if (!tmp_ebs)
		return -ENOMEM;

	/* Alloc memory in a row for data stripes */
	for (i = 0; i < num_stripes; i++) {
		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		tmp_ebs[i] = calloc(1, sizeof(**tmp_ebs) + stripe_len);
		if (!tmp_ebs[i]) {
			ret = -ENOMEM;
			goto clean_up;
		}
	}
	for (i = 0; i < num_stripes; i++) {
		struct extent_buffer *eb = tmp_ebs[i];

		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		eb->start = raid_map[i];
		eb->len = stripe_len;
		eb->refs = 1;
		eb->flags = 0;
		eb->fd = -1;
		eb->dev_bytenr = (u64)-1;

		this_eb_start = raid_map[i];

		if (start > this_eb_start ||
		    start + orig_eb->len < this_eb_start + stripe_len) {
			ret = rmw_eb(info, eb, orig_eb);
			if (ret)
				goto clean_up;
		} else {
			memcpy(eb->data, orig_eb->data + eb->start - start,
			       stripe_len);
		}
		ebs[i] = eb;
	}
	free(tmp_ebs);
	return ret;
clean_up:
	for (i = 0; i < num_stripes; i++)
		free(tmp_ebs[i]);
	free(tmp_ebs);
	return ret;
}
int write_raid56_with_parity(struct btrfs_fs_info *info,
			     struct extent_buffer *eb,
			     struct btrfs_multi_bio *multi,
			     u64 stripe_len, u64 *raid_map)
{
	struct extent_buffer **ebs, *p_eb = NULL, *q_eb = NULL;
	int i;
	int ret;
	int alloc_size = eb->len;
	void **pointers;

	ebs = malloc(sizeof(*ebs) * multi->num_stripes);
	pointers = malloc(sizeof(*pointers) * multi->num_stripes);
	if (!ebs || !pointers) {
		free(ebs);
		free(pointers);
		return -ENOMEM;
	}

	if (stripe_len > alloc_size)
		alloc_size = stripe_len;

	ret = split_eb_for_raid56(info, eb, ebs, stripe_len, raid_map,
				  multi->num_stripes);
	if (ret)
		goto out;

	for (i = 0; i < multi->num_stripes; i++) {
		struct extent_buffer *new_eb;
		if (raid_map[i] < BTRFS_RAID5_P_STRIPE) {
			ebs[i]->dev_bytenr = multi->stripes[i].physical;
			ebs[i]->fd = multi->stripes[i].dev->fd;
			multi->stripes[i].dev->total_ios++;
			if (ebs[i]->start != raid_map[i]) {
				ret = -EINVAL;
				goto out_free_split;
			}
			continue;
		}
		new_eb = malloc(sizeof(*eb) + alloc_size);
		if (!new_eb) {
			ret = -ENOMEM;
			goto out_free_split;
		}
		new_eb->dev_bytenr = multi->stripes[i].physical;
		new_eb->fd = multi->stripes[i].dev->fd;
		multi->stripes[i].dev->total_ios++;
		new_eb->len = stripe_len;

		if (raid_map[i] == BTRFS_RAID5_P_STRIPE)
			p_eb = new_eb;
		else if (raid_map[i] == BTRFS_RAID6_Q_STRIPE)
			q_eb = new_eb;
	}
	if (q_eb) {
		/* RAID6: generate both P and Q from the data stripes */
		ebs[multi->num_stripes - 2] = p_eb;
		ebs[multi->num_stripes - 1] = q_eb;

		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;

		raid6_gen_syndrome(multi->num_stripes, stripe_len, pointers);
	} else {
		/* RAID5: generate the single parity stripe */
		ebs[multi->num_stripes - 1] = p_eb;
		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;
		ret = raid5_gen_result(multi->num_stripes, stripe_len,
				       multi->num_stripes - 1, pointers);
		if (ret < 0)
			goto out_free_split;
	}

	for (i = 0; i < multi->num_stripes; i++) {
		ret = write_extent_to_disk(ebs[i]);
		if (ret < 0)
			goto out_free_split;
	}

out_free_split:
	for (i = 0; i < multi->num_stripes; i++) {
		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			free(ebs[i]);
	}
out:
	free(ebs);
	free(pointers);

	return ret;
}
/*
 * Get stripe length from chunk item and its stripe items
 *
 * Caller should only call this function after validating the chunk item
 * by using btrfs_check_chunk_valid().
 */
u64 btrfs_stripe_length(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_chunk *chunk)
{
	u64 stripe_len;
	u64 chunk_len;
	u32 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 profile = btrfs_chunk_type(leaf, chunk) &
		      BTRFS_BLOCK_GROUP_PROFILE_MASK;

	chunk_len = btrfs_chunk_length(leaf, chunk);

	switch (profile) {
	case 0: /* Single profile */
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_DUP:
		stripe_len = chunk_len;
		break;
	case BTRFS_BLOCK_GROUP_RAID0:
		stripe_len = chunk_len / num_stripes;
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
		stripe_len = chunk_len / (num_stripes - 1);
		break;
	case BTRFS_BLOCK_GROUP_RAID6:
		stripe_len = chunk_len / (num_stripes - 2);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		stripe_len = chunk_len / (num_stripes /
				btrfs_chunk_sub_stripes(leaf, chunk));
		break;
	default:
		/* Invalid chunk profile found */
		BUG_ON(1);
	}
	return stripe_len;
}
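/*
 * Worked example for the switch above: a RAID6 chunk of length 4GiB over
 * num_stripes = 6 devices stores its data plus two parity stripes, so each
 * device holds 4GiB / (6 - 2) = 1GiB, which is the value returned here.
 */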
/*
 * Return 0 if size of @device is already good
 * Return >0 if size of @device is not aligned but fixed without problems
 * Return <0 if something wrong happened when aligning the size of @device
 */
int btrfs_fix_device_size(struct btrfs_fs_info *fs_info,
			  struct btrfs_device *device)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_path path;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_dev_item *di;
	u64 old_bytes = device->total_bytes;
	int ret;

	if (IS_ALIGNED(old_bytes, fs_info->sectorsize))
		return 0;

	/* Align the in-memory total_bytes first, and use it as correct size */
	device->total_bytes = round_down(device->total_bytes,
					 fs_info->sectorsize);

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	trans = btrfs_start_transaction(chunk_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		error("error starting transaction: %d (%s)",
		      ret, strerror(-ret));
		return ret;
	}

	btrfs_init_path(&path);
	ret = btrfs_search_slot(trans, chunk_root, &key, &path, 0, 1);
	if (ret > 0) {
		error("failed to find DEV_ITEM for devid %llu", device->devid);
		ret = -ENOENT;
		goto err;
	}
	if (ret < 0) {
		error("failed to search chunk root: %d (%s)",
		      ret, strerror(-ret));
		goto err;
	}
	di = btrfs_item_ptr(path.nodes[0], path.slots[0], struct btrfs_dev_item);
	btrfs_set_device_total_bytes(path.nodes[0], di, device->total_bytes);
	btrfs_mark_buffer_dirty(path.nodes[0]);
	ret = btrfs_commit_transaction(trans, chunk_root);
	if (ret < 0) {
		error("failed to commit current transaction: %d (%s)",
		      ret, strerror(-ret));
		btrfs_release_path(&path);
		return ret;
	}
	btrfs_release_path(&path);
	printf("Fixed device size for devid %llu, old size: %llu new size: %llu\n",
	       device->devid, old_bytes, device->total_bytes);
	return 1;

err:
	/* We haven't modified anything, it's OK to commit current trans */
	btrfs_commit_transaction(trans, chunk_root);
	btrfs_release_path(&path);
	return ret;
}
/*
 * Return 0 if super block total_bytes matches all devices' total_bytes
 * Return >0 if super block total_bytes mismatch but fixed without problem
 * Return <0 if we failed to fix super block total_bytes
 */
int btrfs_fix_super_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct list_head *dev_list = &fs_info->fs_devices->devices;
	u64 total_bytes = 0;
	u64 old_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret;

	list_for_each_entry(device, dev_list, dev_list) {
		/*
		 * Caller should ensure this function is called after aligning
		 * all devices' total_bytes.
		 */
		if (!IS_ALIGNED(device->total_bytes, fs_info->sectorsize)) {
			error("device %llu total_bytes %llu not aligned to %u",
			      device->devid, device->total_bytes,
			      fs_info->sectorsize);
			return -EUCLEAN;
		}
		total_bytes += device->total_bytes;
	}

	if (total_bytes == old_bytes)
		return 0;

	btrfs_set_super_total_bytes(fs_info->super_copy, total_bytes);

	/* Commit transaction to update all super blocks */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		error("error starting transaction: %d (%s)",
		      ret, strerror(-ret));
		return ret;
	}
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
	if (ret < 0) {
		error("failed to commit current transaction: %d (%s)",
		      ret, strerror(-ret));
		return ret;
	}
	printf("Fixed super total bytes, old size: %llu new size: %llu\n",
	       old_bytes, total_bytes);
	return 1;
}
/*
 * Return 0 if all devices and super block sizes are good
 * Return >0 if any device/super size problem was found, but fixed
 * Return <0 if something wrong happened during fixing
 */
int btrfs_fix_device_and_super_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct list_head *dev_list = &fs_info->fs_devices->devices;
	bool have_bad_value = false;
	int ret;

	/* Seed device is not supported yet */
	if (fs_info->fs_devices->seed) {
		error("fixing device size with seed device is not supported yet");
		return -ENOTTY;
	}

	/* All devices must be set up before repairing */
	if (list_empty(dev_list)) {
		error("no device found");
		return -ENODEV;
	}
	list_for_each_entry(device, dev_list, dev_list) {
		if (device->fd == -1 || !device->writeable) {
			error("devid %llu is missing or not writeable",
			      device->devid);
			error(
	"fixing device size needs all device(s) to be present and writeable");
			return -ENODEV;
		}
	}

	/* Repair total_bytes of each device */
	list_for_each_entry(device, dev_list, dev_list) {
		ret = btrfs_fix_device_size(fs_info, device);
		if (ret < 0)
			return ret;
		if (ret > 0)
			have_bad_value = true;
	}

	/* Repair super total_bytes */
	ret = btrfs_fix_super_size(fs_info);
	if (ret > 0)
		have_bad_value = true;
	if (have_bad_value) {
		printf(
	"Fixed unaligned/mismatched total_bytes for super block and device items\n");
		ret = 1;
	} else {
		printf("No device size related problem found\n");
		ret = 0;
	}
	return ret;
}