/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>
#include <fcntl.h>
#include <unistd.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "utils.h"

struct stripe {
	struct btrfs_device *dev;
	u64 physical;
};

static inline int nr_parity_stripes(struct map_lookup *map)
{
	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 1;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;
	else
		return 0;
}

static inline int nr_data_stripes(struct map_lookup *map)
{
	return map->num_stripes - nr_parity_stripes(map);
}
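
/*
 * Example: a four-device RAID6 chunk has num_stripes == 4, two of which
 * hold parity (P and Q), so nr_parity_stripes() returns 2 and
 * nr_data_stripes() returns 2.
 */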

#define is_parity_stripe(x) ( ((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE) )

static LIST_HEAD(fs_uuids);

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->fd = -1;
		device->devid = devid;
		device->generation = found_transid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		if (!device->label) {
			kfree(device->name);
			kfree(device);
			return -ENOMEM;
		}
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
		device->bytes_used =
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name, path)) {
		char *name = strdup(path);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_device *device;

again:
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		if (device->fd != -1) {
			if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
				fprintf(stderr, "Warning, could not drop caches\n");
			close(device->fd);
			device->fd = -1;
		}
		device->writeable = 0;
		list_del(&device->dev_list);
		/* free the memory */
		free(device->name);
		free(device->label);
		free(device);
	}

	seed_devices = fs_devices->seed;
	fs_devices->seed = NULL;
	if (seed_devices) {
		struct btrfs_fs_devices *orig;

		orig = fs_devices;
		fs_devices = seed_devices;
		list_del(&orig->list);
		free(orig);
		goto again;
	} else {
		list_del(&fs_devices->list);
		free(fs_devices);
	}

	return 0;
}

void btrfs_close_all_devices(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
					list);
		btrfs_close_devices(fs_devices);
	}
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	int fd;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->name) {
			printk("no name for device %llu, skip it now\n", device->devid);
			continue;
		}

		fd = open(device->name, flags);
		if (fd < 0) {
			ret = -errno;
			goto fail;
		}

		if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
			fprintf(stderr, "Warning, could not drop caches\n");

		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		device->fd = fd;
		if (flags & O_RDWR)
			device->writeable = 1;
	}
	return 0;
fail:
	btrfs_close_devices(fs_devices);
	return ret;
}

int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset, int super_recover)
{
	struct btrfs_super_block *disk_super;
	char buf[BTRFS_SUPER_INFO_SIZE];
	int ret;
	u64 devid;

	disk_super = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, disk_super, super_offset, super_recover);
	if (ret < 0)
		return -EIO;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
		*total_devs = 1;
	else
		*total_devs = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset,
				  u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
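
/*
 * The superblock keeps its own copy of the SYSTEM chunks as a packed
 * sequence of (struct btrfs_disk_key, struct btrfs_chunk) pairs in
 * sys_chunk_array; btrfs_read_sys_array() below walks the same layout
 * when loading the array at mount time.
 */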

int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else if (type & BTRFS_BLOCK_GROUP_RAID5)
		return calc_size * (num_stripes - 1);
	else if (type & BTRFS_BLOCK_GROUP_RAID6)
		return calc_size * (num_stripes - 2);
	else
		return calc_size * num_stripes;
}
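
/*
 * Example for chunk_bytes_by_type(): with calc_size == 1GiB per stripe,
 * a 4-stripe RAID10 chunk (sub_stripes == 2) provides 4 / 2 == 2GiB of
 * usable space, and a 4-stripe RAID6 chunk provides (4 - 2) == 2GiB.
 */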

static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO, add a way to store the preferred stripe size */
	return BTRFS_STRIPE_LEN;
}

/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in 1M at beginning of device, and not
 * allowed to allocate any chunk before alloc_start if it is specified.
 * So search holes from max(1M, alloc_start) to device->total_bytes.
 */
static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
				    struct btrfs_device *device,
				    u64 *avail_bytes)
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct extent_buffer *l;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;
	u64 extent_end = 0;
	u64 free_bytes = 0;
	int ret;
	int slot = 0;

	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = root->fs_info->alloc_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	path->reada = 2;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;
		if (key.objectid > device->devid)
			break;
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;
		if (key.offset > search_end)
			break;
		if (key.offset > search_start)
			free_bytes += key.offset - search_start;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
		if (search_start > search_end)
			break;
next:
		path->slots[0]++;
		cond_resched();
	}

	if (search_start < search_end)
		free_bytes += search_end - search_start;

	*avail_bytes = free_bytes;
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)
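
/*
 * Both limits above bound how many stripes one chunk item may carry:
 * BTRFS_MAX_DEVS() is what fits in a single chunk tree leaf, while
 * BTRFS_MAX_DEVS_SYS_CHUNK is what fits in the superblock's system
 * chunk array (the factor of two appears to leave headroom for one
 * more key/chunk pair).
 */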

int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 8 * 1024 * 1024;
	u64 min_free;
	u64 max_chunk_size = 4 * calc_size;
	u64 avail = 0;
	u64 max_avail = 0;
	u64 percent_max;
	u64 offset;
	int num_stripes = 1;
	int max_stripes = 0;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	if (list_empty(dev_list)) {
		return -ENOSPC;
	}

	if (type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_DUP)) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			calc_size = 8 * 1024 * 1024;
			max_chunk_size = calc_size * 2;
			min_stripe_size = 1 * 1024 * 1024;
			max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 10 * calc_size;
			min_stripe_size = 64 * 1024 * 1024;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 4 * calc_size;
			min_stripe_size = 32 * 1024 * 1024;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		}
	}
	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & BTRFS_BLOCK_GROUP_DUP) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
		stripe_len = find_raid56_stripe_len(num_stripes - 1,
				btrfs_super_stripesize(info->super_copy));
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 3)
			return -ENOSPC;
		min_stripes = 3;
		stripe_len = find_raid56_stripe_len(num_stripes - 2,
				btrfs_super_stripesize(info->super_copy));
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
	    max_chunk_size) {
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	calc_size /= stripe_len;
	calc_size *= stripe_len;
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		ret = btrfs_device_avail_bytes(trans, device, &avail);
		if (ret)
			return ret;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &offset);
	if (ret)
		return ret;
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = offset;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}

	kfree(chunk);
	return ret;
}
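
/*
 * Minimal usage sketch for btrfs_alloc_chunk() above (hypothetical
 * caller, inside an open transaction):
 *
 *	u64 start, num_bytes;
 *	ret = btrfs_alloc_chunk(trans, root->fs_info->extent_root,
 *				&start, &num_bytes,
 *				BTRFS_BLOCK_GROUP_METADATA);
 *
 * On success, [start, start + num_bytes) is the new chunk's logical
 * address range.
 */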

int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *extent_root, u64 *start,
			   u64 num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	u64 calc_size = 8 * 1024 * 1024;
	int num_stripes = 1;
	int sub_stripes = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	calc_size = num_bytes;

	index = 0;
	cur = dev_list->next;
	device = list_entry(cur, struct btrfs_device, dev_list);

	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	kfree(chunk);
	return ret;
}
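
/*
 * Return how many copies of each byte the given logical range stores:
 * num_stripes for DUP/RAID1, sub_stripes for RAID10, and 2 or 3 for
 * RAID5/RAID6 (the data itself plus what parity can reconstruct).
 */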

int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	int ret;

	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		fprintf(stderr, "No mapping for %llu-%llu\n",
			(unsigned long long)logical,
			(unsigned long long)logical+len);
		return 1;
	}
	if (ce->start > logical || ce->start + ce->size < logical) {
		fprintf(stderr, "Invalid mapping for %llu-%llu, got "
			"%llu-%llu\n", (unsigned long long)logical,
			(unsigned long long)logical+len,
			(unsigned long long)ce->start,
			(unsigned long long)ce->start + ce->size);
		return 1;
	}
	map = container_of(ce, struct map_lookup, ce);

	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	return ret;
}

int btrfs_next_metadata(struct btrfs_mapping_tree *map_tree, u64 *logical,
			u64 *size)
{
	struct cache_extent *ce;
	struct map_lookup *map;

	ce = search_cache_extent(&map_tree->cache_tree, *logical);

	while (ce) {
		ce = next_cache_extent(ce);
		if (!ce)
			return -ENOENT;

		map = container_of(ce, struct map_lookup, ce);
		if (map->type & BTRFS_BLOCK_GROUP_METADATA) {
			*logical = ce->start;
			*size = ce->size;
			return 0;
		}
	}

	return -ENOENT;
}

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	ce = search_cache_extent(&map_tree->cache_tree, chunk_start);
	BUG_ON(!ce);
	map = container_of(ce, struct map_lookup, ce);

	length = ce->size;
	rmap_len = map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = ce->size / (map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = ce->size / map->num_stripes;
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			      BTRFS_BLOCK_GROUP_RAID6)) {
		length = ce->size / nr_data_stripes(map);
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = (physical - map->stripes[i].physical) /
			    map->stripe_len;

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = (stripe_nr * map->num_stripes + i) /
				    map->sub_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = ce->start + stripe_nr * rmap_len;
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	return 0;
}
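
/*
 * BTRFS_RAID5_P_STRIPE and BTRFS_RAID6_Q_STRIPE are (u64)-2 and (u64)-1,
 * larger than any real logical address, so sorting the raid_map in
 * ascending order leaves the data stripes first and P/Q at the end.
 */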

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < bbio->num_stripes - 1; i++) {
			if (parity_smaller(raid_map[i], raid_map[i+1])) {
				s = bbio->stripes[i];
				l = raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				raid_map[i] = raid_map[i+1];
				bbio->stripes[i+1] = s;
				raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num,
		    u64 **raid_map_ret)
{
	return __btrfs_map_block(map_tree, rw, logical, length, NULL,
				 multi_ret, mirror_num, raid_map_ret);
}

int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		      u64 logical, u64 *length, u64 *type,
		      struct btrfs_multi_bio **multi_ret, int mirror_num,
		      u64 **raid_map_ret)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 *raid_map = NULL;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && rw == READ) {
		stripes_allocated = 1;
	}
again:
	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		kfree(multi);
		*length = (u64)-1;
		return -ENOENT;
	}
	if (ce->start > logical) {
		kfree(multi);
		*length = ce->start - logical;
		return -ENOENT;
	}

	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}
	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;

	if (rw == WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
	    && multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
		/* RAID[56] write or recovery. Return all stripes */
		stripes_required = map->num_stripes;

		/* Only allocate the map if we've already got a large enough multi_ret */
		if (stripes_allocated >= stripes_required) {
			raid_map = kmalloc(sizeof(u64) * map->num_stripes,
					   GFP_NOFS);
			if (!raid_map) {
				kfree(multi);
				return -ENOMEM;
			}
		}
	}

	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && stripes_allocated < stripes_required) {
		stripes_allocated = stripes_required;
		kfree(multi);
		multi = NULL;
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = stripe_nr / map->stripe_len;

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, ce->size - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = ce->size - offset;
	}

	if (!multi_ret)
		goto out;

	multi->num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = stripe_nr % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = stripe_nr % factor;
		stripe_index *= map->sub_stripes;

		if (rw == WRITE)
			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;

		stripe_nr = stripe_nr / factor;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {
		if (raid_map) {
			int rot;
			u64 tmp;
			u64 raid56_full_stripe_start;
			u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len;

			/*
			 * align the start of our data stripe in the logical
			 * address space
			 */
			raid56_full_stripe_start = offset / full_stripe_len;
			raid56_full_stripe_start *= full_stripe_len;

			/* get the data stripe number */
			stripe_nr = raid56_full_stripe_start / map->stripe_len;
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/* Work out the disk rotation on this stripe-set */
			rot = stripe_nr % map->num_stripes;

			/* Fill in the logical address of each stripe */
			tmp = stripe_nr * nr_data_stripes(map);

			for (i = 0; i < nr_data_stripes(map); i++)
				raid_map[(i+rot) % map->num_stripes] =
					ce->start + (tmp + i) * map->stripe_len;

			raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE;
			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
				raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE;

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
			multi->num_stripes = map->num_stripes;
		} else {
			stripe_index = stripe_nr % nr_data_stripes(map);
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
					       mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			stripe_index = (stripe_nr + stripe_index) %
				       map->num_stripes;
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;

	if (type)
		*type = map->type;

	if (raid_map) {
		sort_parity_stripes(multi, raid_map);
		*raid_map_ret = raid_map;
	}
out:
	return 0;
}
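
/*
 * Example of the plain striping math at the end of __btrfs_map_block():
 * on a 3-device RAID0 chunk with 64KiB stripes, offset 200KiB gives
 * stripe_nr == 3 and stripe_offset == 8KiB; then stripe_index == 3 % 3
 * == 0 and stripe_nr == 3 / 3 == 1, i.e. the block lives on the chunk's
 * first device, one full stripe plus 8KiB past that stripe's start.
 */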

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    (!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE) ||
		     root->fs_info->ignore_fsid_mismatch)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

struct btrfs_device *
btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
			   u64 devid, int instance)
{
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *dev;
	int num_found = 0;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid && num_found++ == instance)
			return dev;
	}
	return NULL;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	/*
	 * During chunk recovering, we may fail to find block group's
	 * corresponding chunk, we will rebuild it later
	 */
	ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
	if (!root->fs_info->is_chunk_recover)
		BUG_ON(!ce);
	if (!ce)
		return 0;

	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}

	return readonly;
}

static struct btrfs_device *fill_missing_device(u64 devid)
{
	struct btrfs_device *device;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	device->devid = devid;
	device->fd = -1;
	return device;
}

/*
 * Slot is used to verify the chunk item is valid
 *
 * For sys chunk in superblock, pass -1 to indicate sys chunk.
 */
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk, int slot)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	ce = search_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	/* Check on chunk item type */
	if (map->type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			  BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		fprintf(stderr, "Unknown chunk type bits: %llu\n",
			map->type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
				      BTRFS_BLOCK_GROUP_PROFILE_MASK));
		ret = -EIO;
		goto out;
	}

	/*
	 * Btrfs_chunk contains at least one stripe, and for sys_chunk
	 * it can't exceed the system chunk array size
	 * For normal chunk, it should match its chunk item size.
	 */
	if (num_stripes < 1 ||
	    (slot == -1 && sizeof(struct btrfs_stripe) * num_stripes >
	     BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) ||
	    (slot >= 0 && sizeof(struct btrfs_stripe) * (num_stripes - 1) >
	     btrfs_item_size_nr(leaf, slot))) {
		fprintf(stderr, "Invalid num_stripes: %u\n",
			num_stripes);
		ret = -EIO;
		goto out;
	}

	/*
	 * Device number check against profile
	 */
	if ((map->type & BTRFS_BLOCK_GROUP_RAID10 && map->sub_stripes == 0) ||
	    (map->type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (map->type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (map->type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (map->type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		fprintf(stderr,
			"Invalid num_stripes:sub_stripes %u:%u for profile %llu\n",
			num_stripes, map->sub_stripes,
			map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = fill_missing_device(devid);
			printf("warning, device %llu is missing\n",
			       (unsigned long long)devid);
		}
	}
	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	BUG_ON(ret);

	return 0;
out:
	kfree(map);
	return ret;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
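
/*
 * A dev item whose fsid differs from the filesystem's own fsid belongs
 * to a seed filesystem: find (or stub out) the matching btrfs_fs_devices,
 * open it read-only and chain it onto the sprout's seed list.
 */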

static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret = 0;

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		/* missing all seed devices */
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices) {
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, fsid, BTRFS_FSID_SIZE);
	}

	ret = btrfs_open_devices(fs_devices, O_RDONLY);
	if (ret)
		goto out;

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret = 0;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret)
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device) {
		printk("warning devid %llu not found already\n",
		       (unsigned long long)devid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		device->fd = -1;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 len = 0;
	u8 *ptr;
	u8 *array_end;
	int ret = 0;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
	array_end = ((u8 *)super_copy->sys_chunk_array) +
		    btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
	ptr = super_copy->sys_chunk_array;

	while (ptr < array_end) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr - (u8 *)super_copy);
			ret = read_one_chunk(root, &key, sb, chunk, -1);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
	}
	free_extent_buffer(sb);
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			BUG_ON(ret);
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk,
					     slot);
			BUG_ON(ret);
		}
		path->slots[0]++;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

struct list_head *btrfs_scanned_uuids(void)
{
	return &fs_uuids;
}

static int rmw_eb(struct btrfs_fs_info *info,
		  struct extent_buffer *eb, struct extent_buffer *orig_eb)
{
	int ret;
	unsigned long orig_off = 0;
	unsigned long dest_off = 0;
	unsigned long copy_len = eb->len;

	ret = read_whole_eb(info, eb, 0);
	if (ret)
		return ret;

	if (eb->start + eb->len <= orig_eb->start ||
	    eb->start >= orig_eb->start + orig_eb->len)
		return 0;
	/*
	 * | ----- orig_eb ------- |
	 *         | ----- stripe -------  |
	 *         | ----- orig_eb ------- |
	 *              | ----- orig_eb ------- |
	 */
	if (eb->start > orig_eb->start)
		orig_off = eb->start - orig_eb->start;
	if (orig_eb->start > eb->start)
		dest_off = orig_eb->start - eb->start;

	if (copy_len > orig_eb->len - orig_off)
		copy_len = orig_eb->len - orig_off;
	if (copy_len > eb->len - dest_off)
		copy_len = eb->len - dest_off;

	memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);
	return 0;
}

static void split_eb_for_raid56(struct btrfs_fs_info *info,
				struct extent_buffer *orig_eb,
				struct extent_buffer **ebs,
				u64 stripe_len, u64 *raid_map,
				int num_stripes)
{
	struct extent_buffer *eb;
	u64 start = orig_eb->start;
	u64 this_eb_start;
	int i;
	int ret;

	for (i = 0; i < num_stripes; i++) {
		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		eb = calloc(1, sizeof(struct extent_buffer) + stripe_len);
		BUG_ON(!eb);

		eb->start = raid_map[i];
		eb->len = stripe_len;
		eb->refs = 1;
		eb->flags = 0;
		eb->fd = -1;
		eb->dev_bytenr = (u64)-1;

		this_eb_start = raid_map[i];

		if (start > this_eb_start ||
		    start + orig_eb->len < this_eb_start + stripe_len) {
			ret = rmw_eb(info, eb, orig_eb);
			BUG_ON(ret);
		} else {
			memcpy(eb->data, orig_eb->data + eb->start - start, stripe_len);
		}
		ebs[i] = eb;
	}
}
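
/*
 * P is the byte-wise XOR of all the data stripes; Q is the RAID6
 * syndrome computed by raid6_gen_syndrome(). RAID5 writes only P,
 * RAID6 writes both P and Q after the data stripes.
 */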

int write_raid56_with_parity(struct btrfs_fs_info *info,
			     struct extent_buffer *eb,
			     struct btrfs_multi_bio *multi,
			     u64 stripe_len, u64 *raid_map)
{
	struct extent_buffer **ebs, *p_eb = NULL, *q_eb = NULL;
	int i;
	int j;
	int ret;
	int alloc_size = eb->len;
	void **pointers;

	ebs = kmalloc(sizeof(*ebs) * multi->num_stripes, GFP_NOFS);
	BUG_ON(!ebs);

	if (stripe_len > alloc_size)
		alloc_size = stripe_len;

	split_eb_for_raid56(info, eb, ebs, stripe_len, raid_map,
			    multi->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		struct extent_buffer *new_eb;
		if (raid_map[i] < BTRFS_RAID5_P_STRIPE) {
			ebs[i]->dev_bytenr = multi->stripes[i].physical;
			ebs[i]->fd = multi->stripes[i].dev->fd;
			multi->stripes[i].dev->total_ios++;
			BUG_ON(ebs[i]->start != raid_map[i]);
			continue;
		}
		new_eb = kmalloc(sizeof(*eb) + alloc_size, GFP_NOFS);
		BUG_ON(!new_eb);
		new_eb->dev_bytenr = multi->stripes[i].physical;
		new_eb->fd = multi->stripes[i].dev->fd;
		multi->stripes[i].dev->total_ios++;
		new_eb->len = stripe_len;

		if (raid_map[i] == BTRFS_RAID5_P_STRIPE)
			p_eb = new_eb;
		else if (raid_map[i] == BTRFS_RAID6_Q_STRIPE)
			q_eb = new_eb;
	}
	if (q_eb) {
		pointers = kmalloc(sizeof(*pointers) * multi->num_stripes,
				   GFP_NOFS);
		BUG_ON(!pointers);

		ebs[multi->num_stripes - 2] = p_eb;
		ebs[multi->num_stripes - 1] = q_eb;

		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;

		raid6_gen_syndrome(multi->num_stripes, stripe_len, pointers);
		kfree(pointers);
	} else {
		ebs[multi->num_stripes - 1] = p_eb;
		memcpy(p_eb->data, ebs[0]->data, stripe_len);
		for (j = 1; j < multi->num_stripes - 1; j++) {
			for (i = 0; i < stripe_len; i += sizeof(unsigned long)) {
				*(unsigned long *)(p_eb->data + i) ^=
					*(unsigned long *)(ebs[j]->data + i);
			}
		}
	}

	for (i = 0; i < multi->num_stripes; i++) {
		ret = write_extent_to_disk(ebs[i]);
		BUG_ON(ret);
		if (ebs[i] != eb)
			kfree(ebs[i]);
	}

	kfree(ebs);

	return 0;
}