/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>
#include <fcntl.h>
#include <unistd.h>
#include "kerncompat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"

struct stripe {
	struct btrfs_device *dev;
	u64 physical;
};

struct map_lookup {
	struct cache_extent ce;
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static LIST_HEAD(fs_uuids);

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

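/*
 * record a device found during a scan: find (or create) the
 * btrfs_fs_devices entry for the fsid in the super block, then find
 * (or create) the btrfs_device entry for this devid/uuid inside it.
 * The latest_* and lowest_devid fields track the newest generation
 * and smallest devid seen so far.
 */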
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
		device->bytes_used =
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

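/*
 * close every device in this filesystem, including any seed device
 * lists chained off fs_devices->seed, and mark them non-writeable
 */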
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices;
	struct list_head *cur;
	struct btrfs_device *device;

again:
	list_for_each(cur, &fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		close(device->fd);
		device->fd = -1;
		device->writeable = 0;
	}

	seed_devices = fs_devices->seed;
	fs_devices->seed = NULL;
	if (seed_devices) {
		fs_devices = seed_devices;
		goto again;
	}

	return 0;
}

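/*
 * open every scanned device with the given flags, remembering the fds
 * of the latest and lowest devids; on any failure everything opened so
 * far is closed again
 */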
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	int fd;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);

		fd = open(device->name, flags);
		if (fd < 0) {
			ret = -errno;
			goto fail;
		}

		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		device->fd = fd;
		if (flags == O_RDWR)
			device->writeable = 1;
	}
	return 0;
fail:
	btrfs_close_devices(fs_devices);
	return ret;
}

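/*
 * read the super block at super_offset from an open fd and, if it is
 * valid, add the device to the global list via device_list_add()
 */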
int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset)
{
	struct btrfs_super_block *disk_super;
	char *buf;
	int ret;
	u64 devid;
	char uuidbuf[37];

	buf = malloc(4096);
	if (!buf) {
		ret = -ENOMEM;
		goto error;
	}
	disk_super = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, disk_super, super_offset);
	if (ret < 0) {
		ret = -EIO;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
		*total_devs = 1;
	else
		*total_devs = btrfs_super_num_devices(disk_super);
	uuid_unparse(disk_super->fsid, uuidbuf);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	free(buf);
error:
	return ret;
}

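/*
 * Illustrative sketch (not part of the original file, compiled out): a
 * hypothetical caller would open a candidate device read-only and hand
 * it to btrfs_scan_one_device() to register it in the global
 * scanned-device list.
 */
#if 0
static int example_scan_device(const char *path)
{
	struct btrfs_fs_devices *fs_devices;
	u64 total_devs;
	int fd;
	int ret;

	fd = open(path, O_RDONLY);	/* hypothetical device path */
	if (fd < 0)
		return -errno;
	ret = btrfs_scan_one_device(fd, path, &fs_devices, &total_devs,
				    BTRFS_SUPER_INFO_OFFSET);
	close(fd);
	return ret;
}
#endif
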
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}

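/*
 * find a free area of num_bytes on the device and insert a dev extent
 * item recording which chunk it belongs to; *start returns the chosen
 * physical offset
 */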
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

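/*
 * return in *offset the end of the last chunk owned by objectid, i.e.
 * the logical address where the next chunk can be placed
 */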
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

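/* find the highest existing devid and return the next free one */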
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

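/*
 * append a key + chunk item to the sys_chunk_array in the super block
 * copy, so the system chunks can be found before any trees are read
 */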
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	return num / 10;
}

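/*
 * bytes of usable chunk space given one stripe of calc_size bytes per
 * device: RAID1/DUP store copies (calc_size total), RAID10 stores
 * num_stripes / sub_stripes stripes of data (e.g. 4 stripes of 1GB
 * with 2 sub_stripes give a 2GB chunk), everything else is
 * calc_size * num_stripes
 */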
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}

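/*
 * pick stripe sizes for the block group type, build a private list of
 * devices with enough free space, allocate a dev extent on each one,
 * then insert the chunk item and its mapping into the map tree
 */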
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 8 * 1024 * 1024;
	u64 min_free;
	u64 max_chunk_size = 4 * calc_size;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if (list_empty(dev_list)) {
		return -ENOSPC;
	}

	if (type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_DUP)) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			calc_size = 8 * 1024 * 1024;
			max_chunk_size = calc_size * 2;
			min_stripe_size = 1 * 1024 * 1024;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 10 * calc_size;
			min_stripe_size = 64 * 1024 * 1024;
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 4 * calc_size;
			min_stripe_size = 32 * 1024 * 1024;
		}
	}
	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				  btrfs_super_num_devices(&info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & BTRFS_BLOCK_GROUP_DUP) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
	    max_chunk_size) {
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	calc_size /= stripe_len;
	calc_size *= stripe_len;
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_existing_cache_extent(
			   &extent_root->fs_info->mapping_tree.cache_tree,
			   &map->ce);
	BUG_ON(ret);

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}

	kfree(chunk);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	cache_tree_init(&tree->cache_tree);
}

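/*
 * number of copies of the block at the given logical address: all
 * stripes for RAID1/DUP, sub_stripes for RAID10, otherwise one
 */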
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	int ret;

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);
	BUG_ON(!ce);
	BUG_ON(ce->start > logical || ce->start + ce->size < logical);
	map = container_of(ce, struct map_lookup, ce);

	offset = logical - ce->start;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	return ret;
}

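/*
 * reverse map: collect in *logical every chunk-relative address that a
 * physical byte on the given device can belong to, deduplicated
 */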
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	ce = find_first_cache_extent(&map_tree->cache_tree, chunk_start);
	BUG_ON(!ce);
	map = container_of(ce, struct map_lookup, ce);

	length = ce->size;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = ce->size / (map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = ce->size / map->num_stripes;

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = (physical - map->stripes[i].physical) /
			    map->stripe_len;

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = (stripe_nr * map->num_stripes + i) /
				    map->sub_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = ce->start + stripe_nr * map->stripe_len;
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	return 0;
}

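/*
 * map a logical address to the physical stripes it lives on.  For
 * example, with RAID0 over two devices and a 64KB stripe_len, an
 * offset of 192KB into the chunk gives stripe_nr = 3, so
 * stripe_index = 3 % 2 = 1 and stripe_nr = 3 / 2 = 1: the data sits
 * 64KB into device 1's dev extent.  For writes the multi-bio is
 * widened to cover every mirror.
 */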
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && rw == READ) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);
	BUG_ON(!ce);
	BUG_ON(ce->start > logical || ce->start + ce->size < logical);
	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;

	if (rw == WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = stripe_nr / map->stripe_len;

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, ce->size - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = ce->size - offset;
	}

	if (!multi_ret)
		goto out;

	multi->num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = stripe_nr % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = stripe_nr % factor;
		stripe_index *= map->sub_stripes;

		if (rw == WRITE)
			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else
			stripe_index = stripe_nr % map->sub_stripes;

		stripe_nr = stripe_nr / factor;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;
out:
	return 0;
}

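/*
 * look up a device by devid/uuid, searching this filesystem and then
 * any seed filesystems chained behind it
 */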
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

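/*
 * before the chunk tree can be read we need a mapping that covers the
 * super block itself; fake one up that mirrors it across every device
 */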
int btrfs_bootstrap_super_map(struct btrfs_mapping_tree *map_tree,
			      struct btrfs_fs_devices *fs_devices)
{
	struct map_lookup *map;
	u64 logical = BTRFS_SUPER_INFO_OFFSET;
	u64 length = BTRFS_SUPER_INFO_SIZE;
	int num_stripes = 0;
	int sub_stripes = 0;
	int ret;
	int i;
	struct list_head *cur;

	list_for_each(cur, &fs_devices->devices) {
		num_stripes++;
	}
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;
	map->io_width = length;
	map->io_align = length;
	map->sector_size = length;
	map->stripe_len = length;
	map->type = BTRFS_BLOCK_GROUP_RAID1;

	i = 0;
	list_for_each(cur, &fs_devices->devices) {
		struct btrfs_device *device = list_entry(cur,
							 struct btrfs_device,
							 dev_list);
		map->stripes[i].physical = logical;
		map->stripes[i].dev = device;
		i++;
	}
	ret = insert_existing_cache_extent(&map_tree->cache_tree, &map->ce);
	if (ret == -EEXIST) {
		struct cache_extent *old;
		struct map_lookup *old_map;
		old = find_cache_extent(&map_tree->cache_tree, logical, length);
		old_map = container_of(old, struct map_lookup, ce);
		remove_cache_extent(&map_tree->cache_tree, old);
		kfree(old_map);
		ret = insert_existing_cache_extent(&map_tree->cache_tree,
						   &map->ce);
	}
	BUG_ON(ret);
	return 0;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	ce = find_first_cache_extent(&map_tree->cache_tree, chunk_offset);
	BUG_ON(!ce);

	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}

	return readonly;
}

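/*
 * turn an on-disk chunk item into a map_lookup entry in the mapping
 * tree, resolving each stripe's devid/uuid to a btrfs_device
 */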
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev) {
			kfree(map);
			return -EIO;
		}
	}
	ret = insert_existing_cache_extent(&map_tree->cache_tree, &map->ce);
	BUG_ON(ret);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_open_devices(fs_devices, O_RDONLY);
	if (ret)
		goto out;

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}

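/*
 * read a dev item from the chunk tree and fill in (or create) the
 * matching btrfs_device; items with a foreign fsid cause the seed
 * filesystem that owns them to be opened first
 */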
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret = 0;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret)
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device) {
		printk("warning devid %llu not found already\n",
		       (unsigned long long)devid);
		device = kmalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		device->total_ios = 0;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	return ret;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}

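/*
 * walk the sys_chunk_array embedded in the super block and create
 * mappings for the system chunks it describes
 */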
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u8 *ptr;
	unsigned long sb_ptr;
	u32 cur;
	int ret = 0;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}

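/*
 * read every device item and then every chunk item from the chunk
 * root, populating the device list and the logical->physical mapping
 * tree
 */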
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			BUG_ON(ret);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

struct list_head *btrfs_scanned_uuids(void)
{
	return &fs_uuids;
}