/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct bio *tail;
	struct bio *cur;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		device->running_pending = 0;
	} else {
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	if (device->running_pending)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
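/*
 * Record one scanned device in the in-memory list of known filesystems
 * (fs_uuids): create a new btrfs_fs_devices for a previously unseen fsid,
 * or refresh the path and latest generation of a device we already track.
 */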
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}

		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
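/*
 * Make an in-memory copy of an fs_devices structure and of every device on
 * it; used when sprouting a seed filesystem so the original device list can
 * be preserved.
 */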
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	mutex_lock(&orig->device_list_mutex);
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
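/*
 * Drop any scanned device that the mounted filesystem's metadata does not
 * actually reference, closing its block device if it had been opened.
 */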
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
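/*
 * Open every device in the list, validate its super block and remember the
 * device holding the newest generation as latest_bdev.
 */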
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
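/*
 * Read the super block of the device at @path and, if it belongs to btrfs,
 * register it with device_list_add().
 */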
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @trans:	transaction handle
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = 1024 * 1024;

	if (root->fs_info->alloc_start + num_bytes <= search_end)
		search_start = max(root->fs_info->alloc_start, search_start);

	max_hole_start = search_start;
	max_hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
	}

	hole_size = search_end - search_start;
	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
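/*
 * Delete the device extent item that covers @start on @device and return
 * the freed bytes to the device's bytes_used accounting.
 */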
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
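/*
 * Insert a device extent item describing @num_bytes at @start on @device,
 * pointing back at the owning chunk.
 */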
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
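/*
 * Find the logical offset right after the last chunk item with the given
 * objectid, i.e. where the next chunk can be placed.
 */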
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
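/*
 * Pick the devid for the next device to be added by looking at the last
 * device item in the chunk tree.
 */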
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
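/*
 * Remove a device from a mounted filesystem: enforce the redundancy limits,
 * migrate its extents away with btrfs_shrink_device(), delete its items and
 * drop it from the in-memory device lists.
 */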
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_init(&device->dev_list);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		blkdev_put(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(root, path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
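/*
 * Add a brand new device to a mounted filesystem; when the filesystem is a
 * seed, sprout a writable copy of it first via btrfs_prepare_sprout().
 */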
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}
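/*
 * Write the in-memory state of @device back into its device item in the
 * chunk tree.
 */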
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;

	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			       u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
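/*
 * Relocate one chunk: move every extent it holds somewhere else, then
 * delete its device extents, its chunk item and its block group.
 */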
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
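/*
 * Balance the filesystem: shrink and re-grow each device to free a little
 * space, then walk the chunk tree backwards and relocate every chunk so the
 * data is redistributed over the current devices.
 */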
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret && ret != -ENOSPC);
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 * for us.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(root, path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(root, path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(root, path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
		key.offset -= 1;
	}

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}
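/*
 * Append a SYSTEM chunk item to the sys_chunk_array embedded in the super
 * block so the chunk tree itself can be found at mount time.
 */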
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
/* Used to sort the devices by max_avail (descending sort) */
int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2)
{
	if (((struct btrfs_device_info *)dev_info1)->max_avail >
	    ((struct btrfs_device_info *)dev_info2)->max_avail)
		return -1;
	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
		 ((struct btrfs_device_info *)dev_info2)->max_avail)
		return 1;
	else
		return 0;
}
static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type,
				 int *num_stripes, int *min_stripes,
				 int *sub_stripes)
{
	*num_stripes = 1;
	*min_stripes = 1;
	*sub_stripes = 0;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		*num_stripes = fs_devices->rw_devices;
		*min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		*num_stripes = 2;
		*min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		if (fs_devices->rw_devices < 2)
			return -ENOSPC;
		*num_stripes = 2;
		*min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		*num_stripes = fs_devices->rw_devices;
		if (*num_stripes < 4)
			return -ENOSPC;
		*num_stripes &= ~(u32)1;
		*sub_stripes = 2;
		*min_stripes = 4;
	}

	return 0;
}
static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices,
				    u64 proposed_size, u64 type,
				    int num_stripes, int small_stripe)
{
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = proposed_size;
	u64 max_chunk_size = calc_size;
	int ncopies = 1;

	if (type & (BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_DUP |
		    BTRFS_BLOCK_GROUP_RAID10))
		ncopies = 2;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 256 * 1024 * 1024;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	if (calc_size * num_stripes > max_chunk_size * ncopies) {
		calc_size = max_chunk_size * ncopies;
		do_div(calc_size, num_stripes);
		do_div(calc_size, BTRFS_STRIPE_LEN);
		calc_size *= BTRFS_STRIPE_LEN;
	}

	/* we don't want tiny stripes */
	if (!small_stripe)
		calc_size = max_t(u64, min_stripe_size, calc_size);

	/*
	 * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure
	 * we end up with something bigger than a stripe
	 */
	calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN);

	do_div(calc_size, BTRFS_STRIPE_LEN);
	calc_size *= BTRFS_STRIPE_LEN;

	return calc_size;
}
*__shrink_map_lookup_stripes(struct map_lookup
*map
,
2344 struct map_lookup
*new;
2345 size_t len
= map_lookup_size(num_stripes
);
2347 BUG_ON(map
->num_stripes
< num_stripes
);
2349 if (map
->num_stripes
== num_stripes
)
2352 new = kmalloc(len
, GFP_NOFS
);
2354 /* just change map->num_stripes */
2355 map
->num_stripes
= num_stripes
;
2359 memcpy(new, map
, len
);
2360 new->num_stripes
= num_stripes
;
/*
 * helper to allocate device space from btrfs_device_info, in which we stored
 * max free space information of every device. It is used when we can not
 * allocate chunks by default size.
 *
 * By this helper, we can allocate a new chunk as large as possible.
 */
static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_devices *fs_devices,
				    struct btrfs_device_info *devices,
				    int nr_device, u64 type,
				    struct map_lookup **map_lookup,
				    int min_stripes, u64 *stripe_size)
{
	int i, index, sort_again = 0;
	int min_devices = min_stripes;
	u64 max_avail, min_free;
	struct map_lookup *map = *map_lookup;
	int ret;

	if (nr_device < min_stripes)
		return -ENOSPC;

	btrfs_descending_sort_devices(devices, nr_device);

	max_avail = devices[0].max_avail;
	if (!max_avail)
		return -ENOSPC;

	for (i = 0; i < nr_device; i++) {
		/*
		 * if dev_offset = 0, it means the free space of this device
		 * is less than what we need, and we didn't search max avail
		 * extent on this device, so do it now.
		 */
		if (!devices[i].dev_offset) {
			ret = find_free_dev_extent(trans, devices[i].dev,
						   max_avail,
						   &devices[i].dev_offset,
						   &devices[i].max_avail);
			if (ret != 0 && ret != -ENOSPC)
				return ret;
			sort_again = 1;
		}
	}

	/* we update the max avail free extent of each devices, sort again */
	if (sort_again)
		btrfs_descending_sort_devices(devices, nr_device);

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_devices = 1;

	if (!devices[min_devices - 1].max_avail)
		return -ENOSPC;

	max_avail = devices[min_devices - 1].max_avail;
	if (type & BTRFS_BLOCK_GROUP_DUP)
		do_div(max_avail, 2);

	max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type,
					     min_stripes, 1);
	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = max_avail * 2;
	else
		min_free = max_avail;

	if (min_free > devices[min_devices - 1].max_avail)
		return -ENOSPC;

	map = __shrink_map_lookup_stripes(map, min_stripes);
	*stripe_size = max_avail;

	index = 0;
	for (i = 0; i < min_stripes; i++) {
		map->stripes[i].dev = devices[index].dev;
		map->stripes[i].physical = devices[index].dev_offset;
		if (type & BTRFS_BLOCK_GROUP_DUP) {
			i++;
			map->stripes[i].dev = devices[index].dev;
			map->stripes[i].physical = devices[index].dev_offset +
						   max_avail;
		}
		index++;
	}
	*map_lookup = map;

	return 0;
}
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes, u64 *stripe_size,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_device *device = NULL;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info;
	struct list_head private_devs;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 min_free;
	u64 avail;
	u64 dev_offset;
	int num_stripes;
	int min_stripes;
	int sub_stripes;
	int min_devices;	/* the min number of devices we need */
	int i;
	int ret;
	int index;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes,
				    &min_stripes, &sub_stripes);
	if (ret)
		return ret;

	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	cur = fs_devices->alloc_list.next;
	index = 0;
	i = 0;

	calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type,
					     num_stripes, 0);

	if (type & BTRFS_BLOCK_GROUP_DUP) {
		min_free = calc_size * 2;
		min_devices = 1;
	} else {
		min_free = calc_size;
		min_devices = min_stripes;
	}

	INIT_LIST_HEAD(&private_devs);
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
		BUG_ON(!device->writeable);
		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;

		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &devices_info[i].dev_offset,
						   &devices_info[i].max_avail);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				map->stripes[index].dev = device;
				map->stripes[index].physical =
						devices_info[i].dev_offset;
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP) {
					map->stripes[index].dev = device;
					map->stripes[index].physical =
						devices_info[i].dev_offset +
						calc_size;
					index++;
				}
			} else if (ret != -ENOSPC)
				goto error;

			devices_info[i].dev = device;
			i++;
		} else if (device->in_fs_metadata &&
			   avail >= BTRFS_STRIPE_LEN) {
			devices_info[i].dev = device;
			devices_info[i].max_avail = avail;
			i++;
		}

		if (cur == &fs_devices->alloc_list)
			break;
	}

	list_splice(&private_devs, &fs_devices->alloc_list);
	if (index < num_stripes) {
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}

			map = __shrink_map_lookup_stripes(map, num_stripes);
		} else if (i >= min_devices) {
			ret = __btrfs_alloc_tiny_space(trans, fs_devices,
						       devices_info, i, type,
						       &map, min_stripes,
						       &calc_size);
			if (ret)
				goto error;
		} else {
			ret = -ENOSPC;
			goto error;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	*stripe_size = calc_size;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 map->num_stripes, sub_stripes);

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		ret = -ENOMEM;
		goto error;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
2620 ret
= btrfs_make_block_group(trans
, extent_root
, 0, type
,
2621 BTRFS_FIRST_CHUNK_TREE_OBJECTID
,
2626 while (index
< map
->num_stripes
) {
2627 device
= map
->stripes
[index
].dev
;
2628 dev_offset
= map
->stripes
[index
].physical
;
2630 ret
= btrfs_alloc_dev_extent(trans
, device
,
2631 info
->chunk_root
->root_key
.objectid
,
2632 BTRFS_FIRST_CHUNK_TREE_OBJECTID
,
2633 start
, dev_offset
, calc_size
);
2638 kfree(devices_info
);
2643 kfree(devices_info
);
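/*
 * Illustrative sketch (not part of the original file): chunk_bytes_by_type()
 * used above converts a per-device stripe size into the logical size of the
 * whole chunk.  A userspace model of that relationship (the helper name and
 * the exact flag handling here are assumptions for illustration only):
 */
#if 0
static u64 example_chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;		/* mirrored: one stripe of data */
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * num_stripes / sub_stripes; /* mirrors pair up */
	else
		return calc_size * num_stripes;	/* RAID0/single: stripes add up */
}
#endif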
static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}

	kfree(chunk);
	return 0;
}
/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the work
 * that requires modifying the chunk tree. This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying the chunk tree needs to allocate new blocks from both
	 * the system block group and the metadata block group. So we can
	 * only do operations that modify the chunk tree after both block
	 * groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
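/*
 * Illustrative sketch (not part of the original file): callers typically use
 * the copy count above to retry a failed read against each mirror in turn by
 * bumping mirror_num.  A hypothetical retry loop, assuming an invented
 * example_read_mirror() helper that takes a 1-based mirror number:
 */
#if 0
static int example_read_all_mirrors(struct btrfs_mapping_tree *map_tree,
				    u64 logical, u64 len)
{
	int num_copies = btrfs_num_copies(map_tree, logical, len);
	int mirror;
	int ret = -EIO;

	for (mirror = 1; mirror <= num_copies; mirror++) {
		ret = example_read_mirror(map_tree, logical, len, mirror);
		if (ret == 0)
			break;		/* one good copy is enough */
	}
	return ret;
}
#endif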
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;

	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}

	/* we couldn't find one that doesn't fail. Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
		stripes_allocated = 1;
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & REQ_WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (rw & REQ_DISCARD) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			stripes_required = map->num_stripes;
		}
	}
	if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (rw & REQ_DISCARD)
		*length = min_t(u64, em->len - offset, *length);
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			      BTRFS_BLOCK_GROUP_RAID1 |
			      BTRFS_BLOCK_GROUP_RAID10 |
			      BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
			(~(map->stripe_len - 1));
	do_div(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_index = do_div(stripe_nr, map->num_stripes);
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (rw & REQ_WRITE)
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	if (rw & REQ_DISCARD) {
		for (i = 0; i < num_stripes; i++) {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
				u64 stripes;
				u32 last_stripe = 0;
				int j;

				div_u64_rem(stripe_nr_end - 1,
						map->num_stripes,
						&last_stripe);

				for (j = 0; j < map->num_stripes; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
							map->num_stripes, &test);
					if (test == stripe_index)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, map->num_stripes);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i == 0) {
					multi->stripes[i].length -=
						stripe_offset;
					stripe_offset = 0;
				}
				if (stripe_index == last_stripe)
					multi->stripes[i].length -=
						stripe_end_offset;
			} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
				u64 stripes;
				int j;
				int factor = map->num_stripes /
					     map->sub_stripes;
				u32 last_stripe = 0;

				div_u64_rem(stripe_nr_end - 1,
						factor, &last_stripe);
				last_stripe *= map->sub_stripes;

				for (j = 0; j < factor; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
							factor, &test);

					if (test ==
					    stripe_index / map->sub_stripes)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, factor);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i < map->sub_stripes) {
					multi->stripes[i].length -=
						stripe_offset;
					if (i == map->sub_stripes - 1)
						stripe_offset = 0;
				}
				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     map->sub_stripes - 1)) {
					multi->stripes[i].length -=
						stripe_end_offset;
				}
			} else
				multi->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			multi->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}
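/*
 * Illustrative sketch (not part of the original file): the heart of
 * __btrfs_map_block() is plain stripe arithmetic.  A userspace model for a
 * RAID0-style layout, ignoring mirrors and discards, shows how a logical
 * offset inside a chunk turns into a (device index, physical offset) pair:
 */
#if 0
struct example_mapping {
	int	device_index;	/* which stripe/device holds the byte */
	u64	physical;	/* absolute offset on that device */
};

static struct example_mapping example_map_raid0(u64 offset_in_chunk,
						u64 stripe_len,
						int num_stripes,
						const u64 *stripe_physical)
{
	struct example_mapping m;
	u64 stripe_nr = offset_in_chunk / stripe_len;	  /* stripes strided */
	u64 stripe_offset = offset_in_chunk % stripe_len; /* offset in stripe */

	m.device_index = stripe_nr % num_stripes;	  /* round-robin device */
	/* full stripes already placed on this device, plus the remainder */
	m.physical = stripe_physical[m.device_index] +
		     (stripe_nr / num_stripes) * stripe_len + stripe_offset;
	return m;
}
#endif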
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num);
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}
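/*
 * Illustrative sketch (not part of the original file): btrfs_rmap_block()
 * above runs the stripe arithmetic backwards, from a physical byte on one
 * device to the logical address of its stripe.  For a RAID0-style layout the
 * inverse of the mapping shown earlier is, in userspace form:
 */
#if 0
static u64 example_rmap_raid0(u64 chunk_start, u64 physical_in_stripe_area,
			      u64 stripe_len, int num_stripes,
			      int device_index)
{
	/* how many full stripes this device holds before the target byte */
	u64 stripe_nr = physical_in_stripe_area / stripe_len;

	/* interleave the devices back into one logical sequence */
	stripe_nr = stripe_nr * num_stripes + device_index;

	/* logical address of the stripe that contains the byte */
	return chunk_start + stripe_nr * stripe_len;
}
#endif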
static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
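/*
 * Illustrative sketch (not part of the original file): the completion path
 * above tolerates per-mirror failures as long as no more than max_errors
 * copies failed.  The counting pattern, reduced to a userspace model with
 * plain integers instead of atomics:
 */
#if 0
struct example_multi {
	int stripes_pending;	/* submitted sub-bios still in flight */
	int errors;		/* sub-bios that completed with an error */
	int max_errors;		/* how many failures the profile can absorb */
};

/* returns 1 when the whole multi-bio is done and sets *err for the caller */
static int example_stripe_done(struct example_multi *m, int failed, int *err)
{
	if (failed)
		m->errors++;
	if (--m->stripes_pending)
		return 0;		/* still waiting for other mirrors */
	*err = (m->errors > m->max_errors) ? -5 /* EIO */ : 0;
	return 1;
}
#endif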
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
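/*
 * Illustrative sketch (not part of the original file): the queueing above is
 * a plain head/tail singly linked FIFO guarded by a lock, plus a flag that
 * suppresses redundant worker wakeups.  A userspace model of the same append
 * step:
 */
#if 0
struct example_node {
	struct example_node *next;
};

struct example_fifo {
	struct example_node *head;
	struct example_node *tail;
};

/* append one node; returns 1 if the caller should kick the worker thread */
static int example_fifo_append(struct example_fifo *q, struct example_node *n,
			       int worker_already_running)
{
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;	/* link after the current tail */
	q->tail = n;
	if (!q->head)
		q->head = n;		/* list was empty */
	return worker_already_running ? 0 : 1;
}
#endif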
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	device->missing = 1;
	fs_devices->num_devices++;
	fs_devices->missing_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);

	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
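/*
 * Illustrative sketch (not part of the original file): the "already mapped?"
 * test in read_one_chunk() is a half-open interval check, i.e. whether the
 * chunk's logical start falls inside an existing mapping [start, start + len).
 * In isolation:
 */
#if 0
static int example_mapping_covers(u64 start, u64 len, u64 logical)
{
	/* true when logical lies inside [start, start + len) */
	return start <= logical && start + len > logical;
}
#endif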
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	return 0;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
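/*
 * Illustrative sketch (not part of the original file): the sys_chunk_array
 * walked above is a packed byte array of (disk key, chunk item) pairs, where
 * each chunk item's size depends on its stripe count.  A userspace model of
 * the same cursor advance, using hypothetical fixed-layout structs and sizes:
 */
#if 0
struct example_key   { unsigned char raw[17]; };	/* assumed key size */
struct example_chunk { unsigned int num_stripes; };	/* assumed header */

/* size of one chunk item, growing with its stripe count (sizes assumed) */
static unsigned long example_chunk_item_size(unsigned int num_stripes)
{
	return sizeof(struct example_chunk) + num_stripes * 32;
}

static void example_walk_packed_array(unsigned char *array,
				      unsigned long array_size)
{
	unsigned long cur = 0;

	while (cur < array_size) {
		struct example_chunk *chunk;

		/* the key always comes first ... */
		cur += sizeof(struct example_key);
		/* ... followed by a chunk item of variable length */
		chunk = (struct example_chunk *)(array + cur);
		cur += example_chunk_item_size(chunk->num_stripes);
	}
}
#endif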
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items. This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}