// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "extent_map.h"
#include "transaction.h"
#include "print-tree.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.devs_max	= 0,	/* 0 == as many as possible */
		.tolerated_failures = 1,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.tolerated_failures = 1,
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.tolerated_failures = 2,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.tolerated_failures = 3,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.tolerated_failures = 0,
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
	},
	[BTRFS_RAID_RAID0] = {
		.tolerated_failures = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
	},
	[BTRFS_RAID_SINGLE] = {
		.tolerated_failures = 0,
		.raid_name	= "single",
	},
	[BTRFS_RAID_RAID5] = {
		.tolerated_failures = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.tolerated_failures = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}
/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);
/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list!
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}
/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}
void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}
void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}
/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);

	return dev;
}
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}
static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}
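
/*
 * Open the block device at @device_path with the given mode, optionally flush
 * its page cache, set the btrfs block size and read the primary super block
 * into @bh. On success the caller owns *bdev and *bh and must release them
 * with blkdev_put() and brelse(); on failure both are reset to NULL.
 */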
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
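
/* Check whether @path is identical to the registered name of @device. */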
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}
/*
 *  Search and remove all stale (devices which are not mounted) devices.
 *  When both inputs are NULL, it will search and release all stale devices.
 *  path:	Optional. When provided will it release all unmounted devices
 *		matching this path only.
 *  skip_dev:	Optional. Will skip this device when searching for the stale
 *		devices.
 *  Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				     struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
			if (fs_devices->num_devices == 0)
				break;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
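
/*
 * Open a single device for use by a mounted filesystem: validate that the
 * super block on disk matches the devid/uuid we expect, record the generation
 * and the seeding/rotational state, and account the device in @fs_devices.
 */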
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &bh);
	if (ret)
		return ret;

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_brelse;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}
/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}
static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently device didn't
	 * observe it. Meaning our fsid will be different than theirs. We need
	 * to handle two subcases :
	 *  1 - The fs still continues to have different METADATA/FSID uuids.
	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
	 *  are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}
static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in an
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}


	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be a spurious
		 * and unwanted.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generation are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(device->fs_info,
			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
					disk_super->fsid, devid,
					rcu_str_deref(device->name), path);
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
				"device fsid %pU devid %llu moved old:%s new:%s",
				disk_super->fsid, devid,
				rcu_str_deref(device->name), path);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}
/*
 * After we have read the system tree and know devids belonging to
 * this filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
							&device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}
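
/* Sync and invalidate a writeable device's bdev before dropping our reference. */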
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}
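
/*
 * Drop a device from the per-fs accounting and release its block device,
 * leaving the btrfs_device itself allocated so a later mount can reuse it.
 */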
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}
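
/* Close all devices of @fs_devices once the last opener is gone. */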
static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
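
/*
 * Open every device in the list that we can; the device with the highest
 * generation becomes fs_devices->latest_bdev.
 */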
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can; ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;

	return 0;
}
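
/* list_sort() comparator: order devices by ascending devid. */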
static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
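
/*
 * Helpers for the scanning path: the super block is read through the page
 * cache of the block device (no set_blocksize allowed here), and the page is
 * dropped again with btrfs_release_disk_super().
 */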
static void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
				 struct page **page,
				 struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				    index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}
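
/*
 * Forget (unregister) all unmounted devices that match @path, or every stale
 * device when @path is empty.
 */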
int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}
/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	struct page *page;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
		device = ERR_PTR(-EINVAL);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}
/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	search_start = max_t(u64, search_start, SZ_1M);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(device, &search_start,
						    hole_size)) {
				if (key.offset >= search_start)
					hole_size = key.offset - search_start;
				else
					hole_size = 0;
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(device, &search_start, hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}
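
/* Insert a new dev extent item for @device covering [start, start + num_bytes). */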
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}
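
/*
 * Look up the highest devid in the chunk tree and store the next unused one
 * in @devid_ret.
 */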
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}
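
/* Delete the dev item of @device from the chunk tree in its own transaction. */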
static int btrfs_rm_dev_item(struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}
/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_array[i].mindev_error;

			if (ret)
				return ret;
		}
	}

	return 0;
}
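
/*
 * Find any device in @fs_devs other than @device that is present (has a bdev)
 * and not marked missing.
 */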
static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}
/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device, in the context
 * where this function called, there should be always be another device (or
 * this_dev) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
								device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
			(fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}
/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);

	num_devices = btrfs_num_devices(fs_info);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);

	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}

	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
		  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(device);
	if (ret)
		goto error_undo;

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(fs_devices, device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list. All that's left is to zero out the old
	 * supers and free the device.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		btrfs_scratch_superblocks(device->bdev, device->name->str);

	btrfs_close_bdev(device);
	synchronize_rcu();
	btrfs_free_device(device);

	if (cur_devices->open_devices == 0) {
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		close_fs_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

out:
	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	goto out;
}
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both the cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_info *fs_info = srcdev->fs_info;
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	btrfs_close_bdev(srcdev);
	synchronize_rcu();
	btrfs_free_device(srcdev);

	/* if this is no devs we rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target added to the sprout FS, so there will be no more
		 * device left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * The update_dev_time() with in btrfs_scratch_superblocks()
	 * may lead to a call to btrfs_show_devname() which will try
	 * to hold device_list_mutex. And here this device
	 * is already out of device list, so we don't have to hold
	 * the device_list_mutex lock.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);

	btrfs_close_bdev(tgtdev);
	synchronize_rcu();
	btrfs_free_device(tgtdev);
}
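
/*
 * Read the super block at @device_path and look the device up in the
 * registered list by its devid and uuid.
 */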
static struct btrfs_device *btrfs_find_device_by_path(
		struct btrfs_fs_info *fs_info, const char *device_path)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_device *device;

	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ERR_PTR(ret);
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->metadata_uuid, true);
	else
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->fsid, true);

	brelse(bh);
	if (!device)
		device = ERR_PTR(-ENOENT);
	blkdev_put(bdev, FMODE_READ);
	return device;
}
/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	struct btrfs_device *device;

	if (devid) {
		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
					   NULL, true);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	if (!device_path || !device_path[0])
		return ERR_PTR(-EINVAL);

	if (strcmp(device_path, "missing") == 0) {
		/* Find first missing device */
		list_for_each_entry(device, &fs_info->fs_devices->devices,
				    dev_list) {
			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				     &device->dev_state) && !device->bdev)
				return device;
		}
		return ERR_PTR(-ENOENT);
	}

	return btrfs_find_device_by_path(fs_info, device_path);
}
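
/*
 * Note on the devspec convention above (a summary, not new behaviour): a
 * non-zero devid selects the device by id and the path argument is ignored;
 * the literal path "missing" picks the first device that is still recorded
 * in the filesystem metadata but has no block device attached; any other
 * string is resolved by reading the superblock at that path. Callers such as
 * the device removal path are assumed to pass their devid/path pair through
 * unchanged.
 */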
2271 * does all the dirty work required for changing file system's UUID.
2273 static int btrfs_prepare_sprout(struct btrfs_fs_info
*fs_info
)
2275 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2276 struct btrfs_fs_devices
*old_devices
;
2277 struct btrfs_fs_devices
*seed_devices
;
2278 struct btrfs_super_block
*disk_super
= fs_info
->super_copy
;
2279 struct btrfs_device
*device
;
2282 lockdep_assert_held(&uuid_mutex
);
2283 if (!fs_devices
->seeding
)
2286 seed_devices
= alloc_fs_devices(NULL
, NULL
);
2287 if (IS_ERR(seed_devices
))
2288 return PTR_ERR(seed_devices
);
2290 old_devices
= clone_fs_devices(fs_devices
);
2291 if (IS_ERR(old_devices
)) {
2292 kfree(seed_devices
);
2293 return PTR_ERR(old_devices
);
2296 list_add(&old_devices
->fs_list
, &fs_uuids
);
2298 memcpy(seed_devices
, fs_devices
, sizeof(*seed_devices
));
2299 seed_devices
->opened
= 1;
2300 INIT_LIST_HEAD(&seed_devices
->devices
);
2301 INIT_LIST_HEAD(&seed_devices
->alloc_list
);
2302 mutex_init(&seed_devices
->device_list_mutex
);
2304 mutex_lock(&fs_devices
->device_list_mutex
);
2305 list_splice_init_rcu(&fs_devices
->devices
, &seed_devices
->devices
,
2307 list_for_each_entry(device
, &seed_devices
->devices
, dev_list
)
2308 device
->fs_devices
= seed_devices
;
2310 mutex_lock(&fs_info
->chunk_mutex
);
2311 list_splice_init(&fs_devices
->alloc_list
, &seed_devices
->alloc_list
);
2312 mutex_unlock(&fs_info
->chunk_mutex
);
2314 fs_devices
->seeding
= false;
2315 fs_devices
->num_devices
= 0;
2316 fs_devices
->open_devices
= 0;
2317 fs_devices
->missing_devices
= 0;
2318 fs_devices
->rotating
= false;
2319 fs_devices
->seed
= seed_devices
;
2321 generate_random_uuid(fs_devices
->fsid
);
2322 memcpy(fs_devices
->metadata_uuid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
);
2323 memcpy(disk_super
->fsid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
);
2324 mutex_unlock(&fs_devices
->device_list_mutex
);
2326 super_flags
= btrfs_super_flags(disk_super
) &
2327 ~BTRFS_SUPER_FLAG_SEEDING
;
2328 btrfs_set_super_flags(disk_super
, super_flags
);
2334 * Store the expected generation for seed devices in device items.
2336 static int btrfs_finish_sprout(struct btrfs_trans_handle
*trans
)
2338 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
2339 struct btrfs_root
*root
= fs_info
->chunk_root
;
2340 struct btrfs_path
*path
;
2341 struct extent_buffer
*leaf
;
2342 struct btrfs_dev_item
*dev_item
;
2343 struct btrfs_device
*device
;
2344 struct btrfs_key key
;
2345 u8 fs_uuid
[BTRFS_FSID_SIZE
];
2346 u8 dev_uuid
[BTRFS_UUID_SIZE
];
2350 path
= btrfs_alloc_path();
2354 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2356 key
.type
= BTRFS_DEV_ITEM_KEY
;
2359 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
2363 leaf
= path
->nodes
[0];
2365 if (path
->slots
[0] >= btrfs_header_nritems(leaf
)) {
2366 ret
= btrfs_next_leaf(root
, path
);
2371 leaf
= path
->nodes
[0];
2372 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2373 btrfs_release_path(path
);
2377 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2378 if (key
.objectid
!= BTRFS_DEV_ITEMS_OBJECTID
||
2379 key
.type
!= BTRFS_DEV_ITEM_KEY
)
2382 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
2383 struct btrfs_dev_item
);
2384 devid
= btrfs_device_id(leaf
, dev_item
);
2385 read_extent_buffer(leaf
, dev_uuid
, btrfs_device_uuid(dev_item
),
2387 read_extent_buffer(leaf
, fs_uuid
, btrfs_device_fsid(dev_item
),
2389 device
= btrfs_find_device(fs_info
->fs_devices
, devid
, dev_uuid
,
2391 BUG_ON(!device
); /* Logic error */
2393 if (device
->fs_devices
->seeding
) {
2394 btrfs_set_device_generation(leaf
, dev_item
,
2395 device
->generation
);
2396 btrfs_mark_buffer_dirty(leaf
);
2404 btrfs_free_path(path
);
2408 int btrfs_init_new_device(struct btrfs_fs_info
*fs_info
, const char *device_path
)
2410 struct btrfs_root
*root
= fs_info
->dev_root
;
2411 struct request_queue
*q
;
2412 struct btrfs_trans_handle
*trans
;
2413 struct btrfs_device
*device
;
2414 struct block_device
*bdev
;
2415 struct super_block
*sb
= fs_info
->sb
;
2416 struct rcu_string
*name
;
2417 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2418 u64 orig_super_total_bytes
;
2419 u64 orig_super_num_devices
;
2420 int seeding_dev
= 0;
2422 bool unlocked
= false;
2424 if (sb_rdonly(sb
) && !fs_devices
->seeding
)
2427 bdev
= blkdev_get_by_path(device_path
, FMODE_WRITE
| FMODE_EXCL
,
2428 fs_info
->bdev_holder
);
2430 return PTR_ERR(bdev
);
2432 if (fs_devices
->seeding
) {
2434 down_write(&sb
->s_umount
);
2435 mutex_lock(&uuid_mutex
);
2438 filemap_write_and_wait(bdev
->bd_inode
->i_mapping
);
2440 mutex_lock(&fs_devices
->device_list_mutex
);
2441 list_for_each_entry(device
, &fs_devices
->devices
, dev_list
) {
2442 if (device
->bdev
== bdev
) {
2445 &fs_devices
->device_list_mutex
);
2449 mutex_unlock(&fs_devices
->device_list_mutex
);
2451 device
= btrfs_alloc_device(fs_info
, NULL
, NULL
);
2452 if (IS_ERR(device
)) {
2453 /* we can safely leave the fs_devices entry around */
2454 ret
= PTR_ERR(device
);
2458 name
= rcu_string_strdup(device_path
, GFP_KERNEL
);
2461 goto error_free_device
;
2463 rcu_assign_pointer(device
->name
, name
);
2465 trans
= btrfs_start_transaction(root
, 0);
2466 if (IS_ERR(trans
)) {
2467 ret
= PTR_ERR(trans
);
2468 goto error_free_device
;
2471 q
= bdev_get_queue(bdev
);
2472 set_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
);
2473 device
->generation
= trans
->transid
;
2474 device
->io_width
= fs_info
->sectorsize
;
2475 device
->io_align
= fs_info
->sectorsize
;
2476 device
->sector_size
= fs_info
->sectorsize
;
2477 device
->total_bytes
= round_down(i_size_read(bdev
->bd_inode
),
2478 fs_info
->sectorsize
);
2479 device
->disk_total_bytes
= device
->total_bytes
;
2480 device
->commit_total_bytes
= device
->total_bytes
;
2481 device
->fs_info
= fs_info
;
2482 device
->bdev
= bdev
;
2483 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA
, &device
->dev_state
);
2484 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
);
2485 device
->mode
= FMODE_EXCL
;
2486 device
->dev_stats_valid
= 1;
2487 set_blocksize(device
->bdev
, BTRFS_BDEV_BLOCKSIZE
);
2490 sb
->s_flags
&= ~SB_RDONLY
;
2491 ret
= btrfs_prepare_sprout(fs_info
);
2493 btrfs_abort_transaction(trans
, ret
);
2498 device
->fs_devices
= fs_devices
;
2500 mutex_lock(&fs_devices
->device_list_mutex
);
2501 mutex_lock(&fs_info
->chunk_mutex
);
2502 list_add_rcu(&device
->dev_list
, &fs_devices
->devices
);
2503 list_add(&device
->dev_alloc_list
, &fs_devices
->alloc_list
);
2504 fs_devices
->num_devices
++;
2505 fs_devices
->open_devices
++;
2506 fs_devices
->rw_devices
++;
2507 fs_devices
->total_devices
++;
2508 fs_devices
->total_rw_bytes
+= device
->total_bytes
;
2510 atomic64_add(device
->total_bytes
, &fs_info
->free_chunk_space
);
2512 if (!blk_queue_nonrot(q
))
2513 fs_devices
->rotating
= true;
2515 orig_super_total_bytes
= btrfs_super_total_bytes(fs_info
->super_copy
);
2516 btrfs_set_super_total_bytes(fs_info
->super_copy
,
2517 round_down(orig_super_total_bytes
+ device
->total_bytes
,
2518 fs_info
->sectorsize
));
2520 orig_super_num_devices
= btrfs_super_num_devices(fs_info
->super_copy
);
2521 btrfs_set_super_num_devices(fs_info
->super_copy
,
2522 orig_super_num_devices
+ 1);
2524 /* add sysfs device entry */
2525 btrfs_sysfs_add_device_link(fs_devices
, device
);
2528 * we've got more storage, clear any full flags on the space
2531 btrfs_clear_space_info_full(fs_info
);
2533 mutex_unlock(&fs_info
->chunk_mutex
);
2534 mutex_unlock(&fs_devices
->device_list_mutex
);
2537 mutex_lock(&fs_info
->chunk_mutex
);
2538 ret
= init_first_rw_device(trans
);
2539 mutex_unlock(&fs_info
->chunk_mutex
);
2541 btrfs_abort_transaction(trans
, ret
);
2546 ret
= btrfs_add_dev_item(trans
, device
);
2548 btrfs_abort_transaction(trans
, ret
);
2553 ret
= btrfs_finish_sprout(trans
);
2555 btrfs_abort_transaction(trans
, ret
);
2559 btrfs_sysfs_update_sprout_fsid(fs_devices
,
2560 fs_info
->fs_devices
->fsid
);
2563 ret
= btrfs_commit_transaction(trans
);
2566 mutex_unlock(&uuid_mutex
);
2567 up_write(&sb
->s_umount
);
2570 if (ret
) /* transaction commit */
2573 ret
= btrfs_relocate_sys_chunks(fs_info
);
2575 btrfs_handle_fs_error(fs_info
, ret
,
2576 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2577 trans
= btrfs_attach_transaction(root
);
2578 if (IS_ERR(trans
)) {
2579 if (PTR_ERR(trans
) == -ENOENT
)
2581 ret
= PTR_ERR(trans
);
2585 ret
= btrfs_commit_transaction(trans
);
2588 /* Update ctime/mtime for libblkid */
2589 update_dev_time(device_path
);
2593 btrfs_sysfs_rm_device_link(fs_devices
, device
);
2594 mutex_lock(&fs_info
->fs_devices
->device_list_mutex
);
2595 mutex_lock(&fs_info
->chunk_mutex
);
2596 list_del_rcu(&device
->dev_list
);
2597 list_del(&device
->dev_alloc_list
);
2598 fs_info
->fs_devices
->num_devices
--;
2599 fs_info
->fs_devices
->open_devices
--;
2600 fs_info
->fs_devices
->rw_devices
--;
2601 fs_info
->fs_devices
->total_devices
--;
2602 fs_info
->fs_devices
->total_rw_bytes
-= device
->total_bytes
;
2603 atomic64_sub(device
->total_bytes
, &fs_info
->free_chunk_space
);
2604 btrfs_set_super_total_bytes(fs_info
->super_copy
,
2605 orig_super_total_bytes
);
2606 btrfs_set_super_num_devices(fs_info
->super_copy
,
2607 orig_super_num_devices
);
2608 mutex_unlock(&fs_info
->chunk_mutex
);
2609 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
2612 sb
->s_flags
|= SB_RDONLY
;
2614 btrfs_end_transaction(trans
);
2616 btrfs_free_device(device
);
2618 blkdev_put(bdev
, FMODE_EXCL
);
2619 if (seeding_dev
&& !unlocked
) {
2620 mutex_unlock(&uuid_mutex
);
2621 up_write(&sb
->s_umount
);
2626 static noinline
int btrfs_update_device(struct btrfs_trans_handle
*trans
,
2627 struct btrfs_device
*device
)
2630 struct btrfs_path
*path
;
2631 struct btrfs_root
*root
= device
->fs_info
->chunk_root
;
2632 struct btrfs_dev_item
*dev_item
;
2633 struct extent_buffer
*leaf
;
2634 struct btrfs_key key
;
2636 path
= btrfs_alloc_path();
2640 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2641 key
.type
= BTRFS_DEV_ITEM_KEY
;
2642 key
.offset
= device
->devid
;
2644 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
2653 leaf
= path
->nodes
[0];
2654 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_dev_item
);
2656 btrfs_set_device_id(leaf
, dev_item
, device
->devid
);
2657 btrfs_set_device_type(leaf
, dev_item
, device
->type
);
2658 btrfs_set_device_io_align(leaf
, dev_item
, device
->io_align
);
2659 btrfs_set_device_io_width(leaf
, dev_item
, device
->io_width
);
2660 btrfs_set_device_sector_size(leaf
, dev_item
, device
->sector_size
);
2661 btrfs_set_device_total_bytes(leaf
, dev_item
,
2662 btrfs_device_get_disk_total_bytes(device
));
2663 btrfs_set_device_bytes_used(leaf
, dev_item
,
2664 btrfs_device_get_bytes_used(device
));
2665 btrfs_mark_buffer_dirty(leaf
);
2668 btrfs_free_path(path
);
2672 int btrfs_grow_device(struct btrfs_trans_handle
*trans
,
2673 struct btrfs_device
*device
, u64 new_size
)
2675 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
2676 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
2680 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
))
2683 new_size
= round_down(new_size
, fs_info
->sectorsize
);
2685 mutex_lock(&fs_info
->chunk_mutex
);
2686 old_total
= btrfs_super_total_bytes(super_copy
);
2687 diff
= round_down(new_size
- device
->total_bytes
, fs_info
->sectorsize
);
2689 if (new_size
<= device
->total_bytes
||
2690 test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
)) {
2691 mutex_unlock(&fs_info
->chunk_mutex
);
2695 btrfs_set_super_total_bytes(super_copy
,
2696 round_down(old_total
+ diff
, fs_info
->sectorsize
));
2697 device
->fs_devices
->total_rw_bytes
+= diff
;
2699 btrfs_device_set_total_bytes(device
, new_size
);
2700 btrfs_device_set_disk_total_bytes(device
, new_size
);
2701 btrfs_clear_space_info_full(device
->fs_info
);
2702 if (list_empty(&device
->post_commit_list
))
2703 list_add_tail(&device
->post_commit_list
,
2704 &trans
->transaction
->dev_update_list
);
2705 mutex_unlock(&fs_info
->chunk_mutex
);
2707 return btrfs_update_device(trans
, device
);
2710 static int btrfs_free_chunk(struct btrfs_trans_handle
*trans
, u64 chunk_offset
)
2712 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
2713 struct btrfs_root
*root
= fs_info
->chunk_root
;
2715 struct btrfs_path
*path
;
2716 struct btrfs_key key
;
2718 path
= btrfs_alloc_path();
2722 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
2723 key
.offset
= chunk_offset
;
2724 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
2726 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
2729 else if (ret
> 0) { /* Logic error or corruption */
2730 btrfs_handle_fs_error(fs_info
, -ENOENT
,
2731 "Failed lookup while freeing chunk.");
2736 ret
= btrfs_del_item(trans
, root
, path
);
2738 btrfs_handle_fs_error(fs_info
, ret
,
2739 "Failed to delete chunk item.");
2741 btrfs_free_path(path
);
2745 static int btrfs_del_sys_chunk(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
)
2747 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
2748 struct btrfs_disk_key
*disk_key
;
2749 struct btrfs_chunk
*chunk
;
2756 struct btrfs_key key
;
2758 mutex_lock(&fs_info
->chunk_mutex
);
2759 array_size
= btrfs_super_sys_array_size(super_copy
);
2761 ptr
= super_copy
->sys_chunk_array
;
2764 while (cur
< array_size
) {
2765 disk_key
= (struct btrfs_disk_key
*)ptr
;
2766 btrfs_disk_key_to_cpu(&key
, disk_key
);
2768 len
= sizeof(*disk_key
);
2770 if (key
.type
== BTRFS_CHUNK_ITEM_KEY
) {
2771 chunk
= (struct btrfs_chunk
*)(ptr
+ len
);
2772 num_stripes
= btrfs_stack_chunk_num_stripes(chunk
);
2773 len
+= btrfs_chunk_item_size(num_stripes
);
2778 if (key
.objectid
== BTRFS_FIRST_CHUNK_TREE_OBJECTID
&&
2779 key
.offset
== chunk_offset
) {
2780 memmove(ptr
, ptr
+ len
, array_size
- (cur
+ len
));
2782 btrfs_set_super_sys_array_size(super_copy
, array_size
);
2788 mutex_unlock(&fs_info
->chunk_mutex
);
/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 *
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
			   logical, length, em->start, em->start + em->len);
		free_extent_map(em);
		return ERR_PTR(-EINVAL);
	}

	/* callers are responsible for dropping em's ref. */
	return em;
}
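
/*
 * A minimal sketch of the expected call pattern (the caller owns the
 * reference returned above, as the comment says):
 *
 *	em = btrfs_get_chunk_map(fs_info, logical, length);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	... walk map->stripes[0 .. map->num_stripes) ...
 *	free_extent_map(em);
 *
 * btrfs_remove_chunk() below follows exactly this shape.
 */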
2828 int btrfs_remove_chunk(struct btrfs_trans_handle
*trans
, u64 chunk_offset
)
2830 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
2831 struct extent_map
*em
;
2832 struct map_lookup
*map
;
2833 u64 dev_extent_len
= 0;
2835 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2837 em
= btrfs_get_chunk_map(fs_info
, chunk_offset
, 1);
2840 * This is a logic error, but we don't want to just rely on the
2841 * user having built with ASSERT enabled, so if ASSERT doesn't
2842 * do anything we still error out.
2847 map
= em
->map_lookup
;
2848 mutex_lock(&fs_info
->chunk_mutex
);
2849 check_system_chunk(trans
, map
->type
);
2850 mutex_unlock(&fs_info
->chunk_mutex
);
2853 * Take the device list mutex to prevent races with the final phase of
2854 * a device replace operation that replaces the device object associated
2855 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2857 mutex_lock(&fs_devices
->device_list_mutex
);
2858 for (i
= 0; i
< map
->num_stripes
; i
++) {
2859 struct btrfs_device
*device
= map
->stripes
[i
].dev
;
2860 ret
= btrfs_free_dev_extent(trans
, device
,
2861 map
->stripes
[i
].physical
,
2864 mutex_unlock(&fs_devices
->device_list_mutex
);
2865 btrfs_abort_transaction(trans
, ret
);
2869 if (device
->bytes_used
> 0) {
2870 mutex_lock(&fs_info
->chunk_mutex
);
2871 btrfs_device_set_bytes_used(device
,
2872 device
->bytes_used
- dev_extent_len
);
2873 atomic64_add(dev_extent_len
, &fs_info
->free_chunk_space
);
2874 btrfs_clear_space_info_full(fs_info
);
2875 mutex_unlock(&fs_info
->chunk_mutex
);
2878 ret
= btrfs_update_device(trans
, device
);
2880 mutex_unlock(&fs_devices
->device_list_mutex
);
2881 btrfs_abort_transaction(trans
, ret
);
2885 mutex_unlock(&fs_devices
->device_list_mutex
);
2887 ret
= btrfs_free_chunk(trans
, chunk_offset
);
2889 btrfs_abort_transaction(trans
, ret
);
2893 trace_btrfs_chunk_free(fs_info
, map
, chunk_offset
, em
->len
);
2895 if (map
->type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
2896 ret
= btrfs_del_sys_chunk(fs_info
, chunk_offset
);
2898 btrfs_abort_transaction(trans
, ret
);
2903 ret
= btrfs_remove_block_group(trans
, chunk_offset
, em
);
2905 btrfs_abort_transaction(trans
, ret
);
2911 free_extent_map(em
);
static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_group *block_group;
	int ret;

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(fs_info);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	btrfs_scrub_continue(fs_info);
	if (ret)
		return ret;

	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
	if (!block_group)
		return -ENOENT;
	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
	btrfs_put_block_group(block_group);

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, chunk_offset);
	btrfs_end_transaction(trans);
	return ret;
}
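
/*
 * Every caller in this file (the sys-chunk relocation loop, the balance loop
 * and btrfs_shrink_device()) follows the pattern demanded by the lockdep
 * assertion above: take fs_info->delete_unused_bgs_mutex, search the chunk or
 * device tree, release the search path, call btrfs_relocate_chunk(), and only
 * then drop the mutex.
 */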
2966 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info
*fs_info
)
2968 struct btrfs_root
*chunk_root
= fs_info
->chunk_root
;
2969 struct btrfs_path
*path
;
2970 struct extent_buffer
*leaf
;
2971 struct btrfs_chunk
*chunk
;
2972 struct btrfs_key key
;
2973 struct btrfs_key found_key
;
2975 bool retried
= false;
2979 path
= btrfs_alloc_path();
2984 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
2985 key
.offset
= (u64
)-1;
2986 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
2989 mutex_lock(&fs_info
->delete_unused_bgs_mutex
);
2990 ret
= btrfs_search_slot(NULL
, chunk_root
, &key
, path
, 0, 0);
2992 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
2995 BUG_ON(ret
== 0); /* Corruption */
2997 ret
= btrfs_previous_item(chunk_root
, path
, key
.objectid
,
3000 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3006 leaf
= path
->nodes
[0];
3007 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
3009 chunk
= btrfs_item_ptr(leaf
, path
->slots
[0],
3010 struct btrfs_chunk
);
3011 chunk_type
= btrfs_chunk_type(leaf
, chunk
);
3012 btrfs_release_path(path
);
3014 if (chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
3015 ret
= btrfs_relocate_chunk(fs_info
, found_key
.offset
);
3021 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3023 if (found_key
.offset
== 0)
3025 key
.offset
= found_key
.offset
- 1;
3028 if (failed
&& !retried
) {
3032 } else if (WARN_ON(failed
&& retried
)) {
3036 btrfs_free_path(path
);
/*
 * return 1 : allocate a data chunk successfully,
 * return <0: errors during allocating a data chunk,
 * return 0 : no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
	struct btrfs_block_group *cache;
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	spin_lock(&fs_info->data_sinfo->lock);
	bytes_used = fs_info->data_sinfo->bytes_used;
	spin_unlock(&fs_info->data_sinfo->lock);

	if (!bytes_used) {
		struct btrfs_trans_handle *trans;
		int ret;

		trans = btrfs_join_transaction(fs_info->tree_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
		btrfs_end_transaction(trans);
		if (ret < 0)
			return ret;
		return 1;
	}

	return 0;
}
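
/*
 * Both the balance loop and btrfs_shrink_device() call this helper right
 * before btrfs_relocate_chunk(): if the chunk about to be relocated is the
 * only allocated data chunk, an empty replacement is created first so the
 * filesystem never ends up without a data block group (and with it, the
 * data raid profile).
 */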
3082 static int insert_balance_item(struct btrfs_fs_info
*fs_info
,
3083 struct btrfs_balance_control
*bctl
)
3085 struct btrfs_root
*root
= fs_info
->tree_root
;
3086 struct btrfs_trans_handle
*trans
;
3087 struct btrfs_balance_item
*item
;
3088 struct btrfs_disk_balance_args disk_bargs
;
3089 struct btrfs_path
*path
;
3090 struct extent_buffer
*leaf
;
3091 struct btrfs_key key
;
3094 path
= btrfs_alloc_path();
3098 trans
= btrfs_start_transaction(root
, 0);
3099 if (IS_ERR(trans
)) {
3100 btrfs_free_path(path
);
3101 return PTR_ERR(trans
);
3104 key
.objectid
= BTRFS_BALANCE_OBJECTID
;
3105 key
.type
= BTRFS_TEMPORARY_ITEM_KEY
;
3108 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
3113 leaf
= path
->nodes
[0];
3114 item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_balance_item
);
3116 memzero_extent_buffer(leaf
, (unsigned long)item
, sizeof(*item
));
3118 btrfs_cpu_balance_args_to_disk(&disk_bargs
, &bctl
->data
);
3119 btrfs_set_balance_data(leaf
, item
, &disk_bargs
);
3120 btrfs_cpu_balance_args_to_disk(&disk_bargs
, &bctl
->meta
);
3121 btrfs_set_balance_meta(leaf
, item
, &disk_bargs
);
3122 btrfs_cpu_balance_args_to_disk(&disk_bargs
, &bctl
->sys
);
3123 btrfs_set_balance_sys(leaf
, item
, &disk_bargs
);
3125 btrfs_set_balance_flags(leaf
, item
, bctl
->flags
);
3127 btrfs_mark_buffer_dirty(leaf
);
3129 btrfs_free_path(path
);
3130 err
= btrfs_commit_transaction(trans
);
3136 static int del_balance_item(struct btrfs_fs_info
*fs_info
)
3138 struct btrfs_root
*root
= fs_info
->tree_root
;
3139 struct btrfs_trans_handle
*trans
;
3140 struct btrfs_path
*path
;
3141 struct btrfs_key key
;
3144 path
= btrfs_alloc_path();
3148 trans
= btrfs_start_transaction(root
, 0);
3149 if (IS_ERR(trans
)) {
3150 btrfs_free_path(path
);
3151 return PTR_ERR(trans
);
3154 key
.objectid
= BTRFS_BALANCE_OBJECTID
;
3155 key
.type
= BTRFS_TEMPORARY_ITEM_KEY
;
3158 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
3166 ret
= btrfs_del_item(trans
, root
, path
);
3168 btrfs_free_path(path
);
3169 err
= btrfs_commit_transaction(trans
);
3176 * This is a heuristic used to reduce the number of chunks balanced on
3177 * resume after balance was interrupted.
3179 static void update_balance_args(struct btrfs_balance_control
*bctl
)
3182 * Turn on soft mode for chunk types that were being converted.
3184 if (bctl
->data
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)
3185 bctl
->data
.flags
|= BTRFS_BALANCE_ARGS_SOFT
;
3186 if (bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)
3187 bctl
->sys
.flags
|= BTRFS_BALANCE_ARGS_SOFT
;
3188 if (bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)
3189 bctl
->meta
.flags
|= BTRFS_BALANCE_ARGS_SOFT
;
3192 * Turn on usage filter if is not already used. The idea is
3193 * that chunks that we have already balanced should be
3194 * reasonably full. Don't do it for chunks that are being
3195 * converted - that will keep us from relocating unconverted
3196 * (albeit full) chunks.
3198 if (!(bctl
->data
.flags
& BTRFS_BALANCE_ARGS_USAGE
) &&
3199 !(bctl
->data
.flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
) &&
3200 !(bctl
->data
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)) {
3201 bctl
->data
.flags
|= BTRFS_BALANCE_ARGS_USAGE
;
3202 bctl
->data
.usage
= 90;
3204 if (!(bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_USAGE
) &&
3205 !(bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
) &&
3206 !(bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)) {
3207 bctl
->sys
.flags
|= BTRFS_BALANCE_ARGS_USAGE
;
3208 bctl
->sys
.usage
= 90;
3210 if (!(bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_USAGE
) &&
3211 !(bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
) &&
3212 !(bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)) {
3213 bctl
->meta
.flags
|= BTRFS_BALANCE_ARGS_USAGE
;
3214 bctl
->meta
.usage
= 90;
3219 * Clear the balance status in fs_info and delete the balance item from disk.
3221 static void reset_balance_state(struct btrfs_fs_info
*fs_info
)
3223 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
3226 BUG_ON(!fs_info
->balance_ctl
);
3228 spin_lock(&fs_info
->balance_lock
);
3229 fs_info
->balance_ctl
= NULL
;
3230 spin_unlock(&fs_info
->balance_lock
);
3233 ret
= del_balance_item(fs_info
);
3235 btrfs_handle_fs_error(fs_info
, ret
, NULL
);
/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}
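
/*
 * chunk_to_extended() folds a profile-less (single) chunk into the in-memory
 * BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, so the test above can match an explicit
 * "single" in the requested profile mask just like the real RAID/DUP bits.
 */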
3254 static int chunk_usage_range_filter(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
,
3255 struct btrfs_balance_args
*bargs
)
3257 struct btrfs_block_group
*cache
;
3259 u64 user_thresh_min
;
3260 u64 user_thresh_max
;
3263 cache
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3264 chunk_used
= cache
->used
;
3266 if (bargs
->usage_min
== 0)
3267 user_thresh_min
= 0;
3269 user_thresh_min
= div_factor_fine(cache
->length
,
3272 if (bargs
->usage_max
== 0)
3273 user_thresh_max
= 1;
3274 else if (bargs
->usage_max
> 100)
3275 user_thresh_max
= cache
->length
;
3277 user_thresh_max
= div_factor_fine(cache
->length
,
3280 if (user_thresh_min
<= chunk_used
&& chunk_used
< user_thresh_max
)
3283 btrfs_put_block_group(cache
);
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
			      u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = cache->used;

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->length;
	else
		user_thresh = div_factor_fine(cache->length, bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
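
/*
 * For illustration: "btrfs balance start -dusage=50" sets bargs->usage to 50
 * for data chunks. A 1GiB data chunk that is 200MiB used has chunk_used below
 * user_thresh (512MiB), so the filter returns 0 and the chunk gets balanced;
 * a chunk that is 80% full returns 1 and is skipped.
 */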
3311 static int chunk_devid_filter(struct extent_buffer
*leaf
,
3312 struct btrfs_chunk
*chunk
,
3313 struct btrfs_balance_args
*bargs
)
3315 struct btrfs_stripe
*stripe
;
3316 int num_stripes
= btrfs_chunk_num_stripes(leaf
, chunk
);
3319 for (i
= 0; i
< num_stripes
; i
++) {
3320 stripe
= btrfs_stripe_nr(chunk
, i
);
3321 if (btrfs_stripe_devid(leaf
, stripe
) == bargs
->devid
)
static u64 calc_data_stripes(u64 type, int num_stripes)
{
	const int index = btrfs_bg_flags_to_raid_index(type);
	const int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;

	if (nparity)
		return num_stripes - nparity;
	else
		return num_stripes / ncopies;
}
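
/*
 * Worked examples: RAID5 has nparity == 1, so a 4-stripe chunk carries data
 * on 3 stripes; RAID6 (nparity == 2) with 6 stripes yields 4; RAID10 has no
 * parity and ncopies == 2, so 8 stripes give 4 data stripes.
 */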
3340 /* [pstart, pend) */
3341 static int chunk_drange_filter(struct extent_buffer
*leaf
,
3342 struct btrfs_chunk
*chunk
,
3343 struct btrfs_balance_args
*bargs
)
3345 struct btrfs_stripe
*stripe
;
3346 int num_stripes
= btrfs_chunk_num_stripes(leaf
, chunk
);
3353 if (!(bargs
->flags
& BTRFS_BALANCE_ARGS_DEVID
))
3356 type
= btrfs_chunk_type(leaf
, chunk
);
3357 factor
= calc_data_stripes(type
, num_stripes
);
3359 for (i
= 0; i
< num_stripes
; i
++) {
3360 stripe
= btrfs_stripe_nr(chunk
, i
);
3361 if (btrfs_stripe_devid(leaf
, stripe
) != bargs
->devid
)
3364 stripe_offset
= btrfs_stripe_offset(leaf
, stripe
);
3365 stripe_length
= btrfs_chunk_length(leaf
, chunk
);
3366 stripe_length
= div_u64(stripe_length
, factor
);
3368 if (stripe_offset
< bargs
->pend
&&
3369 stripe_offset
+ stripe_length
> bargs
->pstart
)
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_stripes_range_filter(struct extent_buffer *leaf,
				      struct btrfs_chunk *chunk,
				      struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes &&
	    num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}
static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}
3418 static int should_balance_chunk(struct extent_buffer
*leaf
,
3419 struct btrfs_chunk
*chunk
, u64 chunk_offset
)
3421 struct btrfs_fs_info
*fs_info
= leaf
->fs_info
;
3422 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
3423 struct btrfs_balance_args
*bargs
= NULL
;
3424 u64 chunk_type
= btrfs_chunk_type(leaf
, chunk
);
3427 if (!((chunk_type
& BTRFS_BLOCK_GROUP_TYPE_MASK
) &
3428 (bctl
->flags
& BTRFS_BALANCE_TYPE_MASK
))) {
3432 if (chunk_type
& BTRFS_BLOCK_GROUP_DATA
)
3433 bargs
= &bctl
->data
;
3434 else if (chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
)
3436 else if (chunk_type
& BTRFS_BLOCK_GROUP_METADATA
)
3437 bargs
= &bctl
->meta
;
3439 /* profiles filter */
3440 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_PROFILES
) &&
3441 chunk_profiles_filter(chunk_type
, bargs
)) {
3446 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_USAGE
) &&
3447 chunk_usage_filter(fs_info
, chunk_offset
, bargs
)) {
3449 } else if ((bargs
->flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
) &&
3450 chunk_usage_range_filter(fs_info
, chunk_offset
, bargs
)) {
3455 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_DEVID
) &&
3456 chunk_devid_filter(leaf
, chunk
, bargs
)) {
3460 /* drange filter, makes sense only with devid filter */
3461 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_DRANGE
) &&
3462 chunk_drange_filter(leaf
, chunk
, bargs
)) {
3467 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_VRANGE
) &&
3468 chunk_vrange_filter(leaf
, chunk
, chunk_offset
, bargs
)) {
3472 /* stripes filter */
3473 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_STRIPES_RANGE
) &&
3474 chunk_stripes_range_filter(leaf
, chunk
, bargs
)) {
3478 /* soft profile changing mode */
3479 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_SOFT
) &&
3480 chunk_soft_convert_filter(chunk_type
, bargs
)) {
3485 * limited by count, must be the last filter
3487 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_LIMIT
)) {
3488 if (bargs
->limit
== 0)
3492 } else if ((bargs
->flags
& BTRFS_BALANCE_ARGS_LIMIT_RANGE
)) {
3494 * Same logic as the 'limit' filter; the minimum cannot be
3495 * determined here because we do not have the global information
3496 * about the count of all chunks that satisfy the filters.
3498 if (bargs
->limit_max
== 0)
3507 static int __btrfs_balance(struct btrfs_fs_info
*fs_info
)
3509 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
3510 struct btrfs_root
*chunk_root
= fs_info
->chunk_root
;
3512 struct btrfs_chunk
*chunk
;
3513 struct btrfs_path
*path
= NULL
;
3514 struct btrfs_key key
;
3515 struct btrfs_key found_key
;
3516 struct extent_buffer
*leaf
;
3519 int enospc_errors
= 0;
3520 bool counting
= true;
3521 /* The single value limit and min/max limits use the same bytes in the */
3522 u64 limit_data
= bctl
->data
.limit
;
3523 u64 limit_meta
= bctl
->meta
.limit
;
3524 u64 limit_sys
= bctl
->sys
.limit
;
3528 int chunk_reserved
= 0;
3530 path
= btrfs_alloc_path();
3536 /* zero out stat counters */
3537 spin_lock(&fs_info
->balance_lock
);
3538 memset(&bctl
->stat
, 0, sizeof(bctl
->stat
));
3539 spin_unlock(&fs_info
->balance_lock
);
3543 * The single value limit and min/max limits use the same bytes
3546 bctl
->data
.limit
= limit_data
;
3547 bctl
->meta
.limit
= limit_meta
;
3548 bctl
->sys
.limit
= limit_sys
;
3550 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
3551 key
.offset
= (u64
)-1;
3552 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
3555 if ((!counting
&& atomic_read(&fs_info
->balance_pause_req
)) ||
3556 atomic_read(&fs_info
->balance_cancel_req
)) {
3561 mutex_lock(&fs_info
->delete_unused_bgs_mutex
);
3562 ret
= btrfs_search_slot(NULL
, chunk_root
, &key
, path
, 0, 0);
3564 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3569 * this shouldn't happen, it means the last relocate
3573 BUG(); /* FIXME break ? */
3575 ret
= btrfs_previous_item(chunk_root
, path
, 0,
3576 BTRFS_CHUNK_ITEM_KEY
);
3578 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3583 leaf
= path
->nodes
[0];
3584 slot
= path
->slots
[0];
3585 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
3587 if (found_key
.objectid
!= key
.objectid
) {
3588 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3592 chunk
= btrfs_item_ptr(leaf
, slot
, struct btrfs_chunk
);
3593 chunk_type
= btrfs_chunk_type(leaf
, chunk
);
3596 spin_lock(&fs_info
->balance_lock
);
3597 bctl
->stat
.considered
++;
3598 spin_unlock(&fs_info
->balance_lock
);
3601 ret
= should_balance_chunk(leaf
, chunk
, found_key
.offset
);
3603 btrfs_release_path(path
);
3605 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3610 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3611 spin_lock(&fs_info
->balance_lock
);
3612 bctl
->stat
.expected
++;
3613 spin_unlock(&fs_info
->balance_lock
);
3615 if (chunk_type
& BTRFS_BLOCK_GROUP_DATA
)
3617 else if (chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
)
3619 else if (chunk_type
& BTRFS_BLOCK_GROUP_METADATA
)
3626 * Apply limit_min filter, no need to check if the LIMITS
3627 * filter is used, limit_min is 0 by default
3629 if (((chunk_type
& BTRFS_BLOCK_GROUP_DATA
) &&
3630 count_data
< bctl
->data
.limit_min
)
3631 || ((chunk_type
& BTRFS_BLOCK_GROUP_METADATA
) &&
3632 count_meta
< bctl
->meta
.limit_min
)
3633 || ((chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
) &&
3634 count_sys
< bctl
->sys
.limit_min
)) {
3635 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3639 if (!chunk_reserved
) {
3641 * We may be relocating the only data chunk we have,
3642 * which could potentially end up with losing data's
3643 * raid profile, so lets allocate an empty one in
3646 ret
= btrfs_may_alloc_data_chunk(fs_info
,
3649 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3651 } else if (ret
== 1) {
3656 ret
= btrfs_relocate_chunk(fs_info
, found_key
.offset
);
3657 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3658 if (ret
== -ENOSPC
) {
3660 } else if (ret
== -ETXTBSY
) {
3662 "skipping relocation of block group %llu due to active swapfile",
3668 spin_lock(&fs_info
->balance_lock
);
3669 bctl
->stat
.completed
++;
3670 spin_unlock(&fs_info
->balance_lock
);
3673 if (found_key
.offset
== 0)
3675 key
.offset
= found_key
.offset
- 1;
3679 btrfs_release_path(path
);
3684 btrfs_free_path(path
);
3685 if (enospc_errors
) {
3686 btrfs_info(fs_info
, "%d enospc errors during balance",
/*
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
		     BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	return has_single_bit_set(flags);
}

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
		u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}
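
/*
 * For example, a convert target of 0 is rejected by alloc_profile_is_valid()
 * in extended mode unless it is expressed as BTRFS_AVAIL_ALLOC_BIT_SINGLE,
 * and a target with two profile bits set (say RAID1|RAID10) is rejected
 * because a reduced profile must have exactly one bit. The @allowed mask adds
 * the device-count constraint computed in btrfs_balance() below.
 */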
3736 * Fill @buf with textual description of balance filter flags @bargs, up to
3737 * @size_buf including the terminating null. The output may be trimmed if it
3738 * does not fit into the provided buffer.
3740 static void describe_balance_args(struct btrfs_balance_args
*bargs
, char *buf
,
3744 u32 size_bp
= size_buf
;
3746 u64 flags
= bargs
->flags
;
3747 char tmp_buf
[128] = {'\0'};
3752 #define CHECK_APPEND_NOARG(a) \
3754 ret = snprintf(bp, size_bp, (a)); \
3755 if (ret < 0 || ret >= size_bp) \
3756 goto out_overflow; \
3761 #define CHECK_APPEND_1ARG(a, v1) \
3763 ret = snprintf(bp, size_bp, (a), (v1)); \
3764 if (ret < 0 || ret >= size_bp) \
3765 goto out_overflow; \
3770 #define CHECK_APPEND_2ARG(a, v1, v2) \
3772 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
3773 if (ret < 0 || ret >= size_bp) \
3774 goto out_overflow; \
3779 if (flags
& BTRFS_BALANCE_ARGS_CONVERT
)
3780 CHECK_APPEND_1ARG("convert=%s,",
3781 btrfs_bg_type_to_raid_name(bargs
->target
));
3783 if (flags
& BTRFS_BALANCE_ARGS_SOFT
)
3784 CHECK_APPEND_NOARG("soft,");
3786 if (flags
& BTRFS_BALANCE_ARGS_PROFILES
) {
3787 btrfs_describe_block_groups(bargs
->profiles
, tmp_buf
,
3789 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf
);
3792 if (flags
& BTRFS_BALANCE_ARGS_USAGE
)
3793 CHECK_APPEND_1ARG("usage=%llu,", bargs
->usage
);
3795 if (flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
)
3796 CHECK_APPEND_2ARG("usage=%u..%u,",
3797 bargs
->usage_min
, bargs
->usage_max
);
3799 if (flags
& BTRFS_BALANCE_ARGS_DEVID
)
3800 CHECK_APPEND_1ARG("devid=%llu,", bargs
->devid
);
3802 if (flags
& BTRFS_BALANCE_ARGS_DRANGE
)
3803 CHECK_APPEND_2ARG("drange=%llu..%llu,",
3804 bargs
->pstart
, bargs
->pend
);
3806 if (flags
& BTRFS_BALANCE_ARGS_VRANGE
)
3807 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
3808 bargs
->vstart
, bargs
->vend
);
3810 if (flags
& BTRFS_BALANCE_ARGS_LIMIT
)
3811 CHECK_APPEND_1ARG("limit=%llu,", bargs
->limit
);
3813 if (flags
& BTRFS_BALANCE_ARGS_LIMIT_RANGE
)
3814 CHECK_APPEND_2ARG("limit=%u..%u,",
3815 bargs
->limit_min
, bargs
->limit_max
);
3817 if (flags
& BTRFS_BALANCE_ARGS_STRIPES_RANGE
)
3818 CHECK_APPEND_2ARG("stripes=%u..%u,",
3819 bargs
->stripes_min
, bargs
->stripes_max
);
3821 #undef CHECK_APPEND_2ARG
3822 #undef CHECK_APPEND_1ARG
3823 #undef CHECK_APPEND_NOARG
3827 if (size_bp
< size_buf
)
3828 buf
[size_buf
- size_bp
- 1] = '\0'; /* remove last , */
3833 static void describe_balance_start_or_resume(struct btrfs_fs_info
*fs_info
)
3835 u32 size_buf
= 1024;
3836 char tmp_buf
[192] = {'\0'};
3839 u32 size_bp
= size_buf
;
3841 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
3843 buf
= kzalloc(size_buf
, GFP_KERNEL
);
3849 #define CHECK_APPEND_1ARG(a, v1) \
3851 ret = snprintf(bp, size_bp, (a), (v1)); \
3852 if (ret < 0 || ret >= size_bp) \
3853 goto out_overflow; \
3858 if (bctl
->flags
& BTRFS_BALANCE_FORCE
)
3859 CHECK_APPEND_1ARG("%s", "-f ");
3861 if (bctl
->flags
& BTRFS_BALANCE_DATA
) {
3862 describe_balance_args(&bctl
->data
, tmp_buf
, sizeof(tmp_buf
));
3863 CHECK_APPEND_1ARG("-d%s ", tmp_buf
);
3866 if (bctl
->flags
& BTRFS_BALANCE_METADATA
) {
3867 describe_balance_args(&bctl
->meta
, tmp_buf
, sizeof(tmp_buf
));
3868 CHECK_APPEND_1ARG("-m%s ", tmp_buf
);
3871 if (bctl
->flags
& BTRFS_BALANCE_SYSTEM
) {
3872 describe_balance_args(&bctl
->sys
, tmp_buf
, sizeof(tmp_buf
));
3873 CHECK_APPEND_1ARG("-s%s ", tmp_buf
);
3876 #undef CHECK_APPEND_1ARG
3880 if (size_bp
< size_buf
)
3881 buf
[size_buf
- size_bp
- 1] = '\0'; /* remove last " " */
3882 btrfs_info(fs_info
, "balance: %s %s",
3883 (bctl
->flags
& BTRFS_BALANCE_RESUME
) ?
3884 "resume" : "start", buf
);
 * Should be called with the balance mutex held
3892 int btrfs_balance(struct btrfs_fs_info
*fs_info
,
3893 struct btrfs_balance_control
*bctl
,
3894 struct btrfs_ioctl_balance_args
*bargs
)
3896 u64 meta_target
, data_target
;
3902 bool reducing_redundancy
;
3905 if (btrfs_fs_closing(fs_info
) ||
3906 atomic_read(&fs_info
->balance_pause_req
) ||
3907 atomic_read(&fs_info
->balance_cancel_req
)) {
3912 allowed
= btrfs_super_incompat_flags(fs_info
->super_copy
);
3913 if (allowed
& BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS
)
3917 * In case of mixed groups both data and meta should be picked,
3918 * and identical options should be given for both of them.
3920 allowed
= BTRFS_BALANCE_DATA
| BTRFS_BALANCE_METADATA
;
3921 if (mixed
&& (bctl
->flags
& allowed
)) {
3922 if (!(bctl
->flags
& BTRFS_BALANCE_DATA
) ||
3923 !(bctl
->flags
& BTRFS_BALANCE_METADATA
) ||
3924 memcmp(&bctl
->data
, &bctl
->meta
, sizeof(bctl
->data
))) {
3926 "balance: mixed groups data and metadata options must be the same");
3933 * rw_devices will not change at the moment, device add/delete/replace
3934 * are excluded by EXCL_OP
3936 num_devices
= fs_info
->fs_devices
->rw_devices
;
3939 * SINGLE profile on-disk has no profile bit, but in-memory we have a
3940 * special bit for it, to make it easier to distinguish. Thus we need
3941 * to set it manually, or balance would refuse the profile.
3943 allowed
= BTRFS_AVAIL_ALLOC_BIT_SINGLE
;
3944 for (i
= 0; i
< ARRAY_SIZE(btrfs_raid_array
); i
++)
3945 if (num_devices
>= btrfs_raid_array
[i
].devs_min
)
3946 allowed
|= btrfs_raid_array
[i
].bg_flag
;
3948 if (validate_convert_profile(&bctl
->data
, allowed
)) {
3950 "balance: invalid convert data profile %s",
3951 btrfs_bg_type_to_raid_name(bctl
->data
.target
));
3955 if (validate_convert_profile(&bctl
->meta
, allowed
)) {
3957 "balance: invalid convert metadata profile %s",
3958 btrfs_bg_type_to_raid_name(bctl
->meta
.target
));
3962 if (validate_convert_profile(&bctl
->sys
, allowed
)) {
3964 "balance: invalid convert system profile %s",
3965 btrfs_bg_type_to_raid_name(bctl
->sys
.target
));
3971 * Allow to reduce metadata or system integrity only if force set for
3972 * profiles with redundancy (copies, parity)
3975 for (i
= 0; i
< ARRAY_SIZE(btrfs_raid_array
); i
++) {
3976 if (btrfs_raid_array
[i
].ncopies
>= 2 ||
3977 btrfs_raid_array
[i
].tolerated_failures
>= 1)
3978 allowed
|= btrfs_raid_array
[i
].bg_flag
;
3981 seq
= read_seqbegin(&fs_info
->profiles_lock
);
3983 if (((bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) &&
3984 (fs_info
->avail_system_alloc_bits
& allowed
) &&
3985 !(bctl
->sys
.target
& allowed
)) ||
3986 ((bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) &&
3987 (fs_info
->avail_metadata_alloc_bits
& allowed
) &&
3988 !(bctl
->meta
.target
& allowed
)))
3989 reducing_redundancy
= true;
3991 reducing_redundancy
= false;
3993 /* if we're not converting, the target field is uninitialized */
3994 meta_target
= (bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) ?
3995 bctl
->meta
.target
: fs_info
->avail_metadata_alloc_bits
;
3996 data_target
= (bctl
->data
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) ?
3997 bctl
->data
.target
: fs_info
->avail_data_alloc_bits
;
3998 } while (read_seqretry(&fs_info
->profiles_lock
, seq
));
4000 if (reducing_redundancy
) {
4001 if (bctl
->flags
& BTRFS_BALANCE_FORCE
) {
4003 "balance: force reducing metadata redundancy");
4006 "balance: reduces metadata redundancy, use --force if you want this");
4012 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target
) <
4013 btrfs_get_num_tolerated_disk_barrier_failures(data_target
)) {
4015 "balance: metadata profile %s has lower redundancy than data profile %s",
4016 btrfs_bg_type_to_raid_name(meta_target
),
4017 btrfs_bg_type_to_raid_name(data_target
));
4020 if (fs_info
->send_in_progress
) {
4021 btrfs_warn_rl(fs_info
,
4022 "cannot run balance while send operations are in progress (%d in progress)",
4023 fs_info
->send_in_progress
);
4028 ret
= insert_balance_item(fs_info
, bctl
);
4029 if (ret
&& ret
!= -EEXIST
)
4032 if (!(bctl
->flags
& BTRFS_BALANCE_RESUME
)) {
4033 BUG_ON(ret
== -EEXIST
);
4034 BUG_ON(fs_info
->balance_ctl
);
4035 spin_lock(&fs_info
->balance_lock
);
4036 fs_info
->balance_ctl
= bctl
;
4037 spin_unlock(&fs_info
->balance_lock
);
4039 BUG_ON(ret
!= -EEXIST
);
4040 spin_lock(&fs_info
->balance_lock
);
4041 update_balance_args(bctl
);
4042 spin_unlock(&fs_info
->balance_lock
);
4045 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
));
4046 set_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
);
4047 describe_balance_start_or_resume(fs_info
);
4048 mutex_unlock(&fs_info
->balance_mutex
);
4050 ret
= __btrfs_balance(fs_info
);
4052 mutex_lock(&fs_info
->balance_mutex
);
4053 if (ret
== -ECANCELED
&& atomic_read(&fs_info
->balance_pause_req
))
4054 btrfs_info(fs_info
, "balance: paused");
4055 else if (ret
== -ECANCELED
&& atomic_read(&fs_info
->balance_cancel_req
))
4056 btrfs_info(fs_info
, "balance: canceled");
4058 btrfs_info(fs_info
, "balance: ended with status: %d", ret
);
4060 clear_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
);
4063 memset(bargs
, 0, sizeof(*bargs
));
4064 btrfs_update_ioctl_balance_args(fs_info
, bargs
);
4067 if ((ret
&& ret
!= -ECANCELED
&& ret
!= -ENOSPC
) ||
4068 balance_need_close(fs_info
)) {
4069 reset_balance_state(fs_info
);
4070 clear_bit(BTRFS_FS_EXCL_OP
, &fs_info
->flags
);
4073 wake_up(&fs_info
->balance_wait_q
);
4077 if (bctl
->flags
& BTRFS_BALANCE_RESUME
)
4078 reset_balance_state(fs_info
);
4081 clear_bit(BTRFS_FS_EXCL_OP
, &fs_info
->flags
);
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl)
		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
	mutex_unlock(&fs_info->balance_mutex);

	return ret;
}

int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return 0;
	}
	mutex_unlock(&fs_info->balance_mutex);

	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
		btrfs_info(fs_info, "balance: resume skipped");
		return 0;
	}

	/*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who paused it (the system or the user), so set the
	 * resume flag.
	 */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
	spin_unlock(&fs_info->balance_lock);

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}
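
/*
 * Taken together with btrfs_recover_balance() below, the resume flow is:
 * the balance item is read from the tree root during mount and stashed in
 * fs_info->balance_ctl, and once the filesystem is (re)mounted read-write
 * this function restarts the operation in the "btrfs-balance" kthread,
 * unless the skip_balance mount option was given.
 */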
4128 int btrfs_recover_balance(struct btrfs_fs_info
*fs_info
)
4130 struct btrfs_balance_control
*bctl
;
4131 struct btrfs_balance_item
*item
;
4132 struct btrfs_disk_balance_args disk_bargs
;
4133 struct btrfs_path
*path
;
4134 struct extent_buffer
*leaf
;
4135 struct btrfs_key key
;
4138 path
= btrfs_alloc_path();
4142 key
.objectid
= BTRFS_BALANCE_OBJECTID
;
4143 key
.type
= BTRFS_TEMPORARY_ITEM_KEY
;
4146 ret
= btrfs_search_slot(NULL
, fs_info
->tree_root
, &key
, path
, 0, 0);
4149 if (ret
> 0) { /* ret = -ENOENT; */
4154 bctl
= kzalloc(sizeof(*bctl
), GFP_NOFS
);
4160 leaf
= path
->nodes
[0];
4161 item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_balance_item
);
4163 bctl
->flags
= btrfs_balance_flags(leaf
, item
);
4164 bctl
->flags
|= BTRFS_BALANCE_RESUME
;
4166 btrfs_balance_data(leaf
, item
, &disk_bargs
);
4167 btrfs_disk_balance_args_to_cpu(&bctl
->data
, &disk_bargs
);
4168 btrfs_balance_meta(leaf
, item
, &disk_bargs
);
4169 btrfs_disk_balance_args_to_cpu(&bctl
->meta
, &disk_bargs
);
4170 btrfs_balance_sys(leaf
, item
, &disk_bargs
);
4171 btrfs_disk_balance_args_to_cpu(&bctl
->sys
, &disk_bargs
);
4174 * This should never happen, as the paused balance state is recovered
4175 * during mount without any chance of other exclusive ops to collide.
4177 * This gives the exclusive op status to balance and keeps in paused
4178 * state until user intervention (cancel or umount). If the ownership
4179 * cannot be assigned, show a message but do not fail. The balance
4180 * is in a paused state and must have fs_info::balance_ctl properly
4183 if (test_and_set_bit(BTRFS_FS_EXCL_OP
, &fs_info
->flags
))
4185 "balance: cannot set exclusive op status, resume manually");
4187 mutex_lock(&fs_info
->balance_mutex
);
4188 BUG_ON(fs_info
->balance_ctl
);
4189 spin_lock(&fs_info
->balance_lock
);
4190 fs_info
->balance_ctl
= bctl
;
4191 spin_unlock(&fs_info
->balance_lock
);
4192 mutex_unlock(&fs_info
->balance_mutex
);
4194 btrfs_free_path(path
);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}
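
/*
 * Pausing works by raising balance_pause_req: the chunk loop in
 * __btrfs_balance() checks the counter between chunks and bails out with
 * -ECANCELED, btrfs_balance() reports "balance: paused" and leaves the
 * balance item on disk (balance_need_close() is false while a pause request
 * is pending), and the waiter above is woken through balance_wait_q once
 * BTRFS_FS_BALANCE_RUNNING is cleared.
 */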
4227 int btrfs_cancel_balance(struct btrfs_fs_info
*fs_info
)
4229 mutex_lock(&fs_info
->balance_mutex
);
4230 if (!fs_info
->balance_ctl
) {
4231 mutex_unlock(&fs_info
->balance_mutex
);
4236 * A paused balance with the item stored on disk can be resumed at
4237 * mount time if the mount is read-write. Otherwise it's still paused
4238 * and we must not allow cancelling as it deletes the item.
4240 if (sb_rdonly(fs_info
->sb
)) {
4241 mutex_unlock(&fs_info
->balance_mutex
);
4245 atomic_inc(&fs_info
->balance_cancel_req
);
4247 * if we are running just wait and return, balance item is
4248 * deleted in btrfs_balance in this case
4250 if (test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
)) {
4251 mutex_unlock(&fs_info
->balance_mutex
);
4252 wait_event(fs_info
->balance_wait_q
,
4253 !test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
));
4254 mutex_lock(&fs_info
->balance_mutex
);
4256 mutex_unlock(&fs_info
->balance_mutex
);
4258 * Lock released to allow other waiters to continue, we'll
4259 * reexamine the status again.
4261 mutex_lock(&fs_info
->balance_mutex
);
4263 if (fs_info
->balance_ctl
) {
4264 reset_balance_state(fs_info
);
4265 clear_bit(BTRFS_FS_EXCL_OP
, &fs_info
->flags
);
4266 btrfs_info(fs_info
, "balance: canceled");
4270 BUG_ON(fs_info
->balance_ctl
||
4271 test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
));
4272 atomic_dec(&fs_info
->balance_cancel_req
);
4273 mutex_unlock(&fs_info
->balance_mutex
);
4277 static int btrfs_uuid_scan_kthread(void *data
)
4279 struct btrfs_fs_info
*fs_info
= data
;
4280 struct btrfs_root
*root
= fs_info
->tree_root
;
4281 struct btrfs_key key
;
4282 struct btrfs_path
*path
= NULL
;
4284 struct extent_buffer
*eb
;
4286 struct btrfs_root_item root_item
;
4288 struct btrfs_trans_handle
*trans
= NULL
;
4290 path
= btrfs_alloc_path();
4297 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4301 ret
= btrfs_search_forward(root
, &key
, path
,
4302 BTRFS_OLDEST_GENERATION
);
4309 if (key
.type
!= BTRFS_ROOT_ITEM_KEY
||
4310 (key
.objectid
< BTRFS_FIRST_FREE_OBJECTID
&&
4311 key
.objectid
!= BTRFS_FS_TREE_OBJECTID
) ||
4312 key
.objectid
> BTRFS_LAST_FREE_OBJECTID
)
4315 eb
= path
->nodes
[0];
4316 slot
= path
->slots
[0];
4317 item_size
= btrfs_item_size_nr(eb
, slot
);
4318 if (item_size
< sizeof(root_item
))
4321 read_extent_buffer(eb
, &root_item
,
4322 btrfs_item_ptr_offset(eb
, slot
),
4323 (int)sizeof(root_item
));
4324 if (btrfs_root_refs(&root_item
) == 0)
4327 if (!btrfs_is_empty_uuid(root_item
.uuid
) ||
4328 !btrfs_is_empty_uuid(root_item
.received_uuid
)) {
4332 btrfs_release_path(path
);
4334 * 1 - subvol uuid item
4335 * 1 - received_subvol uuid item
4337 trans
= btrfs_start_transaction(fs_info
->uuid_root
, 2);
4338 if (IS_ERR(trans
)) {
4339 ret
= PTR_ERR(trans
);
4347 if (!btrfs_is_empty_uuid(root_item
.uuid
)) {
4348 ret
= btrfs_uuid_tree_add(trans
, root_item
.uuid
,
4349 BTRFS_UUID_KEY_SUBVOL
,
4352 btrfs_warn(fs_info
, "uuid_tree_add failed %d",
4358 if (!btrfs_is_empty_uuid(root_item
.received_uuid
)) {
4359 ret
= btrfs_uuid_tree_add(trans
,
4360 root_item
.received_uuid
,
4361 BTRFS_UUID_KEY_RECEIVED_SUBVOL
,
4364 btrfs_warn(fs_info
, "uuid_tree_add failed %d",
4372 ret
= btrfs_end_transaction(trans
);
4378 btrfs_release_path(path
);
4379 if (key
.offset
< (u64
)-1) {
4381 } else if (key
.type
< BTRFS_ROOT_ITEM_KEY
) {
4383 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4384 } else if (key
.objectid
< (u64
)-1) {
4386 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4395 btrfs_free_path(path
);
4396 if (trans
&& !IS_ERR(trans
))
4397 btrfs_end_transaction(trans
);
4399 btrfs_warn(fs_info
, "btrfs_uuid_scan_kthread failed %d", ret
);
4401 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN
, &fs_info
->flags
);
4402 up(&fs_info
->uuid_tree_rescan_sem
);
4407 * Callback for btrfs_uuid_tree_iterate().
4409 * 0 check succeeded, the entry is not outdated.
4410 * < 0 if an error occurred.
4411 * > 0 if the check failed, which means the caller shall remove the entry.
4413 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info
*fs_info
,
4414 u8
*uuid
, u8 type
, u64 subid
)
4416 struct btrfs_key key
;
4418 struct btrfs_root
*subvol_root
;
4420 if (type
!= BTRFS_UUID_KEY_SUBVOL
&&
4421 type
!= BTRFS_UUID_KEY_RECEIVED_SUBVOL
)
4424 key
.objectid
= subid
;
4425 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4426 key
.offset
= (u64
)-1;
4427 subvol_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
4428 if (IS_ERR(subvol_root
)) {
4429 ret
= PTR_ERR(subvol_root
);
4436 case BTRFS_UUID_KEY_SUBVOL
:
4437 if (memcmp(uuid
, subvol_root
->root_item
.uuid
, BTRFS_UUID_SIZE
))
4440 case BTRFS_UUID_KEY_RECEIVED_SUBVOL
:
4441 if (memcmp(uuid
, subvol_root
->root_item
.received_uuid
,
4451 static int btrfs_uuid_rescan_kthread(void *data
)
4453 struct btrfs_fs_info
*fs_info
= (struct btrfs_fs_info
*)data
;
4457 * 1st step is to iterate through the existing UUID tree and
4458 * to delete all entries that contain outdated data.
4459 * 2nd step is to add all missing entries to the UUID tree.
4461 ret
= btrfs_uuid_tree_iterate(fs_info
, btrfs_check_uuid_tree_entry
);
4463 btrfs_warn(fs_info
, "iterating uuid_tree failed %d", ret
);
4464 up(&fs_info
->uuid_tree_rescan_sem
);
4467 return btrfs_uuid_scan_kthread(data
);
4470 int btrfs_create_uuid_tree(struct btrfs_fs_info
*fs_info
)
4472 struct btrfs_trans_handle
*trans
;
4473 struct btrfs_root
*tree_root
= fs_info
->tree_root
;
4474 struct btrfs_root
*uuid_root
;
4475 struct task_struct
*task
;
4482 trans
= btrfs_start_transaction(tree_root
, 2);
4484 return PTR_ERR(trans
);
4486 uuid_root
= btrfs_create_tree(trans
, BTRFS_UUID_TREE_OBJECTID
);
4487 if (IS_ERR(uuid_root
)) {
4488 ret
= PTR_ERR(uuid_root
);
4489 btrfs_abort_transaction(trans
, ret
);
4490 btrfs_end_transaction(trans
);
4494 fs_info
->uuid_root
= uuid_root
;
4496 ret
= btrfs_commit_transaction(trans
);
4500 down(&fs_info
->uuid_tree_rescan_sem
);
4501 task
= kthread_run(btrfs_uuid_scan_kthread
, fs_info
, "btrfs-uuid");
4503 /* fs_info->update_uuid_tree_gen remains 0 in all error case */
4504 btrfs_warn(fs_info
, "failed to start uuid_scan task");
4505 up(&fs_info
->uuid_tree_rescan_sem
);
4506 return PTR_ERR(task
);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}
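
/*
 * In short: btrfs_create_uuid_tree() above creates a brand new uuid tree and
 * starts btrfs_uuid_scan_kthread() to populate it from the existing
 * subvolumes, while btrfs_check_uuid_tree() starts
 * btrfs_uuid_rescan_kthread(), which first drops stale entries via
 * btrfs_uuid_tree_iterate() and then falls through to the same scan to add
 * anything that is missing.
 */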
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;
	u64 start;

	new_size = round_down(new_size, fs_info->sectorsize);
	start = new_size;
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees them
	 * and relocates them accordingly.
	 */
	if (contains_pending_extent(device, &start, diff)) {
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so lets allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);
	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}
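
/*
 * Append a chunk item and its key to the superblock's in-memory
 * sys_chunk_array so that system chunks can be found before the chunk tree
 * itself is readable.
 */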
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}
/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}
static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
		return;

	btrfs_set_fs_incompat(info, RAID1C34);
}
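
/*
 * Create a new chunk of @type at logical offset @start: pick the devices with
 * the largest free holes, build the stripe map, insert it into the in-memory
 * mapping tree and create the corresponding block group.
 */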
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device *device;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies to data has */
	int nparity;		/* number of stripes worth of bytes to
				   store parity information */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return -ENOSPC;
	}

	index = btrfs_bg_flags_to_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	if (!devs_max)
		devs_max = BTRFS_MAX_DEVS(info);
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;
	nparity = btrfs_raid_array[index].nparity;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
		devs_max = min_t(int, devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
			  type);
		BUG();
	}

	/* We don't want a chunk larger than 10% of writable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 max_avail;
		u64 dev_offset;
		u64 total_avail;

		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%u",
					    __func__, device->devid, max_avail,
					    BTRFS_STRIPE_LEN * dev_stripes);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/*
	 * Round down to number of usable stripes, devs_increment can be any
	 * number so we can't use round_down()
	 */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_min) {
		ret = -ENOSPC;
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ndevs, devs_min);
		}
		goto error;
	}

	ndevs = min(ndevs, devs_max);

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = (num_stripes - nparity) / ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size. If it's higher,
	 * we try to reduce stripe_size.
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		stripe_size = min(round_up(div_u64(max_chunk_size,
						   data_stripes), SZ_16M),
				  stripe_size);
	}

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	chunk_size = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info, map, start, chunk_size);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = chunk_size;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &info->mapping_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret) {
		write_unlock(&em_tree->lock);
		free_extent_map(em);
		goto error;
	}
	write_unlock(&em_tree->lock);

	ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = map->stripes[i].dev;

		btrfs_device_set_bytes_used(dev, dev->bytes_used + stripe_size);
		if (list_empty(&dev->post_commit_list))
			list_add_tail(&dev->post_commit_list,
				      &trans->transaction->dev_update_list);
	}

	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);

	free_extent_map(em);
	check_raid56_incompat_flag(info, type);
	check_raid1c34_incompat_flag(info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}
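
/*
 * Second phase of chunk allocation: write the dev extents and the chunk item
 * for a chunk that was already set up in memory by __btrfs_alloc_chunk().
 */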
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}
/*
 * Chunk allocation falls into two parts. The first part does work
 * that makes the new allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	u64 chunk_offset;

	lockdep_assert_held(&trans->fs_info->chunk_mutex);
	chunk_offset = find_next_chunk(trans->fs_info);
	return __btrfs_alloc_chunk(trans, chunk_offset, type);
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_system_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
	return ret;
}
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	const int index = btrfs_bg_flags_to_raid_index(map->type);

	return btrfs_raid_array[index].tolerated_failures;
}
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
					&map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}
void btrfs_mapping_tree_free(struct extent_map_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->lock);
		em = lookup_extent_mapping(tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(tree, em);
		write_unlock(&tree->lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail. Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}
static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;

				again = 1;
			}
		}
	}
}
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	return bbio;
}
void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}
/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note that, discard won't be sent to target device of device
 * replacement.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 *length_ret,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 length = *length_ret;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* discard always return a bbio */
	ASSERT(bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	length = min_t(u64, em->start + em->len - logical, length);
	*length_ret = length;

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
				BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length += map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -= stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -= stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}
/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_bio *bbio = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bbio, 0, 0);
	if (ret) {
		ASSERT(bbio == NULL);
		return ret;
	}

	num_stripes = bbio->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	/*
	 * process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first. At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bbio->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bbio->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bbio->stripes[i].physical;
	}

	btrfs_put_bbio(bbio);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_bio **bbio_ret,
				      struct btrfs_dev_replace *dev_replace,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_bio *bbio = *bbio_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bbio->num_tgtdevs = tgtdev_indexes;
	*bbio_ret = bbio;
}
static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}
/*
 * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
 *		       tuple. This information is used to calculate how big a
 *		       particular bio can get before it straddles a stripe.
 *
 * @fs_info - the filesystem
 * @logical - address that we want to figure out the geometry of
 * @len	    - the length of IO we are going to perform, starting at @logical
 * @op      - type of operation - write or read
 * @io_geom - pointer used to return values
 *
 * Returns < 0 in case a chunk for the given logical address cannot be found,
 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
 */
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
			u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u64 raid56_full_stripe_start = (u64)-1;
	int data_stripes;
	int ret = 0;

	ASSERT(op != BTRFS_MAP_DISCARD);

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* Offset of this logical address in the chunk */
	offset = logical - em->start;
	/* Len of a stripe in a chunk */
	stripe_len = map->stripe_len;
	/* Stripe where this block falls in */
	stripe_nr = div64_u64(offset, stripe_len);
	/* Offset of stripe in the chunk */
	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
			stripe_offset, offset, em->start, logical, stripe_len);
		ret = -EINVAL;
		goto out;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
	data_stripes = nr_data_stripes(map);

	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len = stripe_len - stripe_offset;

		/*
		 * In case of raid56, we need to know the stripe aligned start
		 */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			unsigned long full_stripe_len = stripe_len * data_stripes;
			raid56_full_stripe_start = offset;

			/*
			 * Allow a write of a full stripe, but make sure we
			 * don't allow straddling of stripes
			 */
			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
					full_stripe_len);
			raid56_full_stripe_start *= full_stripe_len;

			/*
			 * For writes to RAID[56], allow a full stripeset across
			 * all disks. For other RAID types and for RAID[56]
			 * reads, just allow a single stripe (on a single disk).
			 */
			if (op == BTRFS_MAP_WRITE) {
				max_len = stripe_len * data_stripes -
					  (offset - raid56_full_stripe_start);
			}
		}
		len = min_t(u64, em->len - offset, max_len);
	} else {
		len = em->len - offset;
	}

	io_geom->len = len;
	io_geom->offset = offset;
	io_geom->stripe_len = stripe_len;
	io_geom->stripe_nr = stripe_nr;
	io_geom->stripe_offset = stripe_offset;
	io_geom->raid56_stripe_offset = raid56_full_stripe_start;

out:
	free_extent_map(em);
	return ret;
}
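
/*
 * Map a logical address range to the physical stripes backing it and return
 * the result as a btrfs_bio. Mirror selection, RAID56 raid_map construction
 * and dev-replace stripe duplication are handled here.
 */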
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u32 stripe_index;
	int data_stripes;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;
	struct btrfs_io_geometry geom;

	ASSERT(bbio_ret);

	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     length, bbio_ret);

	ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
	if (ret < 0)
		return ret;

	em = btrfs_get_chunk_map(fs_info, logical, *length);
	ASSERT(!IS_ERR(em));
	map = em->map_lookup;

	*length = geom.len;
	stripe_len = geom.stripe_len;
	stripe_nr = geom.stripe_nr;
	stripe_offset = geom.stripe_offset;
	raid56_full_stripe_start = geom.raid56_stripe_offset;
	data_stripes = nr_data_stripes(map);

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
						    dev_replace->srcdev->devid,
						    &mirror_num,
					    &physical_to_patch_in_first_stripe);
		if (ret)
			goto out;
		else
			patch_the_first_stripe_for_dev_replace = 1;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!need_full_stripe(op))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
		if (need_full_stripe(op))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (need_full_stripe(op)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					stripe_len * data_stripes);

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					data_stripes, &stripe_index);
			if (mirror_num > 1)
				stripe_index = data_stripes + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * data_stripes;
		for (i = 0; i < data_stripes; i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset +
			stripe_nr * map->stripe_len;
		bbio->stripes[i].dev =
			map->stripes[stripe_index].dev;
		stripe_index++;
	}

	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		      u64 logical, u64 *length,
		      struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
}
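
/*
 * Completion path: restore the original bio's private data and end_io
 * callback, end the bio and drop our reference on the btrfs_bio.
 */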
static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}
static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_status) {
		atomic_inc(&bbio->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio_op(bio) == REQ_OP_WRITE)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else if (!(bio->bi_opf & REQ_RAHEAD))
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if (bio->bi_opf & REQ_PREFLUSH)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			bio->bi_status = BLK_STS_OK;
		}

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
			      u64 physical, int dev_nr)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
	struct btrfs_fs_info *fs_info = bbio->fs_info;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
		(u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
		bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);

	btrfs_bio_counter_inc_noblocked(fs_info);

	btrfsic_submit_bio(bio);
}
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bbio->error) > bbio->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
		btrfs_end_bbio(bbio, bio);
	}
}
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (bio_op(bio) == REQ_OP_WRITE) {
			ret = raid56_parity_write(fs_info, bio, bbio,
						  map_length);
		} else {
			ret = raid56_parity_recover(fs_info, bio, bbio,
						    map_length, mirror_num, 1);
		}

		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	if (map_length < length) {
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
						   &dev->dev_state) ||
		    (bio_op(first_bio) == REQ_OP_WRITE &&
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1)
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;

		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
				  dev_nr);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 *
 * If @seed is true, traverse through the seed devices.
 */
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed)
{
	struct btrfs_device *device;

	while (fs_devices) {
		if (!fsid ||
		    !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
			list_for_each_entry(device, &fs_devices->devices,
					    dev_list) {
				if (device->devid == devid &&
				    (!uuid || memcmp(device->uuid, uuid,
						     BTRFS_UUID_SIZE) == 0))
					return device;
			}
		}
		if (seed)
			fs_devices = fs_devices->seed;
		else
			return NULL;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	return dev;
}
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}
static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	int index = btrfs_bg_flags_to_raid_index(type);
	int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;
	int data_stripes;

	if (nparity)
		data_stripes = num_stripes - nparity;
	else
		data_stripes = num_stripes / ncopies;

	return div_u64(chunk_len, data_stripes);
}
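
/*
 * Build the in-memory extent map and stripe map for one on-disk chunk item
 * and add it to the fs_info mapping tree.
 */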
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, logical, 1);
	read_unlock(&map_tree->lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	map->verified_stripes = 0;
	em->orig_block_len = calc_stripe_length(map->type, em->len,
						map->num_stripes);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
							devid, uuid, NULL, true);
		if (!map->stripes[i].dev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			free_extent_map(em);
			btrfs_report_missing_device(fs_info, devid, uuid, true);
			return -ENOENT;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				free_extent_map(em);
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
			}
			btrfs_report_missing_device(fs_info, devid, uuid, false);
		}
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				&(map->stripes[i].dev->dev_state));

	}

	write_lock(&map_tree->lock);
	ret = add_extent_mapping(map_tree, em, 0);
	write_unlock(&map_tree->lock);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  em->start, em->len, ret);
	}
	free_extent_map(em);

	return ret;
}
static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
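
/*
 * Find the fs_devices for the seed filesystem referenced by @fsid, opening it
 * (or, with -o degraded, creating an empty placeholder) when it is not yet
 * part of this mount.
 */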
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid, NULL);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = true;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}
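
/*
 * Process one dev item from the chunk tree: locate the device (or create a
 * missing placeholder) and update its in-memory state from the item.
 */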
static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
				   fs_uuid, true);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				"failed to add missing dev %llu: %ld",
				devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
							&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
				&fs_info->free_chunk_space);
	}
	return 0;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system
	 * array. set_extent_buffer_uptodate() call does not properly mark all
	 * its pages up-to-date when the page is larger: extent does not cover
	 * the whole page and consequently check_page_uptodate does not find
	 * all the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback
	 * cycle, but sb spans only this function. Add an explicit
	 * SetPageUptodate call to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}

		chunk = (struct btrfs_chunk *)sb_array_offset;
		/*
		 * At least one btrfs_chunk with one stripe must be present,
		 * exact stripe count check comes afterwards
		 */
		len = btrfs_chunk_item_size(1);
		if (cur_offset + len > array_size)
			goto out_short_read;

		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
		if (!num_stripes) {
			btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
				  num_stripes, cur_offset);
			ret = -EIO;
			break;
		}

		type = btrfs_chunk_type(sb, chunk);
		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
			btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
				  type, cur_offset);
			ret = -EIO;
			break;
		}

		len = btrfs_chunk_item_size(num_stripes);
		if (cur_offset + len > array_size)
			goto out_short_read;

		ret = read_one_chunk(&key, sb, chunk);
		if (ret)
			break;

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
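
/*
 * Note on the loop in btrfs_read_sys_array() above: sys_chunk_array is a
 * packed sequence of (struct btrfs_disk_key, struct btrfs_chunk + stripes)
 * pairs bounded by BTRFS_SYSTEM_CHUNK_ARRAY_SIZE, which is why each iteration
 * first checks that the key and then the chunk item (sized by its actual
 * stripe count) fit inside array_size before reading them.
 */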
/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
	read_unlock(&map_tree->lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
				   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->fs_info = fs_info;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			     ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}
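
/*
 * The two helpers above address the values[] array embedded in a
 * struct btrfs_dev_stats_item: counter @index lives at
 * offsetof(struct btrfs_dev_stats_item, values) + index * sizeof(u64) from
 * the item start, and the accesses go through read/write_extent_buffer()
 * because @ptr is an offset inside the extent buffer, not a directly
 * dereferenceable pointer.
 */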
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
				btrfs_dev_stat_set(device, i, 0);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_set(device, i, 0);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
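
/*
 * update_dev_stat_item() above keys the persistent counters in the device
 * tree as (BTRFS_DEV_STATS_OBJECTID, BTRFS_PERSISTENT_ITEM_KEY, devid).  The
 * delete-and-reinsert path handles an existing item that is smaller than the
 * current struct btrfs_dev_stats_item, e.g. one written with fewer counters.
 */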
/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;


		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);
	return ret;
}
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
				true);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
		copy_num++) {

		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}
/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}
/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
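
/*
 * For illustration: btrfs_bg_type_to_factor() returns 2 for DUP, RAID1 and
 * RAID10 and 1 for SINGLE and RAID0, i.e. how many copies of each byte the
 * simple (non-parity) profiles store.
 */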
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for seed device */
	if (dev->disk_total_bytes == 0) {
		dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
					NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
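
/*
 * Taken together, verify_one_dev_extent() and
 * verify_chunk_dev_extent_mapping() above establish a one-to-one mapping
 * between dev extent items in the device tree and the stripes of the cached
 * chunk mappings: every dev extent must match an existing chunk stripe, and
 * every chunk stripe must have been claimed by exactly one dev extent.
 */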
/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
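
/*
 * Note: the swapfile_pins rbtree searched above is keyed by the raw pointer
 * value of the pinned object (a block group or a device), so the lookup only
 * answers whether that exact object is pinned; it does not do any range
 * matching.
 */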