// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "transaction.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "relocation.h"
#include "raid-stripe-tree.h"
#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)
struct btrfs_io_geometry {
	u64 raid56_full_stripe_start;
};
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.devs_max	= 0,	/* 0 == as many as possible */
		.tolerated_failures = 1,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.tolerated_failures = 1,
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.tolerated_failures = 2,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.tolerated_failures = 3,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.tolerated_failures = 0,
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
	},
	[BTRFS_RAID_RAID0] = {
		.tolerated_failures = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
	},
	[BTRFS_RAID_SINGLE] = {
		.tolerated_failures = 0,
		.raid_name	= "single",
	},
	[BTRFS_RAID_RAID5] = {
		.tolerated_failures = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.tolerated_failures = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
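/*
 * For illustration: the table above is indexed by enum btrfs_raid_types, so a
 * lookup such as
 *
 *	btrfs_raid_array[BTRFS_RAID_RAID6].tolerated_failures == 2
 *	btrfs_raid_array[BTRFS_RAID_RAID10].bg_flag == BTRFS_BLOCK_GROUP_RAID10
 *
 * is the usual access pattern; the helpers below convert block group flags to
 * such an index first.
 */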
/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}
const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}
int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}
/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		if (ret > 0 && ret < size_bp) {
			size_bp -= ret;
			bp += ret;
		}
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer.
	 */
out_overflow:;
}
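/*
 * Minimal usage sketch (illustrative only): describing a raid1 metadata
 * profile into a stack buffer, e.g. for a mount-time log message:
 *
 *	char buf[64];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_METADATA |
 *				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
 *
 * After the call, buf holds "metadata|raid1" (the trailing '|' is stripped).
 */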
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files.
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * the scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
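/*
 * A rough illustration of the RCU rule above (a sketch, not a verbatim
 * excerpt): a read-only walk of fs_devices::devices can look like
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list)
 *		pr_info("devid %llu\n", device->devid);
 *	rcu_read_unlock();
 *
 * while adding or deleting entries must hold fs_devices::device_list_mutex.
 */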
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}
/*
 * Allocate new btrfs_fs_devices structure identified by a fsid.
 *
 * @fsid:	if not NULL, copy the UUID to fs_devices::fsid and to
 *		fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);

	if (fsid) {
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
	}

	return fs_devs;
}
static void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}
void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}
static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{
	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
		return false;

	if (!metadata_fsid)
		return true;

	if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
		return false;

	return true;
}
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
			return fs_devices;
	}
	return NULL;
}
static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct file **bdev_file,
		      struct btrfs_super_block **disk_super)
{
	struct block_device *bdev;
	int ret;

	*bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL);

	if (IS_ERR(*bdev_file)) {
		ret = PTR_ERR(*bdev_file);
		btrfs_err(NULL, "failed to open device for path %s with flags 0x%x: %d",
			  device_path, flags, ret);
		goto error;
	}
	bdev = file_bdev(*bdev_file);

	if (flush)
		sync_blockdev(bdev);
	ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		fput(*bdev_file);
		goto error;
	}
	invalidate_bdev(bdev);
	*disk_super = btrfs_read_dev_super(bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		fput(*bdev_file);
		goto error;
	}

	return 0;

error:
	*bdev_file = NULL;
	return ret;
}
/*
 * Search and remove all stale devices (which are not mounted).  When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * @devt:	 Optional. When provided will it release all unmounted devices
 *		 matching this devt only.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret;
	bool freed = false;

	lockdep_assert_held(&uuid_mutex);

	/* Return good status if there is no instance of devt. */
	ret = 0;
	if (devt)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				if (devt)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			freed = true;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	/* If there is at least one freed device return 0. */
	if (freed)
		ret = 0;

	return ret;
}
static struct btrfs_fs_devices *find_fsid_by_device(
					struct btrfs_super_block *disk_super,
					dev_t devt, bool *same_fsid_diff_dev)
{
	struct btrfs_fs_devices *fsid_fs_devices;
	struct btrfs_fs_devices *devt_fs_devices;
	const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
					BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool found_by_devt = false;

	/* Find the fs_device by the usual method, if found use it. */
	fsid_fs_devices = find_fsid(disk_super->fsid,
			has_metadata_uuid ? disk_super->metadata_uuid : NULL);

	/* The temp_fsid feature is supported only with single device filesystem. */
	if (btrfs_super_num_devices(disk_super) != 1)
		return fsid_fs_devices;

	/*
	 * A seed device is an integral component of the sprout device, which
	 * functions as a multi-device filesystem. So, temp-fsid feature is
	 * not supported.
	 */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
		return fsid_fs_devices;

	/* Try to find a fs_devices by matching devt. */
	list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
			if (device->devt == devt) {
				found_by_devt = true;
				break;
			}
		}
		if (found_by_devt)
			break;
	}

	if (found_by_devt) {
		/* Existing device. */
		if (fsid_fs_devices == NULL) {
			if (devt_fs_devices->opened == 0) {
				/* Stale device. */
				return NULL;
			} else {
				/* temp_fsid is mounting a subvol. */
				return devt_fs_devices;
			}
		} else {
			/* Regular or temp_fsid device mounting a subvol. */
			return devt_fs_devices;
		}
	} else {
		if (fsid_fs_devices == NULL) {
			/* A new device. */
			return NULL;
		} else {
			/* sb::fsid is already used create a new temp_fsid. */
			*same_fsid_diff_dev = true;
			return NULL;
		}
	}
}
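/*
 * Rough summary of the lookup above (illustrative): devices are matched first
 * by fsid/metadata_uuid and then, for single-device filesystems without the
 * seeding flag, by devt. A devt-only match reuses the existing (possibly
 * temp-fsid) fs_devices; an fsid-only match on a different devt asks the
 * caller, via *same_fsid_diff_dev, to generate a new temp-fsid.
 */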
/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, blk_mode_t flags,
			void *holder)
{
	struct file *bdev_file;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev_file, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(file_bdev(bdev_file)))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(file_bdev(bdev_file)))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(file_bdev(bdev_file)))
		fs_devices->discardable = true;

	device->bdev_file = bdev_file;
	device->bdev = file_bdev(bdev_file);
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);

	if (device->devt != device->bdev->bd_dev) {
		btrfs_warn(NULL,
			   "device %s maj:min changed from %d:%d to %d:%d",
			   device->name->str, MAJOR(device->devt),
			   MINOR(device->devt), MAJOR(device->bdev->bd_dev),
			   MINOR(device->bdev->bd_dev));

		device->devt = device->bdev->bd_dev;
	}

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);

	return -EINVAL;
}
const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}
/*
 * We can have very weird soft links passed in.
 * One example is "/proc/self/fd/<fd>", which can be a soft link to
 * a block device.
 *
 * But it's never a good idea to use those weird names.
 * Here we check if the path (not following symlinks) is a good one inside
 * "/dev/".
 */
static bool is_good_dev_path(const char *dev_path)
{
	struct path path = { .mnt = NULL, .dentry = NULL };
	char *path_buf = NULL;
	char *resolved_path;
	bool is_good = false;
	int ret;

	if (!dev_path)
		goto out;

	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!path_buf)
		goto out;

	/*
	 * Do not follow soft link, just check if the original path is inside
	 * "/dev/".
	 */
	ret = kern_path(dev_path, 0, &path);
	if (ret)
		goto out;
	resolved_path = d_path(&path, path_buf, PATH_MAX);
	if (IS_ERR(resolved_path))
		goto out;
	if (strncmp(resolved_path, "/dev/", strlen("/dev/")))
		goto out;
	is_good = true;
out:
	kfree(path_buf);
	path_put(&path);
	return is_good;
}
static int get_canonical_dev_path(const char *dev_path, char *canonical)
{
	struct path path = { .mnt = NULL, .dentry = NULL };
	char *path_buf = NULL;
	char *resolved_path;
	int ret;

	if (!dev_path) {
		ret = -EINVAL;
		goto out;
	}

	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!path_buf) {
		ret = -ENOMEM;
		goto out;
	}

	ret = kern_path(dev_path, LOOKUP_FOLLOW, &path);
	if (ret)
		goto out;
	resolved_path = d_path(&path, path_buf, PATH_MAX);
	ret = strscpy(canonical, resolved_path, PATH_MAX);
out:
	kfree(path_buf);
	path_put(&path);
	return ret;
}
static bool is_same_device(struct btrfs_device *device, const char *new_path)
{
	struct path old = { .mnt = NULL, .dentry = NULL };
	struct path new = { .mnt = NULL, .dentry = NULL };
	char *old_path = NULL;
	bool is_same = false;
	int ret;

	if (!device->name)
		goto out;

	old_path = kzalloc(PATH_MAX, GFP_NOFS);
	if (!old_path)
		goto out;

	rcu_read_lock();
	ret = strscpy(old_path, rcu_str_deref(device->name), PATH_MAX);
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	ret = kern_path(old_path, LOOKUP_FOLLOW, &old);
	if (ret)
		goto out;
	ret = kern_path(new_path, LOOKUP_FOLLOW, &new);
	if (ret)
		goto out;
	if (path_equal(&old, &new))
		is_same = true;
out:
	kfree(old_path);
	path_put(&old);
	path_put(&new);
	return is_same;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool same_fsid_diff_dev = false;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		btrfs_err(NULL,
"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
			  path);
		return ERR_PTR(-EAGAIN);
	}

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);

	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		if (has_metadata_uuid)
			memcpy(fs_devices->metadata_uuid,
			       disk_super->metadata_uuid, BTRFS_FSID_SIZE);

		if (same_fsid_diff_dev) {
			generate_random_uuid(fs_devices->fsid);
			fs_devices->temp_fsid = true;
			pr_info("BTRFS: device %s (%d:%d) using temp-fsid %pU\n",
				path, MAJOR(path_devt), MINOR(path_devt),
				fs_devices->fsid);
		}

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		if (found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
			       btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
"device %s (%d:%d) belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
				  path, MAJOR(path_devt), MINOR(path_devt),
				  fs_devices->fsid, current->comm,
				  task_pid_nr(current));
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
"BTRFS: device label %s devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));
		else
			pr_info(
"BTRFS: device fsid %pU devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));

	} else if (!device->name || !is_same_device(device, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with
		 *	   different name. or
		 *	b. The missing-disk-which-was-replaced, has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be a spurious
		 * and unwanted.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid.  We keep the one
			 * with larger generation number or the last-in if
			 * generation are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->devt != path_devt) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
					  path, devid, found_transid,
					  current->comm,
					  task_pid_nr(current));
			return ERR_PTR(-EEXIST);
		}
		btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
				  devid, btrfs_dev_name(device),
				  path, current->comm,
				  task_pid_nr(current));

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);

	return device;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}
static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev_file) {
			fput(device->bdev_file);
			device->bdev = NULL;
			device->bdev_file = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}
/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	fput(device->bdev_file);
}
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
		device->bdev_file = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}
static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without the needing device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   blk_mode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;
	int ret = 0;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret2;

		ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret2 == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret2 == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
		if (ret == 0 && ret2 != 0)
			ret = ret2;
	}

	if (fs_devices->open_devices == 0) {
		if (ret)
			return ret;
		return -EINVAL;
	}

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}
static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}
void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}
static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
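/*
 * Worked example of the checks above (illustrative, assuming 4 KiB pages):
 * for the primary superblock at bytenr 65536, index = 65536 >> PAGE_SHIFT = 16,
 * and since sizeof(struct btrfs_super_block) fits in one page, the range
 * [bytenr, bytenr + sizeof(*disk_super)) stays inside page 16 and the
 * "straddle" check passes.
 */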
int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}
static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
				    const char *path, dev_t devt,
				    bool mount_arg_dev)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Do not skip device registration for mounted devices with matching
	 * maj:min but different paths. Booting without initrd relies on
	 * /dev/root initially, later replaced with the actual root device.
	 * A successful scan ensures grub2-probe selects the correct device.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		mutex_lock(&fs_devices->device_list_mutex);

		if (!fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			continue;
		}

		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->bdev && (device->bdev->bd_dev == devt) &&
			    strcmp(device->name->str, path) != 0) {
				mutex_unlock(&fs_devices->device_list_mutex);

				/* Do not skip registration. */
				return false;
			}
		}
		mutex_unlock(&fs_devices->device_list_mutex);
	}

	if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
	    !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING))
		return true;

	return false;
}
/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache.
 *
 * With @mount_arg_dev it's a scan during mount time that will always register
 * the device or return an error. Multi-device and seeding devices are registered
 * in both cases.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
					   bool mount_arg_dev)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct file *bdev_file;
	char *canonical_path = NULL;
	u64 bytenr;
	dev_t devt;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	if (!is_good_dev_path(path)) {
		canonical_path = kmalloc(PATH_MAX, GFP_KERNEL);
		if (canonical_path) {
			ret = get_canonical_dev_path(path, canonical_path);
			if (ret < 0) {
				kfree(canonical_path);
				canonical_path = NULL;
			}
		}
	}
	/*
	 * Avoid an exclusive open here, as the systemd-udev may initiate the
	 * device scan which may race with the user's mount or mkfs command,
	 * resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for an exclusive open. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL);
	if (IS_ERR(bdev_file))
		return ERR_CAST(bdev_file);

	/*
	 * We would like to check all the super blocks, but doing so would
	 * allow a mount to succeed after a mkfs from a different filesystem.
	 * Currently, recovery from a bad primary btrfs superblock is done
	 * using the userspace command 'btrfs check --super'.
	 */
	ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
					   btrfs_sb_offset(0));
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	devt = file_bdev(bdev_file)->bd_dev;
	if (btrfs_skip_registration(disk_super, path, devt, mount_arg_dev)) {
		pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n",
			 path, MAJOR(devt), MINOR(devt));

		btrfs_free_stale_devices(devt, NULL);

		device = NULL;
		goto free_disk_super;
	}

	device = device_list_add(canonical_path ? : path, disk_super,
				 &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

free_disk_super:
	btrfs_release_disk_super(disk_super);

error_bdev_put:
	fput(bdev_file);
	kfree(canonical_path);

	return device;
}
/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (find_first_extent_bit(&device->alloc_state, *start,
				  &physical_start, &physical_end,
				  CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end + 1 - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
static u64 dev_extent_search_start(struct btrfs_device *device)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return BTRFS_DEVICE_RANGE_RESERVED;
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return 0;
	default:
		BUG();
	}
}
static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}
/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns 1 if hole position is updated, 0 otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}
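/*
 * Example (for illustration): given a hole [16 MiB, 80 MiB) and a pending
 * chunk recorded at [32 MiB, 48 MiB), contains_pending_extent() advances
 * *hole_start past the chunk, the hole shrinks to [48 MiB, 80 MiB), and the
 * caller then re-checks whether the remaining 32 MiB still fits num_bytes.
 */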
/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_start;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device);
	max_hole_start = search_start;

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
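/*
 * Minimal usage sketch (illustrative only, error handling omitted): a chunk
 * allocator asking this device for a 1 GiB stripe would do roughly
 *
 *	u64 dev_offset, dev_len;
 *
 *	if (find_free_dev_extent(device, SZ_1G, &dev_offset, &dev_len) == 0)
 *		;	// dev_offset now points at a hole of at least 1 GiB
 *
 * and on -ENOSPC it can fall back to the reported maximum hole size.
 */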
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	u64 ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	n = rb_last(&fs_info->mapping_tree.rb_root);
	if (n) {
		struct btrfs_chunk_map *map;

		map = rb_entry(n, struct btrfs_chunk_map, rb_node);
		ret = map->start + map->chunk_len;
	}
	read_unlock(&fs_info->mapping_tree_lock);

	return ret;
}
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, true);
	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	btrfs_trans_release_chunk_metadata(trans);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(trans, leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 *
 * We don't care about errors here, this is just to be kind to userspace.
 */
static void update_dev_time(const char *device_path)
{
	struct path path;
	int ret;

	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
	if (ret)
		return;

	inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION);
	path_put(&path);
}
static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	btrfs_trans_release_chunk_metadata(trans);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min)
			return btrfs_raid_array[i].mindev_error;
	}

	return 0;
}
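/*
 * Worked example (illustrative): on a two-device filesystem whose metadata
 * profile is RAID1 (devs_min == 2), calling this with num_devices == 1
 * returns BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, which is how "btrfs device
 * remove" refuses to drop below the minimum device count for the profile.
 */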
static struct btrfs_device *btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}
/*
 * Helper function to check if the given device is part of s_bdev / latest_dev
 * and replace it with the provided or the next active device, in the context
 * where this function called, there should be always be another device (or
 * this_dev) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
					    struct btrfs_device *next_device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;

	if (!next_device)
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
		fs_info->fs_devices->latest_dev = next_device;
}
/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}
static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
				     struct block_device *bdev, int copy_num)
{
	struct btrfs_super_block *disk_super;
	const size_t len = sizeof(disk_super->magic);
	const u64 bytenr = btrfs_sb_offset(copy_num);
	int ret;

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
	if (IS_ERR(disk_super))
		return;

	memset(&disk_super->magic, 0, len);
	folio_mark_dirty(virt_to_folio(disk_super));
	btrfs_release_disk_super(disk_super);

	ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
	if (ret)
		btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
			   copy_num, ret);
}
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device)
{
	int copy_num;
	struct block_device *bdev = device->bdev;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
		if (bdev_is_zoned(bdev)) {
			btrfs_reset_sb_log_zones(bdev, copy_num);
			continue;
		}

		btrfs_scratch_superblock(fs_info, bdev, copy_num);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device->name->str);
}
2238 int btrfs_rm_device(struct btrfs_fs_info
*fs_info
,
2239 struct btrfs_dev_lookup_args
*args
,
2240 struct file
**bdev_file
)
2242 struct btrfs_trans_handle
*trans
;
2243 struct btrfs_device
*device
;
2244 struct btrfs_fs_devices
*cur_devices
;
2245 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2249 if (btrfs_fs_incompat(fs_info
, EXTENT_TREE_V2
)) {
2250 btrfs_err(fs_info
, "device remove not supported on extent tree v2 yet");
2255 * The device list in fs_devices is accessed without locks (neither
2256 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2257 * filesystem and another device rm cannot run.
2259 num_devices
= btrfs_num_devices(fs_info
);
2261 ret
= btrfs_check_raid_min_devices(fs_info
, num_devices
- 1);
2265 device
= btrfs_find_device(fs_info
->fs_devices
, args
);
2268 ret
= BTRFS_ERROR_DEV_MISSING_NOT_FOUND
;
2274 if (btrfs_pinned_by_swapfile(fs_info
, device
)) {
2275 btrfs_warn_in_rcu(fs_info
,
2276 "cannot remove device %s (devid %llu) due to active swapfile",
2277 btrfs_dev_name(device
), device
->devid
);
2281 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
))
2282 return BTRFS_ERROR_DEV_TGT_REPLACE
;
2284 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
) &&
2285 fs_info
->fs_devices
->rw_devices
== 1)
2286 return BTRFS_ERROR_DEV_ONLY_WRITABLE
;
2288 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
2289 mutex_lock(&fs_info
->chunk_mutex
);
2290 list_del_init(&device
->dev_alloc_list
);
2291 device
->fs_devices
->rw_devices
--;
2292 mutex_unlock(&fs_info
->chunk_mutex
);
2295 ret
= btrfs_shrink_device(device
, 0);
2299 trans
= btrfs_start_transaction(fs_info
->chunk_root
, 0);
2300 if (IS_ERR(trans
)) {
2301 ret
= PTR_ERR(trans
);
2305 ret
= btrfs_rm_dev_item(trans
, device
);
2307 /* Any error in dev item removal is critical */
2309 "failed to remove device item for devid %llu: %d",
2310 device
->devid
, ret
);
2311 btrfs_abort_transaction(trans
, ret
);
2312 btrfs_end_transaction(trans
);
2316 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA
, &device
->dev_state
);
2317 btrfs_scrub_cancel_dev(device
);
2320 * the device list mutex makes sure that we don't change
2321 * the device list while someone else is writing out all
2322 * the device supers. Whoever is writing all supers, should
2323 * lock the device list mutex before getting the number of
2324 * devices in the super block (super_copy). Conversely,
2325 * whoever updates the number of devices in the super block
2326 * (super_copy) should hold the device list mutex.
2330 * In normal cases the cur_devices == fs_devices. But in case
2331 * of deleting a seed device, the cur_devices should point to
2332 * its own fs_devices listed under the fs_devices->seed_list.
2334 cur_devices
= device
->fs_devices
;
2335 mutex_lock(&fs_devices
->device_list_mutex
);
2336 list_del_rcu(&device
->dev_list
);
2338 cur_devices
->num_devices
--;
2339 cur_devices
->total_devices
--;
2340 /* Update total_devices of the parent fs_devices if it's seed */
2341 if (cur_devices
!= fs_devices
)
2342 fs_devices
->total_devices
--;
2344 if (test_bit(BTRFS_DEV_STATE_MISSING
, &device
->dev_state
))
2345 cur_devices
->missing_devices
--;
2347 btrfs_assign_next_active_device(device
, NULL
);
2349 if (device
->bdev_file
) {
2350 cur_devices
->open_devices
--;
2351 /* remove sysfs entry */
2352 btrfs_sysfs_remove_device(device
);
2355 num_devices
= btrfs_super_num_devices(fs_info
->super_copy
) - 1;
2356 btrfs_set_super_num_devices(fs_info
->super_copy
, num_devices
);
2357 mutex_unlock(&fs_devices
->device_list_mutex
);
2360 * At this point, the device is zero sized and detached from the
2361 * devices list. All that's left is to zero out the old supers and
	 * We cannot call btrfs_close_bdev() here because we're holding the sb
	 * write lock, and fput() on the block device will pull in the
	 * ->open_mutex on the block device and its dependencies. Instead
	 * just flush the device and let the caller do the final bdev_release.
2369 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
2370 btrfs_scratch_superblocks(fs_info
, device
);
2372 sync_blockdev(device
->bdev
);
2373 invalidate_bdev(device
->bdev
);
2377 *bdev_file
= device
->bdev_file
;
2379 btrfs_free_device(device
);
2382 * This can happen if cur_devices is the private seed devices list. We
2383 * cannot call close_fs_devices() here because it expects the uuid_mutex
2384 * to be held, but in fact we don't need that for the private
2385 * seed_devices, we can simply decrement cur_devices->opened and then
2386 * remove it from our list and free the fs_devices.
2388 if (cur_devices
->num_devices
== 0) {
2389 list_del_init(&cur_devices
->seed_list
);
2390 ASSERT(cur_devices
->opened
== 1);
2391 cur_devices
->opened
--;
2392 free_fs_devices(cur_devices
);
2395 ret
= btrfs_commit_transaction(trans
);
2400 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
2401 mutex_lock(&fs_info
->chunk_mutex
);
2402 list_add(&device
->dev_alloc_list
,
2403 &fs_devices
->alloc_list
);
2404 device
->fs_devices
->rw_devices
++;
2405 mutex_unlock(&fs_info
->chunk_mutex
);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * In case of an fs with no seed, srcdev->fs_devices will point
	 * to the fs_devices of fs_info. However, when the dev being replaced
	 * is a seed dev it will point to the seed's local fs_devices. In
	 * short, srcdev will have its correct fs_devices in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	mutex_lock(&uuid_mutex);

	btrfs_close_bdev(srcdev);
	synchronize_rcu();
	btrfs_free_device(srcdev);

	/* If there are no devices left, delete the fs_devices as well. */
	if (!fs_devices->num_devices) {
		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target is added to the sprout FS, so there will be no more
		 * device left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		list_del_init(&fs_devices->seed_list);
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}
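/*
 * Tear down a device replace target that is no longer needed: unlink it from
 * the device lists, scratch its superblocks so it is not scanned as a btrfs
 * device again, then close and free it.
 */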
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_remove_device(tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev);

	btrfs_close_bdev(tgtdev);
	synchronize_rcu();
	btrfs_free_device(tgtdev);
}
/*
 * Populate args from device at path.
 *
 * @fs_info:	the filesystem
 * @args:	the args to populate
 * @path:	the path to the device
 *
 * This will read the super block of the device at @path and populate @args with
 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to
 * lookup a device to operate on, but need to do it before we take any locks.
 * This properly handles the special case of "missing" that a user may pass in,
 * and does some basic sanity checks. The caller must make sure that @path is
 * properly NUL terminated before calling in, and must call
 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
 * uuid.
 *
 * Return: 0 for success, -errno for failure
 */
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path)
{
	struct btrfs_super_block *disk_super;
	struct file *bdev_file;
	int ret;

	if (!path || !path[0])
		return -EINVAL;
	if (!strcmp(path, "missing")) {
		args->missing = true;
		return 0;
	}

	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
	if (!args->uuid || !args->fsid) {
		btrfs_put_dev_args_from_path(args);
		return -ENOMEM;
	}

	ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0,
				    &bdev_file, &disk_super);
	if (ret) {
		btrfs_put_dev_args_from_path(args);
		return ret;
	}

	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
	else
		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);
	return 0;
}
/*
 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables
 * that don't need to be freed.
 */
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
{
	kfree(args->uuid);
	kfree(args->fsid);
	args->uuid = NULL;
	args->fsid = NULL;
}
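/*
 * Look a device up either by devid (when non-zero) or by the user supplied
 * path, including the special "missing" path that is handled by
 * btrfs_get_dev_args_from_path(). Returns the device or an ERR_PTR.
 */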
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *device;
	int ret;

	if (devid) {
		args.devid = devid;
		device = btrfs_find_device(fs_info->fs_devices, &args);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
	if (ret)
		return ERR_PTR(ret);
	device = btrfs_find_device(fs_info->fs_devices, &args);
	btrfs_put_dev_args_from_path(&args);
	if (!device)
		return ERR_PTR(-ENOENT);
	return device;
}
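/*
 * Prepare the seed -> sprout transition: keep a clone of the original seed
 * fs_devices on the global fs_uuids list and return a private copy that will
 * later hold the seed devices, anchored at fs_info->fs_devices->seed_list.
 * Must be called with uuid_mutex held.
 */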
2587 static struct btrfs_fs_devices
*btrfs_init_sprout(struct btrfs_fs_info
*fs_info
)
2589 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2590 struct btrfs_fs_devices
*old_devices
;
2591 struct btrfs_fs_devices
*seed_devices
;
2593 lockdep_assert_held(&uuid_mutex
);
2594 if (!fs_devices
->seeding
)
2595 return ERR_PTR(-EINVAL
);
2598 * Private copy of the seed devices, anchored at
2599 * fs_info->fs_devices->seed_list
2601 seed_devices
= alloc_fs_devices(NULL
);
2602 if (IS_ERR(seed_devices
))
2603 return seed_devices
;
2606 * It's necessary to retain a copy of the original seed fs_devices in
2607 * fs_uuids so that filesystems which have been seeded can successfully
2608 * reference the seed device from open_seed_devices. This also supports
2611 old_devices
= clone_fs_devices(fs_devices
);
2612 if (IS_ERR(old_devices
)) {
2613 kfree(seed_devices
);
2617 list_add(&old_devices
->fs_list
, &fs_uuids
);
2619 memcpy(seed_devices
, fs_devices
, sizeof(*seed_devices
));
2620 seed_devices
->opened
= 1;
2621 INIT_LIST_HEAD(&seed_devices
->devices
);
2622 INIT_LIST_HEAD(&seed_devices
->alloc_list
);
2623 mutex_init(&seed_devices
->device_list_mutex
);
2625 return seed_devices
;
2629 * Splice seed devices into the sprout fs_devices.
2630 * Generate a new fsid for the sprouted read-write filesystem.
2632 static void btrfs_setup_sprout(struct btrfs_fs_info
*fs_info
,
2633 struct btrfs_fs_devices
*seed_devices
)
2635 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2636 struct btrfs_super_block
*disk_super
= fs_info
->super_copy
;
2637 struct btrfs_device
*device
;
2641 * We are updating the fsid, the thread leading to device_list_add()
2642 * could race, so uuid_mutex is needed.
2644 lockdep_assert_held(&uuid_mutex
);
2647 * The threads listed below may traverse dev_list but can do that without
2648 * device_list_mutex:
2649 * - All device ops and balance - as we are in btrfs_exclop_start.
2650 * - Various dev_list readers - are using RCU.
2651 * - btrfs_ioctl_fitrim() - is using RCU.
2653 * For-read threads as below are using device_list_mutex:
2654 * - Readonly scrub btrfs_scrub_dev()
2655 * - Readonly scrub btrfs_scrub_progress()
2656 * - btrfs_get_dev_stats()
2658 lockdep_assert_held(&fs_devices
->device_list_mutex
);
2660 list_splice_init_rcu(&fs_devices
->devices
, &seed_devices
->devices
,
2662 list_for_each_entry(device
, &seed_devices
->devices
, dev_list
)
2663 device
->fs_devices
= seed_devices
;
2665 fs_devices
->seeding
= false;
2666 fs_devices
->num_devices
= 0;
2667 fs_devices
->open_devices
= 0;
2668 fs_devices
->missing_devices
= 0;
2669 fs_devices
->rotating
= false;
2670 list_add(&seed_devices
->seed_list
, &fs_devices
->seed_list
);
2672 generate_random_uuid(fs_devices
->fsid
);
2673 memcpy(fs_devices
->metadata_uuid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
);
2674 memcpy(disk_super
->fsid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
);
2676 super_flags
= btrfs_super_flags(disk_super
) &
2677 ~BTRFS_SUPER_FLAG_SEEDING
;
2678 btrfs_set_super_flags(disk_super
, super_flags
);
2682 * Store the expected generation for seed devices in device items.
2684 static int btrfs_finish_sprout(struct btrfs_trans_handle
*trans
)
2686 BTRFS_DEV_LOOKUP_ARGS(args
);
2687 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
2688 struct btrfs_root
*root
= fs_info
->chunk_root
;
2689 struct btrfs_path
*path
;
2690 struct extent_buffer
*leaf
;
2691 struct btrfs_dev_item
*dev_item
;
2692 struct btrfs_device
*device
;
2693 struct btrfs_key key
;
2694 u8 fs_uuid
[BTRFS_FSID_SIZE
];
2695 u8 dev_uuid
[BTRFS_UUID_SIZE
];
2698 path
= btrfs_alloc_path();
2702 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2704 key
.type
= BTRFS_DEV_ITEM_KEY
;
2707 btrfs_reserve_chunk_metadata(trans
, false);
2708 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
2709 btrfs_trans_release_chunk_metadata(trans
);
2713 leaf
= path
->nodes
[0];
2715 if (path
->slots
[0] >= btrfs_header_nritems(leaf
)) {
2716 ret
= btrfs_next_leaf(root
, path
);
2721 leaf
= path
->nodes
[0];
2722 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2723 btrfs_release_path(path
);
2727 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2728 if (key
.objectid
!= BTRFS_DEV_ITEMS_OBJECTID
||
2729 key
.type
!= BTRFS_DEV_ITEM_KEY
)
2732 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
2733 struct btrfs_dev_item
);
2734 args
.devid
= btrfs_device_id(leaf
, dev_item
);
2735 read_extent_buffer(leaf
, dev_uuid
, btrfs_device_uuid(dev_item
),
2737 read_extent_buffer(leaf
, fs_uuid
, btrfs_device_fsid(dev_item
),
2739 args
.uuid
= dev_uuid
;
2740 args
.fsid
= fs_uuid
;
2741 device
= btrfs_find_device(fs_info
->fs_devices
, &args
);
2742 BUG_ON(!device
); /* Logic error */
2744 if (device
->fs_devices
->seeding
) {
2745 btrfs_set_device_generation(leaf
, dev_item
,
2746 device
->generation
);
2747 btrfs_mark_buffer_dirty(trans
, leaf
);
2755 btrfs_free_path(path
);
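/*
 * Add the device at @device_path to the mounted filesystem: open it, create
 * the in-memory btrfs_device, link it into the device lists, insert its dev
 * item and grow the superblock totals. When adding to a seed filesystem this
 * also sprouts a new writable filesystem on top of the seed and relocates the
 * system chunks once the transaction has committed.
 */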
2759 int btrfs_init_new_device(struct btrfs_fs_info
*fs_info
, const char *device_path
)
2761 struct btrfs_root
*root
= fs_info
->dev_root
;
2762 struct btrfs_trans_handle
*trans
;
2763 struct btrfs_device
*device
;
2764 struct file
*bdev_file
;
2765 struct super_block
*sb
= fs_info
->sb
;
2766 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2767 struct btrfs_fs_devices
*seed_devices
= NULL
;
2768 u64 orig_super_total_bytes
;
2769 u64 orig_super_num_devices
;
2771 bool seeding_dev
= false;
2772 bool locked
= false;
2774 if (sb_rdonly(sb
) && !fs_devices
->seeding
)
2777 bdev_file
= bdev_file_open_by_path(device_path
, BLK_OPEN_WRITE
,
2778 fs_info
->bdev_holder
, NULL
);
2779 if (IS_ERR(bdev_file
))
2780 return PTR_ERR(bdev_file
);
2782 if (!btrfs_check_device_zone_type(fs_info
, file_bdev(bdev_file
))) {
2787 if (fs_devices
->seeding
) {
2789 down_write(&sb
->s_umount
);
2790 mutex_lock(&uuid_mutex
);
2794 sync_blockdev(file_bdev(bdev_file
));
2797 list_for_each_entry_rcu(device
, &fs_devices
->devices
, dev_list
) {
2798 if (device
->bdev
== file_bdev(bdev_file
)) {
2806 device
= btrfs_alloc_device(fs_info
, NULL
, NULL
, device_path
);
2807 if (IS_ERR(device
)) {
2808 /* we can safely leave the fs_devices entry around */
2809 ret
= PTR_ERR(device
);
2813 device
->fs_info
= fs_info
;
2814 device
->bdev_file
= bdev_file
;
2815 device
->bdev
= file_bdev(bdev_file
);
2816 ret
= lookup_bdev(device_path
, &device
->devt
);
2818 goto error_free_device
;
2820 ret
= btrfs_get_dev_zone_info(device
, false);
2822 goto error_free_device
;
2824 trans
= btrfs_start_transaction(root
, 0);
2825 if (IS_ERR(trans
)) {
2826 ret
= PTR_ERR(trans
);
2827 goto error_free_zone
;
2830 set_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
);
2831 device
->generation
= trans
->transid
;
2832 device
->io_width
= fs_info
->sectorsize
;
2833 device
->io_align
= fs_info
->sectorsize
;
2834 device
->sector_size
= fs_info
->sectorsize
;
2835 device
->total_bytes
=
2836 round_down(bdev_nr_bytes(device
->bdev
), fs_info
->sectorsize
);
2837 device
->disk_total_bytes
= device
->total_bytes
;
2838 device
->commit_total_bytes
= device
->total_bytes
;
2839 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA
, &device
->dev_state
);
2840 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
);
2841 device
->dev_stats_valid
= 1;
2842 set_blocksize(device
->bdev_file
, BTRFS_BDEV_BLOCKSIZE
);
2845 /* GFP_KERNEL allocation must not be under device_list_mutex */
2846 seed_devices
= btrfs_init_sprout(fs_info
);
2847 if (IS_ERR(seed_devices
)) {
2848 ret
= PTR_ERR(seed_devices
);
2849 btrfs_abort_transaction(trans
, ret
);
2854 mutex_lock(&fs_devices
->device_list_mutex
);
2856 btrfs_setup_sprout(fs_info
, seed_devices
);
2857 btrfs_assign_next_active_device(fs_info
->fs_devices
->latest_dev
,
2861 device
->fs_devices
= fs_devices
;
2863 mutex_lock(&fs_info
->chunk_mutex
);
2864 list_add_rcu(&device
->dev_list
, &fs_devices
->devices
);
2865 list_add(&device
->dev_alloc_list
, &fs_devices
->alloc_list
);
2866 fs_devices
->num_devices
++;
2867 fs_devices
->open_devices
++;
2868 fs_devices
->rw_devices
++;
2869 fs_devices
->total_devices
++;
2870 fs_devices
->total_rw_bytes
+= device
->total_bytes
;
2872 atomic64_add(device
->total_bytes
, &fs_info
->free_chunk_space
);
2874 if (!bdev_nonrot(device
->bdev
))
2875 fs_devices
->rotating
= true;
2877 orig_super_total_bytes
= btrfs_super_total_bytes(fs_info
->super_copy
);
2878 btrfs_set_super_total_bytes(fs_info
->super_copy
,
2879 round_down(orig_super_total_bytes
+ device
->total_bytes
,
2880 fs_info
->sectorsize
));
2882 orig_super_num_devices
= btrfs_super_num_devices(fs_info
->super_copy
);
2883 btrfs_set_super_num_devices(fs_info
->super_copy
,
2884 orig_super_num_devices
+ 1);
2887 * we've got more storage, clear any full flags on the space
2890 btrfs_clear_space_info_full(fs_info
);
2892 mutex_unlock(&fs_info
->chunk_mutex
);
2894 /* Add sysfs device entry */
2895 btrfs_sysfs_add_device(device
);
2897 mutex_unlock(&fs_devices
->device_list_mutex
);
2900 mutex_lock(&fs_info
->chunk_mutex
);
2901 ret
= init_first_rw_device(trans
);
2902 mutex_unlock(&fs_info
->chunk_mutex
);
2904 btrfs_abort_transaction(trans
, ret
);
2909 ret
= btrfs_add_dev_item(trans
, device
);
2911 btrfs_abort_transaction(trans
, ret
);
2916 ret
= btrfs_finish_sprout(trans
);
2918 btrfs_abort_transaction(trans
, ret
);
2923 * fs_devices now represents the newly sprouted filesystem and
2924 * its fsid has been changed by btrfs_sprout_splice().
2926 btrfs_sysfs_update_sprout_fsid(fs_devices
);
2929 ret
= btrfs_commit_transaction(trans
);
2932 mutex_unlock(&uuid_mutex
);
2933 up_write(&sb
->s_umount
);
2936 if (ret
) /* transaction commit */
2939 ret
= btrfs_relocate_sys_chunks(fs_info
);
2941 btrfs_handle_fs_error(fs_info
, ret
,
2942 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2943 trans
= btrfs_attach_transaction(root
);
2944 if (IS_ERR(trans
)) {
2945 if (PTR_ERR(trans
) == -ENOENT
)
2947 ret
= PTR_ERR(trans
);
2951 ret
= btrfs_commit_transaction(trans
);
2955 * Now that we have written a new super block to this device, check all
2956 * other fs_devices list if device_path alienates any other scanned
2958 * We can ignore the return value as it typically returns -EINVAL and
2959 * only succeeds if the device was an alien.
2961 btrfs_forget_devices(device
->devt
);
2963 /* Update ctime/mtime for blkid or udev */
2964 update_dev_time(device_path
);
2969 btrfs_sysfs_remove_device(device
);
2970 mutex_lock(&fs_info
->fs_devices
->device_list_mutex
);
2971 mutex_lock(&fs_info
->chunk_mutex
);
2972 list_del_rcu(&device
->dev_list
);
2973 list_del(&device
->dev_alloc_list
);
2974 fs_info
->fs_devices
->num_devices
--;
2975 fs_info
->fs_devices
->open_devices
--;
2976 fs_info
->fs_devices
->rw_devices
--;
2977 fs_info
->fs_devices
->total_devices
--;
2978 fs_info
->fs_devices
->total_rw_bytes
-= device
->total_bytes
;
2979 atomic64_sub(device
->total_bytes
, &fs_info
->free_chunk_space
);
2980 btrfs_set_super_total_bytes(fs_info
->super_copy
,
2981 orig_super_total_bytes
);
2982 btrfs_set_super_num_devices(fs_info
->super_copy
,
2983 orig_super_num_devices
);
2984 mutex_unlock(&fs_info
->chunk_mutex
);
2985 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
2988 btrfs_end_transaction(trans
);
2990 btrfs_destroy_dev_zone_info(device
);
2992 btrfs_free_device(device
);
2996 mutex_unlock(&uuid_mutex
);
2997 up_write(&sb
->s_umount
);
3002 static noinline
int btrfs_update_device(struct btrfs_trans_handle
*trans
,
3003 struct btrfs_device
*device
)
3006 struct btrfs_path
*path
;
3007 struct btrfs_root
*root
= device
->fs_info
->chunk_root
;
3008 struct btrfs_dev_item
*dev_item
;
3009 struct extent_buffer
*leaf
;
3010 struct btrfs_key key
;
3012 path
= btrfs_alloc_path();
3016 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
3017 key
.type
= BTRFS_DEV_ITEM_KEY
;
3018 key
.offset
= device
->devid
;
3020 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
3029 leaf
= path
->nodes
[0];
3030 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_dev_item
);
3032 btrfs_set_device_id(leaf
, dev_item
, device
->devid
);
3033 btrfs_set_device_type(leaf
, dev_item
, device
->type
);
3034 btrfs_set_device_io_align(leaf
, dev_item
, device
->io_align
);
3035 btrfs_set_device_io_width(leaf
, dev_item
, device
->io_width
);
3036 btrfs_set_device_sector_size(leaf
, dev_item
, device
->sector_size
);
3037 btrfs_set_device_total_bytes(leaf
, dev_item
,
3038 btrfs_device_get_disk_total_bytes(device
));
3039 btrfs_set_device_bytes_used(leaf
, dev_item
,
3040 btrfs_device_get_bytes_used(device
));
3041 btrfs_mark_buffer_dirty(trans
, leaf
);
3044 btrfs_free_path(path
);
3048 int btrfs_grow_device(struct btrfs_trans_handle
*trans
,
3049 struct btrfs_device
*device
, u64 new_size
)
3051 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
3052 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
3057 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
))
3060 new_size
= round_down(new_size
, fs_info
->sectorsize
);
3062 mutex_lock(&fs_info
->chunk_mutex
);
3063 old_total
= btrfs_super_total_bytes(super_copy
);
3064 diff
= round_down(new_size
- device
->total_bytes
, fs_info
->sectorsize
);
3066 if (new_size
<= device
->total_bytes
||
3067 test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
)) {
3068 mutex_unlock(&fs_info
->chunk_mutex
);
3072 btrfs_set_super_total_bytes(super_copy
,
3073 round_down(old_total
+ diff
, fs_info
->sectorsize
));
3074 device
->fs_devices
->total_rw_bytes
+= diff
;
3075 atomic64_add(diff
, &fs_info
->free_chunk_space
);
3077 btrfs_device_set_total_bytes(device
, new_size
);
3078 btrfs_device_set_disk_total_bytes(device
, new_size
);
3079 btrfs_clear_space_info_full(device
->fs_info
);
3080 if (list_empty(&device
->post_commit_list
))
3081 list_add_tail(&device
->post_commit_list
,
3082 &trans
->transaction
->dev_update_list
);
3083 mutex_unlock(&fs_info
->chunk_mutex
);
3085 btrfs_reserve_chunk_metadata(trans
, false);
3086 ret
= btrfs_update_device(trans
, device
);
3087 btrfs_trans_release_chunk_metadata(trans
);
3092 static int btrfs_free_chunk(struct btrfs_trans_handle
*trans
, u64 chunk_offset
)
3094 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
3095 struct btrfs_root
*root
= fs_info
->chunk_root
;
3097 struct btrfs_path
*path
;
3098 struct btrfs_key key
;
3100 path
= btrfs_alloc_path();
3104 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
3105 key
.offset
= chunk_offset
;
3106 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
3108 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
3111 else if (ret
> 0) { /* Logic error or corruption */
3112 btrfs_err(fs_info
, "failed to lookup chunk %llu when freeing",
3114 btrfs_abort_transaction(trans
, -ENOENT
);
3119 ret
= btrfs_del_item(trans
, root
, path
);
3121 btrfs_err(fs_info
, "failed to delete chunk %llu item", chunk_offset
);
3122 btrfs_abort_transaction(trans
, ret
);
3126 btrfs_free_path(path
);
3130 static int btrfs_del_sys_chunk(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
)
3132 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
3133 struct btrfs_disk_key
*disk_key
;
3134 struct btrfs_chunk
*chunk
;
3141 struct btrfs_key key
;
3143 lockdep_assert_held(&fs_info
->chunk_mutex
);
3144 array_size
= btrfs_super_sys_array_size(super_copy
);
3146 ptr
= super_copy
->sys_chunk_array
;
3149 while (cur
< array_size
) {
3150 disk_key
= (struct btrfs_disk_key
*)ptr
;
3151 btrfs_disk_key_to_cpu(&key
, disk_key
);
3153 len
= sizeof(*disk_key
);
3155 if (key
.type
== BTRFS_CHUNK_ITEM_KEY
) {
3156 chunk
= (struct btrfs_chunk
*)(ptr
+ len
);
3157 num_stripes
= btrfs_stack_chunk_num_stripes(chunk
);
3158 len
+= btrfs_chunk_item_size(num_stripes
);
3163 if (key
.objectid
== BTRFS_FIRST_CHUNK_TREE_OBJECTID
&&
3164 key
.offset
== chunk_offset
) {
3165 memmove(ptr
, ptr
+ len
, array_size
- (cur
+ len
));
3167 btrfs_set_super_sys_array_size(super_copy
, array_size
);
3176 struct btrfs_chunk_map
*btrfs_find_chunk_map_nolock(struct btrfs_fs_info
*fs_info
,
3177 u64 logical
, u64 length
)
3179 struct rb_node
*node
= fs_info
->mapping_tree
.rb_root
.rb_node
;
3180 struct rb_node
*prev
= NULL
;
3181 struct rb_node
*orig_prev
;
3182 struct btrfs_chunk_map
*map
;
3183 struct btrfs_chunk_map
*prev_map
= NULL
;
3186 map
= rb_entry(node
, struct btrfs_chunk_map
, rb_node
);
3190 if (logical
< map
->start
) {
3191 node
= node
->rb_left
;
3192 } else if (logical
>= map
->start
+ map
->chunk_len
) {
3193 node
= node
->rb_right
;
3195 refcount_inc(&map
->refs
);
3204 while (prev
&& logical
>= prev_map
->start
+ prev_map
->chunk_len
) {
3205 prev
= rb_next(prev
);
3206 prev_map
= rb_entry(prev
, struct btrfs_chunk_map
, rb_node
);
3211 prev_map
= rb_entry(prev
, struct btrfs_chunk_map
, rb_node
);
3212 while (prev
&& logical
< prev_map
->start
) {
3213 prev
= rb_prev(prev
);
3214 prev_map
= rb_entry(prev
, struct btrfs_chunk_map
, rb_node
);
3219 u64 end
= logical
+ length
;
3222 * Caller can pass a U64_MAX length when it wants to get any
3223 * chunk starting at an offset of 'logical' or higher, so deal
3224 * with underflow by resetting the end offset to U64_MAX.
3229 if (end
> prev_map
->start
&&
3230 logical
< prev_map
->start
+ prev_map
->chunk_len
) {
3231 refcount_inc(&prev_map
->refs
);
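/*
 * Locked variant of the lookup above: find the chunk mapping covering the
 * range [@logical, @logical + @length) while holding the mapping tree lock
 * for reading.
 */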
struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
					     u64 logical, u64 length)
{
	struct btrfs_chunk_map *map;

	read_lock(&fs_info->mapping_tree_lock);
	map = btrfs_find_chunk_map_nolock(fs_info, logical, length);
	read_unlock(&fs_info->mapping_tree_lock);

	return map;
}
/*
 * Find the mapping containing the given logical extent.
 *
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
					    u64 logical, u64 length)
{
	struct btrfs_chunk_map *map;

	map = btrfs_find_chunk_map(fs_info, logical, length);

	if (unlikely(!map)) {
		btrfs_crit(fs_info,
			   "unable to find chunk map for logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(map->start > logical ||
		     map->start + map->chunk_len <= logical)) {
		btrfs_crit(fs_info,
			   "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
			   logical, logical + length, map->start,
			   map->start + map->chunk_len);
		btrfs_free_chunk_map(map);
		return ERR_PTR(-EINVAL);
	}

	/* Callers are responsible for dropping the reference. */
	return map;
}
static int remove_chunk_item(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map, u64 chunk_offset)
{
	int i;

	/*
	 * Removing chunk items and updating the device items in the chunks btree
	 * requires holding the chunk_mutex.
	 * See the comment at btrfs_chunk_alloc() for the details.
	 */
	lockdep_assert_held(&trans->fs_info->chunk_mutex);

	for (i = 0; i < map->num_stripes; i++) {
		int ret;

		ret = btrfs_update_device(trans, map->stripes[i].dev);
		if (ret)
			return ret;
	}

	return btrfs_free_chunk(trans, chunk_offset);
}
3309 int btrfs_remove_chunk(struct btrfs_trans_handle
*trans
, u64 chunk_offset
)
3311 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
3312 struct btrfs_chunk_map
*map
;
3313 u64 dev_extent_len
= 0;
3315 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
3317 map
= btrfs_get_chunk_map(fs_info
, chunk_offset
, 1);
3320 * This is a logic error, but we don't want to just rely on the
3321 * user having built with ASSERT enabled, so if ASSERT doesn't
3322 * do anything we still error out.
3325 return PTR_ERR(map
);
3329 * First delete the device extent items from the devices btree.
3330 * We take the device_list_mutex to avoid racing with the finishing phase
3331 * of a device replace operation. See the comment below before acquiring
3332 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3333 * because that can result in a deadlock when deleting the device extent
3334 * items from the devices btree - COWing an extent buffer from the btree
3335 * may result in allocating a new metadata chunk, which would attempt to
3336 * lock again fs_info->chunk_mutex.
3338 mutex_lock(&fs_devices
->device_list_mutex
);
3339 for (i
= 0; i
< map
->num_stripes
; i
++) {
3340 struct btrfs_device
*device
= map
->stripes
[i
].dev
;
3341 ret
= btrfs_free_dev_extent(trans
, device
,
3342 map
->stripes
[i
].physical
,
3345 mutex_unlock(&fs_devices
->device_list_mutex
);
3346 btrfs_abort_transaction(trans
, ret
);
3350 if (device
->bytes_used
> 0) {
3351 mutex_lock(&fs_info
->chunk_mutex
);
3352 btrfs_device_set_bytes_used(device
,
3353 device
->bytes_used
- dev_extent_len
);
3354 atomic64_add(dev_extent_len
, &fs_info
->free_chunk_space
);
3355 btrfs_clear_space_info_full(fs_info
);
3356 mutex_unlock(&fs_info
->chunk_mutex
);
3359 mutex_unlock(&fs_devices
->device_list_mutex
);
3362 * We acquire fs_info->chunk_mutex for 2 reasons:
3364 * 1) Just like with the first phase of the chunk allocation, we must
3365 * reserve system space, do all chunk btree updates and deletions, and
3366 * update the system chunk array in the superblock while holding this
3367 * mutex. This is for similar reasons as explained on the comment at
3368 * the top of btrfs_chunk_alloc();
3370 * 2) Prevent races with the final phase of a device replace operation
3371 * that replaces the device object associated with the map's stripes,
3372 * because the device object's id can change at any time during that
3373 * final phase of the device replace operation
3374 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3375 * replaced device and then see it with an ID of
3376 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
 * the device item, which does not exist in the chunk btree.
3378 * The finishing phase of device replace acquires both the
3379 * device_list_mutex and the chunk_mutex, in that order, so we are
3380 * safe by just acquiring the chunk_mutex.
3382 trans
->removing_chunk
= true;
3383 mutex_lock(&fs_info
->chunk_mutex
);
3385 check_system_chunk(trans
, map
->type
);
3387 ret
= remove_chunk_item(trans
, map
, chunk_offset
);
3389 * Normally we should not get -ENOSPC since we reserved space before
3390 * through the call to check_system_chunk().
3392 * Despite our system space_info having enough free space, we may not
3393 * be able to allocate extents from its block groups, because all have
3394 * an incompatible profile, which will force us to allocate a new system
3395 * block group with the right profile, or right after we called
3396 * check_system_space() above, a scrub turned the only system block group
3397 * with enough free space into RO mode.
3398 * This is explained with more detail at do_chunk_alloc().
3400 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3402 if (ret
== -ENOSPC
) {
3403 const u64 sys_flags
= btrfs_system_alloc_profile(fs_info
);
3404 struct btrfs_block_group
*sys_bg
;
3406 sys_bg
= btrfs_create_chunk(trans
, sys_flags
);
3407 if (IS_ERR(sys_bg
)) {
3408 ret
= PTR_ERR(sys_bg
);
3409 btrfs_abort_transaction(trans
, ret
);
3413 ret
= btrfs_chunk_alloc_add_chunk_item(trans
, sys_bg
);
3415 btrfs_abort_transaction(trans
, ret
);
3419 ret
= remove_chunk_item(trans
, map
, chunk_offset
);
3421 btrfs_abort_transaction(trans
, ret
);
3425 btrfs_abort_transaction(trans
, ret
);
3429 trace_btrfs_chunk_free(fs_info
, map
, chunk_offset
, map
->chunk_len
);
3431 if (map
->type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
3432 ret
= btrfs_del_sys_chunk(fs_info
, chunk_offset
);
3434 btrfs_abort_transaction(trans
, ret
);
3439 mutex_unlock(&fs_info
->chunk_mutex
);
3440 trans
->removing_chunk
= false;
3443 * We are done with chunk btree updates and deletions, so release the
3444 * system space we previously reserved (with check_system_chunk()).
3446 btrfs_trans_release_chunk_metadata(trans
);
3448 ret
= btrfs_remove_block_group(trans
, map
);
3450 btrfs_abort_transaction(trans
, ret
);
3455 if (trans
->removing_chunk
) {
3456 mutex_unlock(&fs_info
->chunk_mutex
);
3457 trans
->removing_chunk
= false;
3460 btrfs_free_chunk_map(map
);
3464 int btrfs_relocate_chunk(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
)
3466 struct btrfs_root
*root
= fs_info
->chunk_root
;
3467 struct btrfs_trans_handle
*trans
;
3468 struct btrfs_block_group
*block_group
;
3472 if (btrfs_fs_incompat(fs_info
, EXTENT_TREE_V2
)) {
3474 "relocate: not supported on extent tree v2 yet");
3479 * Prevent races with automatic removal of unused block groups.
3480 * After we relocate and before we remove the chunk with offset
3481 * chunk_offset, automatic removal of the block group can kick in,
3482 * resulting in a failure when calling btrfs_remove_chunk() below.
3484 * Make sure to acquire this mutex before doing a tree search (dev
3485 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3486 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3487 * we release the path used to search the chunk/dev tree and before
3488 * the current task acquires this mutex and calls us.
3490 lockdep_assert_held(&fs_info
->reclaim_bgs_lock
);
3492 /* step one, relocate all the extents inside this chunk */
3493 btrfs_scrub_pause(fs_info
);
3494 ret
= btrfs_relocate_block_group(fs_info
, chunk_offset
);
3495 btrfs_scrub_continue(fs_info
);
3498 * If we had a transaction abort, stop all running scrubs.
3499 * See transaction.c:cleanup_transaction() why we do it here.
3501 if (BTRFS_FS_ERROR(fs_info
))
3502 btrfs_scrub_cancel(fs_info
);
3506 block_group
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3509 btrfs_discard_cancel_work(&fs_info
->discard_ctl
, block_group
);
3510 length
= block_group
->length
;
3511 btrfs_put_block_group(block_group
);
3514 * On a zoned file system, discard the whole block group, this will
3515 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3516 * resetting the zone fails, don't treat it as a fatal problem from the
3517 * filesystem's point of view.
3519 if (btrfs_is_zoned(fs_info
)) {
3520 ret
= btrfs_discard_extent(fs_info
, chunk_offset
, length
, NULL
);
3523 "failed to reset zone %llu after relocation",
3527 trans
= btrfs_start_trans_remove_block_group(root
->fs_info
,
3529 if (IS_ERR(trans
)) {
3530 ret
= PTR_ERR(trans
);
3531 btrfs_handle_fs_error(root
->fs_info
, ret
, NULL
);
3536 * step two, delete the device extents and the
3537 * chunk tree entries
3539 ret
= btrfs_remove_chunk(trans
, chunk_offset
);
3540 btrfs_end_transaction(trans
);
3544 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info
*fs_info
)
3546 struct btrfs_root
*chunk_root
= fs_info
->chunk_root
;
3547 struct btrfs_path
*path
;
3548 struct extent_buffer
*leaf
;
3549 struct btrfs_chunk
*chunk
;
3550 struct btrfs_key key
;
3551 struct btrfs_key found_key
;
3553 bool retried
= false;
3557 path
= btrfs_alloc_path();
3562 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
3563 key
.offset
= (u64
)-1;
3564 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
3567 mutex_lock(&fs_info
->reclaim_bgs_lock
);
3568 ret
= btrfs_search_slot(NULL
, chunk_root
, &key
, path
, 0, 0);
3570 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
3575 * On the first search we would find chunk tree with
3576 * offset -1, which is not possible. On subsequent
3577 * loops this would find an existing item on an invalid
3578 * offset (one less than the previous one, wrong
3579 * alignment and size).
3582 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
3586 ret
= btrfs_previous_item(chunk_root
, path
, key
.objectid
,
3589 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
3595 leaf
= path
->nodes
[0];
3596 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
3598 chunk
= btrfs_item_ptr(leaf
, path
->slots
[0],
3599 struct btrfs_chunk
);
3600 chunk_type
= btrfs_chunk_type(leaf
, chunk
);
3601 btrfs_release_path(path
);
3603 if (chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
3604 ret
= btrfs_relocate_chunk(fs_info
, found_key
.offset
);
3610 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
3612 if (found_key
.offset
== 0)
3614 key
.offset
= found_key
.offset
- 1;
3617 if (failed
&& !retried
) {
3621 } else if (WARN_ON(failed
&& retried
)) {
3625 btrfs_free_path(path
);
3630 * return 1 : allocate a data chunk successfully,
3631 * return <0: errors during allocating a data chunk,
3632 * return 0 : no need to allocate a data chunk.
3634 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info
*fs_info
,
3637 struct btrfs_block_group
*cache
;
3641 cache
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3643 chunk_type
= cache
->flags
;
3644 btrfs_put_block_group(cache
);
3646 if (!(chunk_type
& BTRFS_BLOCK_GROUP_DATA
))
3649 spin_lock(&fs_info
->data_sinfo
->lock
);
3650 bytes_used
= fs_info
->data_sinfo
->bytes_used
;
3651 spin_unlock(&fs_info
->data_sinfo
->lock
);
3654 struct btrfs_trans_handle
*trans
;
3657 trans
= btrfs_join_transaction(fs_info
->tree_root
);
3659 return PTR_ERR(trans
);
3661 ret
= btrfs_force_chunk_alloc(trans
, BTRFS_BLOCK_GROUP_DATA
);
3662 btrfs_end_transaction(trans
);
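/*
 * Helpers converting balance args between the on-disk little-endian layout
 * (struct btrfs_disk_balance_args) and the native CPU layout
 * (struct btrfs_balance_args).
 */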
static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
					   const struct btrfs_disk_balance_args *disk)
{
	memset(cpu, 0, sizeof(*cpu));

	cpu->profiles = le64_to_cpu(disk->profiles);
	cpu->usage = le64_to_cpu(disk->usage);
	cpu->devid = le64_to_cpu(disk->devid);
	cpu->pstart = le64_to_cpu(disk->pstart);
	cpu->pend = le64_to_cpu(disk->pend);
	cpu->vstart = le64_to_cpu(disk->vstart);
	cpu->vend = le64_to_cpu(disk->vend);
	cpu->target = le64_to_cpu(disk->target);
	cpu->flags = le64_to_cpu(disk->flags);
	cpu->limit = le64_to_cpu(disk->limit);
	cpu->stripes_min = le32_to_cpu(disk->stripes_min);
	cpu->stripes_max = le32_to_cpu(disk->stripes_max);
}
static void btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
					   const struct btrfs_balance_args *cpu)
{
	memset(disk, 0, sizeof(*disk));

	disk->profiles = cpu_to_le64(cpu->profiles);
	disk->usage = cpu_to_le64(cpu->usage);
	disk->devid = cpu_to_le64(cpu->devid);
	disk->pstart = cpu_to_le64(cpu->pstart);
	disk->pend = cpu_to_le64(cpu->pend);
	disk->vstart = cpu_to_le64(cpu->vstart);
	disk->vend = cpu_to_le64(cpu->vend);
	disk->target = cpu_to_le64(cpu->target);
	disk->flags = cpu_to_le64(cpu->flags);
	disk->limit = cpu_to_le64(cpu->limit);
	disk->stripes_min = cpu_to_le32(cpu->stripes_min);
	disk->stripes_max = cpu_to_le32(cpu->stripes_max);
}
3709 static int insert_balance_item(struct btrfs_fs_info
*fs_info
,
3710 struct btrfs_balance_control
*bctl
)
3712 struct btrfs_root
*root
= fs_info
->tree_root
;
3713 struct btrfs_trans_handle
*trans
;
3714 struct btrfs_balance_item
*item
;
3715 struct btrfs_disk_balance_args disk_bargs
;
3716 struct btrfs_path
*path
;
3717 struct extent_buffer
*leaf
;
3718 struct btrfs_key key
;
3721 path
= btrfs_alloc_path();
3725 trans
= btrfs_start_transaction(root
, 0);
3726 if (IS_ERR(trans
)) {
3727 btrfs_free_path(path
);
3728 return PTR_ERR(trans
);
3731 key
.objectid
= BTRFS_BALANCE_OBJECTID
;
3732 key
.type
= BTRFS_TEMPORARY_ITEM_KEY
;
3735 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
3740 leaf
= path
->nodes
[0];
3741 item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_balance_item
);
3743 memzero_extent_buffer(leaf
, (unsigned long)item
, sizeof(*item
));
3745 btrfs_cpu_balance_args_to_disk(&disk_bargs
, &bctl
->data
);
3746 btrfs_set_balance_data(leaf
, item
, &disk_bargs
);
3747 btrfs_cpu_balance_args_to_disk(&disk_bargs
, &bctl
->meta
);
3748 btrfs_set_balance_meta(leaf
, item
, &disk_bargs
);
3749 btrfs_cpu_balance_args_to_disk(&disk_bargs
, &bctl
->sys
);
3750 btrfs_set_balance_sys(leaf
, item
, &disk_bargs
);
3752 btrfs_set_balance_flags(leaf
, item
, bctl
->flags
);
3754 btrfs_mark_buffer_dirty(trans
, leaf
);
3756 btrfs_free_path(path
);
3757 err
= btrfs_commit_transaction(trans
);
3763 static int del_balance_item(struct btrfs_fs_info
*fs_info
)
3765 struct btrfs_root
*root
= fs_info
->tree_root
;
3766 struct btrfs_trans_handle
*trans
;
3767 struct btrfs_path
*path
;
3768 struct btrfs_key key
;
3771 path
= btrfs_alloc_path();
3775 trans
= btrfs_start_transaction_fallback_global_rsv(root
, 0);
3776 if (IS_ERR(trans
)) {
3777 btrfs_free_path(path
);
3778 return PTR_ERR(trans
);
3781 key
.objectid
= BTRFS_BALANCE_OBJECTID
;
3782 key
.type
= BTRFS_TEMPORARY_ITEM_KEY
;
3785 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
3793 ret
= btrfs_del_item(trans
, root
, path
);
3795 btrfs_free_path(path
);
3796 err
= btrfs_commit_transaction(trans
);
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used. The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full. Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}
/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	ASSERT(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}
/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}
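/*
 * The usage range filter keeps a chunk for balancing only when its used bytes
 * fall inside the configured usage range (percentages of the block group
 * length); everything else is filtered out.
 */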
3881 static int chunk_usage_range_filter(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
,
3882 struct btrfs_balance_args
*bargs
)
3884 struct btrfs_block_group
*cache
;
3886 u64 user_thresh_min
;
3887 u64 user_thresh_max
;
3890 cache
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3891 chunk_used
= cache
->used
;
3893 if (bargs
->usage_min
== 0)
3894 user_thresh_min
= 0;
3896 user_thresh_min
= mult_perc(cache
->length
, bargs
->usage_min
);
3898 if (bargs
->usage_max
== 0)
3899 user_thresh_max
= 1;
3900 else if (bargs
->usage_max
> 100)
3901 user_thresh_max
= cache
->length
;
3903 user_thresh_max
= mult_perc(cache
->length
, bargs
->usage_max
);
3905 if (user_thresh_min
<= chunk_used
&& chunk_used
< user_thresh_max
)
3908 btrfs_put_block_group(cache
);
3912 static int chunk_usage_filter(struct btrfs_fs_info
*fs_info
,
3913 u64 chunk_offset
, struct btrfs_balance_args
*bargs
)
3915 struct btrfs_block_group
*cache
;
3916 u64 chunk_used
, user_thresh
;
3919 cache
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3920 chunk_used
= cache
->used
;
3922 if (bargs
->usage_min
== 0)
3924 else if (bargs
->usage
> 100)
3925 user_thresh
= cache
->length
;
3927 user_thresh
= mult_perc(cache
->length
, bargs
->usage
);
3929 if (chunk_used
< user_thresh
)
3932 btrfs_put_block_group(cache
);
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}
static u64 calc_data_stripes(u64 type, int num_stripes)
{
	const int index = btrfs_bg_flags_to_raid_index(type);
	const int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;

	return (num_stripes - nparity) / ncopies;
}
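/*
 * For example, a RAID10 chunk with 4 stripes has (4 - 0) / 2 = 2 data
 * stripes, while a RAID6 chunk with 6 stripes has (6 - 2) / 1 = 4.
 */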
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	u64 type;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	type = btrfs_chunk_type(leaf, chunk);
	factor = calc_data_stripes(type, num_stripes);

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}
static int chunk_stripes_range_filter(struct extent_buffer *leaf,
				      struct btrfs_chunk *chunk,
				      struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}
static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}
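/*
 * The filters below are applied in order: profiles, usage (or usage range),
 * devid, drange, vrange, stripes range, soft convert and finally the limit
 * filters. A chunk is balanced only if it passes every enabled filter.
 */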
4040 static int should_balance_chunk(struct extent_buffer
*leaf
,
4041 struct btrfs_chunk
*chunk
, u64 chunk_offset
)
4043 struct btrfs_fs_info
*fs_info
= leaf
->fs_info
;
4044 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
4045 struct btrfs_balance_args
*bargs
= NULL
;
4046 u64 chunk_type
= btrfs_chunk_type(leaf
, chunk
);
4049 if (!((chunk_type
& BTRFS_BLOCK_GROUP_TYPE_MASK
) &
4050 (bctl
->flags
& BTRFS_BALANCE_TYPE_MASK
))) {
4054 if (chunk_type
& BTRFS_BLOCK_GROUP_DATA
)
4055 bargs
= &bctl
->data
;
4056 else if (chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
)
4058 else if (chunk_type
& BTRFS_BLOCK_GROUP_METADATA
)
4059 bargs
= &bctl
->meta
;
4061 /* profiles filter */
4062 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_PROFILES
) &&
4063 chunk_profiles_filter(chunk_type
, bargs
)) {
4068 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_USAGE
) &&
4069 chunk_usage_filter(fs_info
, chunk_offset
, bargs
)) {
4071 } else if ((bargs
->flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
) &&
4072 chunk_usage_range_filter(fs_info
, chunk_offset
, bargs
)) {
4077 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_DEVID
) &&
4078 chunk_devid_filter(leaf
, chunk
, bargs
)) {
4082 /* drange filter, makes sense only with devid filter */
4083 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_DRANGE
) &&
4084 chunk_drange_filter(leaf
, chunk
, bargs
)) {
4089 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_VRANGE
) &&
4090 chunk_vrange_filter(leaf
, chunk
, chunk_offset
, bargs
)) {
4094 /* stripes filter */
4095 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_STRIPES_RANGE
) &&
4096 chunk_stripes_range_filter(leaf
, chunk
, bargs
)) {
4100 /* soft profile changing mode */
4101 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_SOFT
) &&
4102 chunk_soft_convert_filter(chunk_type
, bargs
)) {
4107 * limited by count, must be the last filter
4109 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_LIMIT
)) {
4110 if (bargs
->limit
== 0)
4114 } else if ((bargs
->flags
& BTRFS_BALANCE_ARGS_LIMIT_RANGE
)) {
4116 * Same logic as the 'limit' filter; the minimum cannot be
4117 * determined here because we do not have the global information
4118 * about the count of all chunks that satisfy the filters.
4120 if (bargs
->limit_max
== 0)
4129 static int __btrfs_balance(struct btrfs_fs_info
*fs_info
)
4131 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
4132 struct btrfs_root
*chunk_root
= fs_info
->chunk_root
;
4134 struct btrfs_chunk
*chunk
;
4135 struct btrfs_path
*path
= NULL
;
4136 struct btrfs_key key
;
4137 struct btrfs_key found_key
;
4138 struct extent_buffer
*leaf
;
4141 int enospc_errors
= 0;
4142 bool counting
= true;
4143 /* The single value limit and min/max limits use the same bytes in the */
4144 u64 limit_data
= bctl
->data
.limit
;
4145 u64 limit_meta
= bctl
->meta
.limit
;
4146 u64 limit_sys
= bctl
->sys
.limit
;
4150 int chunk_reserved
= 0;
4152 path
= btrfs_alloc_path();
4158 /* zero out stat counters */
4159 spin_lock(&fs_info
->balance_lock
);
4160 memset(&bctl
->stat
, 0, sizeof(bctl
->stat
));
4161 spin_unlock(&fs_info
->balance_lock
);
4165 * The single value limit and min/max limits use the same bytes
4168 bctl
->data
.limit
= limit_data
;
4169 bctl
->meta
.limit
= limit_meta
;
4170 bctl
->sys
.limit
= limit_sys
;
4172 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
4173 key
.offset
= (u64
)-1;
4174 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
4177 if ((!counting
&& atomic_read(&fs_info
->balance_pause_req
)) ||
4178 atomic_read(&fs_info
->balance_cancel_req
)) {
4183 mutex_lock(&fs_info
->reclaim_bgs_lock
);
4184 ret
= btrfs_search_slot(NULL
, chunk_root
, &key
, path
, 0, 0);
4186 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
4191 * this shouldn't happen, it means the last relocate
4195 BUG(); /* FIXME break ? */
4197 ret
= btrfs_previous_item(chunk_root
, path
, 0,
4198 BTRFS_CHUNK_ITEM_KEY
);
4200 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
4205 leaf
= path
->nodes
[0];
4206 slot
= path
->slots
[0];
4207 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
4209 if (found_key
.objectid
!= key
.objectid
) {
4210 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
4214 chunk
= btrfs_item_ptr(leaf
, slot
, struct btrfs_chunk
);
4215 chunk_type
= btrfs_chunk_type(leaf
, chunk
);
4218 spin_lock(&fs_info
->balance_lock
);
4219 bctl
->stat
.considered
++;
4220 spin_unlock(&fs_info
->balance_lock
);
4223 ret
= should_balance_chunk(leaf
, chunk
, found_key
.offset
);
4225 btrfs_release_path(path
);
4227 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
4232 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
4233 spin_lock(&fs_info
->balance_lock
);
4234 bctl
->stat
.expected
++;
4235 spin_unlock(&fs_info
->balance_lock
);
4237 if (chunk_type
& BTRFS_BLOCK_GROUP_DATA
)
4239 else if (chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
)
4241 else if (chunk_type
& BTRFS_BLOCK_GROUP_METADATA
)
4248 * Apply limit_min filter, no need to check if the LIMITS
4249 * filter is used, limit_min is 0 by default
4251 if (((chunk_type
& BTRFS_BLOCK_GROUP_DATA
) &&
4252 count_data
< bctl
->data
.limit_min
)
4253 || ((chunk_type
& BTRFS_BLOCK_GROUP_METADATA
) &&
4254 count_meta
< bctl
->meta
.limit_min
)
4255 || ((chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
) &&
4256 count_sys
< bctl
->sys
.limit_min
)) {
4257 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
4261 if (!chunk_reserved
) {
4263 * We may be relocating the only data chunk we have,
4264 * which could potentially end up with losing data's
4265 * raid profile, so lets allocate an empty one in
4268 ret
= btrfs_may_alloc_data_chunk(fs_info
,
4271 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
4273 } else if (ret
== 1) {
4278 ret
= btrfs_relocate_chunk(fs_info
, found_key
.offset
);
4279 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
4280 if (ret
== -ENOSPC
) {
4282 } else if (ret
== -ETXTBSY
) {
4284 "skipping relocation of block group %llu due to active swapfile",
4290 spin_lock(&fs_info
->balance_lock
);
4291 bctl
->stat
.completed
++;
4292 spin_unlock(&fs_info
->balance_lock
);
4295 if (found_key
.offset
== 0)
4297 key
.offset
= found_key
.offset
- 1;
4301 btrfs_release_path(path
);
4306 btrfs_free_path(path
);
4307 if (enospc_errors
) {
4308 btrfs_info(fs_info
, "%d enospc errors during balance",
/*
 * See if a given profile is valid and reduced.
 *
 * @flags:     profile to validate
 * @extended:  if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
		     BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	return has_single_bit_set(flags);
}
/*
 * Validate target profile against allowed profiles and return true if it's OK.
 * Otherwise print the error message and return false.
 */
static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
					   const struct btrfs_balance_args *bargs,
					   u64 allowed, const char *type)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return true;

	/* Profile is valid and does not have bits outside of the allowed set */
	if (alloc_profile_is_valid(bargs->target, 1) &&
	    (bargs->target & ~allowed) == 0)
		return true;

	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
		  type, btrfs_bg_type_to_raid_name(bargs->target));
	return false;
}
4363 * Fill @buf with textual description of balance filter flags @bargs, up to
4364 * @size_buf including the terminating null. The output may be trimmed if it
4365 * does not fit into the provided buffer.
4367 static void describe_balance_args(struct btrfs_balance_args
*bargs
, char *buf
,
4371 u32 size_bp
= size_buf
;
4373 u64 flags
= bargs
->flags
;
4374 char tmp_buf
[128] = {'\0'};
4379 #define CHECK_APPEND_NOARG(a) \
4381 ret = snprintf(bp, size_bp, (a)); \
4382 if (ret < 0 || ret >= size_bp) \
4383 goto out_overflow; \
4388 #define CHECK_APPEND_1ARG(a, v1) \
4390 ret = snprintf(bp, size_bp, (a), (v1)); \
4391 if (ret < 0 || ret >= size_bp) \
4392 goto out_overflow; \
4397 #define CHECK_APPEND_2ARG(a, v1, v2) \
4399 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
4400 if (ret < 0 || ret >= size_bp) \
4401 goto out_overflow; \
4406 if (flags
& BTRFS_BALANCE_ARGS_CONVERT
)
4407 CHECK_APPEND_1ARG("convert=%s,",
4408 btrfs_bg_type_to_raid_name(bargs
->target
));
4410 if (flags
& BTRFS_BALANCE_ARGS_SOFT
)
4411 CHECK_APPEND_NOARG("soft,");
4413 if (flags
& BTRFS_BALANCE_ARGS_PROFILES
) {
4414 btrfs_describe_block_groups(bargs
->profiles
, tmp_buf
,
4416 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf
);
4419 if (flags
& BTRFS_BALANCE_ARGS_USAGE
)
4420 CHECK_APPEND_1ARG("usage=%llu,", bargs
->usage
);
4422 if (flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
)
4423 CHECK_APPEND_2ARG("usage=%u..%u,",
4424 bargs
->usage_min
, bargs
->usage_max
);
4426 if (flags
& BTRFS_BALANCE_ARGS_DEVID
)
4427 CHECK_APPEND_1ARG("devid=%llu,", bargs
->devid
);
4429 if (flags
& BTRFS_BALANCE_ARGS_DRANGE
)
4430 CHECK_APPEND_2ARG("drange=%llu..%llu,",
4431 bargs
->pstart
, bargs
->pend
);
4433 if (flags
& BTRFS_BALANCE_ARGS_VRANGE
)
4434 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4435 bargs
->vstart
, bargs
->vend
);
4437 if (flags
& BTRFS_BALANCE_ARGS_LIMIT
)
4438 CHECK_APPEND_1ARG("limit=%llu,", bargs
->limit
);
4440 if (flags
& BTRFS_BALANCE_ARGS_LIMIT_RANGE
)
4441 CHECK_APPEND_2ARG("limit=%u..%u,",
4442 bargs
->limit_min
, bargs
->limit_max
);
4444 if (flags
& BTRFS_BALANCE_ARGS_STRIPES_RANGE
)
4445 CHECK_APPEND_2ARG("stripes=%u..%u,",
4446 bargs
->stripes_min
, bargs
->stripes_max
);
4448 #undef CHECK_APPEND_2ARG
4449 #undef CHECK_APPEND_1ARG
4450 #undef CHECK_APPEND_NOARG
4454 if (size_bp
< size_buf
)
4455 buf
[size_buf
- size_bp
- 1] = '\0'; /* remove last , */
static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
        u32 size_buf = 1024;
        char tmp_buf[192] = {'\0'};
        char *buf;
        char *bp;
        u32 size_bp = size_buf;
        int ret;
        struct btrfs_balance_control *bctl = fs_info->balance_ctl;

        buf = kzalloc(size_buf, GFP_KERNEL);
        if (!buf)
                return;

        bp = buf;

#define CHECK_APPEND_1ARG(a, v1)                                        \
        do {                                                            \
                ret = snprintf(bp, size_bp, (a), (v1));                 \
                if (ret < 0 || ret >= size_bp)                          \
                        goto out_overflow;                              \
                size_bp -= ret;                                         \
                bp += ret;                                              \
        } while (0)

        if (bctl->flags & BTRFS_BALANCE_FORCE)
                CHECK_APPEND_1ARG("%s", "-f ");

        if (bctl->flags & BTRFS_BALANCE_DATA) {
                describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
                CHECK_APPEND_1ARG("-d%s ", tmp_buf);
        }

        if (bctl->flags & BTRFS_BALANCE_METADATA) {
                describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
                CHECK_APPEND_1ARG("-m%s ", tmp_buf);
        }

        if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
                describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
                CHECK_APPEND_1ARG("-s%s ", tmp_buf);
        }

#undef CHECK_APPEND_1ARG

out_overflow:

        if (size_bp < size_buf)
                buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
        btrfs_info(fs_info, "balance: %s %s",
                   (bctl->flags & BTRFS_BALANCE_RESUME) ?
                   "resume" : "start", buf);

        kfree(buf);
}
 * Should be called with balance mutex held.
 */
4519 int btrfs_balance(struct btrfs_fs_info
*fs_info
,
4520 struct btrfs_balance_control
*bctl
,
4521 struct btrfs_ioctl_balance_args
*bargs
)
4523 u64 meta_target
, data_target
;
4529 bool reducing_redundancy
;
4530 bool paused
= false;
4533 if (btrfs_fs_closing(fs_info
) ||
4534 atomic_read(&fs_info
->balance_pause_req
) ||
4535 btrfs_should_cancel_balance(fs_info
)) {
4540 allowed
= btrfs_super_incompat_flags(fs_info
->super_copy
);
4541 if (allowed
& BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS
)
4545 * In case of mixed groups both data and meta should be picked,
4546 * and identical options should be given for both of them.
4548 allowed
= BTRFS_BALANCE_DATA
| BTRFS_BALANCE_METADATA
;
4549 if (mixed
&& (bctl
->flags
& allowed
)) {
4550 if (!(bctl
->flags
& BTRFS_BALANCE_DATA
) ||
4551 !(bctl
->flags
& BTRFS_BALANCE_METADATA
) ||
4552 memcmp(&bctl
->data
, &bctl
->meta
, sizeof(bctl
->data
))) {
4554 "balance: mixed groups data and metadata options must be the same");
4561 * rw_devices will not change at the moment, device add/delete/replace
4564 num_devices
= fs_info
->fs_devices
->rw_devices
;
4567 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4568 * special bit for it, to make it easier to distinguish. Thus we need
4569 * to set it manually, or balance would refuse the profile.
4571 allowed
= BTRFS_AVAIL_ALLOC_BIT_SINGLE
;
4572 for (i
= 0; i
< ARRAY_SIZE(btrfs_raid_array
); i
++)
4573 if (num_devices
>= btrfs_raid_array
[i
].devs_min
)
4574 allowed
|= btrfs_raid_array
[i
].bg_flag
;
4576 if (!validate_convert_profile(fs_info
, &bctl
->data
, allowed
, "data") ||
4577 !validate_convert_profile(fs_info
, &bctl
->meta
, allowed
, "metadata") ||
4578 !validate_convert_profile(fs_info
, &bctl
->sys
, allowed
, "system")) {
4584 * Allow to reduce metadata or system integrity only if force set for
4585 * profiles with redundancy (copies, parity)
4588 for (i
= 0; i
< ARRAY_SIZE(btrfs_raid_array
); i
++) {
4589 if (btrfs_raid_array
[i
].ncopies
>= 2 ||
4590 btrfs_raid_array
[i
].tolerated_failures
>= 1)
4591 allowed
|= btrfs_raid_array
[i
].bg_flag
;
4594 seq
= read_seqbegin(&fs_info
->profiles_lock
);
4596 if (((bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) &&
4597 (fs_info
->avail_system_alloc_bits
& allowed
) &&
4598 !(bctl
->sys
.target
& allowed
)) ||
4599 ((bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) &&
4600 (fs_info
->avail_metadata_alloc_bits
& allowed
) &&
4601 !(bctl
->meta
.target
& allowed
)))
4602 reducing_redundancy
= true;
4604 reducing_redundancy
= false;
4606 /* if we're not converting, the target field is uninitialized */
4607 meta_target
= (bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) ?
4608 bctl
->meta
.target
: fs_info
->avail_metadata_alloc_bits
;
4609 data_target
= (bctl
->data
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) ?
4610 bctl
->data
.target
: fs_info
->avail_data_alloc_bits
;
4611 } while (read_seqretry(&fs_info
->profiles_lock
, seq
));
4613 if (reducing_redundancy
) {
4614 if (bctl
->flags
& BTRFS_BALANCE_FORCE
) {
4616 "balance: force reducing metadata redundancy");
4619 "balance: reduces metadata redundancy, use --force if you want this");
4625 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target
) <
4626 btrfs_get_num_tolerated_disk_barrier_failures(data_target
)) {
4628 "balance: metadata profile %s has lower redundancy than data profile %s",
4629 btrfs_bg_type_to_raid_name(meta_target
),
4630 btrfs_bg_type_to_raid_name(data_target
));
4633 ret
= insert_balance_item(fs_info
, bctl
);
4634 if (ret
&& ret
!= -EEXIST
)
4637 if (!(bctl
->flags
& BTRFS_BALANCE_RESUME
)) {
4638 BUG_ON(ret
== -EEXIST
);
4639 BUG_ON(fs_info
->balance_ctl
);
4640 spin_lock(&fs_info
->balance_lock
);
4641 fs_info
->balance_ctl
= bctl
;
4642 spin_unlock(&fs_info
->balance_lock
);
4644 BUG_ON(ret
!= -EEXIST
);
4645 spin_lock(&fs_info
->balance_lock
);
4646 update_balance_args(bctl
);
4647 spin_unlock(&fs_info
->balance_lock
);
4650 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
));
4651 set_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
);
4652 describe_balance_start_or_resume(fs_info
);
4653 mutex_unlock(&fs_info
->balance_mutex
);
4655 ret
= __btrfs_balance(fs_info
);
4657 mutex_lock(&fs_info
->balance_mutex
);
4658 if (ret
== -ECANCELED
&& atomic_read(&fs_info
->balance_pause_req
)) {
4659 btrfs_info(fs_info
, "balance: paused");
4660 btrfs_exclop_balance(fs_info
, BTRFS_EXCLOP_BALANCE_PAUSED
);
4664 * Balance can be canceled by:
4666 * - Regular cancel request
4667 * Then ret == -ECANCELED and balance_cancel_req > 0
4669 * - Fatal signal to "btrfs" process
4670 * Either the signal caught by wait_reserve_ticket() and callers
4671 * got -EINTR, or caught by btrfs_should_cancel_balance() and
4673 * Either way, in this case balance_cancel_req = 0, and
4674 * ret == -EINTR or ret == -ECANCELED.
4676 * So here we only check the return value to catch canceled balance.
4678 else if (ret
== -ECANCELED
|| ret
== -EINTR
)
4679 btrfs_info(fs_info
, "balance: canceled");
4681 btrfs_info(fs_info
, "balance: ended with status: %d", ret
);
4683 clear_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
);
4686 memset(bargs
, 0, sizeof(*bargs
));
4687 btrfs_update_ioctl_balance_args(fs_info
, bargs
);
4690 /* We didn't pause, we can clean everything up. */
4692 reset_balance_state(fs_info
);
4693 btrfs_exclop_finish(fs_info
);
4696 wake_up(&fs_info
->balance_wait_q
);
4700 if (bctl
->flags
& BTRFS_BALANCE_RESUME
)
4701 reset_balance_state(fs_info
);
4704 btrfs_exclop_finish(fs_info
);
static int balance_kthread(void *data)
{
        struct btrfs_fs_info *fs_info = data;
        int ret = 0;

        sb_start_write(fs_info->sb);
        mutex_lock(&fs_info->balance_mutex);
        if (fs_info->balance_ctl)
                ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
        mutex_unlock(&fs_info->balance_mutex);
        sb_end_write(fs_info->sb);

        return ret;
}
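/*
 * Resume a previously interrupted balance in the background.  The actual work
 * happens in balance_kthread(); here we only check that a balance control was
 * recovered, honor the skip_balance mount option and hand the exclusive
 * operation state over from "balance paused" to "balance" before starting the
 * kthread.
 */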
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
        struct task_struct *tsk;

        mutex_lock(&fs_info->balance_mutex);
        if (!fs_info->balance_ctl) {
                mutex_unlock(&fs_info->balance_mutex);
                return 0;
        }
        mutex_unlock(&fs_info->balance_mutex);

        if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
                btrfs_info(fs_info, "balance: resume skipped");
                return 0;
        }

        spin_lock(&fs_info->super_lock);
        ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
        fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
        spin_unlock(&fs_info->super_lock);
        /*
         * A ro->rw remount sequence should continue with the paused balance
         * regardless of who pauses it, system or the user as of now, so set
         * the resume flag.
         */
        spin_lock(&fs_info->balance_lock);
        fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
        spin_unlock(&fs_info->balance_lock);

        tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
        return PTR_ERR_OR_ZERO(tsk);
}
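/*
 * Read the balance item from the tree root, if one exists, and re-create the
 * in-memory balance control with BTRFS_BALANCE_RESUME set so that a later
 * resume can pick it up.  Called during mount.
 */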
4757 int btrfs_recover_balance(struct btrfs_fs_info
*fs_info
)
4759 struct btrfs_balance_control
*bctl
;
4760 struct btrfs_balance_item
*item
;
4761 struct btrfs_disk_balance_args disk_bargs
;
4762 struct btrfs_path
*path
;
4763 struct extent_buffer
*leaf
;
4764 struct btrfs_key key
;
4767 path
= btrfs_alloc_path();
4771 key
.objectid
= BTRFS_BALANCE_OBJECTID
;
4772 key
.type
= BTRFS_TEMPORARY_ITEM_KEY
;
4775 ret
= btrfs_search_slot(NULL
, fs_info
->tree_root
, &key
, path
, 0, 0);
4778 if (ret
> 0) { /* ret = -ENOENT; */
4783 bctl
= kzalloc(sizeof(*bctl
), GFP_NOFS
);
4789 leaf
= path
->nodes
[0];
4790 item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_balance_item
);
4792 bctl
->flags
= btrfs_balance_flags(leaf
, item
);
4793 bctl
->flags
|= BTRFS_BALANCE_RESUME
;
4795 btrfs_balance_data(leaf
, item
, &disk_bargs
);
4796 btrfs_disk_balance_args_to_cpu(&bctl
->data
, &disk_bargs
);
4797 btrfs_balance_meta(leaf
, item
, &disk_bargs
);
4798 btrfs_disk_balance_args_to_cpu(&bctl
->meta
, &disk_bargs
);
4799 btrfs_balance_sys(leaf
, item
, &disk_bargs
);
4800 btrfs_disk_balance_args_to_cpu(&bctl
->sys
, &disk_bargs
);
4803 * This should never happen, as the paused balance state is recovered
4804 * during mount without any chance of other exclusive ops to collide.
4806 * This gives the exclusive op status to balance and keeps in paused
4807 * state until user intervention (cancel or umount). If the ownership
4808 * cannot be assigned, show a message but do not fail. The balance
4809 * is in a paused state and must have fs_info::balance_ctl properly
4812 if (!btrfs_exclop_start(fs_info
, BTRFS_EXCLOP_BALANCE_PAUSED
))
4814 "balance: cannot set exclusive op status, resume manually");
4816 btrfs_release_path(path
);
4818 mutex_lock(&fs_info
->balance_mutex
);
4819 BUG_ON(fs_info
->balance_ctl
);
4820 spin_lock(&fs_info
->balance_lock
);
4821 fs_info
->balance_ctl
= bctl
;
4822 spin_unlock(&fs_info
->balance_lock
);
4823 mutex_unlock(&fs_info
->balance_mutex
);
4825 btrfs_free_path(path
);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
        int ret = 0;

        mutex_lock(&fs_info->balance_mutex);
        if (!fs_info->balance_ctl) {
                mutex_unlock(&fs_info->balance_mutex);
                return -ENOTCONN;
        }

        if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
                atomic_inc(&fs_info->balance_pause_req);
                mutex_unlock(&fs_info->balance_mutex);

                wait_event(fs_info->balance_wait_q,
                           !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

                mutex_lock(&fs_info->balance_mutex);
                /* we are good with balance_ctl ripped off from under us */
                BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
                atomic_dec(&fs_info->balance_pause_req);
        } else {
                ret = -ENOTCONN;
        }

        mutex_unlock(&fs_info->balance_mutex);
        return ret;
}
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
        mutex_lock(&fs_info->balance_mutex);
        if (!fs_info->balance_ctl) {
                mutex_unlock(&fs_info->balance_mutex);
                return -ENOTCONN;
        }

        /*
         * A paused balance with the item stored on disk can be resumed at
         * mount time if the mount is read-write. Otherwise it's still paused
         * and we must not allow cancelling as it deletes the item.
         */
        if (sb_rdonly(fs_info->sb)) {
                mutex_unlock(&fs_info->balance_mutex);
                return -EROFS;
        }

        atomic_inc(&fs_info->balance_cancel_req);
        /*
         * if we are running just wait and return, balance item is
         * deleted in btrfs_balance in this case
         */
        if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
                mutex_unlock(&fs_info->balance_mutex);
                wait_event(fs_info->balance_wait_q,
                           !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
                mutex_lock(&fs_info->balance_mutex);
        } else {
                mutex_unlock(&fs_info->balance_mutex);
                /*
                 * Lock released to allow other waiters to continue, we'll
                 * reexamine the status again.
                 */
                mutex_lock(&fs_info->balance_mutex);

                if (fs_info->balance_ctl) {
                        reset_balance_state(fs_info);
                        btrfs_exclop_finish(fs_info);
                        btrfs_info(fs_info, "balance: canceled");
                }
        }

        ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
        atomic_dec(&fs_info->balance_cancel_req);
        mutex_unlock(&fs_info->balance_mutex);
        return 0;
}
/*
 * Shrinking a device means finding all of the device extents past the new
 * size, and then following the back refs to the chunks.  The chunk relocation
 * code actually frees the device extent.
 */
4912 int btrfs_shrink_device(struct btrfs_device
*device
, u64 new_size
)
4914 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
4915 struct btrfs_root
*root
= fs_info
->dev_root
;
4916 struct btrfs_trans_handle
*trans
;
4917 struct btrfs_dev_extent
*dev_extent
= NULL
;
4918 struct btrfs_path
*path
;
4924 bool retried
= false;
4925 struct extent_buffer
*l
;
4926 struct btrfs_key key
;
4927 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
4928 u64 old_total
= btrfs_super_total_bytes(super_copy
);
4929 u64 old_size
= btrfs_device_get_total_bytes(device
);
4934 new_size
= round_down(new_size
, fs_info
->sectorsize
);
4936 diff
= round_down(old_size
- new_size
, fs_info
->sectorsize
);
4938 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
))
4941 path
= btrfs_alloc_path();
4945 path
->reada
= READA_BACK
;
4947 trans
= btrfs_start_transaction(root
, 0);
4948 if (IS_ERR(trans
)) {
4949 btrfs_free_path(path
);
4950 return PTR_ERR(trans
);
4953 mutex_lock(&fs_info
->chunk_mutex
);
4955 btrfs_device_set_total_bytes(device
, new_size
);
4956 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
4957 device
->fs_devices
->total_rw_bytes
-= diff
;
4960 * The new free_chunk_space is new_size - used, so we have to
4961 * subtract the delta of the old free_chunk_space which included
4962 * old_size - used. If used > new_size then just subtract this
4963 * entire device's free space.
4965 if (device
->bytes_used
< new_size
)
4966 free_diff
= (old_size
- device
->bytes_used
) -
4967 (new_size
- device
->bytes_used
);
4969 free_diff
= old_size
- device
->bytes_used
;
4970 atomic64_sub(free_diff
, &fs_info
->free_chunk_space
);
4974 * Once the device's size has been set to the new size, ensure all
4975 * in-memory chunks are synced to disk so that the loop below sees them
4976 * and relocates them accordingly.
4978 if (contains_pending_extent(device
, &start
, diff
)) {
4979 mutex_unlock(&fs_info
->chunk_mutex
);
4980 ret
= btrfs_commit_transaction(trans
);
4984 mutex_unlock(&fs_info
->chunk_mutex
);
4985 btrfs_end_transaction(trans
);
4989 key
.objectid
= device
->devid
;
4990 key
.offset
= (u64
)-1;
4991 key
.type
= BTRFS_DEV_EXTENT_KEY
;
4994 mutex_lock(&fs_info
->reclaim_bgs_lock
);
4995 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
4997 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
5001 ret
= btrfs_previous_item(root
, path
, 0, key
.type
);
5003 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
5007 btrfs_release_path(path
);
5012 slot
= path
->slots
[0];
5013 btrfs_item_key_to_cpu(l
, &key
, path
->slots
[0]);
5015 if (key
.objectid
!= device
->devid
) {
5016 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
5017 btrfs_release_path(path
);
5021 dev_extent
= btrfs_item_ptr(l
, slot
, struct btrfs_dev_extent
);
5022 length
= btrfs_dev_extent_length(l
, dev_extent
);
5024 if (key
.offset
+ length
<= new_size
) {
5025 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
5026 btrfs_release_path(path
);
5030 chunk_offset
= btrfs_dev_extent_chunk_offset(l
, dev_extent
);
5031 btrfs_release_path(path
);
5034 * We may be relocating the only data chunk we have,
5035 * which could potentially end up with losing data's
5036 * raid profile, so lets allocate an empty one in
5039 ret
= btrfs_may_alloc_data_chunk(fs_info
, chunk_offset
);
5041 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
5045 ret
= btrfs_relocate_chunk(fs_info
, chunk_offset
);
5046 mutex_unlock(&fs_info
->reclaim_bgs_lock
);
5047 if (ret
== -ENOSPC
) {
5050 if (ret
== -ETXTBSY
) {
5052 "could not shrink block group %llu due to active swapfile",
5057 } while (key
.offset
-- > 0);
5059 if (failed
&& !retried
) {
5063 } else if (failed
&& retried
) {
5068 /* Shrinking succeeded, else we would be at "done". */
5069 trans
= btrfs_start_transaction(root
, 0);
5070 if (IS_ERR(trans
)) {
5071 ret
= PTR_ERR(trans
);
5075 mutex_lock(&fs_info
->chunk_mutex
);
5076 /* Clear all state bits beyond the shrunk device size */
5077 clear_extent_bits(&device
->alloc_state
, new_size
, (u64
)-1,
5080 btrfs_device_set_disk_total_bytes(device
, new_size
);
5081 if (list_empty(&device
->post_commit_list
))
5082 list_add_tail(&device
->post_commit_list
,
5083 &trans
->transaction
->dev_update_list
);
5085 WARN_ON(diff
> old_total
);
5086 btrfs_set_super_total_bytes(super_copy
,
5087 round_down(old_total
- diff
, fs_info
->sectorsize
));
5088 mutex_unlock(&fs_info
->chunk_mutex
);
5090 btrfs_reserve_chunk_metadata(trans
, false);
5091 /* Now btrfs_update_device() will change the on-disk size. */
5092 ret
= btrfs_update_device(trans
, device
);
5093 btrfs_trans_release_chunk_metadata(trans
);
5095 btrfs_abort_transaction(trans
, ret
);
5096 btrfs_end_transaction(trans
);
5098 ret
= btrfs_commit_transaction(trans
);
5101 btrfs_free_path(path
);
5103 mutex_lock(&fs_info
->chunk_mutex
);
5104 btrfs_device_set_total_bytes(device
, old_size
);
5105 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
5106 device
->fs_devices
->total_rw_bytes
+= diff
;
5107 atomic64_add(free_diff
, &fs_info
->free_chunk_space
);
5109 mutex_unlock(&fs_info
->chunk_mutex
);
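/*
 * Append one chunk item, preceded by its key, to the superblock's
 * sys_chunk_array.  The array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk + stripes) entries and is
 * limited to BTRFS_SYSTEM_CHUNK_ARRAY_SIZE bytes, hence the size check below.
 */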
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
                                  struct btrfs_key *key,
                                  struct btrfs_chunk *chunk, int item_size)
{
        struct btrfs_super_block *super_copy = fs_info->super_copy;
        struct btrfs_disk_key disk_key;
        u32 array_size;
        u8 *ptr;

        lockdep_assert_held(&fs_info->chunk_mutex);

        array_size = btrfs_super_sys_array_size(super_copy);
        if (array_size + item_size + sizeof(disk_key)
                        > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
                return -EFBIG;

        ptr = super_copy->sys_chunk_array + array_size;
        btrfs_cpu_key_to_disk(&disk_key, key);
        memcpy(ptr, &disk_key, sizeof(disk_key));
        ptr += sizeof(disk_key);
        memcpy(ptr, chunk, item_size);
        item_size += sizeof(disk_key);
        btrfs_set_super_sys_array_size(super_copy, array_size + item_size);

        return 0;
}
/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
        const struct btrfs_device_info *di_a = a;
        const struct btrfs_device_info *di_b = b;

        if (di_a->max_avail > di_b->max_avail)
                return -1;
        if (di_a->max_avail < di_b->max_avail)
                return 1;
        if (di_a->total_avail > di_b->total_avail)
                return -1;
        if (di_a->total_avail < di_b->total_avail)
                return 1;
        return 0;
}
static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
        if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
                return;

        btrfs_set_fs_incompat(info, RAID56);
}

static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
        if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
                return;

        btrfs_set_fs_incompat(info, RAID1C34);
}
/*
 * Structure used internally for btrfs_create_chunk() function.
 * Wraps needed parameters.
 */
struct alloc_chunk_ctl {
        u64 start;
        u64 type;
        /* Total number of stripes to allocate */
        int num_stripes;
        /* sub_stripes info for map */
        int sub_stripes;
        /* Stripes per device */
        int dev_stripes;
        /* Maximum number of devices to use */
        int devs_max;
        /* Minimum number of devices to use */
        int devs_min;
        /* ndevs has to be a multiple of this */
        int devs_increment;
        /* Number of copies */
        int ncopies;
        /* Number of stripes worth of bytes to store parity information */
        int nparity;
        u64 max_stripe_size;
        u64 max_chunk_size;
        u64 dev_extent_min;
        u64 stripe_size;
        u64 chunk_size;
        int ndevs;
};
static void init_alloc_chunk_ctl_policy_regular(
                                struct btrfs_fs_devices *fs_devices,
                                struct alloc_chunk_ctl *ctl)
{
        struct btrfs_space_info *space_info;

        space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
        ASSERT(space_info);

        ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
        ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);

        if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
                ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);

        /* We don't want a chunk larger than 10% of writable space */
        ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
                                  ctl->max_chunk_size);
        ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes);
}
5228 static void init_alloc_chunk_ctl_policy_zoned(
5229 struct btrfs_fs_devices
*fs_devices
,
5230 struct alloc_chunk_ctl
*ctl
)
5232 u64 zone_size
= fs_devices
->fs_info
->zone_size
;
5234 int min_num_stripes
= ctl
->devs_min
* ctl
->dev_stripes
;
5235 int min_data_stripes
= (min_num_stripes
- ctl
->nparity
) / ctl
->ncopies
;
5236 u64 min_chunk_size
= min_data_stripes
* zone_size
;
5237 u64 type
= ctl
->type
;
5239 ctl
->max_stripe_size
= zone_size
;
5240 if (type
& BTRFS_BLOCK_GROUP_DATA
) {
5241 ctl
->max_chunk_size
= round_down(BTRFS_MAX_DATA_CHUNK_SIZE
,
5243 } else if (type
& BTRFS_BLOCK_GROUP_METADATA
) {
5244 ctl
->max_chunk_size
= ctl
->max_stripe_size
;
5245 } else if (type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
5246 ctl
->max_chunk_size
= 2 * ctl
->max_stripe_size
;
5247 ctl
->devs_max
= min_t(int, ctl
->devs_max
,
5248 BTRFS_MAX_DEVS_SYS_CHUNK
);
5253 /* We don't want a chunk larger than 10% of writable space */
5254 limit
= max(round_down(mult_perc(fs_devices
->total_rw_bytes
, 10),
5257 ctl
->max_chunk_size
= min(limit
, ctl
->max_chunk_size
);
5258 ctl
->dev_extent_min
= zone_size
* ctl
->dev_stripes
;
static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
                                 struct alloc_chunk_ctl *ctl)
{
        int index = btrfs_bg_flags_to_raid_index(ctl->type);

        ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
        ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
        ctl->devs_max = btrfs_raid_array[index].devs_max;
        if (!ctl->devs_max)
                ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
        ctl->devs_min = btrfs_raid_array[index].devs_min;
        ctl->devs_increment = btrfs_raid_array[index].devs_increment;
        ctl->ncopies = btrfs_raid_array[index].ncopies;
        ctl->nparity = btrfs_raid_array[index].nparity;
        ctl->ndevs = 0;

        switch (fs_devices->chunk_alloc_policy) {
        case BTRFS_CHUNK_ALLOC_REGULAR:
                init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
                break;
        case BTRFS_CHUNK_ALLOC_ZONED:
                init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
                break;
        default:
                BUG();
        }
}
5289 static int gather_device_info(struct btrfs_fs_devices
*fs_devices
,
5290 struct alloc_chunk_ctl
*ctl
,
5291 struct btrfs_device_info
*devices_info
)
5293 struct btrfs_fs_info
*info
= fs_devices
->fs_info
;
5294 struct btrfs_device
*device
;
5296 u64 dev_extent_want
= ctl
->max_stripe_size
* ctl
->dev_stripes
;
5303 * in the first pass through the devices list, we gather information
5304 * about the available holes on each device.
5306 list_for_each_entry(device
, &fs_devices
->alloc_list
, dev_alloc_list
) {
5307 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
5309 "BTRFS: read-only device in alloc_list\n");
5313 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA
,
5314 &device
->dev_state
) ||
5315 test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
))
5318 if (device
->total_bytes
> device
->bytes_used
)
5319 total_avail
= device
->total_bytes
- device
->bytes_used
;
5323 /* If there is no space on this device, skip it. */
5324 if (total_avail
< ctl
->dev_extent_min
)
5327 ret
= find_free_dev_extent(device
, dev_extent_want
, &dev_offset
,
5329 if (ret
&& ret
!= -ENOSPC
)
5333 max_avail
= dev_extent_want
;
5335 if (max_avail
< ctl
->dev_extent_min
) {
5336 if (btrfs_test_opt(info
, ENOSPC_DEBUG
))
5338 "%s: devid %llu has no free space, have=%llu want=%llu",
5339 __func__
, device
->devid
, max_avail
,
5340 ctl
->dev_extent_min
);
5344 if (ndevs
== fs_devices
->rw_devices
) {
5345 WARN(1, "%s: found more than %llu devices\n",
5346 __func__
, fs_devices
->rw_devices
);
5349 devices_info
[ndevs
].dev_offset
= dev_offset
;
5350 devices_info
[ndevs
].max_avail
= max_avail
;
5351 devices_info
[ndevs
].total_avail
= total_avail
;
5352 devices_info
[ndevs
].dev
= device
;
5358 * now sort the devices by hole size / available space
5360 sort(devices_info
, ndevs
, sizeof(struct btrfs_device_info
),
5361 btrfs_cmp_device_info
, NULL
);
5366 static int decide_stripe_size_regular(struct alloc_chunk_ctl
*ctl
,
5367 struct btrfs_device_info
*devices_info
)
5369 /* Number of stripes that count for block group size */
5373 * The primary goal is to maximize the number of stripes, so use as
5374 * many devices as possible, even if the stripes are not maximum sized.
5376 * The DUP profile stores more than one stripe per device, the
5377 * max_avail is the total size so we have to adjust.
5379 ctl
->stripe_size
= div_u64(devices_info
[ctl
->ndevs
- 1].max_avail
,
5381 ctl
->num_stripes
= ctl
->ndevs
* ctl
->dev_stripes
;
5383 /* This will have to be fixed for RAID1 and RAID10 over more drives */
5384 data_stripes
= (ctl
->num_stripes
- ctl
->nparity
) / ctl
->ncopies
;
5387 * Use the number of data stripes to figure out how big this chunk is
5388 * really going to be in terms of logical address space, and compare
5389 * that answer with the max chunk size. If it's higher, we try to
5390 * reduce stripe_size.
5392 if (ctl
->stripe_size
* data_stripes
> ctl
->max_chunk_size
) {
5394 * Reduce stripe_size, round it up to a 16MB boundary again and
5395 * then use it, unless it ends up being even bigger than the
5396 * previous value we had already.
5398 ctl
->stripe_size
= min(round_up(div_u64(ctl
->max_chunk_size
,
5399 data_stripes
), SZ_16M
),
5403 /* Stripe size should not go beyond 1G. */
5404 ctl
->stripe_size
= min_t(u64
, ctl
->stripe_size
, SZ_1G
);
5406 /* Align to BTRFS_STRIPE_LEN */
5407 ctl
->stripe_size
= round_down(ctl
->stripe_size
, BTRFS_STRIPE_LEN
);
5408 ctl
->chunk_size
= ctl
->stripe_size
* data_stripes
;
5413 static int decide_stripe_size_zoned(struct alloc_chunk_ctl
*ctl
,
5414 struct btrfs_device_info
*devices_info
)
5416 u64 zone_size
= devices_info
[0].dev
->zone_info
->zone_size
;
5417 /* Number of stripes that count for block group size */
5421 * It should hold because:
5422 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
5424 ASSERT(devices_info
[ctl
->ndevs
- 1].max_avail
== ctl
->dev_extent_min
);
5426 ctl
->stripe_size
= zone_size
;
5427 ctl
->num_stripes
= ctl
->ndevs
* ctl
->dev_stripes
;
5428 data_stripes
= (ctl
->num_stripes
- ctl
->nparity
) / ctl
->ncopies
;
5430 /* stripe_size is fixed in zoned filesystem. Reduce ndevs instead. */
5431 if (ctl
->stripe_size
* data_stripes
> ctl
->max_chunk_size
) {
5432 ctl
->ndevs
= div_u64(div_u64(ctl
->max_chunk_size
* ctl
->ncopies
,
5433 ctl
->stripe_size
) + ctl
->nparity
,
5435 ctl
->num_stripes
= ctl
->ndevs
* ctl
->dev_stripes
;
5436 data_stripes
= (ctl
->num_stripes
- ctl
->nparity
) / ctl
->ncopies
;
5437 ASSERT(ctl
->stripe_size
* data_stripes
<= ctl
->max_chunk_size
);
5440 ctl
->chunk_size
= ctl
->stripe_size
* data_stripes
;
static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
                              struct alloc_chunk_ctl *ctl,
                              struct btrfs_device_info *devices_info)
{
        struct btrfs_fs_info *info = fs_devices->fs_info;

        /*
         * Round down to number of usable stripes, devs_increment can be any
         * number so we can't use round_down() that requires power of 2, while
         * rounddown is safe.
         */
        ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);

        if (ctl->ndevs < ctl->devs_min) {
                if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
                        btrfs_debug(info,
        "%s: not enough devices with free space: have=%d minimum required=%d",
                                    __func__, ctl->ndevs, ctl->devs_min);
                }
                return -ENOSPC;
        }

        ctl->ndevs = min(ctl->ndevs, ctl->devs_max);

        switch (fs_devices->chunk_alloc_policy) {
        case BTRFS_CHUNK_ALLOC_REGULAR:
                return decide_stripe_size_regular(ctl, devices_info);
        case BTRFS_CHUNK_ALLOC_ZONED:
                return decide_stripe_size_zoned(ctl, devices_info);
        default:
                BUG();
        }
}
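/*
 * Helpers to set or clear per-device extent state bits (e.g. CHUNK_ALLOCATED,
 * CHUNK_TRIMMED) over the physical range covered by every stripe of a chunk
 * map.
 */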
static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits)
{
        for (int i = 0; i < map->num_stripes; i++) {
                struct btrfs_io_stripe *stripe = &map->stripes[i];
                struct btrfs_device *device = stripe->dev;

                set_extent_bit(&device->alloc_state, stripe->physical,
                               stripe->physical + map->stripe_size - 1,
                               bits | EXTENT_NOWAIT, NULL);
        }
}

static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits)
{
        for (int i = 0; i < map->num_stripes; i++) {
                struct btrfs_io_stripe *stripe = &map->stripes[i];
                struct btrfs_device *device = stripe->dev;

                __clear_extent_bit(&device->alloc_state, stripe->physical,
                                   stripe->physical + map->stripe_size - 1,
                                   bits | EXTENT_NOWAIT,
                                   NULL, NULL);
        }
}
void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
{
        write_lock(&fs_info->mapping_tree_lock);
        rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
        RB_CLEAR_NODE(&map->rb_node);
        chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
        write_unlock(&fs_info->mapping_tree_lock);

        /* Once for the tree reference. */
        btrfs_free_chunk_map(map);
}
int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        bool leftmost = true;

        write_lock(&fs_info->mapping_tree_lock);
        p = &fs_info->mapping_tree.rb_root.rb_node;
        while (*p) {
                struct btrfs_chunk_map *entry;

                parent = *p;
                entry = rb_entry(parent, struct btrfs_chunk_map, rb_node);

                if (map->start < entry->start) {
                        p = &(*p)->rb_left;
                } else if (map->start > entry->start) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        write_unlock(&fs_info->mapping_tree_lock);
                        return -EEXIST;
                }
        }
        rb_link_node(&map->rb_node, parent, p);
        rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost);
        chunk_map_device_set_bits(map, CHUNK_ALLOCATED);
        chunk_map_device_clear_bits(map, CHUNK_TRIMMED);
        write_unlock(&fs_info->mapping_tree_lock);

        return 0;
}
struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp)
{
        struct btrfs_chunk_map *map;

        map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp);
        if (!map)
                return NULL;

        refcount_set(&map->refs, 1);
        RB_CLEAR_NODE(&map->rb_node);

        return map;
}
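/*
 * Build the in-memory chunk map from the allocation decisions in @ctl, insert
 * it into the mapping tree and create the corresponding block group.
 */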
5565 static struct btrfs_block_group
*create_chunk(struct btrfs_trans_handle
*trans
,
5566 struct alloc_chunk_ctl
*ctl
,
5567 struct btrfs_device_info
*devices_info
)
5569 struct btrfs_fs_info
*info
= trans
->fs_info
;
5570 struct btrfs_chunk_map
*map
;
5571 struct btrfs_block_group
*block_group
;
5572 u64 start
= ctl
->start
;
5573 u64 type
= ctl
->type
;
5576 map
= btrfs_alloc_chunk_map(ctl
->num_stripes
, GFP_NOFS
);
5578 return ERR_PTR(-ENOMEM
);
5581 map
->chunk_len
= ctl
->chunk_size
;
5582 map
->stripe_size
= ctl
->stripe_size
;
5584 map
->io_align
= BTRFS_STRIPE_LEN
;
5585 map
->io_width
= BTRFS_STRIPE_LEN
;
5586 map
->sub_stripes
= ctl
->sub_stripes
;
5587 map
->num_stripes
= ctl
->num_stripes
;
5589 for (int i
= 0; i
< ctl
->ndevs
; i
++) {
5590 for (int j
= 0; j
< ctl
->dev_stripes
; j
++) {
5591 int s
= i
* ctl
->dev_stripes
+ j
;
5592 map
->stripes
[s
].dev
= devices_info
[i
].dev
;
5593 map
->stripes
[s
].physical
= devices_info
[i
].dev_offset
+
5594 j
* ctl
->stripe_size
;
5598 trace_btrfs_chunk_alloc(info
, map
, start
, ctl
->chunk_size
);
5600 ret
= btrfs_add_chunk_map(info
, map
);
5602 btrfs_free_chunk_map(map
);
5603 return ERR_PTR(ret
);
5606 block_group
= btrfs_make_block_group(trans
, type
, start
, ctl
->chunk_size
);
5607 if (IS_ERR(block_group
)) {
5608 btrfs_remove_chunk_map(info
, map
);
5612 for (int i
= 0; i
< map
->num_stripes
; i
++) {
5613 struct btrfs_device
*dev
= map
->stripes
[i
].dev
;
5615 btrfs_device_set_bytes_used(dev
,
5616 dev
->bytes_used
+ ctl
->stripe_size
);
5617 if (list_empty(&dev
->post_commit_list
))
5618 list_add_tail(&dev
->post_commit_list
,
5619 &trans
->transaction
->dev_update_list
);
5622 atomic64_sub(ctl
->stripe_size
* map
->num_stripes
,
5623 &info
->free_chunk_space
);
5625 check_raid56_incompat_flag(info
, type
);
5626 check_raid1c34_incompat_flag(info
, type
);
5631 struct btrfs_block_group
*btrfs_create_chunk(struct btrfs_trans_handle
*trans
,
5634 struct btrfs_fs_info
*info
= trans
->fs_info
;
5635 struct btrfs_fs_devices
*fs_devices
= info
->fs_devices
;
5636 struct btrfs_device_info
*devices_info
= NULL
;
5637 struct alloc_chunk_ctl ctl
;
5638 struct btrfs_block_group
*block_group
;
5641 lockdep_assert_held(&info
->chunk_mutex
);
5643 if (!alloc_profile_is_valid(type
, 0)) {
5645 return ERR_PTR(-EINVAL
);
5648 if (list_empty(&fs_devices
->alloc_list
)) {
5649 if (btrfs_test_opt(info
, ENOSPC_DEBUG
))
5650 btrfs_debug(info
, "%s: no writable device", __func__
);
5651 return ERR_PTR(-ENOSPC
);
5654 if (!(type
& BTRFS_BLOCK_GROUP_TYPE_MASK
)) {
5655 btrfs_err(info
, "invalid chunk type 0x%llx requested", type
);
5657 return ERR_PTR(-EINVAL
);
5660 ctl
.start
= find_next_chunk(info
);
5662 init_alloc_chunk_ctl(fs_devices
, &ctl
);
5664 devices_info
= kcalloc(fs_devices
->rw_devices
, sizeof(*devices_info
),
5667 return ERR_PTR(-ENOMEM
);
5669 ret
= gather_device_info(fs_devices
, &ctl
, devices_info
);
5671 block_group
= ERR_PTR(ret
);
5675 ret
= decide_stripe_size(fs_devices
, &ctl
, devices_info
);
5677 block_group
= ERR_PTR(ret
);
5681 block_group
= create_chunk(trans
, &ctl
, devices_info
);
5684 kfree(devices_info
);
5689 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the
5690 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
5693 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5696 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle
*trans
,
5697 struct btrfs_block_group
*bg
)
5699 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
5700 struct btrfs_root
*chunk_root
= fs_info
->chunk_root
;
5701 struct btrfs_key key
;
5702 struct btrfs_chunk
*chunk
;
5703 struct btrfs_stripe
*stripe
;
5704 struct btrfs_chunk_map
*map
;
5710 * We take the chunk_mutex for 2 reasons:
5712 * 1) Updates and insertions in the chunk btree must be done while holding
5713 * the chunk_mutex, as well as updating the system chunk array in the
5714 * superblock. See the comment on top of btrfs_chunk_alloc() for the
5717 * 2) To prevent races with the final phase of a device replace operation
5718 * that replaces the device object associated with the map's stripes,
5719 * because the device object's id can change at any time during that
5720 * final phase of the device replace operation
5721 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5722 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5723 * which would cause a failure when updating the device item, which does
 * not exist, or persisting a stripe of the chunk item with such ID.
5725 * Here we can't use the device_list_mutex because our caller already
5726 * has locked the chunk_mutex, and the final phase of device replace
5727 * acquires both mutexes - first the device_list_mutex and then the
5728 * chunk_mutex. Using any of those two mutexes protects us from a
5729 * concurrent device replace.
5731 lockdep_assert_held(&fs_info
->chunk_mutex
);
5733 map
= btrfs_get_chunk_map(fs_info
, bg
->start
, bg
->length
);
5736 btrfs_abort_transaction(trans
, ret
);
5740 item_size
= btrfs_chunk_item_size(map
->num_stripes
);
5742 chunk
= kzalloc(item_size
, GFP_NOFS
);
5745 btrfs_abort_transaction(trans
, ret
);
5749 for (i
= 0; i
< map
->num_stripes
; i
++) {
5750 struct btrfs_device
*device
= map
->stripes
[i
].dev
;
5752 ret
= btrfs_update_device(trans
, device
);
5757 stripe
= &chunk
->stripe
;
5758 for (i
= 0; i
< map
->num_stripes
; i
++) {
5759 struct btrfs_device
*device
= map
->stripes
[i
].dev
;
5760 const u64 dev_offset
= map
->stripes
[i
].physical
;
5762 btrfs_set_stack_stripe_devid(stripe
, device
->devid
);
5763 btrfs_set_stack_stripe_offset(stripe
, dev_offset
);
5764 memcpy(stripe
->dev_uuid
, device
->uuid
, BTRFS_UUID_SIZE
);
5768 btrfs_set_stack_chunk_length(chunk
, bg
->length
);
5769 btrfs_set_stack_chunk_owner(chunk
, BTRFS_EXTENT_TREE_OBJECTID
);
5770 btrfs_set_stack_chunk_stripe_len(chunk
, BTRFS_STRIPE_LEN
);
5771 btrfs_set_stack_chunk_type(chunk
, map
->type
);
5772 btrfs_set_stack_chunk_num_stripes(chunk
, map
->num_stripes
);
5773 btrfs_set_stack_chunk_io_align(chunk
, BTRFS_STRIPE_LEN
);
5774 btrfs_set_stack_chunk_io_width(chunk
, BTRFS_STRIPE_LEN
);
5775 btrfs_set_stack_chunk_sector_size(chunk
, fs_info
->sectorsize
);
5776 btrfs_set_stack_chunk_sub_stripes(chunk
, map
->sub_stripes
);
5778 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
5779 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
5780 key
.offset
= bg
->start
;
5782 ret
= btrfs_insert_item(trans
, chunk_root
, &key
, chunk
, item_size
);
5786 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED
, &bg
->runtime_flags
);
5788 if (map
->type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
5789 ret
= btrfs_add_system_chunk(fs_info
, &key
, chunk
, item_size
);
5796 btrfs_free_chunk_map(map
);
5800 static noinline
int init_first_rw_device(struct btrfs_trans_handle
*trans
)
5802 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
5804 struct btrfs_block_group
*meta_bg
;
5805 struct btrfs_block_group
*sys_bg
;
5808 * When adding a new device for sprouting, the seed device is read-only
5809 * so we must first allocate a metadata and a system chunk. But before
5810 * adding the block group items to the extent, device and chunk btrees,
5813 * 1) Create both chunks without doing any changes to the btrees, as
5814 * otherwise we would get -ENOSPC since the block groups from the
5815 * seed device are read-only;
5817 * 2) Add the device item for the new sprout device - finishing the setup
5818 * of a new block group requires updating the device item in the chunk
5819 * btree, so it must exist when we attempt to do it. The previous step
5820 * ensures this does not fail with -ENOSPC.
5822 * After that we can add the block group items to their btrees:
5823 * update existing device item in the chunk btree, add a new block group
5824 * item to the extent btree, add a new chunk item to the chunk btree and
5825 * finally add the new device extent items to the devices btree.
5828 alloc_profile
= btrfs_metadata_alloc_profile(fs_info
);
5829 meta_bg
= btrfs_create_chunk(trans
, alloc_profile
);
5830 if (IS_ERR(meta_bg
))
5831 return PTR_ERR(meta_bg
);
5833 alloc_profile
= btrfs_system_alloc_profile(fs_info
);
5834 sys_bg
= btrfs_create_chunk(trans
, alloc_profile
);
5836 return PTR_ERR(sys_bg
);
static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
{
        const int index = btrfs_bg_flags_to_raid_index(map->type);

        return btrfs_raid_array[index].tolerated_failures;
}
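/*
 * A chunk is still writeable as long as the number of missing devices backing
 * it does not exceed the failures tolerated by its RAID profile.
 */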
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
        struct btrfs_chunk_map *map;
        int miss_ndevs = 0;
        int i;
        bool ret = true;

        map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
        if (IS_ERR(map))
                return false;

        for (i = 0; i < map->num_stripes; i++) {
                if (test_bit(BTRFS_DEV_STATE_MISSING,
                             &map->stripes[i].dev->dev_state)) {
                        miss_ndevs++;
                        continue;
                }
                if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
                              &map->stripes[i].dev->dev_state)) {
                        ret = false;
                        goto end;
                }
        }

        /*
         * If the number of missing devices is larger than max errors, we can
         * not write the data into that chunk successfully.
         */
        if (miss_ndevs > btrfs_chunk_max_errors(map))
                ret = false;
end:
        btrfs_free_chunk_map(map);
        return ret;
}
void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info)
{
        write_lock(&fs_info->mapping_tree_lock);
        while (!RB_EMPTY_ROOT(&fs_info->mapping_tree.rb_root)) {
                struct btrfs_chunk_map *map;
                struct rb_node *node;

                node = rb_first_cached(&fs_info->mapping_tree);
                map = rb_entry(node, struct btrfs_chunk_map, rb_node);
                rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
                RB_CLEAR_NODE(&map->rb_node);
                chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
                /* Once for the tree ref. */
                btrfs_free_chunk_map(map);
                cond_resched_rwlock_write(&fs_info->mapping_tree_lock);
        }
        write_unlock(&fs_info->mapping_tree_lock);
}
static int btrfs_chunk_map_num_copies(const struct btrfs_chunk_map *map)
{
        enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(map->type);

        if (map->type & BTRFS_BLOCK_GROUP_RAID5)
                return 2;

        /*
         * There could be two corrupted data stripes, we need to loop retry in
         * order to rebuild the correct data.
         *
         * Fail a stripe at a time on every retry except the stripe under
         * reconstruction.
         */
        if (map->type & BTRFS_BLOCK_GROUP_RAID6)
                return map->num_stripes;

        /* Non-RAID56, use their ncopies from btrfs_raid_array. */
        return btrfs_raid_array[index].ncopies;
}
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
        struct btrfs_chunk_map *map;
        int ret;

        map = btrfs_get_chunk_map(fs_info, logical, len);
        if (IS_ERR(map))
                /*
                 * We could return errors for these cases, but that could get
                 * ugly and we'd probably do the same thing which is just not do
                 * anything else and exit, so return 1 so the callers don't try
                 * to use other copies.
                 */
                return 1;

        ret = btrfs_chunk_map_num_copies(map);
        btrfs_free_chunk_map(map);
        return ret;
}
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
                                    u64 logical)
{
        struct btrfs_chunk_map *map;
        unsigned long len = fs_info->sectorsize;

        if (!btrfs_fs_incompat(fs_info, RAID56))
                return len;

        map = btrfs_get_chunk_map(fs_info, logical, len);

        if (!WARN_ON(IS_ERR(map))) {
                if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
                        len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
                btrfs_free_chunk_map(map);
        }
        return len;
}
5962 static int find_live_mirror(struct btrfs_fs_info
*fs_info
,
5963 struct btrfs_chunk_map
*map
, int first
,
5964 int dev_replace_is_ongoing
)
5966 const enum btrfs_read_policy policy
= READ_ONCE(fs_info
->fs_devices
->read_policy
);
5969 int preferred_mirror
;
5971 struct btrfs_device
*srcdev
;
5974 (BTRFS_BLOCK_GROUP_RAID1_MASK
| BTRFS_BLOCK_GROUP_RAID10
)));
5976 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
)
5977 num_stripes
= map
->sub_stripes
;
5979 num_stripes
= map
->num_stripes
;
5983 /* Shouldn't happen, just warn and use pid instead of failing */
5984 btrfs_warn_rl(fs_info
, "unknown read_policy type %u, reset to pid",
5986 WRITE_ONCE(fs_info
->fs_devices
->read_policy
, BTRFS_READ_POLICY_PID
);
5988 case BTRFS_READ_POLICY_PID
:
5989 preferred_mirror
= first
+ (current
->pid
% num_stripes
);
5993 if (dev_replace_is_ongoing
&&
5994 fs_info
->dev_replace
.cont_reading_from_srcdev_mode
==
5995 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID
)
5996 srcdev
= fs_info
->dev_replace
.srcdev
;
6001 * try to avoid the drive that is the source drive for a
6002 * dev-replace procedure, only choose it if no other non-missing
6003 * mirror is available
6005 for (tolerance
= 0; tolerance
< 2; tolerance
++) {
6006 if (map
->stripes
[preferred_mirror
].dev
->bdev
&&
6007 (tolerance
|| map
->stripes
[preferred_mirror
].dev
!= srcdev
))
6008 return preferred_mirror
;
6009 for (i
= first
; i
< first
+ num_stripes
; i
++) {
6010 if (map
->stripes
[i
].dev
->bdev
&&
6011 (tolerance
|| map
->stripes
[i
].dev
!= srcdev
))
6016 /* we couldn't find one that doesn't fail. Just return something
6017 * and the io error handling code will clean up eventually
6019 return preferred_mirror
;
6023 struct btrfs_io_context
*alloc_btrfs_io_context(struct btrfs_fs_info
*fs_info
,
6024 u64 logical
, u16 total_stripes
)
6026 struct btrfs_io_context
*bioc
;
6029 /* The size of btrfs_io_context */
6030 sizeof(struct btrfs_io_context
) +
6031 /* Plus the variable array for the stripes */
6032 sizeof(struct btrfs_io_stripe
) * (total_stripes
),
6038 refcount_set(&bioc
->refs
, 1);
6040 bioc
->fs_info
= fs_info
;
6041 bioc
->replace_stripe_src
= -1;
6042 bioc
->full_stripe_logical
= (u64
)-1;
6043 bioc
->logical
= logical
;
void btrfs_get_bioc(struct btrfs_io_context *bioc)
{
        WARN_ON(!refcount_read(&bioc->refs));
        refcount_inc(&bioc->refs);
}

void btrfs_put_bioc(struct btrfs_io_context *bioc)
{
        if (!bioc)
                return;
        if (refcount_dec_and_test(&bioc->refs))
                kfree(bioc);
}
/*
 * Please note that, discard won't be sent to target device of device
 * replace.
 */
6066 struct btrfs_discard_stripe
*btrfs_map_discard(struct btrfs_fs_info
*fs_info
,
6067 u64 logical
, u64
*length_ret
,
6070 struct btrfs_chunk_map
*map
;
6071 struct btrfs_discard_stripe
*stripes
;
6072 u64 length
= *length_ret
;
6077 u64 stripe_end_offset
;
6081 u32 sub_stripes
= 0;
6082 u32 stripes_per_dev
= 0;
6083 u32 remaining_stripes
= 0;
6084 u32 last_stripe
= 0;
6088 map
= btrfs_get_chunk_map(fs_info
, logical
, length
);
6090 return ERR_CAST(map
);
6092 /* we don't discard raid56 yet */
6093 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
6098 offset
= logical
- map
->start
;
6099 length
= min_t(u64
, map
->start
+ map
->chunk_len
- logical
, length
);
6100 *length_ret
= length
;
6103 * stripe_nr counts the total number of stripes we have to stride
6104 * to get to this block
6106 stripe_nr
= offset
>> BTRFS_STRIPE_LEN_SHIFT
;
6108 /* stripe_offset is the offset of this block in its stripe */
6109 stripe_offset
= offset
- btrfs_stripe_nr_to_offset(stripe_nr
);
6111 stripe_nr_end
= round_up(offset
+ length
, BTRFS_STRIPE_LEN
) >>
6112 BTRFS_STRIPE_LEN_SHIFT
;
6113 stripe_cnt
= stripe_nr_end
- stripe_nr
;
6114 stripe_end_offset
= btrfs_stripe_nr_to_offset(stripe_nr_end
) -
6117 * after this, stripe_nr is the number of stripes on this
6118 * device we have to walk to find the data, and stripe_index is
6119 * the number of our device in the stripe array
6123 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID0
|
6124 BTRFS_BLOCK_GROUP_RAID10
)) {
6125 if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
)
6128 sub_stripes
= map
->sub_stripes
;
6130 factor
= map
->num_stripes
/ sub_stripes
;
6131 *num_stripes
= min_t(u64
, map
->num_stripes
,
6132 sub_stripes
* stripe_cnt
);
6133 stripe_index
= stripe_nr
% factor
;
6134 stripe_nr
/= factor
;
6135 stripe_index
*= sub_stripes
;
6137 remaining_stripes
= stripe_cnt
% factor
;
6138 stripes_per_dev
= stripe_cnt
/ factor
;
6139 last_stripe
= ((stripe_nr_end
- 1) % factor
) * sub_stripes
;
6140 } else if (map
->type
& (BTRFS_BLOCK_GROUP_RAID1_MASK
|
6141 BTRFS_BLOCK_GROUP_DUP
)) {
6142 *num_stripes
= map
->num_stripes
;
6144 stripe_index
= stripe_nr
% map
->num_stripes
;
6145 stripe_nr
/= map
->num_stripes
;
6148 stripes
= kcalloc(*num_stripes
, sizeof(*stripes
), GFP_NOFS
);
6154 for (i
= 0; i
< *num_stripes
; i
++) {
6155 stripes
[i
].physical
=
6156 map
->stripes
[stripe_index
].physical
+
6157 stripe_offset
+ btrfs_stripe_nr_to_offset(stripe_nr
);
6158 stripes
[i
].dev
= map
->stripes
[stripe_index
].dev
;
6160 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID0
|
6161 BTRFS_BLOCK_GROUP_RAID10
)) {
6162 stripes
[i
].length
= btrfs_stripe_nr_to_offset(stripes_per_dev
);
6164 if (i
/ sub_stripes
< remaining_stripes
)
6165 stripes
[i
].length
+= BTRFS_STRIPE_LEN
;
6168 * Special for the first stripe and
6171 * |-------|...|-------|
6175 if (i
< sub_stripes
)
6176 stripes
[i
].length
-= stripe_offset
;
6178 if (stripe_index
>= last_stripe
&&
6179 stripe_index
<= (last_stripe
+
6181 stripes
[i
].length
-= stripe_end_offset
;
6183 if (i
== sub_stripes
- 1)
6186 stripes
[i
].length
= length
;
6190 if (stripe_index
== map
->num_stripes
) {
6196 btrfs_free_chunk_map(map
);
6199 btrfs_free_chunk_map(map
);
6200 return ERR_PTR(ret
);
static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
{
        struct btrfs_block_group *cache;
        bool ret;

        /* Non-zoned filesystem does not use "to_copy" flag */
        if (!btrfs_is_zoned(fs_info))
                return false;

        cache = btrfs_lookup_block_group(fs_info, logical);

        ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);

        btrfs_put_block_group(cache);
        return ret;
}
6220 static void handle_ops_on_dev_replace(struct btrfs_io_context
*bioc
,
6221 struct btrfs_dev_replace
*dev_replace
,
6223 struct btrfs_io_geometry
*io_geom
)
6225 u64 srcdev_devid
= dev_replace
->srcdev
->devid
;
6227 * At this stage, num_stripes is still the real number of stripes,
6228 * excluding the duplicated stripes.
6230 int num_stripes
= io_geom
->num_stripes
;
6231 int max_errors
= io_geom
->max_errors
;
6232 int nr_extra_stripes
= 0;
6236 * A block group which has "to_copy" set will eventually be copied by
6237 * the dev-replace process. We can avoid cloning IO here.
6239 if (is_block_group_to_copy(dev_replace
->srcdev
->fs_info
, logical
))
6243 * Duplicate the write operations while the dev-replace procedure is
6244 * running. Since the copying of the old disk to the new disk takes
6245 * place at run time while the filesystem is mounted writable, the
6246 * regular write operations to the old disk have to be duplicated to go
6247 * to the new disk as well.
6249 * Note that device->missing is handled by the caller, and that the
6250 * write to the old disk is already set up in the stripes array.
6252 for (i
= 0; i
< num_stripes
; i
++) {
6253 struct btrfs_io_stripe
*old
= &bioc
->stripes
[i
];
6254 struct btrfs_io_stripe
*new = &bioc
->stripes
[num_stripes
+ nr_extra_stripes
];
6256 if (old
->dev
->devid
!= srcdev_devid
)
6259 new->physical
= old
->physical
;
6260 new->dev
= dev_replace
->tgtdev
;
6261 if (bioc
->map_type
& BTRFS_BLOCK_GROUP_RAID56_MASK
)
6262 bioc
->replace_stripe_src
= i
;
6266 /* We can only have at most 2 extra nr_stripes (for DUP). */
6267 ASSERT(nr_extra_stripes
<= 2);
6269 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for
6271 * If we have 2 extra stripes, only choose the one with smaller physical.
6273 if (io_geom
->op
== BTRFS_MAP_GET_READ_MIRRORS
&& nr_extra_stripes
== 2) {
6274 struct btrfs_io_stripe
*first
= &bioc
->stripes
[num_stripes
];
6275 struct btrfs_io_stripe
*second
= &bioc
->stripes
[num_stripes
+ 1];
6277 /* Only DUP can have two extra stripes. */
6278 ASSERT(bioc
->map_type
& BTRFS_BLOCK_GROUP_DUP
);
6281 * Swap the last stripe stripes and reduce @nr_extra_stripes.
6282 * The extra stripe would still be there, but won't be accessed.
6284 if (first
->physical
> second
->physical
) {
6285 swap(second
->physical
, first
->physical
);
6286 swap(second
->dev
, first
->dev
);
6291 io_geom
->num_stripes
= num_stripes
+ nr_extra_stripes
;
6292 io_geom
->max_errors
= max_errors
+ nr_extra_stripes
;
6293 bioc
->replace_nr_stripes
= nr_extra_stripes
;
6296 static u64
btrfs_max_io_len(struct btrfs_chunk_map
*map
, u64 offset
,
6297 struct btrfs_io_geometry
*io_geom
)
6300 * Stripe_nr is the stripe where this block falls. stripe_offset is
6301 * the offset of this block in its stripe.
6303 io_geom
->stripe_offset
= offset
& BTRFS_STRIPE_LEN_MASK
;
6304 io_geom
->stripe_nr
= offset
>> BTRFS_STRIPE_LEN_SHIFT
;
6305 ASSERT(io_geom
->stripe_offset
< U32_MAX
);
6307 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
6308 unsigned long full_stripe_len
=
6309 btrfs_stripe_nr_to_offset(nr_data_stripes(map
));
6312 * For full stripe start, we use previously calculated
6313 * @stripe_nr. Align it to nr_data_stripes, then multiply with
6316 * By this we can avoid u64 division completely. And we have
6317 * to go rounddown(), not round_down(), as nr_data_stripes is
6318 * not ensured to be power of 2.
6320 io_geom
->raid56_full_stripe_start
= btrfs_stripe_nr_to_offset(
6321 rounddown(io_geom
->stripe_nr
, nr_data_stripes(map
)));
6323 ASSERT(io_geom
->raid56_full_stripe_start
+ full_stripe_len
> offset
);
6324 ASSERT(io_geom
->raid56_full_stripe_start
<= offset
);
6326 * For writes to RAID56, allow to write a full stripe set, but
6327 * no straddling of stripe sets.
6329 if (io_geom
->op
== BTRFS_MAP_WRITE
)
6330 return full_stripe_len
- (offset
- io_geom
->raid56_full_stripe_start
);
6334 * For other RAID types and for RAID56 reads, allow a single stripe (on
6337 if (map
->type
& BTRFS_BLOCK_GROUP_STRIPE_MASK
)
6338 return BTRFS_STRIPE_LEN
- io_geom
->stripe_offset
;
static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical,
                         u64 *length, struct btrfs_io_stripe *dst,
                         struct btrfs_chunk_map *map,
                         struct btrfs_io_geometry *io_geom)
{
        dst->dev = map->stripes[io_geom->stripe_index].dev;

        if (io_geom->op == BTRFS_MAP_READ &&
            btrfs_need_stripe_tree_update(fs_info, map->type))
                return btrfs_get_raid_extent_offset(fs_info, logical, length,
                                                    map->type,
                                                    io_geom->stripe_index, dst);

        dst->physical = map->stripes[io_geom->stripe_index].physical +
                        io_geom->stripe_offset +
                        btrfs_stripe_nr_to_offset(io_geom->stripe_nr);
        return 0;
}
static bool is_single_device_io(struct btrfs_fs_info *fs_info,
                                const struct btrfs_io_stripe *smap,
                                const struct btrfs_chunk_map *map,
                                int num_alloc_stripes,
                                enum btrfs_map_op op, int mirror_num)
{
        if (!smap)
                return false;

        if (num_alloc_stripes != 1)
                return false;

        if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ)
                return false;

        if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)
                return false;

        return true;
}
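/*
 * The map_blocks_*() helpers below turn the generic geometry computed in
 * btrfs_max_io_len() (stripe_nr, stripe_offset) into a concrete stripe_index,
 * mirror_num and number of stripes for each RAID profile.
 */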
static void map_blocks_raid0(const struct btrfs_chunk_map *map,
                             struct btrfs_io_geometry *io_geom)
{
        io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
        io_geom->stripe_nr /= map->num_stripes;
        if (io_geom->op == BTRFS_MAP_READ)
                io_geom->mirror_num = 1;
}
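/*
 * Illustrative example (not taken from real data): with 3 stripes and a 64K
 * BTRFS_STRIPE_LEN, a chunk-relative offset of 200K gives stripe_nr = 3, so
 * stripe_index = 3 % 3 = 0 and the per-device stripe_nr becomes 3 / 3 = 1.
 */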
static void map_blocks_raid1(struct btrfs_fs_info *fs_info,
                             struct btrfs_chunk_map *map,
                             struct btrfs_io_geometry *io_geom,
                             bool dev_replace_is_ongoing)
{
        if (io_geom->op != BTRFS_MAP_READ) {
                io_geom->num_stripes = map->num_stripes;
                return;
        }

        if (io_geom->mirror_num) {
                io_geom->stripe_index = io_geom->mirror_num - 1;
                return;
        }

        io_geom->stripe_index = find_live_mirror(fs_info, map, 0,
                                                 dev_replace_is_ongoing);
        io_geom->mirror_num = io_geom->stripe_index + 1;
}
static void map_blocks_dup(const struct btrfs_chunk_map *map,
                           struct btrfs_io_geometry *io_geom)
{
        if (io_geom->op != BTRFS_MAP_READ) {
                io_geom->num_stripes = map->num_stripes;
                return;
        }

        if (io_geom->mirror_num) {
                io_geom->stripe_index = io_geom->mirror_num - 1;
                return;
        }

        io_geom->mirror_num = 1;
}
6427 static void map_blocks_raid10(struct btrfs_fs_info
*fs_info
,
6428 struct btrfs_chunk_map
*map
,
6429 struct btrfs_io_geometry
*io_geom
,
6430 bool dev_replace_is_ongoing
)
6432 u32 factor
= map
->num_stripes
/ map
->sub_stripes
;
6433 int old_stripe_index
;
6435 io_geom
->stripe_index
= (io_geom
->stripe_nr
% factor
) * map
->sub_stripes
;
6436 io_geom
->stripe_nr
/= factor
;
6438 if (io_geom
->op
!= BTRFS_MAP_READ
) {
6439 io_geom
->num_stripes
= map
->sub_stripes
;
6443 if (io_geom
->mirror_num
) {
6444 io_geom
->stripe_index
+= io_geom
->mirror_num
- 1;
6448 old_stripe_index
= io_geom
->stripe_index
;
6449 io_geom
->stripe_index
= find_live_mirror(fs_info
, map
,
6450 io_geom
->stripe_index
,
6451 dev_replace_is_ongoing
);
6452 io_geom
->mirror_num
= io_geom
->stripe_index
- old_stripe_index
+ 1;
6455 static void map_blocks_raid56_write(struct btrfs_chunk_map
*map
,
6456 struct btrfs_io_geometry
*io_geom
,
6457 u64 logical
, u64
*length
)
6459 int data_stripes
= nr_data_stripes(map
);
6462 * Needs full stripe mapping.
6464 * Push stripe_nr back to the start of the full stripe For those cases
6465 * needing a full stripe, @stripe_nr is the full stripe number.
6467 * Originally we go raid56_full_stripe_start / full_stripe_len, but
6468 * that can be expensive. Here we just divide @stripe_nr with
6471 io_geom
->stripe_nr
/= data_stripes
;
6473 /* RAID[56] write or recovery. Return all stripes */
6474 io_geom
->num_stripes
= map
->num_stripes
;
6475 io_geom
->max_errors
= btrfs_chunk_max_errors(map
);
6477 /* Return the length to the full stripe end. */
6478 *length
= min(logical
+ *length
,
6479 io_geom
->raid56_full_stripe_start
+ map
->start
+
6480 btrfs_stripe_nr_to_offset(data_stripes
)) -
6482 io_geom
->stripe_index
= 0;
6483 io_geom
->stripe_offset
= 0;
6486 static void map_blocks_raid56_read(struct btrfs_chunk_map
*map
,
6487 struct btrfs_io_geometry
*io_geom
)
6489 int data_stripes
= nr_data_stripes(map
);
6491 ASSERT(io_geom
->mirror_num
<= 1);
6492 /* Just grab the data stripe directly. */
6493 io_geom
->stripe_index
= io_geom
->stripe_nr
% data_stripes
;
6494 io_geom
->stripe_nr
/= data_stripes
;
6496 /* We distribute the parity blocks across stripes. */
6497 io_geom
->stripe_index
=
6498 (io_geom
->stripe_nr
+ io_geom
->stripe_index
) % map
->num_stripes
;
6500 if (io_geom
->op
== BTRFS_MAP_READ
&& io_geom
->mirror_num
< 1)
6501 io_geom
->mirror_num
= 1;
static void map_blocks_single(const struct btrfs_chunk_map *map,
                              struct btrfs_io_geometry *io_geom)
{
        io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
        io_geom->stripe_nr /= map->num_stripes;
        io_geom->mirror_num = io_geom->stripe_index + 1;
}
/*
 * Map one logical range to one or more physical ranges.
 *
 * @length:          (Mandatory) mapped length of this run.
 *                   One logical range can be split into different segments
 *                   due to factors like zones and RAID0/5/6/10 stripe
 *                   boundaries.
 *
 * @bioc_ret:        (Mandatory) returned btrfs_io_context structure,
 *                   which has one or more physical ranges (btrfs_io_stripe)
 *                   assigned to it.
 *                   Caller should call btrfs_put_bioc() to free it after use.
 *
 * @smap:            (Optional) single physical range optimization.
 *                   If the map request can be fulfilled by one single
 *                   physical range, and this parameter is not NULL,
 *                   then @bioc_ret would be NULL, and @smap would be
 *                   updated.
 *
 * @mirror_num_ret:  (Mandatory) returned mirror number if the original
 *                   value is 0.
 *
 *                   Mirror number 0 means to choose any live mirrors.
 *
 *                   For non-RAID56 profiles, non-zero mirror_num means
 *                   the Nth mirror. (e.g. mirror_num 1 means the first
 *                   copy).
 *
 *                   For RAID56 profile, mirror 1 means rebuild from P and
 *                   the remaining data stripes.
 *
 *                   For RAID6 profile, mirror > 2 means mark another
 *                   data/P stripe error and rebuild from the remaining
 *                   stripes.
 */
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret,
		    struct btrfs_io_stripe *smap, int *mirror_num_ret)
{
	struct btrfs_chunk_map *map;
	struct btrfs_io_geometry io_geom = { 0 };
	u64 map_offset;
	u64 max_len;
	int num_copies;
	int ret = 0;
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	u16 num_alloc_stripes;

	io_geom.mirror_num = (mirror_num_ret ? *mirror_num_ret : 0);
	io_geom.num_stripes = 1;
	io_geom.stripe_index = 0;
	io_geom.op = op;

	map = btrfs_get_chunk_map(fs_info, logical, *length);
	if (IS_ERR(map))
		return PTR_ERR(map);

	num_copies = btrfs_chunk_map_num_copies(map);
	if (io_geom.mirror_num > num_copies)
		return -EINVAL;

	map_offset = logical - map->start;
	io_geom.raid56_full_stripe_start = (u64)-1;
	max_len = btrfs_max_io_len(map, map_offset, &io_geom);
	*length = min_t(u64, map->chunk_len - map_offset, max_len);

	if (dev_replace->replace_task != current)
		down_read(&dev_replace->rwsem);

	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing && dev_replace->replace_task != current)
		up_read(&dev_replace->rwsem);

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case BTRFS_BLOCK_GROUP_RAID0:
		map_blocks_raid0(map, &io_geom);
		break;
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID1C3:
	case BTRFS_BLOCK_GROUP_RAID1C4:
		map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
		map_blocks_dup(map, &io_geom);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing);
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		if (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)
			map_blocks_raid56_write(map, &io_geom, logical, length);
		else
			map_blocks_raid56_read(map, &io_geom);
		break;
	default:
		/*
		 * After this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		map_blocks_single(map, &io_geom);
		break;
	}
	if (io_geom.stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
"stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   io_geom.stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = io_geom.num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    op != BTRFS_MAP_READ)
		/*
		 * For replace case, we need to add extra stripes for extra
		 * duplicated stripes.
		 *
		 * For both WRITE and GET_READ_MIRRORS, we may have at most
		 * 2 more stripes (DUP types, otherwise 1).
		 */
		num_alloc_stripes += 2;

	/*
	 * If this I/O maps to a single device, try to return the device and
	 * physical block information on the stack instead of allocating an
	 * I/O context structure.
	 */
	if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op,
				io_geom.mirror_num)) {
		ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
		if (mirror_num_ret)
			*mirror_num_ret = io_geom.mirror_num;
		*bioc_ret = NULL;
		goto out;
	}

	bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
	if (!bioc) {
		ret = -ENOMEM;
		goto out;
	}
	bioc->map_type = map->type;

	/*
	 * For RAID56 full map, we need to make sure the stripes[] follows the
	 * rule that data stripes are all ordered, then followed with P and Q
	 * (if we have).
	 *
	 * It's still mostly the same as other profiles, just with extra rotation.
	 */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
	    (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)) {
		/*
		 * For RAID56 @stripe_nr is already the number of full stripes
		 * before us, which is also the rotation value (needs to modulo
		 * with num_stripes).
		 *
		 * In this case, we just add @stripe_nr with @i, then do the
		 * modulo, to reduce one modulo call.
		 */
		bioc->full_stripe_logical = map->start +
			btrfs_stripe_nr_to_offset(io_geom.stripe_nr *
						  nr_data_stripes(map));
		for (int i = 0; i < io_geom.num_stripes; i++) {
			struct btrfs_io_stripe *dst = &bioc->stripes[i];
			u32 stripe_index;

			stripe_index = (i + io_geom.stripe_nr) % io_geom.num_stripes;
			dst->dev = map->stripes[stripe_index].dev;
			dst->physical =
				map->stripes[stripe_index].physical +
				io_geom.stripe_offset +
				btrfs_stripe_nr_to_offset(io_geom.stripe_nr);
		}
	} else {
		/*
		 * For all other non-RAID56 profiles, just copy the target
		 * stripe into the bioc.
		 */
		for (int i = 0; i < io_geom.num_stripes; i++) {
			ret = set_io_stripe(fs_info, logical, length,
					    &bioc->stripes[i], map, &io_geom);
			if (ret < 0)
				break;
			io_geom.stripe_index++;
		}
	}

	if (ret) {
		*bioc_ret = NULL;
		btrfs_put_bioc(bioc);
		bioc = NULL;
		goto out;
	}

	if (op != BTRFS_MAP_READ)
		io_geom.max_errors = btrfs_chunk_max_errors(map);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    op != BTRFS_MAP_READ) {
		handle_ops_on_dev_replace(bioc, dev_replace, logical, &io_geom);
	}

	*bioc_ret = bioc;
	bioc->num_stripes = io_geom.num_stripes;
	bioc->max_errors = io_geom.max_errors;
	bioc->mirror_num = io_geom.mirror_num;

out:
	if (dev_replace_is_ongoing && dev_replace->replace_task != current) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	btrfs_free_chunk_map(map);
	return ret;
}
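
/*
 * Typical caller pattern (illustrative sketch, not part of the original
 * source): pass the logical range and either receive a btrfs_io_context in
 * *bioc_ret (to be released with btrfs_put_bioc() after submitting to
 * bioc->stripes[]), or, if the request maps to a single device and @smap was
 * supplied, *bioc_ret stays NULL and @smap holds the device and physical
 * offset directly.
 */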

static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
				      const struct btrfs_fs_devices *fs_devices)
{
	if (args->fsid == NULL)
		return true;
	if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
		return true;
	return false;
}

static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
				  const struct btrfs_device *device)
{
	if (args->missing) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
		    !device->bdev)
			return true;
		return false;
	}

	if (device->devid != args->devid)
		return false;
	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
		return false;
	return true;
}

/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 */
struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
				       const struct btrfs_dev_lookup_args *args)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *seed_devs;

	if (dev_args_match_fs_devices(args, fs_devices)) {
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (dev_args_match_device(args, device))
				return device;
		}
	}

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		if (!dev_args_match_fs_devices(args, seed_devs))
			continue;
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			if (dev_args_match_device(args, device))
				return device;
		}
	}

	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	unsigned int nofs_flag;

	/*
	 * We call this under the chunk_mutex, so we want to use NOFS for this
	 * allocation, however we don't want to change btrfs_alloc_device() to
	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
	 * places.
	 */
	nofs_flag = memalloc_nofs_save();
	device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}

/*
 * Allocate new device struct, set up devid and UUID.
 *
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device. If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device. If NULL a new UUID
 *		is generated.
 * @path:	a pointer to device path if available, NULL otherwise.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid, const u8 *uuid,
					const char *path)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);

	if (devid) {
		tmp = *devid;
	} else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	if (path) {
		struct rcu_string *name;

		name = rcu_string_strdup(path, GFP_KERNEL);
		if (!name) {
			btrfs_free_device(dev);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(dev->name, name);
	}

	return dev;
}

static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
{
	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);

	return div_u64(map->chunk_len, data_stripes);
}
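
/*
 * Illustrative example (not part of the original source): a RAID5 chunk with
 * num_stripes = 4 has 3 data stripes, so a 3GiB chunk_len yields a 1GiB
 * per-device stripe length.
 */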

#if BITS_PER_LONG == 32
/*
 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
 * can't be accessed on 32bit systems.
 *
 * This function does a mount time check to reject the fs if it already has
 * a metadata chunk beyond that limit.
 */
static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return 0;

	if (logical + length < MAX_LFS_FILESIZE)
		return 0;

	btrfs_err_32bit_limit(fs_info);
	return -EOVERFLOW;
}

/*
 * This is to give early warning for any metadata chunk reaching
 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
 * Although we can still access the metadata, it's not going to be possible
 * once the limit is reached.
 */
static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return;

	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
		return;

	btrfs_warn_32bit_limit(fs_info);
}
#endif

static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
						  u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	if (!btrfs_test_opt(fs_info, DEGRADED)) {
		btrfs_report_missing_device(fs_info, devid, uuid, true);
		return ERR_PTR(-ENOENT);
	}

	dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
	if (IS_ERR(dev)) {
		btrfs_err(fs_info, "failed to init missing device %llu: %ld",
			  devid, PTR_ERR(dev));
		return dev;
	}
	btrfs_report_missing_device(fs_info, devid, uuid, false);

	return dev;
}
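
/*
 * Read one chunk item and build the in-memory logical->physical mapping
 * (struct btrfs_chunk_map) for it, resolving each stripe to a btrfs_device
 * and falling back to a "missing" device when mounted degraded.
 */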
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_chunk_map *map;
	u64 logical;
	u64 length;
	u64 devid;
	u64 type;
	u8 uuid[BTRFS_UUID_SIZE];
	int index;
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);
	index = btrfs_bg_flags_to_raid_index(type);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

#if BITS_PER_LONG == 32
	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
	if (ret < 0)
		return ret;
	warn_32bit_meta_chunk(fs_info, logical, length, type);
#endif

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	map = btrfs_find_chunk_map(fs_info, logical, 1);

	/* already mapped? */
	if (map && map->start <= logical && map->start + map->chunk_len > logical) {
		btrfs_free_chunk_map(map);
		return 0;
	} else if (map) {
		btrfs_free_chunk_map(map);
	}

	map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->start = logical;
	map->chunk_len = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->type = type;
	/*
	 * We can't use the sub_stripes value, as for profiles other than
	 * RAID10, they may have 0 as sub_stripes for filesystems created by
	 * older mkfs (<v5.4).
	 * In that case, it can cause divide-by-zero errors later.
	 * Since currently sub_stripes is fixed for each profile, let's
	 * use the trusted value instead.
	 */
	map->sub_stripes = btrfs_raid_array[index].sub_stripes;
	map->verified_stripes = 0;
	map->stripe_size = btrfs_calc_stripe_length(map);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		args.devid = devid;
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		args.uuid = uuid;
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = handle_missing_device(fs_info,
								    devid, uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				ret = PTR_ERR(map->stripes[i].dev);
				btrfs_free_chunk_map(map);
				return ret;
			}
		}

		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			&(map->stripes[i].dev->dev_state));
	}

	ret = btrfs_add_chunk_map(fs_info, map);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  map->start, map->chunk_len, ret);
	}

	return ret;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
*open_seed_devices(struct btrfs_fs_info
*fs_info
,
7106 struct btrfs_fs_devices
*fs_devices
;
7109 lockdep_assert_held(&uuid_mutex
);
7112 /* This will match only for multi-device seed fs */
7113 list_for_each_entry(fs_devices
, &fs_info
->fs_devices
->seed_list
, seed_list
)
7114 if (!memcmp(fs_devices
->fsid
, fsid
, BTRFS_FSID_SIZE
))
7118 fs_devices
= find_fsid(fsid
, NULL
);
7120 if (!btrfs_test_opt(fs_info
, DEGRADED
))
7121 return ERR_PTR(-ENOENT
);
7123 fs_devices
= alloc_fs_devices(fsid
);
7124 if (IS_ERR(fs_devices
))
7127 fs_devices
->seeding
= true;
7128 fs_devices
->opened
= 1;
7133 * Upon first call for a seed fs fsid, just create a private copy of the
7134 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7136 fs_devices
= clone_fs_devices(fs_devices
);
7137 if (IS_ERR(fs_devices
))
7140 ret
= open_fs_devices(fs_devices
, BLK_OPEN_READ
, fs_info
->bdev_holder
);
7142 free_fs_devices(fs_devices
);
7143 return ERR_PTR(ret
);
7146 if (!fs_devices
->seeding
) {
7147 close_fs_devices(fs_devices
);
7148 free_fs_devices(fs_devices
);
7149 return ERR_PTR(-EINVAL
);
7152 list_add(&fs_devices
->seed_list
, &fs_info
->fs_devices
->seed_list
);
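
/*
 * Read one device item from the chunk tree: look up (or create as missing)
 * the matching btrfs_device, handle seed filesystems, and fill the in-memory
 * device from the on-disk item.
 */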
static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);
	args.uuid = dev_uuid;
	args.fsid = fs_uuid;

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, &args);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				  "failed to add missing dev %llu: %ld",
				  devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
					&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	if (device->bdev) {
		u64 max_total_bytes = bdev_nr_bytes(device->bdev);

		if (device->total_bytes > max_total_bytes) {
			btrfs_err(fs_info,
		"device total_bytes should be at most %llu but found %llu",
				  max_total_bytes, device->total_bytes);
			return -EINVAL;
		}
	}
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
			     &fs_info->free_chunk_space);
	}
	return 0;
}
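
/*
 * Parse the system chunk array embedded in the super block.  Only SYSTEM
 * chunks live there; they are needed to bootstrap reading the chunk tree
 * itself.
 */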
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);

	/*
	 * We allocated a dummy extent, just to use extent buffer accessors.
	 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but
	 * that's fine, we will not go beyond system chunk array anyway.
	 */
	sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	set_extent_buffer_uptodate(sb);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}

		chunk = (struct btrfs_chunk *)sb_array_offset;
		/*
		 * At least one btrfs_chunk with one stripe must be present,
		 * exact stripe count check comes afterwards
		 */
		len = btrfs_chunk_item_size(1);
		if (cur_offset + len > array_size)
			goto out_short_read;

		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
		if (!num_stripes) {
			btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
				  num_stripes, cur_offset);
			ret = -EIO;
			break;
		}

		type = btrfs_chunk_type(sb, chunk);
		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
			btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
				  type, cur_offset);
			ret = -EIO;
			break;
		}

		len = btrfs_chunk_item_size(num_stripes);
		if (cur_offset + len > array_size)
			goto out_short_read;

		ret = read_one_chunk(&key, sb, chunk);
		if (ret)
			break;

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
		  len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}

/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev)
{
	struct btrfs_chunk_map *map;
	u64 next_start;
	bool ret = true;

	map = btrfs_find_chunk_map(fs_info, 0, U64_MAX);
	/* No chunk at all? Return false anyway */
	if (!map) {
		ret = false;
		goto out;
	}
	while (map) {
		int missing = 0;
		int max_tolerated;
		int i;

		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
					   map->start, missing, max_tolerated);
			btrfs_free_chunk_map(map);
			ret = false;
			goto out;
		}
		next_start = map->start + map->chunk_len;
		btrfs_free_chunk_map(map);

		map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start);
	}
out:
	return ret;
}

static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}

int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	int iter_ret = 0;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about possible circular locking dependency between
	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
	 * used for freeze protection of a fs (struct super_block.s_writers),
	 * which we take when starting a transaction, and extent buffers of the
	 * chunk tree if we call read_one_dev() while holding a lock on an
	 * extent buffer of the chunk tree. Since we are mounting the filesystem
	 * and at this point there can't be any concurrent task modifying the
	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = 0;
	key.offset = 0;
	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		struct extent_buffer *node = path->nodes[1];

		leaf = path->nodes[0];
		slot = path->slots[0];

		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			/*
			 * We are only called at mount time, so no need to take
			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
			 * we always lock first fs_info->chunk_mutex before
			 * acquiring any locks on the chunk tree. This is a
			 * requirement for chunk allocation, see the comment on
			 * top of btrfs_chunk_alloc() for details.
			 */
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
	}
	/* Catch error found during iteration */
	if (iter_ret < 0) {
		ret = iter_ret;
		goto error;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_warn(fs_info,
"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
			   btrfs_super_num_devices(fs_info->super_copy),
			   total_dev);
		fs_info->fs_devices->total_devices = total_dev;
		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	int ret = 0;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			device->fs_info = fs_info;
			ret = btrfs_get_dev_zone_info(device, false);
			if (ret)
				break;
		}

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
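
/*
 * Device statistics are persisted as a btrfs_dev_stats_item containing an
 * array of __le64 counters; the helpers below read and write the index-th
 * value directly in the extent buffer.
 */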
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}

static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
				  ret, btrfs_dev_name(device));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
					  btrfs_dev_name(device), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
					  btrfs_dev_name(device), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(trans, eb);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);
	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);

	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			    btrfs_dev_name(dev),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			    btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			  btrfs_dev_name(dev),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			  btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here.  This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}

/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
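
/*
 * Verify that a single dev extent matches its owning chunk: the chunk must
 * exist, the extent length must equal the chunk's per-device stripe length,
 * one of the chunk's stripes must point back at this device/offset, and the
 * extent must stay within the device (and zone alignment on zoned devices).
 */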
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_chunk_map *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	if (!map) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	stripe_len = btrfs_calc_stripe_length(map);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, map->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space. Although kernel can handle it without problem, better to warn
	 * the users.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  map->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	btrfs_free_chunk_map(map);
	return ret;
}

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	int ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
		struct btrfs_chunk_map *map;

		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		if (map->num_stripes != map->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  map->start, map->verified_stripes,
				  map->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&fs_info->mapping_tree_lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree.  This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount.  This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
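
/*
 * Worker for btrfs_repair_one_zone(): on zoned filesystems an I/O failure is
 * repaired by relocating the affected block group rather than rewriting in
 * place.
 */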
static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}

bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}

static void map_raid56_repair_block(struct btrfs_io_context *bioc,
				    struct btrfs_io_stripe *smap,
				    u64 logical)
{
	int data_stripes = nr_bioc_data_stripes(bioc);
	int i;

	for (i = 0; i < data_stripes; i++) {
		u64 stripe_start = bioc->full_stripe_logical +
				   btrfs_stripe_nr_to_offset(i);

		if (logical >= stripe_start &&
		    logical < stripe_start + BTRFS_STRIPE_LEN)
			break;
	}
	ASSERT(i < data_stripes);
	smap->dev = bioc->stripes[i].dev;
	smap->physical = bioc->stripes[i].physical +
			((logical - bioc->full_stripe_logical) &
			 BTRFS_STRIPE_LEN_MASK);
}

/*
 * Map a repair write into a single device.
 *
 * A repair write is triggered by read time repair or scrub, which would only
 * update the contents of a single device.
 * Not update any other mirrors nor go through RMW path.
 *
 * Callers should ensure:
 *
 * - Call btrfs_bio_counter_inc_blocked() first
 * - The range does not cross stripe boundary
 * - Has a valid @mirror_num passed in.
 */
int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
			   struct btrfs_io_stripe *smap, u64 logical,
			   u32 length, int mirror_num)
{
	struct btrfs_io_context *bioc = NULL;
	u64 map_length = length;
	int mirror_ret = mirror_num;
	int ret;

	ASSERT(mirror_num > 0);

	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
			      &bioc, smap, &mirror_ret);
	if (ret < 0)
		return ret;

	/* The map range should not cross stripe boundary. */
	ASSERT(map_length >= length);

	/* Already mapped to single stripe. */
	if (!bioc)
		goto out;

	/* Map the RAID56 multi-stripe writes to a single one. */
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		map_raid56_repair_block(bioc, smap, logical);
		goto out;
	}

	ASSERT(mirror_num <= bioc->num_stripes);
	smap->dev = bioc->stripes[mirror_num - 1].dev;
	smap->physical = bioc->stripes[mirror_num - 1].physical;
out:
	btrfs_put_bioc(bioc);
	return 0;
}