// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "transaction.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "relocation.h"
#include "raid-stripe-tree.h"
#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)
struct btrfs_io_geometry {
	u64 raid56_full_stripe_start;
};
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.devs_max	= 0,	/* 0 == as many as possible */
		.tolerated_failures = 1,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.tolerated_failures = 1,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.tolerated_failures = 2,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.tolerated_failures = 3,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.tolerated_failures = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
	},
	[BTRFS_RAID_RAID0] = {
		.tolerated_failures = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
	},
	[BTRFS_RAID_SINGLE] = {
		.tolerated_failures = 0,
		.raid_name	= "single",
	},
	[BTRFS_RAID_RAID5] = {
		.tolerated_failures = 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.tolerated_failures = 2,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
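/*
 * Example (illustrative sketch, not used by the code below): the table above
 * is consulted by first converting block group flags to an index with
 * btrfs_bg_flags_to_raid_index() (defined below) and then reading the
 * attribute of interest:
 *
 *	const u64 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID6;
 *	const enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(flags);
 *
 *	// For RAID6, tolerated_failures == 2: the profile survives the loss
 *	// of any two devices.
 *	int max_lost = btrfs_raid_array[index].tolerated_failures;
 */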
/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}
const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}
int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}
/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}
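/*
 * Example (illustrative only): a caller typically formats into a small stack
 * buffer, e.g. for a log message:
 *
 *	char buf[64];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
 *				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
 *	// buf now holds "data|raid1"; any unknown bits would be appended
 *	// as "0x<hex>".
 */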
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
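/*
 * A minimal sketch (not part of the code below) of the RCU read-side rule
 * described above for fs_devices::device_list_mutex: read-only traversal of
 * fs_devices::devices needs only rcu_read_lock(), while any modification of
 * the list must hold the mutex:
 *
 *	struct btrfs_device *device;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
 *		// read-only access to *device, no list modification here
 *	}
 *	rcu_read_unlock();
 */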
DEFINE_MUTEX(uuid_mutex);

static LIST_HEAD(fs_uuids);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}
/*
 * Allocate new btrfs_fs_devices structure identified by a fsid.
 *
 * @fsid:    if not NULL, copy the UUID to fs_devices::fsid and to
 *           fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);

	if (fsid) {
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
	}

	return fs_devs;
}
static void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}
void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}
static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{
	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
		return false;

	if (!metadata_fsid)
		return true;

	if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
		return false;

	return true;
}
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
			return fs_devices;
	}
	return NULL;
}
static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct file **bdev_file,
		      struct btrfs_super_block **disk_super)
{
	struct block_device *bdev;
	int ret;

	*bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL);

	if (IS_ERR(*bdev_file)) {
		ret = PTR_ERR(*bdev_file);
		btrfs_err(NULL, "failed to open device for path %s with flags 0x%x: %d",
			  device_path, flags, ret);
		goto error;
	}
	bdev = file_bdev(*bdev_file);

	if (flush)
		sync_blockdev(bdev);
	ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		fput(*bdev_file);
		goto error;
	}
	invalidate_bdev(bdev);
	*disk_super = btrfs_read_dev_super(bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		fput(*bdev_file);
		goto error;
	}

	return 0;

error:
	*bdev_file = NULL;
	return ret;
}
/*
 * Search and remove all stale devices (which are not mounted). When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * @devt:         Optional. When provided, it will release all unmounted
 *                devices matching this devt only.
 * @skip_device:  Optional. Will skip this device when searching for the stale
 *                devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret;
	bool freed = false;

	lockdep_assert_held(&uuid_mutex);

	/* Return good status if there is no instance of devt. */
	ret = !devt ? 0 : -ENOENT;
	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				if (devt)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			freed = true;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	/* If there is at least one freed device return 0. */
	if (freed)
		return 0;

	return ret;
}
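/*
 * Example (sketch): btrfs_forget_devices() below is essentially this pattern,
 * passing 0/NULL so that every stale (unmounted) device known to the module
 * gets released:
 *
 *	mutex_lock(&uuid_mutex);
 *	ret = btrfs_free_stale_devices(0, NULL);
 *	mutex_unlock(&uuid_mutex);
 */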
static struct btrfs_fs_devices *find_fsid_by_device(
					struct btrfs_super_block *disk_super,
					dev_t devt, bool *same_fsid_diff_dev)
{
	struct btrfs_fs_devices *fsid_fs_devices;
	struct btrfs_fs_devices *devt_fs_devices;
	const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
					BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool found_by_devt = false;

	/* Find the fs_device by the usual method, if found use it. */
	fsid_fs_devices = find_fsid(disk_super->fsid,
			has_metadata_uuid ? disk_super->metadata_uuid : NULL);

	/* The temp_fsid feature is supported only with single device filesystem. */
	if (btrfs_super_num_devices(disk_super) != 1)
		return fsid_fs_devices;

	/*
	 * A seed device is an integral component of the sprout device, which
	 * functions as a multi-device filesystem. So, temp-fsid feature is
	 * not supported.
	 */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
		return fsid_fs_devices;

	/* Try to find a fs_devices by matching devt. */
	list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
			if (device->devt == devt) {
				found_by_devt = true;
				break;
			}
		}
		if (found_by_devt)
			break;
	}

	if (found_by_devt) {
		/* Existing device. */
		if (fsid_fs_devices == NULL) {
			if (devt_fs_devices->opened == 0) {
				/* Stale device. */
				return NULL;
			}
			/* temp_fsid is mounting a subvol. */
			return devt_fs_devices;
		}
		/* Regular or temp_fsid device mounting a subvol. */
		return devt_fs_devices;
	}

	/* New device. */
	if (fsid_fs_devices == NULL)
		return NULL;

	/* sb::fsid is already used create a new temp_fsid. */
	*same_fsid_diff_dev = true;
	return NULL;
}
/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, blk_mode_t flags,
			void *holder)
{
	struct file *bdev_file;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev_file, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(file_bdev(bdev_file)))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(file_bdev(bdev_file)))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(file_bdev(bdev_file)))
		fs_devices->discardable = true;

	device->bdev_file = bdev_file;
	device->bdev = file_bdev(bdev_file);
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);

	if (device->devt != device->bdev->bd_dev) {
		btrfs_warn(NULL,
			   "device %s maj:min changed from %d:%d to %d:%d",
			   device->name->str, MAJOR(device->devt),
			   MINOR(device->devt), MAJOR(device->bdev->bd_dev),
			   MINOR(device->bdev->bd_dev));

		device->devt = device->bdev->bd_dev;
	}

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);

	return -EINVAL;
}
const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
{
	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool same_fsid_diff_dev = false;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		btrfs_err(NULL,
"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
			  path);
		return ERR_PTR(-EAGAIN);
	}

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);

	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		if (has_metadata_uuid)
			memcpy(fs_devices->metadata_uuid,
			       disk_super->metadata_uuid, BTRFS_FSID_SIZE);

		if (same_fsid_diff_dev) {
			generate_random_uuid(fs_devices->fsid);
			fs_devices->temp_fsid = true;
			pr_info("BTRFS: device %s (%d:%d) using temp-fsid %pU\n",
				path, MAJOR(path_devt), MINOR(path_devt),
				fs_devices->fsid);
		}

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		if (found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
			       BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
			       btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
"device %s (%d:%d) belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
				  path, MAJOR(path_devt), MINOR(path_devt),
				  fs_devices->fsid, current->comm,
				  task_pid_nr(current));
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
"BTRFS: device label %s devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));
		else
			pr_info(
"BTRFS: device fsid %pU devid %llu transid %llu %s (%d:%d) scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				MAJOR(path_devt), MINOR(path_devt),
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be a spurious
		 * and unwanted.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid. We keep the one
			 * with larger generation number or the last-in if
			 * generation are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}
static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
		 * in btrfs_init_dev_replace() so just continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev_file) {
			fput(device->bdev_file);
			device->bdev = NULL;
			device->bdev_file = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}
/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	fput(device->bdev_file);
}
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}
static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without the needing device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   blk_mode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;
	int ret = 0;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret2;

		ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret2 == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret2 == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
		if (ret == 0 && ret2 != 0)
			ret = ret2;
	}

	if (fs_devices->open_devices == 0) {
		if (ret)
			return ret;
		return -EINVAL;
	}

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}
static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}
void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}
static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
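/*
 * Example (sketch): callers compute the on-disk location of superblock copy
 * @i with btrfs_sb_offset() and pass both the (possibly remapped, e.g. on
 * zoned devices) read location and the nominal location, which the function
 * verifies against the super block's recorded bytenr:
 *
 *	const u64 bytenr_orig = btrfs_sb_offset(0);	// primary copy, 64KiB
 *
 *	disk_super = btrfs_read_disk_super(bdev, bytenr_orig, bytenr_orig);
 *	if (IS_ERR(disk_super))
 *		// no valid btrfs super block at that offset
 */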
int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}
static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
				    const char *path, dev_t devt,
				    bool mount_arg_dev)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Do not skip device registration for mounted devices with matching
	 * maj:min but different paths. Booting without initrd relies on
	 * /dev/root initially, later replaced with the actual root device.
	 * A successful scan ensures grub2-probe selects the correct device.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		mutex_lock(&fs_devices->device_list_mutex);

		if (!fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			continue;
		}

		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->bdev && (device->bdev->bd_dev == devt) &&
			    strcmp(device->name->str, path) != 0) {
				mutex_unlock(&fs_devices->device_list_mutex);

				/* Do not skip registration. */
				return false;
			}
		}
		mutex_unlock(&fs_devices->device_list_mutex);
	}

	if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
	    !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING))
		return true;

	return false;
}
/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache.
 *
 * With @mount_arg_dev it's a scan during mount time that will always register
 * the device or return an error. Multi-device and seeding devices are registered
 * in both cases.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
					   bool mount_arg_dev)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct file *bdev_file;
	u64 bytenr;
	dev_t devt;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * Avoid an exclusive open here, as the systemd-udev may initiate the
	 * device scan which may race with the user's mount or mkfs command,
	 * resulting in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for an exclusive open. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL);
	if (IS_ERR(bdev_file))
		return ERR_CAST(bdev_file);

	/*
	 * We would like to check all the super blocks, but doing so would
	 * allow a mount to succeed after a mkfs from a different filesystem.
	 * Currently, recovery from a bad primary btrfs superblock is done
	 * using the userspace command 'btrfs check --super'.
	 */
	ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
					   btrfs_sb_offset(0));
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	devt = file_bdev(bdev_file)->bd_dev;
	if (btrfs_skip_registration(disk_super, path, devt, mount_arg_dev)) {
		pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n",
			 path, MAJOR(devt), MINOR(devt));

		btrfs_free_stale_devices(devt, NULL);

		device = NULL;
		goto free_disk_super;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

free_disk_super:
	btrfs_release_disk_super(disk_super);

error_bdev_put:
	fput(bdev_file);

	return device;
}
/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (find_first_extent_bit(&device->alloc_state, *start,
				  &physical_start, &physical_end,
				  CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end + 1 - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
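/*
 * Worked example (illustrative sketch): if the allocator probes *start == 1M
 * with len == 4M, and a CHUNK_ALLOCATED range [2M, 6M) is recorded in
 * device->alloc_state, the two ranges intersect and *start is advanced past
 * the pending chunk:
 *
 *	u64 start = SZ_1M;
 *
 *	if (contains_pending_extent(device, &start, SZ_4M)) {
 *		// start is now 6M (physical_end + 1); the hole search
 *		// continues from there
 *	}
 */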
static u64 dev_extent_search_start(struct btrfs_device *device)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return BTRFS_DEVICE_RANGE_RESERVED;
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return 0;
	default:
		BUG();
	}
}
static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}
/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if hole position is updated, false
 * otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}
/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_start;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device);
	max_hole_start = search_start;

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
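/*
 * Example (illustrative sketch): a chunk-allocating caller, holding the
 * chunk_mutex, asks for a 1GiB hole and either gets a suitable start offset
 * or learns the size of the largest hole available:
 *
 *	u64 start;
 *	u64 len;
 *	int ret;
 *
 *	ret = find_free_dev_extent(device, SZ_1G, &start, &len);
 *	if (ret == 0)
 *		// [start, start + SZ_1G) is free on this device
 *	else if (ret == -ENOSPC)
 *		// no hole large enough; len holds the largest hole found
 */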
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	u64 ret = 0;
	struct rb_node *n;

	read_lock(&fs_info->mapping_tree_lock);
	n = rb_last(&fs_info->mapping_tree.rb_root);
	if (n) {
		struct btrfs_chunk_map *map;

		map = rb_entry(n, struct btrfs_chunk_map, rb_node);
		ret = map->start + map->chunk_len;
	}
	read_unlock(&fs_info->mapping_tree_lock);

	return ret;
}
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, true);
	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	btrfs_trans_release_chunk_metadata(trans);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(trans, leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 *
 * We don't care about errors here, this is just to be kind to userspace.
 */
static void update_dev_time(const char *device_path)
{
	struct path path;
	int ret;

	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
	if (ret)
		return;

	inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION);
	path_put(&path);
}
static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	btrfs_trans_release_chunk_metadata(trans);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min)
			return btrfs_raid_array[i].mindev_error;
	}

	return 0;
}
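/*
 * Worked example (sketch): on a two-device raid1 filesystem, removing one
 * device is validated as btrfs_check_raid_min_devices(fs_info, 2 - 1).
 * Since btrfs_raid_array[BTRFS_RAID_RAID1].devs_min is 2 and 1 < 2, the
 * function returns BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, which the device
 * remove ioctl reports back to user space.
 */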
static struct btrfs_device *btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}
/*
 * Helper function to check if the given device is part of s_bdev / latest_dev
 * and replace it with the provided or the next active device, in the context
 * where this function called, there should be always be another device (or
 * this_dev) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
					    struct btrfs_device *next_device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;

	if (!next_device)
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
		fs_info->fs_devices->latest_dev = next_device;
}
/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}
static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
				     struct block_device *bdev, int copy_num)
{
	struct btrfs_super_block *disk_super;
	const size_t len = sizeof(disk_super->magic);
	const u64 bytenr = btrfs_sb_offset(copy_num);
	int ret;

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
	if (IS_ERR(disk_super))
		return;

	memset(&disk_super->magic, 0, len);
	folio_mark_dirty(virt_to_folio(disk_super));
	btrfs_release_disk_super(disk_super);

	ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
	if (ret)
		btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
			   copy_num, ret);
}
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device)
{
	int copy_num;
	struct block_device *bdev = device->bdev;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
		if (bdev_is_zoned(bdev))
			btrfs_reset_sb_log_zones(bdev, copy_num);
		else
			btrfs_scratch_superblock(fs_info, bdev, copy_num);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device->name->str);
}
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    struct btrfs_dev_lookup_args *args,
		    struct file **bdev_file)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
		return -EINVAL;
	}

	/*
	 * The device list in fs_devices is accessed without locks (neither
	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
	 * filesystem and another device rm cannot run.
	 */
	num_devices = btrfs_num_devices(fs_info);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		return ret;

	device = btrfs_find_device(fs_info->fs_devices, args);
	if (!device) {
		if (args->missing)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = -ENOENT;
		return ret;
	}

	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
		  "cannot remove device %s (devid %llu) due to active swapfile",
				  btrfs_dev_name(device), device->devid);
		return -ETXTBSY;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return BTRFS_ERROR_DEV_TGT_REPLACE;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1)
		return BTRFS_ERROR_DEV_ONLY_WRITABLE;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_undo;
	}

	ret = btrfs_rm_dev_item(trans, device);
	if (ret) {
		/* Any error in dev item removal is critical */
		btrfs_crit(fs_info,
			   "failed to remove device item for devid %llu: %d",
			   device->devid, ret);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed_list.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev_file) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_remove_device(device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * At this point, the device is zero sized and detached from the
	 * devices list. All that's left is to zero out the old supers and
	 * free the device.
	 *
	 * We cannot call btrfs_close_bdev() here because we're holding the sb
	 * write lock, and fput() on the block device will pull in the
	 * ->open_mutex on the block device and it's dependencies. Instead
	 * just flush the device and let the caller do the final bdev_release.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		btrfs_scratch_superblocks(fs_info, device);
		if (device->bdev) {
			sync_blockdev(device->bdev);
			invalidate_bdev(device->bdev);
		}
	}

	*bdev_file = device->bdev_file;
	synchronize_rcu();
	btrfs_free_device(device);

	/*
	 * This can happen if cur_devices is the private seed devices list. We
	 * cannot call close_fs_devices() here because it expects the uuid_mutex
	 * to be held, but in fact we don't need that for the private
	 * seed_devices, we can simply decrement cur_devices->opened and then
	 * remove it from our list and free the fs_devices.
	 */
	if (cur_devices->num_devices == 0) {
		list_del_init(&cur_devices->seed_list);
		ASSERT(cur_devices->opened == 1);
		cur_devices->opened--;
		free_fs_devices(cur_devices);
	}

	ret = btrfs_commit_transaction(trans);

	return ret;

error_undo:
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * In case of an fs with no seed, srcdev->fs_devices will point
	 * to the fs_devices of fs_info. However, when the dev being replaced
	 * is a seed dev it will point to the seed's local fs_devices. In
	 * short, srcdev will have its correct fs_devices in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	mutex_lock(&uuid_mutex);

	btrfs_close_bdev(srcdev);
	synchronize_rcu();
	btrfs_free_device(srcdev);

	/* If there are no devices left we would rather delete the fs_devices. */
	if (!fs_devices->num_devices) {
		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target is added to the sprout FS, so there will be no more
		 * device left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		list_del_init(&fs_devices->seed_list);
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_remove_device(tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev);

	btrfs_close_bdev(tgtdev);
	synchronize_rcu();
	btrfs_free_device(tgtdev);
}
/*
 * Populate args from device at path.
 *
 * @fs_info:	the filesystem
 * @args:	the args to populate
 * @path:	the path to the device
 *
 * This will read the super block of the device at @path and populate @args with
 * the devid, fsid, and uuid.  This is meant to be used for ioctls that need to
 * lookup a device to operate on, but need to do it before we take any locks.
 * This properly handles the special case of "missing" that a user may pass in,
 * and does some basic sanity checks.  The caller must make sure that @path is
 * properly NUL terminated before calling in, and must call
 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
 * uuid buffers.
 *
 * Return: 0 for success, -errno for failure
 */
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path)
{
	struct btrfs_super_block *disk_super;
	struct file *bdev_file;
	int ret;

	if (!path || !path[0])
		return -EINVAL;
	if (!strcmp(path, "missing")) {
		args->missing = true;
		return 0;
	}

	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
	if (!args->uuid || !args->fsid) {
		btrfs_put_dev_args_from_path(args);
		return -ENOMEM;
	}

	ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0,
				    &bdev_file, &disk_super);
	if (ret) {
		btrfs_put_dev_args_from_path(args);
		return ret;
	}

	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
	else
		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
	btrfs_release_disk_super(disk_super);
	fput(bdev_file);
	return 0;
}
/*
 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables
 * that don't need to be freed.
 */
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
{
	kfree(args->uuid);
	kfree(args->fsid);
	args->uuid = NULL;
	args->fsid = NULL;
}
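
/*
 * A minimal usage sketch for the two helpers above (the surrounding caller
 * and its error handling here are illustrative, not taken from this file):
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *	struct btrfs_device *device;
 *	int ret;
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, path);
 *	if (ret)
 *		return ret;
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);	// always pair get with put
 *	if (!device)
 *		return -ENOENT;
 *
 * The put is required even when the lookup fails, because the get allocates
 * the temporary ->uuid and ->fsid buffers.
 */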
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *device;
	int ret;

	if (devid) {
		args.devid = devid;
		device = btrfs_find_device(fs_info->fs_devices, &args);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
	if (ret)
		return ERR_PTR(ret);
	device = btrfs_find_device(fs_info->fs_devices, &args);
	btrfs_put_dev_args_from_path(&args);
	if (!device)
		return ERR_PTR(-ENOENT);
	return device;
}
static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;

	lockdep_assert_held(&uuid_mutex);
	if (!fs_devices->seeding)
		return ERR_PTR(-EINVAL);

	/*
	 * Private copy of the seed devices, anchored at
	 * fs_info->fs_devices->seed_list
	 */
	seed_devices = alloc_fs_devices(NULL);
	if (IS_ERR(seed_devices))
		return seed_devices;

	/*
	 * It's necessary to retain a copy of the original seed fs_devices in
	 * fs_uuids so that filesystems which have been seeded can successfully
	 * reference the seed device from open_seed_devices. This also supports
	 * multiple fs seed.
	 */
	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return old_devices;
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	return seed_devices;
}
/*
 * Splice seed devices into the sprout fs_devices.
 * Generate a new fsid for the sprouted read-write filesystem.
 */
static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
			       struct btrfs_fs_devices *seed_devices)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	/*
	 * We are updating the fsid, the thread leading to device_list_add()
	 * could race, so uuid_mutex is needed.
	 */
	lockdep_assert_held(&uuid_mutex);

	/*
	 * The threads listed below may traverse dev_list but can do that without
	 * device_list_mutex:
	 * - All device ops and balance - as we are in btrfs_exclop_start.
	 * - Various dev_list readers - are using RCU.
	 * - btrfs_ioctl_fitrim() - is using RCU.
	 *
	 * For-read threads as below are using device_list_mutex:
	 * - Readonly scrub btrfs_scrub_dev()
	 * - Readonly scrub btrfs_scrub_progress()
	 * - btrfs_get_dev_stats()
	 */
	lockdep_assert_held(&fs_devices->device_list_mutex);

	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	fs_devices->seeding = false;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = false;
	list_add(&seed_devices->seed_list, &fs_devices->seed_list);

	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);
}
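
/*
 * Taken together, btrfs_init_sprout() and btrfs_setup_sprout() implement the
 * seed-to-sprout transition: the original (seeding) devices are moved onto a
 * private fs_devices anchored at fs_devices->seed_list, the now-writable
 * sprout keeps the original struct but gets zeroed device counters and a
 * freshly generated fsid, and the SEEDING flag is dropped from the superblock
 * so the sprout mounts read-write from then on.
 */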
/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		btrfs_reserve_chunk_metadata(trans, false);
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		btrfs_trans_release_chunk_metadata(trans);
		if (ret < 0)
			goto error;

next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		args.devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_FSID_SIZE);
		args.uuid = dev_uuid;
		args.fsid = fs_uuid;
		device = btrfs_find_device(fs_info->fs_devices, &args);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(trans, leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct file *bdev_file;
	struct super_block *sb = fs_info->sb;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *seed_devices = NULL;
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int ret = 0;
	bool seeding_dev = false;
	bool locked = false;

	if (sb_rdonly(sb) && !fs_devices->seeding)
		return -EROFS;

	bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
					   fs_info->bdev_holder, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	if (!btrfs_check_device_zone_type(fs_info, file_bdev(bdev_file))) {
		ret = -EINVAL;
		goto error;
	}

	if (fs_devices->seeding) {
		seeding_dev = true;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
		locked = true;
	}

	sync_blockdev(file_bdev(bdev_file));

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
		if (device->bdev == file_bdev(bdev_file)) {
			ret = -EEXIST;
			rcu_read_unlock();
			goto error;
		}
	}
	rcu_read_unlock();

	device = btrfs_alloc_device(fs_info, NULL, NULL, device_path);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	device->fs_info = fs_info;
	device->bdev_file = bdev_file;
	device->bdev = file_bdev(bdev_file);
	ret = lookup_bdev(device_path, &device->devt);
	if (ret)
		goto error_free_device;

	ret = btrfs_get_dev_zone_info(device, false);
	if (ret)
		goto error_free_device;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_free_zone;
	}

	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes =
		round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev_file, BTRFS_BDEV_BLOCKSIZE);

	if (seeding_dev) {
		btrfs_clear_sb_rdonly(sb);

		/* GFP_KERNEL allocation must not be under device_list_mutex */
		seed_devices = btrfs_init_sprout(fs_info);
		if (IS_ERR(seed_devices)) {
			ret = PTR_ERR(seed_devices);
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}

	mutex_lock(&fs_devices->device_list_mutex);
	if (seeding_dev) {
		btrfs_setup_sprout(fs_info, seed_devices);
		btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
						device);
	}

	device->fs_devices = fs_devices;

	mutex_lock(&fs_info->chunk_mutex);
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;

	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	if (!bdev_nonrot(device->bdev))
		fs_devices->rotating = true;

	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);

	/* Add sysfs device entry */
	btrfs_sysfs_add_device(device);

	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		ret = btrfs_finish_sprout(trans);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/*
		 * fs_devices now represents the newly sprouted filesystem and
		 * its fsid has been changed by btrfs_sprout_splice().
		 */
		btrfs_sysfs_update_sprout_fsid(fs_devices);
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		locked = false;

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}

	/*
	 * Now that we have written a new super block to this device, check all
	 * other fs_devices list if device_path alienates any other scanned
	 * device.
	 * We can ignore the return value as it typically returns -EINVAL and
	 * only succeeds if the device was an alien.
	 */
	btrfs_forget_devices(device->devt);

	/* Update ctime/mtime for blkid or udev */
	update_dev_time(device_path);

	return ret;

error_sysfs:
	btrfs_sysfs_remove_device(device);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_del_rcu(&device->dev_list);
	list_del(&device->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	fs_info->fs_devices->open_devices--;
	fs_info->fs_devices->rw_devices--;
	fs_info->fs_devices->total_devices--;
	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
	btrfs_set_super_total_bytes(fs_info->super_copy,
				    orig_super_total_bytes);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
	if (seeding_dev)
		btrfs_set_sb_rdonly(sb);
	if (trans)
		btrfs_end_transaction(trans);
error_free_zone:
	btrfs_destroy_dev_zone_info(device);
error_free_device:
	btrfs_free_device(device);
error:
	fput(bdev_file);
	if (locked) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->fs_info->chunk_root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(trans, leaf);

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total;
	u64 diff;
	int ret;

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return -EACCES;

	new_size = round_down(new_size, fs_info->sectorsize);

	mutex_lock(&fs_info->chunk_mutex);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);

	if (new_size <= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EINVAL;
	}

	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total + diff, fs_info->sectorsize));
	device->fs_devices->total_rw_bytes += diff;
	atomic64_add(diff, &fs_info->free_chunk_space);

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->fs_info);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);
	mutex_unlock(&fs_info->chunk_mutex);

	btrfs_reserve_chunk_metadata(trans, false);
	ret = btrfs_update_device(trans, device);
	btrfs_trans_release_chunk_metadata(trans);

	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) { /* Logic error or corruption */
		btrfs_err(fs_info, "failed to lookup chunk %llu when freeing",
			  chunk_offset);
		btrfs_abort_transaction(trans, -ENOENT);
		ret = -EUCLEAN;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0) {
		btrfs_err(fs_info, "failed to delete chunk %llu item", chunk_offset);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	lockdep_assert_held(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}

		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
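
/*
 * Layout of super_copy->sys_chunk_array that the loop above walks, as a
 * worked example (entry sizes are illustrative):
 *
 *	[btrfs_disk_key][btrfs_chunk + N stripes][btrfs_disk_key][...]
 *
 * Each entry is a disk key immediately followed by a chunk item whose size
 * depends on its stripe count, so an entry with num_stripes == 2 occupies
 * sizeof(struct btrfs_disk_key) + btrfs_chunk_item_size(2) bytes. Removing
 * an entry is therefore a memmove() of everything behind it plus a matching
 * shrink of sys_array_size, exactly as done above.
 */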
struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info,
						    u64 logical, u64 length)
{
	struct rb_node *node = fs_info->mapping_tree.rb_root.rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev;
	struct btrfs_chunk_map *map;
	struct btrfs_chunk_map *prev_map = NULL;

	while (node) {
		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		prev = node;
		prev_map = map;

		if (logical < map->start) {
			node = node->rb_left;
		} else if (logical >= map->start + map->chunk_len) {
			node = node->rb_right;
		} else {
			refcount_inc(&map->refs);
			return map;
		}
	}

	if (!prev)
		return NULL;

	orig_prev = prev;
	while (prev && logical >= prev_map->start + prev_map->chunk_len) {
		prev = rb_next(prev);
		prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
	}

	if (!prev) {
		prev = orig_prev;
		prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
		while (prev && logical < prev_map->start) {
			prev = rb_prev(prev);
			prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
		}
	}

	if (prev) {
		u64 end = logical + length;

		/*
		 * Caller can pass a U64_MAX length when it wants to get any
		 * chunk starting at an offset of 'logical' or higher, so deal
		 * with underflow by resetting the end offset to U64_MAX.
		 */
		if (end < logical)
			end = U64_MAX;

		if (end > prev_map->start &&
		    logical < prev_map->start + prev_map->chunk_len) {
			refcount_inc(&prev_map->refs);
			return prev_map;
		}
	}

	return NULL;
}

struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
					     u64 logical, u64 length)
{
	struct btrfs_chunk_map *map;

	read_lock(&fs_info->mapping_tree_lock);
	map = btrfs_find_chunk_map_nolock(fs_info, logical, length);
	read_unlock(&fs_info->mapping_tree_lock);

	return map;
}
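
/*
 * The nolock variant above does a point lookup in the rb-tree and, failing
 * that, probes the neighbouring nodes for any chunk overlapping
 * [logical, logical + length).  A hedged usage sketch (the caller shown is
 * illustrative, not from this file):
 *
 *	// find any chunk at or after 'logical', e.g. when iterating chunks:
 *	map = btrfs_find_chunk_map(fs_info, logical, U64_MAX);
 *	if (map) {
 *		... use map->start and map->chunk_len ...
 *		btrfs_free_chunk_map(map);	// drop the ref taken above
 *	}
 */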
/*
 * Find the mapping containing the given logical extent.
 *
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
					    u64 logical, u64 length)
{
	struct btrfs_chunk_map *map;

	map = btrfs_find_chunk_map(fs_info, logical, length);

	if (unlikely(!map)) {
		btrfs_crit(fs_info,
			   "unable to find chunk map for logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
		btrfs_crit(fs_info,
			   "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
			   logical, logical + length, map->start,
			   map->start + map->chunk_len);
		btrfs_free_chunk_map(map);
		return ERR_PTR(-EINVAL);
	}

	/* Callers are responsible for dropping the reference. */
	return map;
}
static int remove_chunk_item(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map, u64 chunk_offset)
{
	int i;

	/*
	 * Removing chunk items and updating the device items in the chunks btree
	 * requires holding the chunk_mutex.
	 * See the comment at btrfs_chunk_alloc() for the details.
	 */
	lockdep_assert_held(&trans->fs_info->chunk_mutex);

	for (i = 0; i < map->num_stripes; i++) {
		int ret;

		ret = btrfs_update_device(trans, map->stripes[i].dev);
		if (ret)
			return ret;
	}

	return btrfs_free_chunk(trans, chunk_offset);
}
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_chunk_map *map;
	u64 dev_extent_len = 0;
	int i, ret = 0;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(map)) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		return PTR_ERR(map);
	}

	/*
	 * First delete the device extent items from the devices btree.
	 * We take the device_list_mutex to avoid racing with the finishing phase
	 * of a device replace operation. See the comment below before acquiring
	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
	 * because that can result in a deadlock when deleting the device extent
	 * items from the devices btree - COWing an extent buffer from the btree
	 * may result in allocating a new metadata chunk, which would attempt to
	 * lock again fs_info->chunk_mutex.
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;

		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (device->bytes_used > 0) {
			mutex_lock(&fs_info->chunk_mutex);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
			btrfs_clear_space_info_full(fs_info);
			mutex_unlock(&fs_info->chunk_mutex);
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * We acquire fs_info->chunk_mutex for 2 reasons:
	 *
	 * 1) Just like with the first phase of the chunk allocation, we must
	 *    reserve system space, do all chunk btree updates and deletions, and
	 *    update the system chunk array in the superblock while holding this
	 *    mutex. This is for similar reasons as explained on the comment at
	 *    the top of btrfs_chunk_alloc();
	 *
	 * 2) Prevent races with the final phase of a device replace operation
	 *    that replaces the device object associated with the map's stripes,
	 *    because the device object's id can change at any time during that
	 *    final phase of the device replace operation
	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
	 *    replaced device and then see it with an ID of
	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
	 *    the device item, which does not exist on the chunk btree.
	 *    The finishing phase of device replace acquires both the
	 *    device_list_mutex and the chunk_mutex, in that order, so we are
	 *    safe by just acquiring the chunk_mutex.
	 */
	trans->removing_chunk = true;
	mutex_lock(&fs_info->chunk_mutex);

	check_system_chunk(trans, map->type);

	ret = remove_chunk_item(trans, map, chunk_offset);
	/*
	 * Normally we should not get -ENOSPC since we reserved space before
	 * through the call to check_system_chunk().
	 *
	 * Despite our system space_info having enough free space, we may not
	 * be able to allocate extents from its block groups, because all have
	 * an incompatible profile, which will force us to allocate a new system
	 * block group with the right profile, or right after we called
	 * check_system_chunk() above, a scrub turned the only system block group
	 * with enough free space into RO mode.
	 * This is explained with more detail at do_chunk_alloc().
	 *
	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
	 */
	if (ret == -ENOSPC) {
		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
		struct btrfs_block_group *sys_bg;

		sys_bg = btrfs_create_chunk(trans, sys_flags);
		if (IS_ERR(sys_bg)) {
			ret = PTR_ERR(sys_bg);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = remove_chunk_item(trans, map, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	} else if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	mutex_unlock(&fs_info->chunk_mutex);
	trans->removing_chunk = false;

	/*
	 * We are done with chunk btree updates and deletions, so release the
	 * system space we previously reserved (with check_system_chunk()).
	 */
	btrfs_trans_release_chunk_metadata(trans);

	ret = btrfs_remove_block_group(trans, map);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

out:
	if (trans->removing_chunk) {
		mutex_unlock(&fs_info->chunk_mutex);
		trans->removing_chunk = false;
	}
	/* once for us */
	btrfs_free_chunk_map(map);
	return ret;
}
int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_group *block_group;
	u64 length;
	int ret;

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info,
			  "relocate: not supported on extent tree v2 yet");
		return -EINVAL;
	}

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	lockdep_assert_held(&fs_info->reclaim_bgs_lock);

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(fs_info);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	btrfs_scrub_continue(fs_info);

	/*
	 * If we had a transaction abort, stop all running scrubs.
	 * See transaction.c:cleanup_transaction() why we do it here.
	 */
	if (BTRFS_FS_ERROR(fs_info))
		btrfs_scrub_cancel(fs_info);

	if (ret)
		return ret;

	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
	if (!block_group)
		return -ENOENT;
	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
	length = block_group->length;
	btrfs_put_block_group(block_group);

	/*
	 * On a zoned file system, discard the whole block group, this will
	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
	 * resetting the zone fails, don't treat it as a fatal problem from the
	 * filesystem's point of view.
	 */
	if (btrfs_is_zoned(fs_info)) {
		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
		if (ret)
			btrfs_info(fs_info,
				   "failed to reset zone %llu after relocation",
				   chunk_offset);
	}

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, chunk_offset);
	btrfs_end_transaction(trans);
	return ret;
}
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		mutex_lock(&fs_info->reclaim_bgs_lock);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto error;
		}
		if (ret == 0) {
			/*
			 * On the first search we would find chunk tree with
			 * offset -1, which is not possible. On subsequent
			 * loops this would find an existing item on an invalid
			 * offset (one less than the previous one, wrong
			 * alignment and size).
			 */
			ret = -EUCLEAN;
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto error;
		}

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			if (ret < 0)
				goto error;
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&fs_info->reclaim_bgs_lock);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * return 1 : allocate a data chunk successfully,
 * return <0: errors during allocating a data chunk,
 * return 0 : no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
	struct btrfs_block_group *cache;
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	spin_lock(&fs_info->data_sinfo->lock);
	bytes_used = fs_info->data_sinfo->bytes_used;
	spin_unlock(&fs_info->data_sinfo->lock);

	if (!bytes_used) {
		struct btrfs_trans_handle *trans;
		int ret;

		trans = btrfs_join_transaction(fs_info->tree_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
		btrfs_end_transaction(trans);
		if (ret < 0)
			return ret;
		return 1;
	}

	return 0;
}
static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
				const struct btrfs_disk_balance_args *disk)
{
	memset(cpu, 0, sizeof(*cpu));

	cpu->profiles = le64_to_cpu(disk->profiles);
	cpu->usage = le64_to_cpu(disk->usage);
	cpu->devid = le64_to_cpu(disk->devid);
	cpu->pstart = le64_to_cpu(disk->pstart);
	cpu->pend = le64_to_cpu(disk->pend);
	cpu->vstart = le64_to_cpu(disk->vstart);
	cpu->vend = le64_to_cpu(disk->vend);
	cpu->target = le64_to_cpu(disk->target);
	cpu->flags = le64_to_cpu(disk->flags);
	cpu->limit = le64_to_cpu(disk->limit);
	cpu->stripes_min = le32_to_cpu(disk->stripes_min);
	cpu->stripes_max = le32_to_cpu(disk->stripes_max);
}
static void btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
				const struct btrfs_balance_args *cpu)
{
	memset(disk, 0, sizeof(*disk));

	disk->profiles = cpu_to_le64(cpu->profiles);
	disk->usage = cpu_to_le64(cpu->usage);
	disk->devid = cpu_to_le64(cpu->devid);
	disk->pstart = cpu_to_le64(cpu->pstart);
	disk->pend = cpu_to_le64(cpu->pend);
	disk->vstart = cpu_to_le64(cpu->vstart);
	disk->vend = cpu_to_le64(cpu->vend);
	disk->target = cpu_to_le64(cpu->target);
	disk->flags = cpu_to_le64(cpu->flags);
	disk->limit = cpu_to_le64(cpu->limit);
	disk->stripes_min = cpu_to_le32(cpu->stripes_min);
	disk->stripes_max = cpu_to_le32(cpu->stripes_max);
}
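
/*
 * The two converters above are exact mirrors: every field stored in the
 * balance item is little-endian on disk (le64/le32) and native-endian in
 * struct btrfs_balance_args, so for any args 'a',
 *
 *	btrfs_cpu_balance_args_to_disk(&d, &a);
 *	btrfs_disk_balance_args_to_cpu(&b, &d);
 *
 * leaves 'b' equal to 'a' in all converted fields; the memset() only clears
 * padding and reserved space.
 */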
static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(trans, leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}
static int del_balance_item(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used. The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full. Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}
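
/*
 * Example of the heuristic above: a balance started with "-dconvert=raid1"
 * that is interrupted resumes as if "-dconvert=raid1,soft" had been given,
 * so chunks already converted to raid1 are skipped; a plain "-d" balance
 * resumes as "-dusage=90", skipping chunks that were already rewritten and
 * are therefore expected to be reasonably full.
 */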
/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	ASSERT(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}
/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
				    struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = cache->used;

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = mult_perc(cache->length, bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->length;
	else
		user_thresh_max = mult_perc(cache->length, bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
			      u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = cache->used;

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->length;
	else
		user_thresh = mult_perc(cache->length, bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
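
/*
 * Worked example for the two usage filters above: with "usage=50" on a 1GiB
 * chunk, user_thresh is mult_perc(1GiB, 50) = 512MiB and the chunk is
 * selected for balancing (filter returns 0) only while chunk_used < 512MiB.
 * With "usage=0" (usage and usage_min share the same bytes in the args) the
 * threshold degenerates to one byte, so only completely empty chunks match.
 * The range variant selects chunks with user_thresh_min <= used <
 * user_thresh_max.
 */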
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}
static u64 calc_data_stripes(u64 type, int num_stripes)
{
	const int index = btrfs_bg_flags_to_raid_index(type);
	const int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;

	return (num_stripes - nparity) / ncopies;
}
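
/*
 * Worked example for calc_data_stripes(): a RAID6 chunk with num_stripes = 6
 * has nparity = 2 and ncopies = 1, giving (6 - 2) / 1 = 4 data stripes; a
 * RAID10 chunk with num_stripes = 4 has nparity = 0 and ncopies = 2, giving
 * (4 - 0) / 2 = 2 data stripes.
 */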
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	u64 type;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	type = btrfs_chunk_type(leaf, chunk);
	factor = calc_data_stripes(type, num_stripes);

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
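
/*
 * Example of the drange math above: a 2GiB RAID0 chunk with two stripes has
 * factor = calc_data_stripes() = 2, so each stripe covers 1GiB of physical
 * space on its device. A stripe at physical offset 5GiB then intersects the
 * user-supplied [pstart, pend) range iff 5GiB < pend and 6GiB > pstart.
 */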
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}
static int chunk_stripes_range_filter(struct extent_buffer *leaf,
				      struct btrfs_chunk *chunk,
				      struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}
static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}
static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/* The single value limit and min/max limits use the same bytes in the bctl */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and min/max limits use the same bytes
		 * in the bctl
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->reclaim_bgs_lock);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(leaf, chunk, found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto loop;
		}

		if (counting) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto loop;
		}

		if (!chunk_reserved) {
			/*
			 * We may be relocating the only data chunk we have,
			 * which could potentially end up with losing data's
			 * raid profile, so lets allocate an empty one in
			 * advance.
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->reclaim_bgs_lock);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->reclaim_bgs_lock);
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else if (ret == -ETXTBSY) {
			btrfs_info(fs_info,
	   "skipping relocation of block group %llu due to active swapfile",
				   found_key.offset);
			ret = 0;
		} else if (ret) {
			goto error;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}
/*
 * See if a given profile is valid and reduced.
 *
 * @flags:     profile to validate
 * @extended:  if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	return has_single_bit_set(flags);
}
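
/*
 * Examples for alloc_profile_is_valid(): in extended form a single profile
 * bit such as BTRFS_BLOCK_GROUP_RAID1 is valid, and so is
 * BTRFS_AVAIL_ALLOC_BIT_SINGLE (the in-memory stand-in for "single", which
 * has no on-disk bit). A combination like RAID1|RAID10 is not reduced and is
 * rejected, as is 0, since an extended profile must name exactly one profile.
 */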
/*
 * Validate target profile against allowed profiles and return true if it's OK.
 * Otherwise print the error message and return false.
 */
static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
		const struct btrfs_balance_args *bargs,
		u64 allowed, const char *type)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return true;

	/* Profile is valid and does not have bits outside of the allowed set */
	if (alloc_profile_is_valid(bargs->target, 1) &&
	    (bargs->target & ~allowed) == 0)
		return true;

	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
		  type, btrfs_bg_type_to_raid_name(bargs->target));
	return false;
}
/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
 */
static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
				  u32 size_buf)
{
	int ret;
	u32 size_bp = size_buf;
	char *bp = buf;
	u64 flags = bargs->flags;
	char tmp_buf[128] = {'\0'};

	if (!flags)
		return;

#define CHECK_APPEND_NOARG(a)						\
	do {								\
		ret = snprintf(bp, size_bp, (a));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_2ARG(a, v1, v2)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
		CHECK_APPEND_1ARG("convert=%s,",
				  btrfs_bg_type_to_raid_name(bargs->target));

	if (flags & BTRFS_BALANCE_ARGS_SOFT)
		CHECK_APPEND_NOARG("soft,");

	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
					    sizeof(tmp_buf));
		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
	}

	if (flags & BTRFS_BALANCE_ARGS_USAGE)
		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);

	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
		CHECK_APPEND_2ARG("usage=%u..%u,",
				  bargs->usage_min, bargs->usage_max);

	if (flags & BTRFS_BALANCE_ARGS_DEVID)
		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);

	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
		CHECK_APPEND_2ARG("drange=%llu..%llu,",
				  bargs->pstart, bargs->pend);

	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
				  bargs->vstart, bargs->vend);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
		CHECK_APPEND_2ARG("limit=%u..%u,",
				  bargs->limit_min, bargs->limit_max);

	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
		CHECK_APPEND_2ARG("stripes=%u..%u,",
				  bargs->stripes_min, bargs->stripes_max);

#undef CHECK_APPEND_2ARG
#undef CHECK_APPEND_1ARG
#undef CHECK_APPEND_NOARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
	else
		buf[0] = '\0';
}
static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
	u32 size_buf = 1024;
	char tmp_buf[192] = {'\0'};
	char *buf;
	char *bp;
	u32 size_bp = size_buf;
	int ret;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	buf = kzalloc(size_buf, GFP_KERNEL);
	if (!buf)
		return;

	bp = buf;

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (bctl->flags & BTRFS_BALANCE_FORCE)
		CHECK_APPEND_1ARG("%s", "-f ");

	if (bctl->flags & BTRFS_BALANCE_DATA) {
		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_METADATA) {
		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
	}

#undef CHECK_APPEND_1ARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
	btrfs_info(fs_info, "balance: %s %s",
		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
		   "resume" : "start", buf);

	kfree(buf);
}

/*
 * Should be called with balance mutex held
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	u64 meta_target, data_target;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;
	bool reducing_redundancy;
	bool paused = false;
	int i;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    btrfs_should_cancel_balance(fs_info)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
	"balance: mixed groups data and metadata options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * rw_devices will not change at the moment, device add/delete/replace
	 * are excluded by EXCL_OP
	 */
	num_devices = fs_info->fs_devices->rw_devices;

	/*
	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
	 * special bit for it, to make it easier to distinguish. Thus we need
	 * to set it manually, or balance would refuse the profile.
	 */
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
		if (num_devices >= btrfs_raid_array[i].devs_min)
			allowed |= btrfs_raid_array[i].bg_flag;

	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
	    !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow to reduce metadata or system integrity only if force set for
	 * profiles with redundancy (copies, parity)
	 */
	allowed = 0;
	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
		if (btrfs_raid_array[i].ncopies >= 2 ||
		    btrfs_raid_array[i].tolerated_failures >= 1)
			allowed |= btrfs_raid_array[i].bg_flag;
	}
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed)))
			reducing_redundancy = true;
		else
			reducing_redundancy = false;

		/* if we're not converting, the target field is uninitialized */
		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->data.target : fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (reducing_redundancy) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			btrfs_info(fs_info,
			   "balance: force reducing metadata redundancy");
		} else {
			btrfs_err(fs_info,
	"balance: reduces metadata redundancy, use --force if you want this");
			ret = -EINVAL;
			goto out;
		}
	}

	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
		btrfs_warn(fs_info,
	"balance: metadata profile %s has lower redundancy than data profile %s",
			   btrfs_bg_type_to_raid_name(meta_target),
			   btrfs_bg_type_to_raid_name(data_target));
	}

	ret = insert_balance_item(fs_info, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		BUG_ON(fs_info->balance_ctl);
		spin_lock(&fs_info->balance_lock);
		fs_info->balance_ctl = bctl;
		spin_unlock(&fs_info->balance_lock);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
	describe_balance_start_or_resume(fs_info);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
		btrfs_info(fs_info, "balance: paused");
		btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
		paused = true;
	}
	/*
	 * Balance can be canceled by:
	 *
	 * - Regular cancel request
	 *   Then ret == -ECANCELED and balance_cancel_req > 0
	 *
	 * - Fatal signal to "btrfs" process
	 *   Either the signal caught by wait_reserve_ticket() and callers
	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
	 *   got -ECANCELED.
	 *   Either way, in this case balance_cancel_req = 0, and
	 *   ret == -EINTR or ret == -ECANCELED.
	 *
	 * So here we only check the return value to catch canceled balance.
	 */
	else if (ret == -ECANCELED || ret == -EINTR)
		btrfs_info(fs_info, "balance: canceled");
	else
		btrfs_info(fs_info, "balance: ended with status: %d", ret);

	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		btrfs_update_ioctl_balance_args(fs_info, bargs);
	}

	/* We didn't pause, we can clean everything up. */
	if (!paused) {
		reset_balance_state(fs_info);
		btrfs_exclop_finish(fs_info);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		reset_balance_state(fs_info);
	else
		kfree(bctl);
	btrfs_exclop_finish(fs_info);

	return ret;
}

static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	sb_start_write(fs_info->sb);
	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl)
		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
	mutex_unlock(&fs_info->balance_mutex);
	sb_end_write(fs_info->sb);

	return ret;
}

int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return 0;
	}
	mutex_unlock(&fs_info->balance_mutex);

	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
		btrfs_info(fs_info, "balance: resume skipped");
		return 0;
	}

	spin_lock(&fs_info->super_lock);
	ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
	fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
	spin_unlock(&fs_info->super_lock);
	/*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who pauses it, system or the user as of now, so set
	 * the resume flag.
	 */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
	spin_unlock(&fs_info->balance_lock);

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}

int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	/*
	 * This should never happen, as the paused balance state is recovered
	 * during mount without any chance of other exclusive ops to collide.
	 *
	 * This gives the exclusive op status to balance and keeps in paused
	 * state until user intervention (cancel or umount). If the ownership
	 * cannot be assigned, show a message but do not fail. The balance
	 * is in a paused state and must have fs_info::balance_ctl properly
	 * set up.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED))
		btrfs_warn(fs_info,
	"balance: cannot set exclusive op status, resume manually");

	btrfs_release_path(path);

	mutex_lock(&fs_info->balance_mutex);
	BUG_ON(fs_info->balance_ctl);
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
	mutex_unlock(&fs_info->balance_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
	 */
	if (sb_rdonly(fs_info->sb)) {
		mutex_unlock(&fs_info->balance_mutex);
		return -EROFS;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		mutex_lock(&fs_info->balance_mutex);
	} else {
		mutex_unlock(&fs_info->balance_mutex);
		/*
		 * Lock released to allow other waiters to continue, we'll
		 * reexamine the status again.
		 */
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl) {
			reset_balance_state(fs_info);
			btrfs_exclop_finish(fs_info);
			btrfs_info(fs_info, "balance: canceled");
		}
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;
	u64 start;
	u64 free_diff = 0;

	new_size = round_down(new_size, fs_info->sectorsize);
	start = new_size;
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;

		/*
		 * The new free_chunk_space is new_size - used, so we have to
		 * subtract the delta of the old free_chunk_space which included
		 * old_size - used. If used > new_size then just subtract this
		 * entire device's free space.
		 */
		if (device->bytes_used < new_size)
			free_diff = (old_size - device->bytes_used) -
				    (new_size - device->bytes_used);
		else
			free_diff = old_size - device->bytes_used;
		atomic64_sub(free_diff, &fs_info->free_chunk_space);
	}

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees them
	 * and relocates them accordingly.
	 */
	if (contains_pending_extent(device, &start, diff)) {
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->reclaim_bgs_lock);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			if (ret < 0)
				goto done;
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so let's allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->reclaim_bgs_lock);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->reclaim_bgs_lock);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);
	/* Clear all state bits beyond the shrunk device size */
	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
			  CHUNK_STATE_MASK);

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	btrfs_reserve_chunk_metadata(trans, false);
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	btrfs_trans_release_chunk_metadata(trans);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			device->fs_devices->total_rw_bytes += diff;
			atomic64_add(free_diff, &fs_info->free_chunk_space);
		}
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}

static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	lockdep_assert_held(&fs_info->chunk_mutex);

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);

	return 0;
}
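
/*
 * Layout sketch of super_copy->sys_chunk_array after the append above:
 *
 *   [disk_key 0][chunk item 0][disk_key 1][chunk item 1]...
 *
 * Each entry is a (struct btrfs_disk_key, struct btrfs_chunk plus its
 * stripes) pair packed back to back, bounded by
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE.
 */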

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}
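
/*
 * Example: devices with (max_avail, total_avail) of (2G, 5G), (2G, 8G)
 * and (1G, 9G) sort to (2G, 8G), (2G, 5G), (1G, 9G): bigger holes come
 * first, and total available space only breaks ties.
 */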

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
		return;

	btrfs_set_fs_incompat(info, RAID1C34);
}

/*
 * Structure used internally for btrfs_create_chunk() function.
 * Wraps needed parameters.
 */
struct alloc_chunk_ctl {
	u64 start;
	u64 type;
	/* Total number of stripes to allocate */
	int num_stripes;
	/* sub_stripes info for map */
	int sub_stripes;
	/* Stripes per device */
	int dev_stripes;
	/* Maximum number of devices to use */
	int devs_max;
	/* Minimum number of devices to use */
	int devs_min;
	/* ndevs has to be a multiple of this */
	int devs_increment;
	/* Number of copies */
	int ncopies;
	/* Number of stripes worth of bytes to store parity information */
	int nparity;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 dev_extent_min;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
};

static void init_alloc_chunk_ctl_policy_regular(
				struct btrfs_fs_devices *fs_devices,
				struct alloc_chunk_ctl *ctl)
{
	struct btrfs_space_info *space_info;

	space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
	ASSERT(space_info);

	ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
	ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);

	if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
		ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);

	/* We don't want a chunk larger than 10% of writable space */
	ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
				  ctl->max_chunk_size);
	ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes);
}

static void init_alloc_chunk_ctl_policy_zoned(
				      struct btrfs_fs_devices *fs_devices,
				      struct alloc_chunk_ctl *ctl)
{
	u64 zone_size = fs_devices->fs_info->zone_size;
	u64 limit;
	int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
	int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
	u64 min_chunk_size = min_data_stripes * zone_size;
	u64 type = ctl->type;

	ctl->max_stripe_size = zone_size;
	if (type & BTRFS_BLOCK_GROUP_DATA) {
		ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
						 zone_size);
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		ctl->max_chunk_size = ctl->max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
		ctl->devs_max = min_t(int, ctl->devs_max,
				      BTRFS_MAX_DEVS_SYS_CHUNK);
	} else {
		BUG();
	}

	/* We don't want a chunk larger than 10% of writable space */
	limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10),
			       zone_size),
		    min_chunk_size);
	ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
	ctl->dev_extent_min = zone_size * ctl->dev_stripes;
}

static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
				 struct alloc_chunk_ctl *ctl)
{
	int index = btrfs_bg_flags_to_raid_index(ctl->type);

	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
	ctl->devs_max = btrfs_raid_array[index].devs_max;
	if (!ctl->devs_max)
		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
	ctl->devs_min = btrfs_raid_array[index].devs_min;
	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
	ctl->ncopies = btrfs_raid_array[index].ncopies;
	ctl->nparity = btrfs_raid_array[index].nparity;
	ctl->ndevs = 0;

	switch (fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
		break;
	case BTRFS_CHUNK_ALLOC_ZONED:
		init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
		break;
	default:
		BUG();
	}
}

static int gather_device_info(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;
	struct btrfs_device *device;
	u64 total_avail;
	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
	int ret;
	int ndevs = 0;
	u64 max_avail;
	u64 dev_offset;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail < ctl->dev_extent_min)
			continue;

		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
					   &max_avail);
		if (ret && ret != -ENOSPC)
			return ret;

		if (ret == 0)
			max_avail = dev_extent_want;

		if (max_avail < ctl->dev_extent_min) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%llu",
					    __func__, device->devid, max_avail,
					    ctl->dev_extent_min);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}
	ctl->ndevs = ndevs;

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	return 0;
}

static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
				      struct btrfs_device_info *devices_info)
{
	/* Number of stripes that count for block group size */
	int data_stripes;

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
				   ctl->dev_stripes);
	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;

	/* This will have to be fixed for RAID1 and RAID10 over more drives */
	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk is
	 * really going to be in terms of logical address space, and compare
	 * that answer with the max chunk size. If it's higher, we try to
	 * reduce stripe_size.
	 */
	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
							data_stripes), SZ_16M),
				       ctl->stripe_size);
	}

	/* Stripe size should not go beyond 1G. */
	ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);

	/* Align to BTRFS_STRIPE_LEN */
	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
	ctl->chunk_size = ctl->stripe_size * data_stripes;

	return 0;
}
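
/*
 * Worked example (illustrative): DUP on a single device whose largest
 * hole is 8G: dev_stripes = 2, so stripe_size = 8G / 2 = 4G, which the
 * cap above reduces to 1G; num_stripes = 1 * 2 = 2 and data_stripes =
 * (2 - 0) / 2 = 1, giving a 1G chunk backed by two 1G extents on the
 * same device (assuming max_chunk_size does not force a further cut).
 */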

static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
				    struct btrfs_device_info *devices_info)
{
	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
	/* Number of stripes that count for block group size */
	int data_stripes;

	/*
	 * It should hold because:
	 *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
	 */
	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);

	ctl->stripe_size = zone_size;
	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;

	/* stripe_size is fixed in zoned filesystems. Reduce ndevs instead. */
	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
					     ctl->stripe_size) + ctl->nparity,
				     ctl->dev_stripes);
		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
	}

	ctl->chunk_size = ctl->stripe_size * data_stripes;

	return 0;
}
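
/*
 * Worked example (illustrative): zone_size = 256M, single-copy data on
 * 8 zoned devices would give an 8 * 256M = 2G chunk. With
 * max_chunk_size = 1G the ndevs reduction above yields
 * (1G * 1 / 256M + 0) / 1 = 4 devices, i.e. a 1G chunk of four zones.
 */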

static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;

	/*
	 * Round down to number of usable stripes, devs_increment can be any
	 * number so we can't use round_down() that requires power of 2, while
	 * rounddown is safe.
	 */
	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);

	if (ctl->ndevs < ctl->devs_min) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ctl->ndevs, ctl->devs_min);
		}
		return -ENOSPC;
	}

	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);

	switch (fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return decide_stripe_size_regular(ctl, devices_info);
	case BTRFS_CHUNK_ALLOC_ZONED:
		return decide_stripe_size_zoned(ctl, devices_info);
	default:
		BUG();
	}
}

static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits)
{
	for (int i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bit(&device->alloc_state, stripe->physical,
			       stripe->physical + map->stripe_size - 1,
			       bits | EXTENT_NOWAIT, NULL);
	}
}

static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits)
{
	for (int i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + map->stripe_size - 1,
				   bits | EXTENT_NOWAIT,
				   NULL, NULL);
	}
}

void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
{
	write_lock(&fs_info->mapping_tree_lock);
	rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
	RB_CLEAR_NODE(&map->rb_node);
	chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
	write_unlock(&fs_info->mapping_tree_lock);

	/* Once for the tree reference. */
	btrfs_free_chunk_map(map);
}

int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	write_lock(&fs_info->mapping_tree_lock);
	p = &fs_info->mapping_tree.rb_root.rb_node;
	while (*p) {
		struct btrfs_chunk_map *entry;

		parent = *p;
		entry = rb_entry(parent, struct btrfs_chunk_map, rb_node);

		if (map->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (map->start > entry->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			write_unlock(&fs_info->mapping_tree_lock);
			return -EEXIST;
		}
	}
	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost);
	chunk_map_device_set_bits(map, CHUNK_ALLOCATED);
	chunk_map_device_clear_bits(map, CHUNK_TRIMMED);
	write_unlock(&fs_info->mapping_tree_lock);

	return 0;
}

struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp)
{
	struct btrfs_chunk_map *map;

	map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp);
	if (!map)
		return NULL;

	refcount_set(&map->refs, 1);
	RB_CLEAR_NODE(&map->rb_node);

	return map;
}

static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
			struct alloc_chunk_ctl *ctl,
			struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_chunk_map *map;
	struct btrfs_block_group *block_group;
	u64 start = ctl->start;
	u64 type = ctl->type;
	int ret;

	map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->start = start;
	map->chunk_len = ctl->chunk_size;
	map->stripe_size = ctl->stripe_size;
	map->type = type;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->sub_stripes = ctl->sub_stripes;
	map->num_stripes = ctl->num_stripes;

	for (int i = 0; i < ctl->ndevs; i++) {
		for (int j = 0; j < ctl->dev_stripes; j++) {
			int s = i * ctl->dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * ctl->stripe_size;
		}
	}

	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);

	ret = btrfs_add_chunk_map(info, map);
	if (ret) {
		btrfs_free_chunk_map(map);
		return ERR_PTR(ret);
	}

	block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size);
	if (IS_ERR(block_group)) {
		btrfs_remove_chunk_map(info, map);
		return block_group;
	}

	for (int i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = map->stripes[i].dev;

		btrfs_device_set_bytes_used(dev,
					    dev->bytes_used + ctl->stripe_size);
		if (list_empty(&dev->post_commit_list))
			list_add_tail(&dev->post_commit_list,
				      &trans->transaction->dev_update_list);
	}

	atomic64_sub(ctl->stripe_size * map->num_stripes,
		     &info->free_chunk_space);

	check_raid56_incompat_flag(info, type);
	check_raid1c34_incompat_flag(info, type);

	return block_group;
}

struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
					     u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device_info *devices_info = NULL;
	struct alloc_chunk_ctl ctl;
	struct btrfs_block_group *block_group;
	int ret;

	lockdep_assert_held(&info->chunk_mutex);

	if (!alloc_profile_is_valid(type, 0)) {
		ASSERT(0);
		return ERR_PTR(-EINVAL);
	}

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return ERR_PTR(-ENOSPC);
	}

	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
		ASSERT(0);
		return ERR_PTR(-EINVAL);
	}

	ctl.start = find_next_chunk(info);
	ctl.type = type;
	init_alloc_chunk_ctl(fs_devices, &ctl);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return ERR_PTR(-ENOMEM);

	ret = gather_device_info(fs_devices, &ctl, devices_info);
	if (ret < 0) {
		block_group = ERR_PTR(ret);
		goto out;
	}

	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
	if (ret < 0) {
		block_group = ERR_PTR(ret);
		goto out;
	}

	block_group = create_chunk(trans, &ctl, devices_info);

out:
	kfree(devices_info);
	return block_group;
}

/*
 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the
 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
 * chunks.
 *
 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
 * phases.
 */
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct btrfs_chunk_map *map;
	size_t item_size;
	int i;
	int ret;

	/*
	 * We take the chunk_mutex for 2 reasons:
	 *
	 * 1) Updates and insertions in the chunk btree must be done while holding
	 *    the chunk_mutex, as well as updating the system chunk array in the
	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
	 *    details;
	 *
	 * 2) To prevent races with the final phase of a device replace operation
	 *    that replaces the device object associated with the map's stripes,
	 *    because the device object's id can change at any time during that
	 *    final phase of the device replace operation
	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
	 *    which would cause a failure when updating the device item, which does
	 *    not exist, or persisting a stripe of the chunk item with such ID.
	 *    Here we can't use the device_list_mutex because our caller already
	 *    has locked the chunk_mutex, and the final phase of device replace
	 *    acquires both mutexes - first the device_list_mutex and then the
	 *    chunk_mutex. Using any of those two mutexes protects us from a
	 *    concurrent device replace.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	map = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	item_size = btrfs_chunk_item_size(map->num_stripes);

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;

		ret = btrfs_update_device(trans, device);
		if (ret)
			goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		const u64 dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}

	btrfs_set_stack_chunk_length(chunk, bg->length);
	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
	btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = bg->start;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret)
		goto out;

	set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
		if (ret)
			goto out;
	}

out:
	kfree(chunk);
	btrfs_free_chunk_map(map);
	return ret;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 alloc_profile;
	struct btrfs_block_group *meta_bg;
	struct btrfs_block_group *sys_bg;

	/*
	 * When adding a new device for sprouting, the seed device is read-only
	 * so we must first allocate a metadata and a system chunk. But before
	 * adding the block group items to the extent, device and chunk btrees,
	 * we must first:
	 *
	 * 1) Create both chunks without doing any changes to the btrees, as
	 *    otherwise we would get -ENOSPC since the block groups from the
	 *    seed device are read-only;
	 *
	 * 2) Add the device item for the new sprout device - finishing the setup
	 *    of a new block group requires updating the device item in the chunk
	 *    btree, so it must exist when we attempt to do it. The previous step
	 *    ensures this does not fail with -ENOSPC.
	 *
	 * After that we can add the block group items to their btrees:
	 * update existing device item in the chunk btree, add a new block group
	 * item to the extent btree, add a new chunk item to the chunk btree and
	 * finally add the new device extent items to the devices btree.
	 */

	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	meta_bg = btrfs_create_chunk(trans, alloc_profile);
	if (IS_ERR(meta_bg))
		return PTR_ERR(meta_bg);

	alloc_profile = btrfs_system_alloc_profile(fs_info);
	sys_bg = btrfs_create_chunk(trans, alloc_profile);
	if (IS_ERR(sys_bg))
		return PTR_ERR(sys_bg);

	return 0;
}

static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
{
	const int index = btrfs_bg_flags_to_raid_index(map->type);

	return btrfs_raid_array[index].tolerated_failures;
}

bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_chunk_map *map;
	int miss_ndevs = 0;
	int i;
	bool ret = true;

	map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(map))
		return false;

	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
			     &map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
			      &map->stripes[i].dev->dev_state)) {
			ret = false;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors, we can
	 * not write the data into that chunk successfully.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		ret = false;
end:
	btrfs_free_chunk_map(map);
	return ret;
}

void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->mapping_tree_lock);
	while (!RB_EMPTY_ROOT(&fs_info->mapping_tree.rb_root)) {
		struct btrfs_chunk_map *map;
		struct rb_node *node;

		node = rb_first_cached(&fs_info->mapping_tree);
		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
		RB_CLEAR_NODE(&map->rb_node);
		chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
		/* Once for the tree ref. */
		btrfs_free_chunk_map(map);
		cond_resched_rwlock_write(&fs_info->mapping_tree_lock);
	}
	write_unlock(&fs_info->mapping_tree_lock);
}

static int btrfs_chunk_map_num_copies(const struct btrfs_chunk_map *map)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(map->type);

	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;

	/*
	 * There could be two corrupted data stripes, we need to loop retry in
	 * order to rebuild the correct data.
	 *
	 * Fail a stripe at a time on every retry except the stripe under
	 * reconstruction.
	 */
	if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return map->num_stripes;

	/* Non-RAID56, use their ncopies from btrfs_raid_array. */
	return btrfs_raid_array[index].ncopies;
}

int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_chunk_map *map;
	int ret;

	map = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(map))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	ret = btrfs_chunk_map_num_copies(map);
	btrfs_free_chunk_map(map);
	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct btrfs_chunk_map *map;
	unsigned long len = fs_info->sectorsize;

	if (!btrfs_fs_incompat(fs_info, RAID56))
		return len;

	map = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(map))) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
		btrfs_free_chunk_map(map);
	}
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_chunk_map *map;
	int ret = 0;

	if (!btrfs_fs_incompat(fs_info, RAID56))
		return 0;

	map = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(map))) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		btrfs_free_chunk_map(map);
	}
	return ret;
}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct btrfs_chunk_map *map, int first,
			    int dev_replace_is_ongoing)
{
	const enum btrfs_read_policy policy = READ_ONCE(fs_info->fs_devices->read_policy);
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	switch (policy) {
	default:
		/* Shouldn't happen, just warn and use pid instead of failing */
		btrfs_warn_rl(fs_info, "unknown read_policy type %u, reset to pid",
			      policy);
		WRITE_ONCE(fs_info->fs_devices->read_policy, BTRFS_READ_POLICY_PID);
		fallthrough;
	case BTRFS_READ_POLICY_PID:
		preferred_mirror = first + (current->pid % num_stripes);
		break;
	}

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}
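
/*
 * E.g. with the default pid policy on RAID1 (num_stripes = 2), a reader
 * with an even pid is steered to stripe 0 and one with an odd pid to
 * stripe 1, spreading independent readers across both mirrors.
 */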

static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
						       u64 logical,
						       u16 total_stripes)
{
	struct btrfs_io_context *bioc;

	bioc = kzalloc(
		 /* The size of btrfs_io_context */
		sizeof(struct btrfs_io_context) +
		/* Plus the variable array for the stripes */
		sizeof(struct btrfs_io_stripe) * (total_stripes),
		GFP_NOFS);

	if (!bioc)
		return NULL;

	refcount_set(&bioc->refs, 1);

	bioc->fs_info = fs_info;
	bioc->replace_stripe_src = -1;
	bioc->full_stripe_logical = (u64)-1;
	bioc->logical = logical;

	return bioc;
}

void btrfs_get_bioc(struct btrfs_io_context *bioc)
{
	WARN_ON(!refcount_read(&bioc->refs));
	refcount_inc(&bioc->refs);
}

void btrfs_put_bioc(struct btrfs_io_context *bioc)
{
	if (!bioc)
		return;
	if (refcount_dec_and_test(&bioc->refs))
		kfree(bioc);
}

/*
 * Please note that, discard won't be sent to target device of device
 * replace.
 */
struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
					       u64 logical, u64 *length_ret,
					       u32 *num_stripes)
{
	struct btrfs_chunk_map *map;
	struct btrfs_discard_stripe *stripes;
	u64 length = *length_ret;
	u64 offset;
	u32 stripe_nr;
	u32 stripe_nr_end;
	u32 stripe_cnt;
	u64 stripe_end_offset;
	u64 stripe_offset;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u32 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret;
	int i;

	map = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(map))
		return ERR_CAST(map);

	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out_free_map;
	}

	offset = logical - map->start;
	length = min_t(u64, map->start + map->chunk_len - logical, length);
	*length_ret = length;

	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr);

	stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >>
			BTRFS_STRIPE_LEN_SHIFT;
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	*num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		*num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_index = stripe_nr % factor;
		stripe_nr /= factor;
		stripe_index *= sub_stripes;

		remaining_stripes = stripe_cnt % factor;
		stripes_per_dev = stripe_cnt / factor;
		last_stripe = ((stripe_nr_end - 1) % factor) * sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
				BTRFS_BLOCK_GROUP_DUP)) {
		*num_stripes = map->num_stripes;
	} else {
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr /= map->num_stripes;
	}

	stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
	if (!stripes) {
		ret = -ENOMEM;
		goto out_free_map;
	}

	for (i = 0; i < *num_stripes; i++) {
		stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
		stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev);

			if (i / sub_stripes < remaining_stripes)
				stripes[i].length += BTRFS_STRIPE_LEN;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				stripes[i].length -= stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				stripes[i].length -= stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	btrfs_free_chunk_map(map);
	return stripes;
out_free_map:
	btrfs_free_chunk_map(map);
	return ERR_PTR(ret);
}
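
/*
 * Worked example (illustrative): discarding 256K at offset 0 of a RAID0
 * chunk over two devices (64K stripes): stripe_nr = 0, stripe_nr_end = 4,
 * stripe_cnt = 4, factor = 2, so *num_stripes = 2 and stripes_per_dev =
 * 4 / 2 = 2, i.e. each device receives one 128K discard.
 */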

static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;
	bool ret;

	/* A non-zoned filesystem does not use the "to_copy" flag */
	if (!btrfs_is_zoned(fs_info))
		return false;

	cache = btrfs_lookup_block_group(fs_info, logical);

	ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);

	btrfs_put_block_group(cache);
	return ret;
}

static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc,
				      struct btrfs_dev_replace *dev_replace,
				      u64 logical,
				      struct btrfs_io_geometry *io_geom)
{
	u64 srcdev_devid = dev_replace->srcdev->devid;
	/*
	 * At this stage, num_stripes is still the real number of stripes,
	 * excluding the duplicated stripes.
	 */
	int num_stripes = io_geom->num_stripes;
	int max_errors = io_geom->max_errors;
	int nr_extra_stripes = 0;
	int i;

	/*
	 * A block group which has "to_copy" set will eventually be copied by
	 * the dev-replace process. We can avoid cloning IO here.
	 */
	if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
		return;

	/*
	 * Duplicate the write operations while the dev-replace procedure is
	 * running. Since the copying of the old disk to the new disk takes
	 * place at run time while the filesystem is mounted writable, the
	 * regular write operations to the old disk have to be duplicated to go
	 * to the new disk as well.
	 *
	 * Note that device->missing is handled by the caller, and that the
	 * write to the old disk is already set up in the stripes array.
	 */
	for (i = 0; i < num_stripes; i++) {
		struct btrfs_io_stripe *old = &bioc->stripes[i];
		struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes];

		if (old->dev->devid != srcdev_devid)
			continue;

		new->physical = old->physical;
		new->dev = dev_replace->tgtdev;
		if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			bioc->replace_stripe_src = i;
		nr_extra_stripes++;
	}

	/* We can only have at most 2 extra nr_stripes (for DUP). */
	ASSERT(nr_extra_stripes <= 2);
	/*
	 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for
	 * replace.
	 * If we have 2 extra stripes, only choose the one with smaller physical.
	 */
	if (io_geom->op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
		struct btrfs_io_stripe *first = &bioc->stripes[num_stripes];
		struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];

		/* Only DUP can have two extra stripes. */
		ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP);

		/*
		 * Swap the last stripe stripes and reduce @nr_extra_stripes.
		 * The extra stripe would still be there, but won't be accessed.
		 */
		if (first->physical > second->physical) {
			swap(second->physical, first->physical);
			swap(second->dev, first->dev);
		}
		nr_extra_stripes--;
	}

	io_geom->num_stripes = num_stripes + nr_extra_stripes;
	io_geom->max_errors = max_errors + nr_extra_stripes;
	bioc->replace_nr_stripes = nr_extra_stripes;
}

static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
			    struct btrfs_io_geometry *io_geom)
{
	/*
	 * Stripe_nr is the stripe where this block falls. stripe_offset is
	 * the offset of this block in its stripe.
	 */
	io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
	io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
	ASSERT(io_geom->stripe_offset < U32_MAX);

	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len =
			btrfs_stripe_nr_to_offset(nr_data_stripes(map));

		/*
		 * For full stripe start, we use previously calculated
		 * @stripe_nr. Align it to nr_data_stripes, then multiply with
		 * stripe_len.
		 *
		 * By this we can avoid u64 division completely. And we have
		 * to go rounddown(), not round_down(), as nr_data_stripes is
		 * not ensured to be power of 2.
		 */
		io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset(
			rounddown(io_geom->stripe_nr, nr_data_stripes(map)));

		ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset);
		ASSERT(io_geom->raid56_full_stripe_start <= offset);
		/*
		 * For writes to RAID56, allow to write a full stripe set, but
		 * no straddling of stripe sets.
		 */
		if (io_geom->op == BTRFS_MAP_WRITE)
			return full_stripe_len - (offset - io_geom->raid56_full_stripe_start);
	}

	/*
	 * For other RAID types and for RAID56 reads, allow a single stripe (on
	 * a single disk).
	 */
	if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK)
		return BTRFS_STRIPE_LEN - io_geom->stripe_offset;
	return U64_MAX;
}
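
/*
 * Worked example (illustrative): RAID5 over 3 devices has 2 data
 * stripes, so full_stripe_len = 128K. For a write at offset 96K into
 * the chunk: stripe_nr = 1, stripe_offset = 32K,
 * raid56_full_stripe_start = 0, and the returned length is
 * 128K - 96K = 32K, stopping at the stripe set boundary.
 */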

static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical,
			 u64 *length, struct btrfs_io_stripe *dst,
			 struct btrfs_chunk_map *map,
			 struct btrfs_io_geometry *io_geom)
{
	dst->dev = map->stripes[io_geom->stripe_index].dev;

	if (io_geom->op == BTRFS_MAP_READ &&
	    btrfs_need_stripe_tree_update(fs_info, map->type))
		return btrfs_get_raid_extent_offset(fs_info, logical, length,
						    map->type,
						    io_geom->stripe_index, dst);

	dst->physical = map->stripes[io_geom->stripe_index].physical +
			io_geom->stripe_offset +
			btrfs_stripe_nr_to_offset(io_geom->stripe_nr);
	return 0;
}

static bool is_single_device_io(struct btrfs_fs_info *fs_info,
				const struct btrfs_io_stripe *smap,
				const struct btrfs_chunk_map *map,
				int num_alloc_stripes,
				enum btrfs_map_op op, int mirror_num)
{
	if (!smap)
		return false;

	if (num_alloc_stripes != 1)
		return false;

	if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ)
		return false;

	if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)
		return false;

	return true;
}

static void map_blocks_raid0(const struct btrfs_chunk_map *map,
			     struct btrfs_io_geometry *io_geom)
{
	io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
	io_geom->stripe_nr /= map->num_stripes;
	if (io_geom->op == BTRFS_MAP_READ)
		io_geom->mirror_num = 1;
}

static void map_blocks_raid1(struct btrfs_fs_info *fs_info,
			     struct btrfs_chunk_map *map,
			     struct btrfs_io_geometry *io_geom,
			     bool dev_replace_is_ongoing)
{
	if (io_geom->op != BTRFS_MAP_READ) {
		io_geom->num_stripes = map->num_stripes;
		return;
	}

	if (io_geom->mirror_num) {
		io_geom->stripe_index = io_geom->mirror_num - 1;
		return;
	}

	io_geom->stripe_index = find_live_mirror(fs_info, map, 0,
						 dev_replace_is_ongoing);
	io_geom->mirror_num = io_geom->stripe_index + 1;
}

static void map_blocks_dup(const struct btrfs_chunk_map *map,
			   struct btrfs_io_geometry *io_geom)
{
	if (io_geom->op != BTRFS_MAP_READ) {
		io_geom->num_stripes = map->num_stripes;
		return;
	}

	if (io_geom->mirror_num) {
		io_geom->stripe_index = io_geom->mirror_num - 1;
		return;
	}

	io_geom->mirror_num = 1;
}

static void map_blocks_raid10(struct btrfs_fs_info *fs_info,
			      struct btrfs_chunk_map *map,
			      struct btrfs_io_geometry *io_geom,
			      bool dev_replace_is_ongoing)
{
	u32 factor = map->num_stripes / map->sub_stripes;
	int old_stripe_index;

	io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes;
	io_geom->stripe_nr /= factor;

	if (io_geom->op != BTRFS_MAP_READ) {
		io_geom->num_stripes = map->sub_stripes;
		return;
	}

	if (io_geom->mirror_num) {
		io_geom->stripe_index += io_geom->mirror_num - 1;
		return;
	}

	old_stripe_index = io_geom->stripe_index;
	io_geom->stripe_index = find_live_mirror(fs_info, map,
						 io_geom->stripe_index,
						 dev_replace_is_ongoing);
	io_geom->mirror_num = io_geom->stripe_index - old_stripe_index + 1;
}
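
/*
 * Worked example (illustrative): RAID10 over 4 devices with
 * sub_stripes = 2 has factor = 2. For stripe_nr = 5:
 * stripe_index = (5 % 2) * 2 = 2 (the second mirror pair) and stripe_nr
 * becomes 5 / 2 = 2, the position within that pair's devices.
 */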

static void map_blocks_raid56_write(struct btrfs_chunk_map *map,
				    struct btrfs_io_geometry *io_geom,
				    u64 logical, u64 *length)
{
	int data_stripes = nr_data_stripes(map);

	/*
	 * Needs full stripe mapping.
	 *
	 * Push stripe_nr back to the start of the full stripe. For those cases
	 * needing a full stripe, @stripe_nr is the full stripe number.
	 *
	 * Originally we go raid56_full_stripe_start / full_stripe_len, but
	 * that can be expensive. Here we just divide @stripe_nr with
	 * @data_stripes.
	 */
	io_geom->stripe_nr /= data_stripes;

	/* RAID[56] write or recovery. Return all stripes */
	io_geom->num_stripes = map->num_stripes;
	io_geom->max_errors = btrfs_chunk_max_errors(map);

	/* Return the length to the full stripe end. */
	*length = min(logical + *length,
		      io_geom->raid56_full_stripe_start + map->start +
		      btrfs_stripe_nr_to_offset(data_stripes)) -
		  logical;
	io_geom->stripe_index = 0;
	io_geom->stripe_offset = 0;
}

static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
				   struct btrfs_io_geometry *io_geom)
{
	int data_stripes = nr_data_stripes(map);

	ASSERT(io_geom->mirror_num <= 1);
	/* Just grab the data stripe directly. */
	io_geom->stripe_index = io_geom->stripe_nr % data_stripes;
	io_geom->stripe_nr /= data_stripes;

	/* We distribute the parity blocks across stripes. */
	io_geom->stripe_index =
		(io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes;

	if (io_geom->op == BTRFS_MAP_READ && io_geom->mirror_num < 1)
		io_geom->mirror_num = 1;
}
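
/*
 * E.g. RAID5 over 3 devices (2 data stripes): for stripe_nr = 3 the data
 * stripe is 3 % 2 = 1, the full stripe number is 3 / 2 = 1, and the
 * parity rotation puts the target at (1 + 1) % 3 = 2 in the stripes
 * array.
 */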

static void map_blocks_single(const struct btrfs_chunk_map *map,
			      struct btrfs_io_geometry *io_geom)
{
	io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
	io_geom->stripe_nr /= map->num_stripes;
	io_geom->mirror_num = io_geom->stripe_index + 1;
}
6413 * Map one logical range to one or more physical ranges.
6415 * @length: (Mandatory) mapped length of this run.
6416 * One logical range can be split into different segments
6417 * due to factors like zones and RAID0/5/6/10 stripe
6420 * @bioc_ret: (Mandatory) returned btrfs_io_context structure.
6421 * which has one or more physical ranges (btrfs_io_stripe)
6423 * Caller should call btrfs_put_bioc() to free it after use.
6425 * @smap: (Optional) single physical range optimization.
6426 * If the map request can be fulfilled by one single
6427 * physical range, and this is parameter is not NULL,
6428 * then @bioc_ret would be NULL, and @smap would be
6431 * @mirror_num_ret: (Mandatory) returned mirror number if the original
6434 * Mirror number 0 means to choose any live mirrors.
6436 * For non-RAID56 profiles, non-zero mirror_num means
6437 * the Nth mirror. (e.g. mirror_num 1 means the first
6440 * For RAID56 profile, mirror 1 means rebuild from P and
6441 * the remaining data stripes.
6443 * For RAID6 profile, mirror > 2 means mark another
6444 * data/P stripe error and rebuild from the remaining
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret,
		    struct btrfs_io_stripe *smap, int *mirror_num_ret)
{
	struct btrfs_chunk_map *map;
	struct btrfs_io_geometry io_geom = { 0 };
	u64 map_offset;
	u64 max_len;
	int ret = 0;
	int num_copies;
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	u16 num_alloc_stripes;

	ASSERT(bioc_ret);

	io_geom.mirror_num = (mirror_num_ret ? *mirror_num_ret : 0);
	io_geom.num_stripes = 1;
	io_geom.stripe_index = 0;
	io_geom.op = op;

	map = btrfs_get_chunk_map(fs_info, logical, *length);
	if (IS_ERR(map))
		return PTR_ERR(map);

	num_copies = btrfs_chunk_map_num_copies(map);
	if (io_geom.mirror_num > num_copies)
		return -EINVAL;

	map_offset = logical - map->start;
	io_geom.raid56_full_stripe_start = (u64)-1;
	max_len = btrfs_max_io_len(map, map_offset, &io_geom);
	*length = min_t(u64, map->chunk_len - map_offset, max_len);

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case BTRFS_BLOCK_GROUP_RAID0:
		map_blocks_raid0(map, &io_geom);
		break;
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID1C3:
	case BTRFS_BLOCK_GROUP_RAID1C4:
		map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
		map_blocks_dup(map, &io_geom);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing);
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		if (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)
			map_blocks_raid56_write(map, &io_geom, logical, length);
		else
			map_blocks_raid56_read(map, &io_geom);
		break;
	default:
		/*
		 * After this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		map_blocks_single(map, &io_geom);
		break;
	}
	if (io_geom.stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   io_geom.stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = io_geom.num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    op != BTRFS_MAP_READ)
		/*
		 * For replace case, we need to add extra stripes for extra
		 * duplicated stripes.
		 *
		 * For both WRITE and GET_READ_MIRRORS, we may have at most
		 * 2 more stripes (DUP types, otherwise 1).
		 */
		num_alloc_stripes += 2;

	/*
	 * If this I/O maps to a single device, try to return the device and
	 * physical block information on the stack instead of allocating an
	 * I/O context structure.
	 */
	if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op,
				io_geom.mirror_num)) {
		ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
		if (mirror_num_ret)
			*mirror_num_ret = io_geom.mirror_num;
		*bioc_ret = NULL;
		goto out;
	}

	bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
	if (!bioc) {
		ret = -ENOMEM;
		goto out;
	}
	bioc->map_type = map->type;

	/*
	 * For RAID56 full map, we need to make sure the stripes[] follows the
	 * rule that data stripes are all ordered, then followed with P and Q
	 * (if we have).
	 *
	 * It's still mostly the same as other profiles, just with extra rotation.
	 */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
	    (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)) {
		/*
		 * For RAID56 @stripe_nr is already the number of full stripes
		 * before us, which is also the rotation value (needs to modulo
		 * with num_stripes).
		 *
		 * In this case, we just add @stripe_nr with @i, then do the
		 * modulo, to reduce one modulo call.
		 */
		bioc->full_stripe_logical = map->start +
			btrfs_stripe_nr_to_offset(io_geom.stripe_nr *
						  nr_data_stripes(map));
		for (int i = 0; i < io_geom.num_stripes; i++) {
			struct btrfs_io_stripe *dst = &bioc->stripes[i];
			u32 stripe_index;

			stripe_index = (i + io_geom.stripe_nr) % io_geom.num_stripes;
			dst->dev = map->stripes[stripe_index].dev;
			dst->physical =
				map->stripes[stripe_index].physical +
				io_geom.stripe_offset +
				btrfs_stripe_nr_to_offset(io_geom.stripe_nr);
		}
	} else {
		/*
		 * For all other non-RAID56 profiles, just copy the target
		 * stripe into the bioc.
		 */
		for (int i = 0; i < io_geom.num_stripes; i++) {
			ret = set_io_stripe(fs_info, logical, length,
					    &bioc->stripes[i], map, &io_geom);
			if (ret < 0)
				break;
			io_geom.stripe_index++;
		}
	}

	if (ret) {
		*bioc_ret = NULL;
		btrfs_put_bioc(bioc);
		goto out;
	}

	if (op != BTRFS_MAP_READ)
		io_geom.max_errors = btrfs_chunk_max_errors(map);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    op != BTRFS_MAP_READ) {
		handle_ops_on_dev_replace(bioc, dev_replace, logical, &io_geom);
	}

	*bioc_ret = bioc;
	bioc->num_stripes = io_geom.num_stripes;
	bioc->max_errors = io_geom.max_errors;
	bioc->mirror_num = io_geom.mirror_num;

out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	btrfs_free_chunk_map(map);
	return ret;
}
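/*
 * Illustrative sketch, not part of the original source: a typical read-side
 * call into btrfs_map_block().  The local variable names here are
 * hypothetical; only the function signature above is authoritative.
 *
 *	u64 map_length = fs_info->sectorsize;
 *	struct btrfs_io_context *bioc = NULL;
 *	int mirror_num = 0;
 *	int ret;
 *
 *	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &map_length,
 *			      &bioc, NULL, &mirror_num);
 *	if (ret == 0 && bioc) {
 *		(submit I/O against bioc->stripes[] here)
 *		btrfs_put_bioc(bioc);
 *	}
 */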
static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
				      const struct btrfs_fs_devices *fs_devices)
{
	if (args->fsid == NULL)
		return true;
	if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
		return true;
	return false;
}
static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
				  const struct btrfs_device *device)
{
	if (args->missing) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
		    !device->bdev)
			return true;
		return false;
	}

	if (device->devid != args->devid)
		return false;
	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
		return false;
	return true;
}
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 */
struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
				       const struct btrfs_dev_lookup_args *args)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *seed_devs;

	if (dev_args_match_fs_devices(args, fs_devices)) {
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (dev_args_match_device(args, device))
				return device;
		}
	}

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		if (!dev_args_match_fs_devices(args, seed_devs))
			continue;
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			if (dev_args_match_device(args, device))
				return device;
		}
	}

	return NULL;
}
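/*
 * Usage sketch (illustrative, not from the original source): looking up a
 * device by devid only, leaving uuid/fsid unset so the matchers above
 * ignore them.
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	args.devid = devid;
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 */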
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	unsigned int nofs_flag;

	/*
	 * We call this under the chunk_mutex, so we want to use NOFS for this
	 * allocation, however we don't want to change btrfs_alloc_device() to
	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
	 * places.
	 */
	nofs_flag = memalloc_nofs_save();
	device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}
/*
 * Allocate new device struct, set up devid and UUID.
 *
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 * @path:	a pointer to device path if available, NULL otherwise.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid, const u8 *uuid,
					const char *path)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);

	if (devid) {
		tmp = *devid;
	} else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	if (path) {
		struct rcu_string *name;

		name = rcu_string_strdup(path, GFP_KERNEL);
		if (!name) {
			btrfs_free_device(dev);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(dev->name, name);
	}

	return dev;
}
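/*
 * Usage sketch (illustrative): allocating a transient device struct with an
 * explicit devid and uuid, as add_missing_dev() above does.  Freeing with
 * btrfs_free_device() is mandatory since the result is not on any list.
 *
 *	device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 */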
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}
u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
{
	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);

	return div_u64(map->chunk_len, data_stripes);
}
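/*
 * Worked example (illustrative): a RAID5 chunk striped over 4 devices has
 * 3 data stripes, so a chunk_len of 3GiB yields a per-device stripe length
 * of 1GiB.  For SINGLE, DUP and RAID1 profiles data_stripes is 1 and the
 * stripe length equals chunk_len.
 */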
#if BITS_PER_LONG == 32
/*
 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
 * can't be accessed on 32bit systems.
 *
 * This function does a mount time check to reject the fs if it already has
 * metadata chunk beyond that limit.
 */
static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return 0;

	if (logical + length < MAX_LFS_FILESIZE)
		return 0;

	btrfs_err_32bit_limit(fs_info);
	return -EOVERFLOW;
}

/*
 * This is to give early warning for any metadata chunk reaching
 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
 * Although we can still access the metadata, it's not going to be possible
 * once the limit is reached.
 */
static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
				  u64 logical, u64 length, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
		return;

	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
		return;

	btrfs_warn_32bit_limit(fs_info);
}
#endif
static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
						  u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	if (!btrfs_test_opt(fs_info, DEGRADED)) {
		btrfs_report_missing_device(fs_info, devid, uuid, true);
		return ERR_PTR(-ENOENT);
	}

	dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
	if (IS_ERR(dev)) {
		btrfs_err(fs_info, "failed to init missing device %llu: %ld",
			  devid, PTR_ERR(dev));
		return dev;
	}

	btrfs_report_missing_device(fs_info, devid, uuid, false);

	return dev;
}
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_chunk_map *map;
	u64 logical;
	u64 length;
	u64 devid;
	u64 type;
	u8 uuid[BTRFS_UUID_SIZE];
	int index;
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);
	index = btrfs_bg_flags_to_raid_index(type);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

#if BITS_PER_LONG == 32
	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
	if (ret < 0)
		return ret;
	warn_32bit_meta_chunk(fs_info, logical, length, type);
#endif

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	map = btrfs_find_chunk_map(fs_info, logical, 1);

	/* already mapped? */
	if (map && map->start <= logical && map->start + map->chunk_len > logical) {
		btrfs_free_chunk_map(map);
		return 0;
	} else if (map) {
		btrfs_free_chunk_map(map);
	}

	map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->start = logical;
	map->chunk_len = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->type = type;
	/*
	 * We can't use the sub_stripes value, as for profiles other than
	 * RAID10, they may have 0 as sub_stripes for filesystems created by
	 * older mkfs (<v5.4).
	 * In that case, it can cause divide-by-zero errors later.
	 * Since currently sub_stripes is fixed for each profile, let's
	 * use the trusted value instead.
	 */
	map->sub_stripes = btrfs_raid_array[index].sub_stripes;
	map->verified_stripes = 0;
	map->stripe_size = btrfs_calc_stripe_length(map);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		args.devid = devid;
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		args.uuid = uuid;
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = handle_missing_device(fs_info,
								    devid, uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				ret = PTR_ERR(map->stripes[i].dev);
				btrfs_free_chunk_map(map);
				return ret;
			}
		}

		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			&(map->stripes[i].dev->dev_state));
	}

	ret = btrfs_add_chunk_map(fs_info, map);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  map->start, map->chunk_len, ret);
	}

	return ret;
}
static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	/* This will match only for multi-device seed fs */
	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = true;
		fs_devices->opened = 1;
		return fs_devices;
	}

	/*
	 * Upon first call for a seed fs fsid, just create a private copy of the
	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
	 */
	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		return ERR_PTR(ret);
	}

	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		return ERR_PTR(-EINVAL);
	}

	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);

	return fs_devices;
}
static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);
	args.uuid = dev_uuid;
	args.fsid = fs_uuid;

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, &args);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				  "failed to add missing dev %llu: %ld",
				  devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
							&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	if (device->bdev) {
		u64 max_total_bytes = bdev_nr_bytes(device->bdev);

		if (device->total_bytes > max_total_bytes) {
			btrfs_err(fs_info,
			"device total_bytes should be at most %llu but found %llu",
				  max_total_bytes, device->total_bytes);
			return -EINVAL;
		}
	}
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
				&fs_info->free_chunk_space);
	}

	return 0;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);

	/*
	 * We allocated a dummy extent, just to use extent buffer accessors.
	 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but
	 * that's fine, we will not go beyond system chunk array anyway.
	 */
	sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (!sb)
		return -ENOMEM;
	set_extent_buffer_uptodate(sb);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}

		chunk = (struct btrfs_chunk *)sb_array_offset;
		/*
		 * At least one btrfs_chunk with one stripe must be present,
		 * exact stripe count check comes afterwards
		 */
		len = btrfs_chunk_item_size(1);
		if (cur_offset + len > array_size)
			goto out_short_read;

		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
		if (!num_stripes) {
			btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
				  num_stripes, cur_offset);
			ret = -EIO;
			break;
		}

		type = btrfs_chunk_type(sb, chunk);
		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
			btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
				  type, cur_offset);
			ret = -EIO;
			break;
		}

		len = btrfs_chunk_item_size(num_stripes);
		if (cur_offset + len > array_size)
			goto out_short_read;

		ret = read_one_chunk(&key, sb, chunk);
		if (ret)
			break;

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
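/*
 * Layout note (illustrative): the sys_chunk_array parsed above is a packed
 * sequence of (btrfs_disk_key, btrfs_chunk with inline stripes) pairs:
 *
 *	[disk_key][chunk + N stripes][disk_key][chunk + M stripes]...
 *
 * which is why each iteration first validates a key, then a minimal
 * one-stripe chunk header, and only then the full btrfs_chunk_item_size().
 */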
/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev)
{
	struct btrfs_chunk_map *map;
	u64 next_start;
	bool ret = true;

	map = btrfs_find_chunk_map(fs_info, 0, U64_MAX);
	/* No chunk at all? Return false anyway */
	if (!map) {
		ret = false;
		goto out;
	}
	while (map) {
		int missing = 0;
		int max_tolerated;
		int i;

		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
				   map->start, missing, max_tolerated);
			btrfs_free_chunk_map(map);
			ret = false;
			goto out;
		}
		next_start = map->start + map->chunk_len;
		btrfs_free_chunk_map(map);

		map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start);
	}
out:
	return ret;
}
static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++)
		btrfs_readahead_node_child(node, i);
}
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	int iter_ret = 0;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Lockdep complains about possible circular locking dependency between
	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
	 * used for freeze protection of a fs (struct super_block.s_writers),
	 * which we take when starting a transaction, and extent buffers of the
	 * chunk tree if we call read_one_dev() while holding a lock on an
	 * extent buffer of the chunk tree. Since we are mounting the filesystem
	 * and at this point there can't be any concurrent task modifying the
	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
	 */
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = 0;
	key.offset = 0;
	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		struct extent_buffer *node = path->nodes[1];

		leaf = path->nodes[0];
		slot = path->slots[0];

		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			/*
			 * We are only called at mount time, so no need to take
			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
			 * we always lock first fs_info->chunk_mutex before
			 * acquiring any locks on the chunk tree. This is a
			 * requirement for chunk allocation, see the comment on
			 * top of btrfs_chunk_alloc() for details.
			 */
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
	}
	/* Catch error found during iteration */
	if (iter_ret < 0) {
		ret = iter_ret;
		goto error;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_warn(fs_info,
"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		fs_info->fs_devices->total_devices = total_dev;
		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}
int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	int ret = 0;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			device->fs_info = fs_info;
			ret = btrfs_get_dev_zone_info(device, false);
			if (ret)
				break;
		}

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}
static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	if (!device->fs_info->dev_root)
		return 0;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
				  ret, btrfs_dev_name(device));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
					  btrfs_dev_name(device), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				btrfs_dev_name(device), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(trans, eb);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);

	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   btrfs_dev_name(dev),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       btrfs_dev_name(dev),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	args.devid = stats->devid;
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}
/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
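/*
 * Worked example (illustrative): DUP and RAID1 have ncopies == 2, so a 1GiB
 * block group of either profile consumes 2GiB of raw device space, while
 * SINGLE and RAID0 have ncopies == 1 and a factor of 1.
 */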
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_chunk_map *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	if (!map) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	stripe_len = btrfs_calc_stripe_length(map);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, map->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/*
	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
	 * space. Although the kernel can handle it without problem, better
	 * to warn the users.
	 */
	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
		btrfs_warn(fs_info,
		"devid %llu physical %llu len %llu inside the reserved space",
			   devid, physical_offset, physical_len);

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  map->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}

	if (dev->zone_info) {
		u64 zone_size = dev->zone_info->zone_size;

		if (!IS_ALIGNED(physical_offset, zone_size) ||
		    !IS_ALIGNED(physical_len, zone_size)) {
			btrfs_err(fs_info,
"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
				  devid, physical_offset, physical_len);
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	btrfs_free_chunk_map(map);
	return ret;
}
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	int ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
		struct btrfs_chunk_map *map;

		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
		if (map->num_stripes != map->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  map->start, map->verified_stripes, map->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&fs_info->mapping_tree_lock);
	return ret;
}
/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree.  This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/*
	 * We don't have a dev_root because we mounted with ignorebadroots and
	 * failed to load the root, so we want to skip the verification in this
	 * case for sure.
	 *
	 * However if the dev root is fine, but the tree itself is corrupted
	 * we'd still fail to mount.  This verification is only to make sure
	 * writes can happen safely, so instead just bypass this check
	 * completely in the case of IGNOREBADROOTS.
	 */
	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
		return 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	btrfs_put_block_group(cache);

	sb_start_write(fs_info->sb);
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		sb_end_write(fs_info->sb);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);
	sb_end_write(fs_info->sb);

	return ret;
}
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_block_group *cache;

	if (!btrfs_is_zoned(fs_info))
		return false;

	/* Do not attempt to repair in degraded state */
	if (btrfs_test_opt(fs_info, DEGRADED))
		return true;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return true;

	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
		btrfs_put_block_group(cache);
		return true;
	}

	kthread_run(relocating_repair_kthread, cache,
		    "btrfs-relocating-repair");

	return true;
}
static void map_raid56_repair_block(struct btrfs_io_context *bioc,
				    struct btrfs_io_stripe *smap,
				    u64 logical)
{
	int data_stripes = nr_bioc_data_stripes(bioc);
	int i;

	for (i = 0; i < data_stripes; i++) {
		u64 stripe_start = bioc->full_stripe_logical +
				   btrfs_stripe_nr_to_offset(i);

		if (logical >= stripe_start &&
		    logical < stripe_start + BTRFS_STRIPE_LEN)
			break;
	}
	ASSERT(i < data_stripes);
	smap->dev = bioc->stripes[i].dev;
	smap->physical = bioc->stripes[i].physical +
			((logical - bioc->full_stripe_logical) &
			 BTRFS_STRIPE_LEN_MASK);
}
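/*
 * Worked example (illustrative): with a 64KiB BTRFS_STRIPE_LEN, a repair
 * write at full_stripe_logical + 68KiB falls into data stripe i == 1, and
 * the masked remainder (68KiB & BTRFS_STRIPE_LEN_MASK) == 4KiB is the byte
 * offset added to that stripe's physical start.
 */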
/*
 * Map a repair write into a single device.
 *
 * A repair write is triggered by read time repair or scrub, which would only
 * update the contents of a single device.
 * It does not update any other mirrors nor go through the RMW path.
 *
 * Callers should ensure:
 *
 * - Call btrfs_bio_counter_inc_blocked() first
 * - The range does not cross stripe boundary
 * - Has a valid @mirror_num passed in.
 */
int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
			   struct btrfs_io_stripe *smap, u64 logical,
			   u32 length, int mirror_num)
{
	struct btrfs_io_context *bioc = NULL;
	u64 map_length = length;
	int mirror_ret = mirror_num;
	int ret;

	ASSERT(mirror_num > 0);

	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
			      &bioc, smap, &mirror_ret);
	if (ret < 0)
		return ret;

	/* The map range should not cross stripe boundary. */
	ASSERT(map_length >= length);

	/* Already mapped to single stripe. */
	if (!bioc)
		goto out;

	/* Map the RAID56 multi-stripe writes to a single one. */
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		map_raid56_repair_block(bioc, smap, logical);
		goto out;
	}

	ASSERT(mirror_num <= bioc->num_stripes);
	smap->dev = bioc->stripes[mirror_num - 1].dev;
	smap->physical = bioc->stripes[mirror_num - 1].physical;
out:
	btrfs_put_bioc(bioc);
	return 0;
}
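/*
 * Usage sketch (illustrative, not from the original source): a read-repair
 * caller would map the target mirror and then rewrite the corrected data:
 *
 *	struct btrfs_io_stripe smap;
 *
 *	btrfs_bio_counter_inc_blocked(fs_info);
 *	ret = btrfs_map_repair_block(fs_info, &smap, logical, length,
 *				     mirror_num);
 *	(on success, write the corrected data to smap.dev at smap.physical)
 */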