/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0,	/* 0 == as many as possible */
		.devs_min = 4,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 3,
	},
};
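
/*
 * Convert a BTRFS_RAID_* table index to the matching BTRFS_BLOCK_GROUP_*
 * profile bit; SINGLE has no profile bit and maps to zero.
 */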
const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};
/*
 * Table to convert BTRFS_RAID_* to the error code if minimum number of devices
 * condition is not met. Zero means there's no corresponding
 * BTRFS_ERROR_DEV_*_NOT_MET value.
 */
const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	[BTRFS_RAID_RAID1]  = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	[BTRFS_RAID_DUP]    = 0,
	[BTRFS_RAID_RAID0]  = 0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	[BTRFS_RAID_RAID6]  = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
};
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}
static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}
/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid: a pointer to UUID for this FS. If NULL a new UUID is
 *        generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error. Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}
void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}
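
/*
 * Find a device on @head by devid; if @uuid is non-NULL the device UUID
 * must match as well.
 */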
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
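
/* Find the btrfs_fs_devices for @fsid among all scanned filesystems. */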
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
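
/*
 * Open the block device at @device_path exclusively for @holder, optionally
 * flush pending writes, and read the btrfs superblock into *@bh.  On failure
 * both *@bdev and *@bh are cleared.
 */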
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
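
/*
 * Splice the bio list starting at @head back onto the front of the device's
 * pending list so it is retried later, e.g. after hitting congestion.
 */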
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device. We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}
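
/* Work item callback: submit whatever bios were queued for this device. */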
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
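
/*
 * Drop any stale (unmounted, non-seed) registered device that shares the
 * path of @cur_dev but is a different btrfs_device, freeing the whole
 * btrfs_fs_devices if it was the last entry.
 */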
void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * Todo: This won't be enough. What if the same device
			 * comes back (with new uuid and) with its mapper path?
			 * But for now, this does help as mostly an admin will
			 * either use mapper or non mapper path throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
				     rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
				break;
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number, or the last-in if
			 * the generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	if (ret > 0)
		btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
						 GFP_KERNEL);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);
	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
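
/* Sync and invalidate a writable device's bdev, then drop our reference to it. */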
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (device->bdev && device->writeable) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);
}
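
/*
 * Swap @device out of the device list for a bare copy carrying only the
 * devid, uuid and name, so late readers see a device without an open bdev.
 * Runs under uuid_mutex while closing a filesystem.
 */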
static void btrfs_prepare_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (device->writeable &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->missing)
		fs_devices->missing_devices--;

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;
	struct list_head pending_put;

	INIT_LIST_HEAD(&pending_put);

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_prepare_close_one_device(device);
		list_add(&device->dev_list, &pending_put);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * btrfs_show_devname() takes the device_list_mutex, and a call
	 * to blkdev_put() can lead the vfs back into that function.
	 * So do the put outside of device_list_mutex, as of now.
	 */
	while (!list_empty(&pending_put)) {
		device = list_first_entry(&pending_put,
					  struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_close_bdev(device);
		call_rcu(&device->rcu, free_device);
	}

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}
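
/*
 * Open every device in the list that we can, read and verify its superblock,
 * and record the one with the highest generation as latest_bdev.  Fails only
 * if no device could be opened at all.
 */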
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					  &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}
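
/*
 * Read the superblock at @bytenr through the page cache (no set_blocksize),
 * returning 0 and the mapped page on success, nonzero if the super is
 * missing, out of range, or fails the bytenr/magic check.
 */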
int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
			  struct page **page, struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				    index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
	    (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}
/*
 * Look for a btrfs signature on a device. This may be called out of the
 * mount path and we are not allowed to call set_blocksize during the scan.
 * The superblock is read via the pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
		goto error_bdev_put;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			pr_info("BTRFS: device label %s ", disk_super->label);
		} else {
			pr_info("BTRFS: device fsid %pU ", disk_super->fsid);
		}

		pr_cont("devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}
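
/*
 * Find and delete the dev extent item covering @start for @device, returning
 * the extent's length in @dev_extent_len.
 */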
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
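
/*
 * Return the logical offset right after the highest existing chunk mapping,
 * i.e. the start for the next chunk to be allocated.
 */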
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}
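
/*
 * Find the next available device id: one more than the highest DEV_ITEM key
 * offset in the chunk tree, or 1 if no device items exist.
 */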
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}
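
/*
 * Delete the DEV_ITEM for @device from the chunk tree, in a transaction
 * of its own.
 */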
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}
/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_group[i]))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_mindev_error[i];

			if (ret)
				return ret;
		}
	}

	return 0;
}
struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs,
					struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !next_device->missing && next_device->bdev)
			return next_device;
	}

	return NULL;
}
/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev)
{
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}
int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	u64 num_devices;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);

	ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_devspec(root, devid, device_path,
					   &device);
	if (ret)
		goto out;

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		device->fs_devices->missing_devices--;

	btrfs_assign_next_active_device(root->fs_info, device, NULL);

	if (device->bdev) {
		device->fs_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	}

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list.  All that's left is to zero out the old
	 * supers and free the device.
	 */
	if (device->writeable)
		btrfs_scratch_superblocks(device->bdev, device->name->str);

	btrfs_close_bdev(device);
	call_rcu(&device->rcu, free_device);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

out:
	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		unlock_chunks(root);
	}
	goto out;
}
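
/*
 * Unhook the dev-replace source device from its fs_devices lists and adjust
 * the device counters.  The caller must hold device_list_mutex.
 */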
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
					struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	/*
	 * in case of a fs with no seed, srcdev->fs_devices will point
	 * to the fs_devices of fs_info. However when the dev being replaced
	 * is a seed dev it will point to the seed's local fs_devices. In
	 * short, srcdev will have its correct fs_devices in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (srcdev->missing)
		fs_devices->missing_devices--;

	if (srcdev->writeable)
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (srcdev->writeable) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	btrfs_close_bdev(srcdev);

	call_rcu(&srcdev->rcu, free_device);

	/*
	 * unless fs_devices is a seed fs, num_devices shouldn't go
	 * to zero
	 */
	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);

	/* if there are no devs we'd rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	mutex_lock(&uuid_mutex);
	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_info->fs_devices->open_devices--;

	fs_info->fs_devices->num_devices--;

	btrfs_assign_next_active_device(fs_info, tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);

	/*
	 * The update_dev_time() within btrfs_scratch_superblocks()
	 * may lead to a call to btrfs_show_devname() which will try
	 * to hold device_list_mutex. And here this device
	 * is already out of the device list, so we don't have to hold
	 * the device_list_mutex lock.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);

	btrfs_close_bdev(tgtdev);
	call_rcu(&tgtdev->rcu, free_device);
}
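
/*
 * Read the superblock at @device_path and look the device up among the
 * currently registered devices of the running filesystem.
 */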
static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}
2123 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
2124 char *device_path,
2125 struct btrfs_device **device)
2127 *device = NULL;
2128 if (strcmp(device_path, "missing") == 0) {
2129 struct list_head *devices;
2130 struct btrfs_device *tmp;
2132 devices = &root->fs_info->fs_devices->devices;
2133 /*
2134  * It is safe to read the devices since the volume_mutex
2135  * is held by the caller.
2136  */
2137 list_for_each_entry(tmp, devices, dev_list) {
2138 if (tmp->in_fs_metadata && !tmp->bdev) {
2139 *device = tmp;
2140 break;
2144 if (!*device)
2145 return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2147 return 0;
2148 } else {
2149 return btrfs_find_device_by_path(root, device_path, device);
2150 }
2151 }
2153 /*
2154  * Look up a device given by device id, or by the path if the id is 0.
2155  */
2156 int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
2157 char *devpath,
2158 struct btrfs_device **device)
2160 int ret;
2162 if (devid) {
2163 ret = 0;
2164 *device = btrfs_find_device(root->fs_info, devid, NULL,
2165 NULL);
2166 if (!*device)
2167 ret = -ENOENT;
2168 } else {
2169 if (!devpath || !devpath[0])
2170 return -EINVAL;
2172 ret = btrfs_find_device_missing_or_by_path(root, devpath,
2173 device);
2175 return ret;
2176 }
2178 /*
2179  * Does all the dirty work required for changing the filesystem's UUID.
2180  */
2181 static int btrfs_prepare_sprout(struct btrfs_root *root)
2183 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2184 struct btrfs_fs_devices *old_devices;
2185 struct btrfs_fs_devices *seed_devices;
2186 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
2187 struct btrfs_device *device;
2188 u64 super_flags;
2190 BUG_ON(!mutex_is_locked(&uuid_mutex));
2191 if (!fs_devices->seeding)
2192 return -EINVAL;
2194 seed_devices = __alloc_fs_devices();
2195 if (IS_ERR(seed_devices))
2196 return PTR_ERR(seed_devices);
2198 old_devices = clone_fs_devices(fs_devices);
2199 if (IS_ERR(old_devices)) {
2200 kfree(seed_devices);
2201 return PTR_ERR(old_devices);
2204 list_add(&old_devices->list, &fs_uuids);
2206 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2207 seed_devices->opened = 1;
2208 INIT_LIST_HEAD(&seed_devices->devices);
2209 INIT_LIST_HEAD(&seed_devices->alloc_list);
2210 mutex_init(&seed_devices->device_list_mutex);
2212 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2213 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2214 synchronize_rcu);
2215 list_for_each_entry(device, &seed_devices->devices, dev_list)
2216 device->fs_devices = seed_devices;
2218 lock_chunks(root);
2219 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2220 unlock_chunks(root);
2222 fs_devices->seeding = 0;
2223 fs_devices->num_devices = 0;
2224 fs_devices->open_devices = 0;
2225 fs_devices->missing_devices = 0;
2226 fs_devices->rotating = 0;
2227 fs_devices->seed = seed_devices;
2229 generate_random_uuid(fs_devices->fsid);
2230 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2231 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2232 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2234 super_flags = btrfs_super_flags(disk_super) &
2235 ~BTRFS_SUPER_FLAG_SEEDING;
2236 btrfs_set_super_flags(disk_super, super_flags);
2238 return 0;
2239 }
2241 /*
2242  * Store the expected generation for seed devices in device items.
2243  */
2244 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2245 struct btrfs_root *root)
2247 struct btrfs_path *path;
2248 struct extent_buffer *leaf;
2249 struct btrfs_dev_item *dev_item;
2250 struct btrfs_device *device;
2251 struct btrfs_key key;
2252 u8 fs_uuid[BTRFS_UUID_SIZE];
2253 u8 dev_uuid[BTRFS_UUID_SIZE];
2254 u64 devid;
2255 int ret;
2257 path = btrfs_alloc_path();
2258 if (!path)
2259 return -ENOMEM;
2261 root = root->fs_info->chunk_root;
2262 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2263 key.offset = 0;
2264 key.type = BTRFS_DEV_ITEM_KEY;
2266 while (1) {
2267 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2268 if (ret < 0)
2269 goto error;
2271 leaf = path->nodes[0];
2272 next_slot:
2273 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2274 ret = btrfs_next_leaf(root, path);
2275 if (ret > 0)
2276 break;
2277 if (ret < 0)
2278 goto error;
2279 leaf = path->nodes[0];
2280 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2281 btrfs_release_path(path);
2282 continue;
2285 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2286 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2287 key.type != BTRFS_DEV_ITEM_KEY)
2288 break;
2290 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2291 struct btrfs_dev_item);
2292 devid = btrfs_device_id(leaf, dev_item);
2293 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2294 BTRFS_UUID_SIZE);
2295 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2296 BTRFS_UUID_SIZE);
2297 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2298 fs_uuid);
2299 BUG_ON(!device); /* Logic error */
2301 if (device->fs_devices->seeding) {
2302 btrfs_set_device_generation(leaf, dev_item,
2303 device->generation);
2304 btrfs_mark_buffer_dirty(leaf);
2307 path->slots[0]++;
2308 goto next_slot;
2310 ret = 0;
2311 error:
2312 btrfs_free_path(path);
2313 return ret;
2314 }
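/*
 * Add a new device to a mounted filesystem. For a seed filesystem this
 * first sprouts a new writable fs on top of it (btrfs_prepare_sprout)
 * and renames the fsid in sysfs afterwards.
 */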
2316 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2318 struct request_queue *q;
2319 struct btrfs_trans_handle *trans;
2320 struct btrfs_device *device;
2321 struct block_device *bdev;
2322 struct list_head *devices;
2323 struct super_block *sb = root->fs_info->sb;
2324 struct rcu_string *name;
2325 u64 tmp;
2326 int seeding_dev = 0;
2327 int ret = 0;
2329 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2330 return -EROFS;
2332 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2333 root->fs_info->bdev_holder);
2334 if (IS_ERR(bdev))
2335 return PTR_ERR(bdev);
2337 if (root->fs_info->fs_devices->seeding) {
2338 seeding_dev = 1;
2339 down_write(&sb->s_umount);
2340 mutex_lock(&uuid_mutex);
2343 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2345 devices = &root->fs_info->fs_devices->devices;
2347 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2348 list_for_each_entry(device, devices, dev_list) {
2349 if (device->bdev == bdev) {
2350 ret = -EEXIST;
2351 mutex_unlock(
2352 &root->fs_info->fs_devices->device_list_mutex);
2353 goto error;
2356 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2358 device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2359 if (IS_ERR(device)) {
2360 /* we can safely leave the fs_devices entry around */
2361 ret = PTR_ERR(device);
2362 goto error;
2365 name = rcu_string_strdup(device_path, GFP_KERNEL);
2366 if (!name) {
2367 kfree(device);
2368 ret = -ENOMEM;
2369 goto error;
2371 rcu_assign_pointer(device->name, name);
2373 trans = btrfs_start_transaction(root, 0);
2374 if (IS_ERR(trans)) {
2375 rcu_string_free(device->name);
2376 kfree(device);
2377 ret = PTR_ERR(trans);
2378 goto error;
2381 q = bdev_get_queue(bdev);
2382 if (blk_queue_discard(q))
2383 device->can_discard = 1;
2384 device->writeable = 1;
2385 device->generation = trans->transid;
2386 device->io_width = root->sectorsize;
2387 device->io_align = root->sectorsize;
2388 device->sector_size = root->sectorsize;
2389 device->total_bytes = i_size_read(bdev->bd_inode);
2390 device->disk_total_bytes = device->total_bytes;
2391 device->commit_total_bytes = device->total_bytes;
2392 device->dev_root = root->fs_info->dev_root;
2393 device->bdev = bdev;
2394 device->in_fs_metadata = 1;
2395 device->is_tgtdev_for_dev_replace = 0;
2396 device->mode = FMODE_EXCL;
2397 device->dev_stats_valid = 1;
2398 set_blocksize(device->bdev, 4096);
2400 if (seeding_dev) {
2401 sb->s_flags &= ~MS_RDONLY;
2402 ret = btrfs_prepare_sprout(root);
2403 BUG_ON(ret); /* -ENOMEM */
2406 device->fs_devices = root->fs_info->fs_devices;
2408 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2409 lock_chunks(root);
2410 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2411 list_add(&device->dev_alloc_list,
2412 &root->fs_info->fs_devices->alloc_list);
2413 root->fs_info->fs_devices->num_devices++;
2414 root->fs_info->fs_devices->open_devices++;
2415 root->fs_info->fs_devices->rw_devices++;
2416 root->fs_info->fs_devices->total_devices++;
2417 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2419 spin_lock(&root->fs_info->free_chunk_lock);
2420 root->fs_info->free_chunk_space += device->total_bytes;
2421 spin_unlock(&root->fs_info->free_chunk_lock);
2423 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2424 root->fs_info->fs_devices->rotating = 1;
2426 tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2427 btrfs_set_super_total_bytes(root->fs_info->super_copy,
2428 tmp + device->total_bytes);
2430 tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2431 btrfs_set_super_num_devices(root->fs_info->super_copy,
2432 tmp + 1);
2434 /* add sysfs device entry */
2435 btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
2437 /*
2438  * we've got more storage, clear any full flags on the space
2439  * infos
2440  */
2441 btrfs_clear_space_info_full(root->fs_info);
2443 unlock_chunks(root);
2444 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2446 if (seeding_dev) {
2447 lock_chunks(root);
2448 ret = init_first_rw_device(trans, root, device);
2449 unlock_chunks(root);
2450 if (ret) {
2451 btrfs_abort_transaction(trans, ret);
2452 goto error_trans;
2456 ret = btrfs_add_device(trans, root, device);
2457 if (ret) {
2458 btrfs_abort_transaction(trans, ret);
2459 goto error_trans;
2462 if (seeding_dev) {
2463 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2465 ret = btrfs_finish_sprout(trans, root);
2466 if (ret) {
2467 btrfs_abort_transaction(trans, ret);
2468 goto error_trans;
2469 }
2471 /* Sprouting would change the fsid of the mounted root,
2472  * so rename the fsid on sysfs.
2473  */
2474 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2475 root->fs_info->fsid);
2476 if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
2477 fsid_buf))
2478 btrfs_warn(root->fs_info,
2479 "sysfs: failed to create fsid for sprout");
2482 root->fs_info->num_tolerated_disk_barrier_failures =
2483 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2484 ret = btrfs_commit_transaction(trans, root);
2486 if (seeding_dev) {
2487 mutex_unlock(&uuid_mutex);
2488 up_write(&sb->s_umount);
2490 if (ret) /* transaction commit */
2491 return ret;
2493 ret = btrfs_relocate_sys_chunks(root);
2494 if (ret < 0)
2495 btrfs_handle_fs_error(root->fs_info, ret,
2496 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2497 trans = btrfs_attach_transaction(root);
2498 if (IS_ERR(trans)) {
2499 if (PTR_ERR(trans) == -ENOENT)
2500 return 0;
2501 return PTR_ERR(trans);
2503 ret = btrfs_commit_transaction(trans, root);
2506 /* Update ctime/mtime for libblkid */
2507 update_dev_time(device_path);
2508 return ret;
2510 error_trans:
2511 btrfs_end_transaction(trans, root);
2512 rcu_string_free(device->name);
2513 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
2514 kfree(device);
2515 error:
2516 blkdev_put(bdev, FMODE_EXCL);
2517 if (seeding_dev) {
2518 mutex_unlock(&uuid_mutex);
2519 up_write(&sb->s_umount);
2520 }
2521 return ret;
2522 }
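/*
 * Open and initialize the target device of a device replace operation;
 * it must be at least as large as the source device, whose size and
 * usage counters are copied over.
 */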
2524 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2525 struct btrfs_device *srcdev,
2526 struct btrfs_device **device_out)
2528 struct request_queue *q;
2529 struct btrfs_device *device;
2530 struct block_device *bdev;
2531 struct btrfs_fs_info *fs_info = root->fs_info;
2532 struct list_head *devices;
2533 struct rcu_string *name;
2534 u64 devid = BTRFS_DEV_REPLACE_DEVID;
2535 int ret = 0;
2537 *device_out = NULL;
2538 if (fs_info->fs_devices->seeding) {
2539 btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2540 return -EINVAL;
2543 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2544 fs_info->bdev_holder);
2545 if (IS_ERR(bdev)) {
2546 btrfs_err(fs_info, "target device %s is invalid!", device_path);
2547 return PTR_ERR(bdev);
2550 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2552 devices = &fs_info->fs_devices->devices;
2553 list_for_each_entry(device, devices, dev_list) {
2554 if (device->bdev == bdev) {
2555 btrfs_err(fs_info,
2556 "target device is in the filesystem!");
2557 ret = -EEXIST;
2558 goto error;
2563 if (i_size_read(bdev->bd_inode) <
2564 btrfs_device_get_total_bytes(srcdev)) {
2565 btrfs_err(fs_info,
2566 "target device is smaller than source device!");
2567 ret = -EINVAL;
2568 goto error;
2572 device = btrfs_alloc_device(NULL, &devid, NULL);
2573 if (IS_ERR(device)) {
2574 ret = PTR_ERR(device);
2575 goto error;
2578 name = rcu_string_strdup(device_path, GFP_NOFS);
2579 if (!name) {
2580 kfree(device);
2581 ret = -ENOMEM;
2582 goto error;
2584 rcu_assign_pointer(device->name, name);
2586 q = bdev_get_queue(bdev);
2587 if (blk_queue_discard(q))
2588 device->can_discard = 1;
2589 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2590 device->writeable = 1;
2591 device->generation = 0;
2592 device->io_width = root->sectorsize;
2593 device->io_align = root->sectorsize;
2594 device->sector_size = root->sectorsize;
2595 device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2596 device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2597 device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2598 ASSERT(list_empty(&srcdev->resized_list));
2599 device->commit_total_bytes = srcdev->commit_total_bytes;
2600 device->commit_bytes_used = device->bytes_used;
2601 device->dev_root = fs_info->dev_root;
2602 device->bdev = bdev;
2603 device->in_fs_metadata = 1;
2604 device->is_tgtdev_for_dev_replace = 1;
2605 device->mode = FMODE_EXCL;
2606 device->dev_stats_valid = 1;
2607 set_blocksize(device->bdev, 4096);
2608 device->fs_devices = fs_info->fs_devices;
2609 list_add(&device->dev_list, &fs_info->fs_devices->devices);
2610 fs_info->fs_devices->num_devices++;
2611 fs_info->fs_devices->open_devices++;
2612 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2614 *device_out = device;
2615 return ret;
2617 error:
2618 blkdev_put(bdev, FMODE_EXCL);
2619 return ret;
2622 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2623 struct btrfs_device *tgtdev)
2625 WARN_ON(fs_info->fs_devices->rw_devices == 0);
2626 tgtdev->io_width = fs_info->dev_root->sectorsize;
2627 tgtdev->io_align = fs_info->dev_root->sectorsize;
2628 tgtdev->sector_size = fs_info->dev_root->sectorsize;
2629 tgtdev->dev_root = fs_info->dev_root;
2630 tgtdev->in_fs_metadata = 1;
2631 }
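/*
 * Write the in-memory state of a device back into its DEV_ITEM in the
 * chunk tree.
 */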
2633 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2634 struct btrfs_device *device)
2636 int ret;
2637 struct btrfs_path *path;
2638 struct btrfs_root *root;
2639 struct btrfs_dev_item *dev_item;
2640 struct extent_buffer *leaf;
2641 struct btrfs_key key;
2643 root = device->dev_root->fs_info->chunk_root;
2645 path = btrfs_alloc_path();
2646 if (!path)
2647 return -ENOMEM;
2649 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2650 key.type = BTRFS_DEV_ITEM_KEY;
2651 key.offset = device->devid;
2653 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2654 if (ret < 0)
2655 goto out;
2657 if (ret > 0) {
2658 ret = -ENOENT;
2659 goto out;
2662 leaf = path->nodes[0];
2663 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2665 btrfs_set_device_id(leaf, dev_item, device->devid);
2666 btrfs_set_device_type(leaf, dev_item, device->type);
2667 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2668 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2669 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2670 btrfs_set_device_total_bytes(leaf, dev_item,
2671 btrfs_device_get_disk_total_bytes(device));
2672 btrfs_set_device_bytes_used(leaf, dev_item,
2673 btrfs_device_get_bytes_used(device));
2674 btrfs_mark_buffer_dirty(leaf);
2676 out:
2677 btrfs_free_path(path);
2678 return ret;
2679 }
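/*
 * Grow a writeable device to new_size: update the in-memory and
 * superblock totals, queue the device on the resized list and persist
 * the change via btrfs_update_device().
 */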
2681 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2682 struct btrfs_device *device, u64 new_size)
2684 struct btrfs_super_block *super_copy =
2685 device->dev_root->fs_info->super_copy;
2686 struct btrfs_fs_devices *fs_devices;
2687 u64 old_total;
2688 u64 diff;
2690 if (!device->writeable)
2691 return -EACCES;
2693 lock_chunks(device->dev_root);
2694 old_total = btrfs_super_total_bytes(super_copy);
2695 diff = new_size - device->total_bytes;
2697 if (new_size <= device->total_bytes ||
2698 device->is_tgtdev_for_dev_replace) {
2699 unlock_chunks(device->dev_root);
2700 return -EINVAL;
2703 fs_devices = device->dev_root->fs_info->fs_devices;
2705 btrfs_set_super_total_bytes(super_copy, old_total + diff);
2706 device->fs_devices->total_rw_bytes += diff;
2708 btrfs_device_set_total_bytes(device, new_size);
2709 btrfs_device_set_disk_total_bytes(device, new_size);
2710 btrfs_clear_space_info_full(device->dev_root->fs_info);
2711 if (list_empty(&device->resized_list))
2712 list_add_tail(&device->resized_list,
2713 &fs_devices->resized_devices);
2714 unlock_chunks(device->dev_root);
2716 return btrfs_update_device(trans, device);
2717 }
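/* Delete the CHUNK_ITEM of the given chunk from the chunk tree. */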
2719 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2720 struct btrfs_root *root, u64 chunk_objectid,
2721 u64 chunk_offset)
2723 int ret;
2724 struct btrfs_path *path;
2725 struct btrfs_key key;
2727 root = root->fs_info->chunk_root;
2728 path = btrfs_alloc_path();
2729 if (!path)
2730 return -ENOMEM;
2732 key.objectid = chunk_objectid;
2733 key.offset = chunk_offset;
2734 key.type = BTRFS_CHUNK_ITEM_KEY;
2736 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2737 if (ret < 0)
2738 goto out;
2739 else if (ret > 0) { /* Logic error or corruption */
2740 btrfs_handle_fs_error(root->fs_info, -ENOENT,
2741 "Failed lookup while freeing chunk.");
2742 ret = -ENOENT;
2743 goto out;
2746 ret = btrfs_del_item(trans, root, path);
2747 if (ret < 0)
2748 btrfs_handle_fs_error(root->fs_info, ret,
2749 "Failed to delete chunk item.");
2750 out:
2751 btrfs_free_path(path);
2752 return ret;
2753 }
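/*
 * Remove the matching chunk entry from the superblock's sys_chunk_array,
 * compacting the array in place.
 */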
2755 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2756 chunk_offset)
2758 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2759 struct btrfs_disk_key *disk_key;
2760 struct btrfs_chunk *chunk;
2761 u8 *ptr;
2762 int ret = 0;
2763 u32 num_stripes;
2764 u32 array_size;
2765 u32 len = 0;
2766 u32 cur;
2767 struct btrfs_key key;
2769 lock_chunks(root);
2770 array_size = btrfs_super_sys_array_size(super_copy);
2772 ptr = super_copy->sys_chunk_array;
2773 cur = 0;
2775 while (cur < array_size) {
2776 disk_key = (struct btrfs_disk_key *)ptr;
2777 btrfs_disk_key_to_cpu(&key, disk_key);
2779 len = sizeof(*disk_key);
2781 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2782 chunk = (struct btrfs_chunk *)(ptr + len);
2783 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2784 len += btrfs_chunk_item_size(num_stripes);
2785 } else {
2786 ret = -EIO;
2787 break;
2789 if (key.objectid == chunk_objectid &&
2790 key.offset == chunk_offset) {
2791 memmove(ptr, ptr + len, array_size - (cur + len));
2792 array_size -= len;
2793 btrfs_set_super_sys_array_size(super_copy, array_size);
2794 } else {
2795 ptr += len;
2796 cur += len;
2799 unlock_chunks(root);
2800 return ret;
2801 }
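/*
 * Remove a relocated chunk: free its device extents, delete the chunk
 * tree item (and the sys_chunk_array copy for SYSTEM chunks) and remove
 * the block group.
 */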
2803 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2804 struct btrfs_root *root, u64 chunk_offset)
2806 struct extent_map_tree *em_tree;
2807 struct extent_map *em;
2808 struct btrfs_root *extent_root = root->fs_info->extent_root;
2809 struct map_lookup *map;
2810 u64 dev_extent_len = 0;
2811 u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2812 int i, ret = 0;
2813 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2815 /* Just in case */
2816 root = root->fs_info->chunk_root;
2817 em_tree = &root->fs_info->mapping_tree.map_tree;
2819 read_lock(&em_tree->lock);
2820 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2821 read_unlock(&em_tree->lock);
2823 if (!em || em->start > chunk_offset ||
2824 em->start + em->len < chunk_offset) {
2825 /*
2826  * This is a logic error, but we don't want to just rely on the
2827  * user having built with ASSERT enabled, so if ASSERT doesn't
2828  * do anything we still error out.
2829  */
2830 ASSERT(0);
2831 if (em)
2832 free_extent_map(em);
2833 return -EINVAL;
2835 map = em->map_lookup;
2836 lock_chunks(root->fs_info->chunk_root);
2837 check_system_chunk(trans, extent_root, map->type);
2838 unlock_chunks(root->fs_info->chunk_root);
2840 /*
2841  * Take the device list mutex to prevent races with the final phase of
2842  * a device replace operation that replaces the device object associated
2843  * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2844  */
2845 mutex_lock(&fs_devices->device_list_mutex);
2846 for (i = 0; i < map->num_stripes; i++) {
2847 struct btrfs_device *device = map->stripes[i].dev;
2848 ret = btrfs_free_dev_extent(trans, device,
2849 map->stripes[i].physical,
2850 &dev_extent_len);
2851 if (ret) {
2852 mutex_unlock(&fs_devices->device_list_mutex);
2853 btrfs_abort_transaction(trans, ret);
2854 goto out;
2857 if (device->bytes_used > 0) {
2858 lock_chunks(root);
2859 btrfs_device_set_bytes_used(device,
2860 device->bytes_used - dev_extent_len);
2861 spin_lock(&root->fs_info->free_chunk_lock);
2862 root->fs_info->free_chunk_space += dev_extent_len;
2863 spin_unlock(&root->fs_info->free_chunk_lock);
2864 btrfs_clear_space_info_full(root->fs_info);
2865 unlock_chunks(root);
2868 if (map->stripes[i].dev) {
2869 ret = btrfs_update_device(trans, map->stripes[i].dev);
2870 if (ret) {
2871 mutex_unlock(&fs_devices->device_list_mutex);
2872 btrfs_abort_transaction(trans, ret);
2873 goto out;
2877 mutex_unlock(&fs_devices->device_list_mutex);
2879 ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2880 if (ret) {
2881 btrfs_abort_transaction(trans, ret);
2882 goto out;
2885 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2887 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2888 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2889 if (ret) {
2890 btrfs_abort_transaction(trans, ret);
2891 goto out;
2895 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2896 if (ret) {
2897 btrfs_abort_transaction(trans, ret);
2898 goto out;
2901 out:
2902 /* once for us */
2903 free_extent_map(em);
2904 return ret;
2905 }
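/*
 * Relocate all extents out of a chunk, then remove it. The caller must
 * hold delete_unused_bgs_mutex, see the comment below.
 */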
2907 static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2909 struct btrfs_root *extent_root;
2910 struct btrfs_trans_handle *trans;
2911 int ret;
2913 root = root->fs_info->chunk_root;
2914 extent_root = root->fs_info->extent_root;
2916 /*
2917  * Prevent races with automatic removal of unused block groups.
2918  * After we relocate and before we remove the chunk with offset
2919  * chunk_offset, automatic removal of the block group can kick in,
2920  * resulting in a failure when calling btrfs_remove_chunk() below.
2921  *
2922  * Make sure to acquire this mutex before doing a tree search (dev
2923  * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2924  * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2925  * we release the path used to search the chunk/dev tree and before
2926  * the current task acquires this mutex and calls us.
2927  */
2928 ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2930 ret = btrfs_can_relocate(extent_root, chunk_offset);
2931 if (ret)
2932 return -ENOSPC;
2934 /* step one, relocate all the extents inside this chunk */
2935 btrfs_scrub_pause(root);
2936 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2937 btrfs_scrub_continue(root);
2938 if (ret)
2939 return ret;
2941 trans = btrfs_start_trans_remove_block_group(root->fs_info,
2942 chunk_offset);
2943 if (IS_ERR(trans)) {
2944 ret = PTR_ERR(trans);
2945 btrfs_handle_fs_error(root->fs_info, ret, NULL);
2946 return ret;
2947 }
2949 /*
2950  * step two, delete the device extents and the
2951  * chunk tree entries
2952  */
2953 ret = btrfs_remove_chunk(trans, root, chunk_offset);
2954 btrfs_end_transaction(trans, extent_root);
2955 return ret;
2958 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2960 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2961 struct btrfs_path *path;
2962 struct extent_buffer *leaf;
2963 struct btrfs_chunk *chunk;
2964 struct btrfs_key key;
2965 struct btrfs_key found_key;
2966 u64 chunk_type;
2967 bool retried = false;
2968 int failed = 0;
2969 int ret;
2971 path = btrfs_alloc_path();
2972 if (!path)
2973 return -ENOMEM;
2975 again:
2976 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2977 key.offset = (u64)-1;
2978 key.type = BTRFS_CHUNK_ITEM_KEY;
2980 while (1) {
2981 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2982 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2983 if (ret < 0) {
2984 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2985 goto error;
2987 BUG_ON(ret == 0); /* Corruption */
2989 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2990 key.type);
2991 if (ret)
2992 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2993 if (ret < 0)
2994 goto error;
2995 if (ret > 0)
2996 break;
2998 leaf = path->nodes[0];
2999 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3001 chunk = btrfs_item_ptr(leaf, path->slots[0],
3002 struct btrfs_chunk);
3003 chunk_type = btrfs_chunk_type(leaf, chunk);
3004 btrfs_release_path(path);
3006 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3007 ret = btrfs_relocate_chunk(chunk_root,
3008 found_key.offset);
3009 if (ret == -ENOSPC)
3010 failed++;
3011 else
3012 BUG_ON(ret);
3014 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
3016 if (found_key.offset == 0)
3017 break;
3018 key.offset = found_key.offset - 1;
3020 ret = 0;
3021 if (failed && !retried) {
3022 failed = 0;
3023 retried = true;
3024 goto again;
3025 } else if (WARN_ON(failed && retried)) {
3026 ret = -ENOSPC;
3028 error:
3029 btrfs_free_path(path);
3030 return ret;
3031 }
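/*
 * Persist the balance control item (BTRFS_BALANCE_OBJECTID) so that an
 * interrupted balance can be resumed, see btrfs_recover_balance().
 */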
3033 static int insert_balance_item(struct btrfs_root *root,
3034 struct btrfs_balance_control *bctl)
3036 struct btrfs_trans_handle *trans;
3037 struct btrfs_balance_item *item;
3038 struct btrfs_disk_balance_args disk_bargs;
3039 struct btrfs_path *path;
3040 struct extent_buffer *leaf;
3041 struct btrfs_key key;
3042 int ret, err;
3044 path = btrfs_alloc_path();
3045 if (!path)
3046 return -ENOMEM;
3048 trans = btrfs_start_transaction(root, 0);
3049 if (IS_ERR(trans)) {
3050 btrfs_free_path(path);
3051 return PTR_ERR(trans);
3054 key.objectid = BTRFS_BALANCE_OBJECTID;
3055 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3056 key.offset = 0;
3058 ret = btrfs_insert_empty_item(trans, root, path, &key,
3059 sizeof(*item));
3060 if (ret)
3061 goto out;
3063 leaf = path->nodes[0];
3064 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3066 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
3068 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3069 btrfs_set_balance_data(leaf, item, &disk_bargs);
3070 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3071 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3072 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3073 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3075 btrfs_set_balance_flags(leaf, item, bctl->flags);
3077 btrfs_mark_buffer_dirty(leaf);
3078 out:
3079 btrfs_free_path(path);
3080 err = btrfs_commit_transaction(trans, root);
3081 if (err && !ret)
3082 ret = err;
3083 return ret;
3086 static int del_balance_item(struct btrfs_root *root)
3088 struct btrfs_trans_handle *trans;
3089 struct btrfs_path *path;
3090 struct btrfs_key key;
3091 int ret, err;
3093 path = btrfs_alloc_path();
3094 if (!path)
3095 return -ENOMEM;
3097 trans = btrfs_start_transaction(root, 0);
3098 if (IS_ERR(trans)) {
3099 btrfs_free_path(path);
3100 return PTR_ERR(trans);
3103 key.objectid = BTRFS_BALANCE_OBJECTID;
3104 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3105 key.offset = 0;
3107 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3108 if (ret < 0)
3109 goto out;
3110 if (ret > 0) {
3111 ret = -ENOENT;
3112 goto out;
3115 ret = btrfs_del_item(trans, root, path);
3116 out:
3117 btrfs_free_path(path);
3118 err = btrfs_commit_transaction(trans, root);
3119 if (err && !ret)
3120 ret = err;
3121 return ret;
3122 }
3124 /*
3125  * This is a heuristic used to reduce the number of chunks balanced on
3126  * resume after balance was interrupted.
3127  */
3128 static void update_balance_args(struct btrfs_balance_control *bctl)
3129 {
3130 /*
3131  * Turn on soft mode for chunk types that were being converted.
3132  */
3133 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3134 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3135 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3136 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3137 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3138 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3140 /*
3141  * Turn on the usage filter if it is not already used. The idea is
3142  * that chunks that we have already balanced should be
3143  * reasonably full. Don't do it for chunks that are being
3144  * converted - that will keep us from relocating unconverted
3145  * (albeit full) chunks.
3146  */
3147 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3148 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3149 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3150 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3151 bctl->data.usage = 90;
3153 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3154 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3155 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3156 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3157 bctl->sys.usage = 90;
3159 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3160 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3161 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3162 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3163 bctl->meta.usage = 90;
3164 }
3165 }
3167 /*
3168  * Should be called with both balance and volume mutexes held to
3169  * serialize other volume operations (add_dev/rm_dev/resize) with
3170  * restriper. Same goes for unset_balance_control.
3171  */
3172 static void set_balance_control(struct btrfs_balance_control *bctl)
3174 struct btrfs_fs_info *fs_info = bctl->fs_info;
3176 BUG_ON(fs_info->balance_ctl);
3178 spin_lock(&fs_info->balance_lock);
3179 fs_info->balance_ctl = bctl;
3180 spin_unlock(&fs_info->balance_lock);
3183 static void unset_balance_control(struct btrfs_fs_info *fs_info)
3185 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3187 BUG_ON(!fs_info->balance_ctl);
3189 spin_lock(&fs_info->balance_lock);
3190 fs_info->balance_ctl = NULL;
3191 spin_unlock(&fs_info->balance_lock);
3193 kfree(bctl);
3194 }
3196 /*
3197  * Balance filters. Return 1 if chunk should be filtered out
3198  * (should not be balanced).
3199  */
3200 static int chunk_profiles_filter(u64 chunk_type,
3201 struct btrfs_balance_args *bargs)
3203 chunk_type = chunk_to_extended(chunk_type) &
3204 BTRFS_EXTENDED_PROFILE_MASK;
3206 if (bargs->profiles & chunk_type)
3207 return 0;
3209 return 1;
3210 }
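/*
 * Usage range filter: return 0 (balance the chunk) when its used bytes
 * fall within [usage_min, usage_max) percent of the chunk size.
 */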
3212 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3213 struct btrfs_balance_args *bargs)
3215 struct btrfs_block_group_cache *cache;
3216 u64 chunk_used;
3217 u64 user_thresh_min;
3218 u64 user_thresh_max;
3219 int ret = 1;
3221 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3222 chunk_used = btrfs_block_group_used(&cache->item);
3224 if (bargs->usage_min == 0)
3225 user_thresh_min = 0;
3226 else
3227 user_thresh_min = div_factor_fine(cache->key.offset,
3228 bargs->usage_min);
3230 if (bargs->usage_max == 0)
3231 user_thresh_max = 1;
3232 else if (bargs->usage_max > 100)
3233 user_thresh_max = cache->key.offset;
3234 else
3235 user_thresh_max = div_factor_fine(cache->key.offset,
3236 bargs->usage_max);
3238 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3239 ret = 0;
3241 btrfs_put_block_group(cache);
3242 return ret;
3245 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3246 u64 chunk_offset, struct btrfs_balance_args *bargs)
3248 struct btrfs_block_group_cache *cache;
3249 u64 chunk_used, user_thresh;
3250 int ret = 1;
3252 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3253 chunk_used = btrfs_block_group_used(&cache->item);
3255 if (bargs->usage_min == 0)
3256 user_thresh = 1;
3257 else if (bargs->usage > 100)
3258 user_thresh = cache->key.offset;
3259 else
3260 user_thresh = div_factor_fine(cache->key.offset,
3261 bargs->usage);
3263 if (chunk_used < user_thresh)
3264 ret = 0;
3266 btrfs_put_block_group(cache);
3267 return ret;
3270 static int chunk_devid_filter(struct extent_buffer *leaf,
3271 struct btrfs_chunk *chunk,
3272 struct btrfs_balance_args *bargs)
3274 struct btrfs_stripe *stripe;
3275 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3276 int i;
3278 for (i = 0; i < num_stripes; i++) {
3279 stripe = btrfs_stripe_nr(chunk, i);
3280 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3281 return 0;
3284 return 1;
3287 /* [pstart, pend) */
3288 static int chunk_drange_filter(struct extent_buffer *leaf,
3289 struct btrfs_chunk *chunk,
3290 u64 chunk_offset,
3291 struct btrfs_balance_args *bargs)
3293 struct btrfs_stripe *stripe;
3294 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3295 u64 stripe_offset;
3296 u64 stripe_length;
3297 int factor;
3298 int i;
3300 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3301 return 0;
3303 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3304 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3305 factor = num_stripes / 2;
3306 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3307 factor = num_stripes - 1;
3308 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3309 factor = num_stripes - 2;
3310 } else {
3311 factor = num_stripes;
3314 for (i = 0; i < num_stripes; i++) {
3315 stripe = btrfs_stripe_nr(chunk, i);
3316 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3317 continue;
3319 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3320 stripe_length = btrfs_chunk_length(leaf, chunk);
3321 stripe_length = div_u64(stripe_length, factor);
3323 if (stripe_offset < bargs->pend &&
3324 stripe_offset + stripe_length > bargs->pstart)
3325 return 0;
3328 return 1;
3331 /* [vstart, vend) */
3332 static int chunk_vrange_filter(struct extent_buffer *leaf,
3333 struct btrfs_chunk *chunk,
3334 u64 chunk_offset,
3335 struct btrfs_balance_args *bargs)
3337 if (chunk_offset < bargs->vend &&
3338 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3339 /* at least part of the chunk is inside this vrange */
3340 return 0;
3342 return 1;
3345 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3346 struct btrfs_chunk *chunk,
3347 struct btrfs_balance_args *bargs)
3349 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3351 if (bargs->stripes_min <= num_stripes
3352 && num_stripes <= bargs->stripes_max)
3353 return 0;
3355 return 1;
3358 static int chunk_soft_convert_filter(u64 chunk_type,
3359 struct btrfs_balance_args *bargs)
3361 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3362 return 0;
3364 chunk_type = chunk_to_extended(chunk_type) &
3365 BTRFS_EXTENDED_PROFILE_MASK;
3367 if (bargs->target == chunk_type)
3368 return 1;
3370 return 0;
3371 }
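/*
 * Run a chunk through all configured balance filters: return 1 if the
 * chunk should be relocated, 0 if some filter excludes it.
 */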
3373 static int should_balance_chunk(struct btrfs_root *root,
3374 struct extent_buffer *leaf,
3375 struct btrfs_chunk *chunk, u64 chunk_offset)
3377 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3378 struct btrfs_balance_args *bargs = NULL;
3379 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3381 /* type filter */
3382 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3383 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3384 return 0;
3387 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3388 bargs = &bctl->data;
3389 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3390 bargs = &bctl->sys;
3391 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3392 bargs = &bctl->meta;
3394 /* profiles filter */
3395 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3396 chunk_profiles_filter(chunk_type, bargs)) {
3397 return 0;
3400 /* usage filter */
3401 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3402 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3403 return 0;
3404 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3405 chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
3406 return 0;
3409 /* devid filter */
3410 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3411 chunk_devid_filter(leaf, chunk, bargs)) {
3412 return 0;
3415 /* drange filter, makes sense only with devid filter */
3416 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3417 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3418 return 0;
3421 /* vrange filter */
3422 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3423 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3424 return 0;
3427 /* stripes filter */
3428 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3429 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3430 return 0;
3433 /* soft profile changing mode */
3434 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3435 chunk_soft_convert_filter(chunk_type, bargs)) {
3436 return 0;
3437 }
3439 /*
3440  * limited by count, must be the last filter
3441  */
3442 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3443 if (bargs->limit == 0)
3444 return 0;
3445 else
3446 bargs->limit--;
3447 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3448 /*
3449  * Same logic as the 'limit' filter; the minimum cannot be
3450  * determined here because we do not have the global information
3451  * about the count of all chunks that satisfy the filters.
3452  */
3453 if (bargs->limit_max == 0)
3454 return 0;
3455 else
3456 bargs->limit_max--;
3459 return 1;
3460 }
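/*
 * The main balance loop: first make some room on all devices, then walk
 * the chunk tree twice - a counting pass that only gathers statistics
 * and a second pass that relocates every chunk passing the filters.
 */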
3462 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3464 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3465 struct btrfs_root *chunk_root = fs_info->chunk_root;
3466 struct btrfs_root *dev_root = fs_info->dev_root;
3467 struct list_head *devices;
3468 struct btrfs_device *device;
3469 u64 old_size;
3470 u64 size_to_free;
3471 u64 chunk_type;
3472 struct btrfs_chunk *chunk;
3473 struct btrfs_path *path = NULL;
3474 struct btrfs_key key;
3475 struct btrfs_key found_key;
3476 struct btrfs_trans_handle *trans;
3477 struct extent_buffer *leaf;
3478 int slot;
3479 int ret;
3480 int enospc_errors = 0;
3481 bool counting = true;
3482 /* The single value limit and min/max limits use the same bytes in the balance args union, so save the originals before the counting pass. */
3483 u64 limit_data = bctl->data.limit;
3484 u64 limit_meta = bctl->meta.limit;
3485 u64 limit_sys = bctl->sys.limit;
3486 u32 count_data = 0;
3487 u32 count_meta = 0;
3488 u32 count_sys = 0;
3489 int chunk_reserved = 0;
3490 u64 bytes_used = 0;
3492 /* step one, make some room on all the devices */
3493 devices = &fs_info->fs_devices->devices;
3494 list_for_each_entry(device, devices, dev_list) {
3495 old_size = btrfs_device_get_total_bytes(device);
3496 size_to_free = div_factor(old_size, 1);
3497 size_to_free = min_t(u64, size_to_free, SZ_1M);
3498 if (!device->writeable ||
3499 btrfs_device_get_total_bytes(device) -
3500 btrfs_device_get_bytes_used(device) > size_to_free ||
3501 device->is_tgtdev_for_dev_replace)
3502 continue;
3504 ret = btrfs_shrink_device(device, old_size - size_to_free);
3505 if (ret == -ENOSPC)
3506 break;
3507 if (ret) {
3508 /* btrfs_shrink_device never returns ret > 0 */
3509 WARN_ON(ret > 0);
3510 goto error;
3513 trans = btrfs_start_transaction(dev_root, 0);
3514 if (IS_ERR(trans)) {
3515 ret = PTR_ERR(trans);
3516 btrfs_info_in_rcu(fs_info,
3517 "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
3518 rcu_str_deref(device->name), ret,
3519 old_size, old_size - size_to_free);
3520 goto error;
3523 ret = btrfs_grow_device(trans, device, old_size);
3524 if (ret) {
3525 btrfs_end_transaction(trans, dev_root);
3526 /* btrfs_grow_device never returns ret > 0 */
3527 WARN_ON(ret > 0);
3528 btrfs_info_in_rcu(fs_info,
3529 "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
3530 rcu_str_deref(device->name), ret,
3531 old_size, old_size - size_to_free);
3532 goto error;
3535 btrfs_end_transaction(trans, dev_root);
3538 /* step two, relocate all the chunks */
3539 path = btrfs_alloc_path();
3540 if (!path) {
3541 ret = -ENOMEM;
3542 goto error;
3545 /* zero out stat counters */
3546 spin_lock(&fs_info->balance_lock);
3547 memset(&bctl->stat, 0, sizeof(bctl->stat));
3548 spin_unlock(&fs_info->balance_lock);
3549 again:
3550 if (!counting) {
3551 /*
3552  * The single value limit and min/max limits use the same bytes
3553  * in the balance args union; restore the values saved above.
3554  */
3555 bctl->data.limit = limit_data;
3556 bctl->meta.limit = limit_meta;
3557 bctl->sys.limit = limit_sys;
3559 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3560 key.offset = (u64)-1;
3561 key.type = BTRFS_CHUNK_ITEM_KEY;
3563 while (1) {
3564 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3565 atomic_read(&fs_info->balance_cancel_req)) {
3566 ret = -ECANCELED;
3567 goto error;
3570 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3571 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3572 if (ret < 0) {
3573 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3574 goto error;
3575 }
3577 /*
3578  * this shouldn't happen, it means the last relocate
3579  * failed
3580  */
3581 if (ret == 0)
3582 BUG(); /* FIXME break ? */
3584 ret = btrfs_previous_item(chunk_root, path, 0,
3585 BTRFS_CHUNK_ITEM_KEY);
3586 if (ret) {
3587 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3588 ret = 0;
3589 break;
3592 leaf = path->nodes[0];
3593 slot = path->slots[0];
3594 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3596 if (found_key.objectid != key.objectid) {
3597 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3598 break;
3601 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3602 chunk_type = btrfs_chunk_type(leaf, chunk);
3604 if (!counting) {
3605 spin_lock(&fs_info->balance_lock);
3606 bctl->stat.considered++;
3607 spin_unlock(&fs_info->balance_lock);
3610 ret = should_balance_chunk(chunk_root, leaf, chunk,
3611 found_key.offset);
3613 btrfs_release_path(path);
3614 if (!ret) {
3615 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3616 goto loop;
3619 if (counting) {
3620 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3621 spin_lock(&fs_info->balance_lock);
3622 bctl->stat.expected++;
3623 spin_unlock(&fs_info->balance_lock);
3625 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3626 count_data++;
3627 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3628 count_sys++;
3629 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3630 count_meta++;
3632 goto loop;
3633 }
3635 /*
3636  * Apply limit_min filter, no need to check if the LIMITS
3637  * filter is used, limit_min is 0 by default.
3638  */
3639 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3640 count_data < bctl->data.limit_min)
3641 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3642 count_meta < bctl->meta.limit_min)
3643 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3644 count_sys < bctl->sys.limit_min)) {
3645 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3646 goto loop;
3649 ASSERT(fs_info->data_sinfo);
3650 spin_lock(&fs_info->data_sinfo->lock);
3651 bytes_used = fs_info->data_sinfo->bytes_used;
3652 spin_unlock(&fs_info->data_sinfo->lock);
3654 if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3655 !chunk_reserved && !bytes_used) {
3656 trans = btrfs_start_transaction(chunk_root, 0);
3657 if (IS_ERR(trans)) {
3658 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3659 ret = PTR_ERR(trans);
3660 goto error;
3663 ret = btrfs_force_chunk_alloc(trans, chunk_root,
3664 BTRFS_BLOCK_GROUP_DATA);
3665 btrfs_end_transaction(trans, chunk_root);
3666 if (ret < 0) {
3667 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3668 goto error;
3670 chunk_reserved = 1;
3673 ret = btrfs_relocate_chunk(chunk_root,
3674 found_key.offset);
3675 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3676 if (ret && ret != -ENOSPC)
3677 goto error;
3678 if (ret == -ENOSPC) {
3679 enospc_errors++;
3680 } else {
3681 spin_lock(&fs_info->balance_lock);
3682 bctl->stat.completed++;
3683 spin_unlock(&fs_info->balance_lock);
3685 loop:
3686 if (found_key.offset == 0)
3687 break;
3688 key.offset = found_key.offset - 1;
3691 if (counting) {
3692 btrfs_release_path(path);
3693 counting = false;
3694 goto again;
3696 error:
3697 btrfs_free_path(path);
3698 if (enospc_errors) {
3699 btrfs_info(fs_info, "%d enospc errors during balance",
3700 enospc_errors);
3701 if (!ret)
3702 ret = -ENOSPC;
3705 return ret;
3706 }
3708 /*
3709  * alloc_profile_is_valid - see if a given profile is valid and reduced
3710  * @flags: profile to validate
3711  * @extended: if true @flags is treated as an extended profile
3712  */
3713 static int alloc_profile_is_valid(u64 flags, int extended)
3715 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3716 BTRFS_BLOCK_GROUP_PROFILE_MASK);
3718 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3720 /* 1) check that all other bits are zeroed */
3721 if (flags & ~mask)
3722 return 0;
3724 /* 2) see if profile is reduced */
3725 if (flags == 0)
3726 return !extended; /* "0" is valid for usual profiles */
3728 /* true if exactly one bit set */
3729 return (flags & (flags - 1)) == 0;
3732 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3734 /* cancel requested || normal exit path */
3735 return atomic_read(&fs_info->balance_cancel_req) ||
3736 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3737 atomic_read(&fs_info->balance_cancel_req) == 0);
3740 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3742 int ret;
3744 unset_balance_control(fs_info);
3745 ret = del_balance_item(fs_info->tree_root);
3746 if (ret)
3747 btrfs_handle_fs_error(fs_info, ret, NULL);
3749 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3752 /* Non-zero return value signifies invalidity */
3753 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3754 u64 allowed)
3756 return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3757 (!alloc_profile_is_valid(bctl_arg->target, 1) ||
3758 (bctl_arg->target & ~allowed)));
3759 }
3761 /*
3762  * Should be called with both balance and volume mutexes held.
3763  */
3764 int btrfs_balance(struct btrfs_balance_control *bctl,
3765 struct btrfs_ioctl_balance_args *bargs)
3767 struct btrfs_fs_info *fs_info = bctl->fs_info;
3768 u64 meta_target, data_target;
3769 u64 allowed;
3770 int mixed = 0;
3771 int ret;
3772 u64 num_devices;
3773 unsigned seq;
3775 if (btrfs_fs_closing(fs_info) ||
3776 atomic_read(&fs_info->balance_pause_req) ||
3777 atomic_read(&fs_info->balance_cancel_req)) {
3778 ret = -EINVAL;
3779 goto out;
3782 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3783 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3784 mixed = 1;
3786 /*
3787  * In case of mixed groups both data and meta should be picked,
3788  * and identical options should be given for both of them.
3789  */
3790 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3791 if (mixed && (bctl->flags & allowed)) {
3792 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3793 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3794 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3795 btrfs_err(fs_info,
3796 "with mixed groups data and metadata balance options must be the same");
3797 ret = -EINVAL;
3798 goto out;
3802 num_devices = fs_info->fs_devices->num_devices;
3803 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3804 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3805 BUG_ON(num_devices < 1);
3806 num_devices--;
3808 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3809 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
3810 if (num_devices > 1)
3811 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3812 if (num_devices > 2)
3813 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3814 if (num_devices > 3)
3815 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3816 BTRFS_BLOCK_GROUP_RAID6);
3817 if (validate_convert_profile(&bctl->data, allowed)) {
3818 btrfs_err(fs_info,
3819 "unable to start balance with target data profile %llu",
3820 bctl->data.target);
3821 ret = -EINVAL;
3822 goto out;
3824 if (validate_convert_profile(&bctl->meta, allowed)) {
3825 btrfs_err(fs_info,
3826 "unable to start balance with target metadata profile %llu",
3827 bctl->meta.target);
3828 ret = -EINVAL;
3829 goto out;
3831 if (validate_convert_profile(&bctl->sys, allowed)) {
3832 btrfs_err(fs_info,
3833 "unable to start balance with target system profile %llu",
3834 bctl->sys.target);
3835 ret = -EINVAL;
3836 goto out;
3839 /* Allow reducing meta or sys integrity only if force is set. */
3840 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3841 BTRFS_BLOCK_GROUP_RAID10 |
3842 BTRFS_BLOCK_GROUP_RAID5 |
3843 BTRFS_BLOCK_GROUP_RAID6;
3844 do {
3845 seq = read_seqbegin(&fs_info->profiles_lock);
3847 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3848 (fs_info->avail_system_alloc_bits & allowed) &&
3849 !(bctl->sys.target & allowed)) ||
3850 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3851 (fs_info->avail_metadata_alloc_bits & allowed) &&
3852 !(bctl->meta.target & allowed))) {
3853 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3854 btrfs_info(fs_info,
3855 "force reducing metadata integrity");
3856 } else {
3857 btrfs_err(fs_info,
3858 "balance will reduce metadata integrity, use force if you want this");
3859 ret = -EINVAL;
3860 goto out;
3863 } while (read_seqretry(&fs_info->profiles_lock, seq));
3865 /* if we're not converting, the target field is uninitialized */
3866 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3867 bctl->meta.target : fs_info->avail_metadata_alloc_bits;
3868 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3869 bctl->data.target : fs_info->avail_data_alloc_bits;
3870 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
3871 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
3872 btrfs_warn(fs_info,
3873 "metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
3874 meta_target, data_target);
3877 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3878 fs_info->num_tolerated_disk_barrier_failures = min(
3879 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
3880 btrfs_get_num_tolerated_disk_barrier_failures(
3881 bctl->sys.target));
3884 ret = insert_balance_item(fs_info->tree_root, bctl);
3885 if (ret && ret != -EEXIST)
3886 goto out;
3888 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3889 BUG_ON(ret == -EEXIST);
3890 set_balance_control(bctl);
3891 } else {
3892 BUG_ON(ret != -EEXIST);
3893 spin_lock(&fs_info->balance_lock);
3894 update_balance_args(bctl);
3895 spin_unlock(&fs_info->balance_lock);
3898 atomic_inc(&fs_info->balance_running);
3899 mutex_unlock(&fs_info->balance_mutex);
3901 ret = __btrfs_balance(fs_info);
3903 mutex_lock(&fs_info->balance_mutex);
3904 atomic_dec(&fs_info->balance_running);
3906 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3907 fs_info->num_tolerated_disk_barrier_failures =
3908 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3911 if (bargs) {
3912 memset(bargs, 0, sizeof(*bargs));
3913 update_ioctl_balance_args(fs_info, 0, bargs);
3916 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3917 balance_need_close(fs_info)) {
3918 __cancel_balance(fs_info);
3921 wake_up(&fs_info->balance_wait_q);
3923 return ret;
3924 out:
3925 if (bctl->flags & BTRFS_BALANCE_RESUME)
3926 __cancel_balance(fs_info);
3927 else {
3928 kfree(bctl);
3929 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3931 return ret;
3934 static int balance_kthread(void *data)
3936 struct btrfs_fs_info *fs_info = data;
3937 int ret = 0;
3939 mutex_lock(&fs_info->volume_mutex);
3940 mutex_lock(&fs_info->balance_mutex);
3942 if (fs_info->balance_ctl) {
3943 btrfs_info(fs_info, "continuing balance");
3944 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3947 mutex_unlock(&fs_info->balance_mutex);
3948 mutex_unlock(&fs_info->volume_mutex);
3950 return ret;
3953 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3955 struct task_struct *tsk;
3957 spin_lock(&fs_info->balance_lock);
3958 if (!fs_info->balance_ctl) {
3959 spin_unlock(&fs_info->balance_lock);
3960 return 0;
3962 spin_unlock(&fs_info->balance_lock);
3964 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
3965 btrfs_info(fs_info, "force skipping balance");
3966 return 0;
3967 }
3969 /*
3970  * A ro->rw remount sequence should continue with the paused balance
3971  * regardless of who paused it (the system or, as of now, the user),
3972  * so set the resume flag.
3973  */
3974 spin_lock(&fs_info->balance_lock);
3975 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
3976 spin_unlock(&fs_info->balance_lock);
3978 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3979 return PTR_ERR_OR_ZERO(tsk);
3982 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3984 struct btrfs_balance_control *bctl;
3985 struct btrfs_balance_item *item;
3986 struct btrfs_disk_balance_args disk_bargs;
3987 struct btrfs_path *path;
3988 struct extent_buffer *leaf;
3989 struct btrfs_key key;
3990 int ret;
3992 path = btrfs_alloc_path();
3993 if (!path)
3994 return -ENOMEM;
3996 key.objectid = BTRFS_BALANCE_OBJECTID;
3997 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3998 key.offset = 0;
4000 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4001 if (ret < 0)
4002 goto out;
4003 if (ret > 0) { /* ret = -ENOENT; */
4004 ret = 0;
4005 goto out;
4008 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4009 if (!bctl) {
4010 ret = -ENOMEM;
4011 goto out;
4014 leaf = path->nodes[0];
4015 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4017 bctl->fs_info = fs_info;
4018 bctl->flags = btrfs_balance_flags(leaf, item);
4019 bctl->flags |= BTRFS_BALANCE_RESUME;
4021 btrfs_balance_data(leaf, item, &disk_bargs);
4022 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4023 btrfs_balance_meta(leaf, item, &disk_bargs);
4024 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4025 btrfs_balance_sys(leaf, item, &disk_bargs);
4026 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4028 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
4030 mutex_lock(&fs_info->volume_mutex);
4031 mutex_lock(&fs_info->balance_mutex);
4033 set_balance_control(bctl);
4035 mutex_unlock(&fs_info->balance_mutex);
4036 mutex_unlock(&fs_info->volume_mutex);
4037 out:
4038 btrfs_free_path(path);
4039 return ret;
4042 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4044 int ret = 0;
4046 mutex_lock(&fs_info->balance_mutex);
4047 if (!fs_info->balance_ctl) {
4048 mutex_unlock(&fs_info->balance_mutex);
4049 return -ENOTCONN;
4052 if (atomic_read(&fs_info->balance_running)) {
4053 atomic_inc(&fs_info->balance_pause_req);
4054 mutex_unlock(&fs_info->balance_mutex);
4056 wait_event(fs_info->balance_wait_q,
4057 atomic_read(&fs_info->balance_running) == 0);
4059 mutex_lock(&fs_info->balance_mutex);
4060 /* we are good with balance_ctl ripped off from under us */
4061 BUG_ON(atomic_read(&fs_info->balance_running));
4062 atomic_dec(&fs_info->balance_pause_req);
4063 } else {
4064 ret = -ENOTCONN;
4067 mutex_unlock(&fs_info->balance_mutex);
4068 return ret;
4071 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4073 if (fs_info->sb->s_flags & MS_RDONLY)
4074 return -EROFS;
4076 mutex_lock(&fs_info->balance_mutex);
4077 if (!fs_info->balance_ctl) {
4078 mutex_unlock(&fs_info->balance_mutex);
4079 return -ENOTCONN;
4082 atomic_inc(&fs_info->balance_cancel_req);
4083 /*
4084  * If a balance is running, just wait and return; the balance item
4085  * is deleted in btrfs_balance in that case.
4086  */
4087 if (atomic_read(&fs_info->balance_running)) {
4088 mutex_unlock(&fs_info->balance_mutex);
4089 wait_event(fs_info->balance_wait_q,
4090 atomic_read(&fs_info->balance_running) == 0);
4091 mutex_lock(&fs_info->balance_mutex);
4092 } else {
4093 /* __cancel_balance needs volume_mutex */
4094 mutex_unlock(&fs_info->balance_mutex);
4095 mutex_lock(&fs_info->volume_mutex);
4096 mutex_lock(&fs_info->balance_mutex);
4098 if (fs_info->balance_ctl)
4099 __cancel_balance(fs_info);
4101 mutex_unlock(&fs_info->volume_mutex);
4104 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
4105 atomic_dec(&fs_info->balance_cancel_req);
4106 mutex_unlock(&fs_info->balance_mutex);
4107 return 0;
4108 }
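/*
 * Kthread that scans all root items and adds any missing subvolume uuid
 * and received_uuid entries to the uuid tree.
 */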
4110 static int btrfs_uuid_scan_kthread(void *data)
4112 struct btrfs_fs_info *fs_info = data;
4113 struct btrfs_root *root = fs_info->tree_root;
4114 struct btrfs_key key;
4115 struct btrfs_key max_key;
4116 struct btrfs_path *path = NULL;
4117 int ret = 0;
4118 struct extent_buffer *eb;
4119 int slot;
4120 struct btrfs_root_item root_item;
4121 u32 item_size;
4122 struct btrfs_trans_handle *trans = NULL;
4124 path = btrfs_alloc_path();
4125 if (!path) {
4126 ret = -ENOMEM;
4127 goto out;
4130 key.objectid = 0;
4131 key.type = BTRFS_ROOT_ITEM_KEY;
4132 key.offset = 0;
4134 max_key.objectid = (u64)-1;
4135 max_key.type = BTRFS_ROOT_ITEM_KEY;
4136 max_key.offset = (u64)-1;
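/*
 * Walk every ROOT_ITEM key forward from objectid 0.  For each subvolume
 * root that carries a uuid or received_uuid, make sure a matching entry
 * exists in the uuid tree.
 */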
4138 while (1) {
4139 ret = btrfs_search_forward(root, &key, path, 0);
4140 if (ret) {
4141 if (ret > 0)
4142 ret = 0;
4143 break;
4146 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4147 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4148 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4149 key.objectid > BTRFS_LAST_FREE_OBJECTID)
4150 goto skip;
4152 eb = path->nodes[0];
4153 slot = path->slots[0];
4154 item_size = btrfs_item_size_nr(eb, slot);
4155 if (item_size < sizeof(root_item))
4156 goto skip;
4158 read_extent_buffer(eb, &root_item,
4159 btrfs_item_ptr_offset(eb, slot),
4160 (int)sizeof(root_item));
4161 if (btrfs_root_refs(&root_item) == 0)
4162 goto skip;
4164 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4165 !btrfs_is_empty_uuid(root_item.received_uuid)) {
4166 if (trans)
4167 goto update_tree;
4169 btrfs_release_path(path);
4171 * 1 - subvol uuid item
4172 * 1 - received_subvol uuid item
4174 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4175 if (IS_ERR(trans)) {
4176 ret = PTR_ERR(trans);
4177 break;
4179 continue;
4180 } else {
4181 goto skip;
4183 update_tree:
4184 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4185 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4186 root_item.uuid,
4187 BTRFS_UUID_KEY_SUBVOL,
4188 key.objectid);
4189 if (ret < 0) {
4190 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4191 ret);
4192 break;
4196 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4197 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4198 root_item.received_uuid,
4199 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4200 key.objectid);
4201 if (ret < 0) {
4202 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4203 ret);
4204 break;
4208 skip:
4209 if (trans) {
4210 ret = btrfs_end_transaction(trans, fs_info->uuid_root);
4211 trans = NULL;
4212 if (ret)
4213 break;
4216 btrfs_release_path(path);
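/*
 * Advance to the next candidate key in (objectid, type, offset)
 * order; stop once the objectid can no longer be incremented.
 */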
4217 if (key.offset < (u64)-1) {
4218 key.offset++;
4219 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4220 key.offset = 0;
4221 key.type = BTRFS_ROOT_ITEM_KEY;
4222 } else if (key.objectid < (u64)-1) {
4223 key.offset = 0;
4224 key.type = BTRFS_ROOT_ITEM_KEY;
4225 key.objectid++;
4226 } else {
4227 break;
4229 cond_resched();
4232 out:
4233 btrfs_free_path(path);
4234 if (trans && !IS_ERR(trans))
4235 btrfs_end_transaction(trans, fs_info->uuid_root);
4236 if (ret)
4237 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4238 else
4239 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4240 up(&fs_info->uuid_tree_rescan_sem);
4241 return 0;
4245 * Callback for btrfs_uuid_tree_iterate().
4246 * returns:
4247	 *	0	if the check succeeded, i.e. the entry is not outdated.
4248 * < 0 if an error occurred.
4249 * > 0 if the check failed, which means the caller shall remove the entry.
4251 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4252 u8 *uuid, u8 type, u64 subid)
4254 struct btrfs_key key;
4255 int ret = 0;
4256 struct btrfs_root *subvol_root;
4258 if (type != BTRFS_UUID_KEY_SUBVOL &&
4259 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4260 goto out;
4262 key.objectid = subid;
4263 key.type = BTRFS_ROOT_ITEM_KEY;
4264 key.offset = (u64)-1;
4265 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4266 if (IS_ERR(subvol_root)) {
4267 ret = PTR_ERR(subvol_root);
4268 if (ret == -ENOENT)
4269 ret = 1;
4270 goto out;
4273 switch (type) {
4274 case BTRFS_UUID_KEY_SUBVOL:
4275 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4276 ret = 1;
4277 break;
4278 case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4279 if (memcmp(uuid, subvol_root->root_item.received_uuid,
4280 BTRFS_UUID_SIZE))
4281 ret = 1;
4282 break;
4285 out:
4286 return ret;
4289 static int btrfs_uuid_rescan_kthread(void *data)
4291 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4292 int ret;
4295	 * The first step is to iterate through the existing UUID tree and
4296	 * delete all entries that contain outdated data.
4297	 * The second step is to add all missing entries to the UUID tree.
4299 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4300 if (ret < 0) {
4301 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4302 up(&fs_info->uuid_tree_rescan_sem);
4303 return ret;
4305 return btrfs_uuid_scan_kthread(data);
4308 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4310 struct btrfs_trans_handle *trans;
4311 struct btrfs_root *tree_root = fs_info->tree_root;
4312 struct btrfs_root *uuid_root;
4313 struct task_struct *task;
4314 int ret;
4317 * 1 - root node
4318 * 1 - root item
4320 trans = btrfs_start_transaction(tree_root, 2);
4321 if (IS_ERR(trans))
4322 return PTR_ERR(trans);
4324 uuid_root = btrfs_create_tree(trans, fs_info,
4325 BTRFS_UUID_TREE_OBJECTID);
4326 if (IS_ERR(uuid_root)) {
4327 ret = PTR_ERR(uuid_root);
4328 btrfs_abort_transaction(trans, ret);
4329 btrfs_end_transaction(trans, tree_root);
4330 return ret;
4333 fs_info->uuid_root = uuid_root;
4335 ret = btrfs_commit_transaction(trans, tree_root);
4336 if (ret)
4337 return ret;
4339 down(&fs_info->uuid_tree_rescan_sem);
4340 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4341 if (IS_ERR(task)) {
4342		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4343 btrfs_warn(fs_info, "failed to start uuid_scan task");
4344 up(&fs_info->uuid_tree_rescan_sem);
4345 return PTR_ERR(task);
4348 return 0;
4351 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4353 struct task_struct *task;
4355 down(&fs_info->uuid_tree_rescan_sem);
4356 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4357 if (IS_ERR(task)) {
4358		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4359 btrfs_warn(fs_info, "failed to start uuid_rescan task");
4360 up(&fs_info->uuid_tree_rescan_sem);
4361 return PTR_ERR(task);
4364 return 0;
4368 * shrinking a device means finding all of the device extents past
4369 * the new size, and then following the back refs to the chunks.
4370	 * The chunk relocation code actually frees the device extent.
4372 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4374 struct btrfs_trans_handle *trans;
4375 struct btrfs_root *root = device->dev_root;
4376 struct btrfs_dev_extent *dev_extent = NULL;
4377 struct btrfs_path *path;
4378 u64 length;
4379 u64 chunk_offset;
4380 int ret;
4381 int slot;
4382 int failed = 0;
4383 bool retried = false;
4384 bool checked_pending_chunks = false;
4385 struct extent_buffer *l;
4386 struct btrfs_key key;
4387 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4388 u64 old_total = btrfs_super_total_bytes(super_copy);
4389 u64 old_size = btrfs_device_get_total_bytes(device);
4390 u64 diff = old_size - new_size;
4392 if (device->is_tgtdev_for_dev_replace)
4393 return -EINVAL;
4395 path = btrfs_alloc_path();
4396 if (!path)
4397 return -ENOMEM;
4399 path->reada = READA_FORWARD;
4401 lock_chunks(root);
4403 btrfs_device_set_total_bytes(device, new_size);
4404 if (device->writeable) {
4405 device->fs_devices->total_rw_bytes -= diff;
4406 spin_lock(&root->fs_info->free_chunk_lock);
4407 root->fs_info->free_chunk_space -= diff;
4408 spin_unlock(&root->fs_info->free_chunk_lock);
4410 unlock_chunks(root);
4412 again:
4413 key.objectid = device->devid;
4414 key.offset = (u64)-1;
4415 key.type = BTRFS_DEV_EXTENT_KEY;
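/*
 * Walk the device extents from the end of the device backwards,
 * relocating every chunk whose extent ends beyond new_size.
 */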
4417 do {
4418 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4419 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4420 if (ret < 0) {
4421 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4422 goto done;
4425 ret = btrfs_previous_item(root, path, 0, key.type);
4426 if (ret)
4427 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4428 if (ret < 0)
4429 goto done;
4430 if (ret) {
4431 ret = 0;
4432 btrfs_release_path(path);
4433 break;
4436 l = path->nodes[0];
4437 slot = path->slots[0];
4438 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4440 if (key.objectid != device->devid) {
4441 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4442 btrfs_release_path(path);
4443 break;
4446 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4447 length = btrfs_dev_extent_length(l, dev_extent);
4449 if (key.offset + length <= new_size) {
4450 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4451 btrfs_release_path(path);
4452 break;
4455 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4456 btrfs_release_path(path);
4458 ret = btrfs_relocate_chunk(root, chunk_offset);
4459 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4460 if (ret && ret != -ENOSPC)
4461 goto done;
4462 if (ret == -ENOSPC)
4463 failed++;
4464 } while (key.offset-- > 0);
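/*
 * Relocation can fail with -ENOSPC while space is still tied up in
 * chunks that a later iteration relocates, so the failed chunks get
 * one more full pass before we give up.
 */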
4466 if (failed && !retried) {
4467 failed = 0;
4468 retried = true;
4469 goto again;
4470 } else if (failed && retried) {
4471 ret = -ENOSPC;
4472 goto done;
4475 /* Shrinking succeeded, else we would be at "done". */
4476 trans = btrfs_start_transaction(root, 0);
4477 if (IS_ERR(trans)) {
4478 ret = PTR_ERR(trans);
4479 goto done;
4482 lock_chunks(root);
4485 * We checked in the above loop all device extents that were already in
4486 * the device tree. However before we have updated the device's
4487 * total_bytes to the new size, we might have had chunk allocations that
4488	 * have not completed yet (new block groups attached to transaction
4489 * handles), and therefore their device extents were not yet in the
4490 * device tree and we missed them in the loop above. So if we have any
4491 * pending chunk using a device extent that overlaps the device range
4492	 * that we cannot use anymore, commit the current transaction and
4493 * repeat the search on the device tree - this way we guarantee we will
4494 * not have chunks using device extents that end beyond 'new_size'.
4496 if (!checked_pending_chunks) {
4497 u64 start = new_size;
4498 u64 len = old_size - new_size;
4500 if (contains_pending_extent(trans->transaction, device,
4501 &start, len)) {
4502 unlock_chunks(root);
4503 checked_pending_chunks = true;
4504 failed = 0;
4505 retried = false;
4506 ret = btrfs_commit_transaction(trans, root);
4507 if (ret)
4508 goto done;
4509 goto again;
4513 btrfs_device_set_disk_total_bytes(device, new_size);
4514 if (list_empty(&device->resized_list))
4515 list_add_tail(&device->resized_list,
4516 &root->fs_info->fs_devices->resized_devices);
4518 WARN_ON(diff > old_total);
4519 btrfs_set_super_total_bytes(super_copy, old_total - diff);
4520 unlock_chunks(root);
4522 /* Now btrfs_update_device() will change the on-disk size. */
4523 ret = btrfs_update_device(trans, device);
4524 btrfs_end_transaction(trans, root);
4525 done:
4526 btrfs_free_path(path);
4527 if (ret) {
4528 lock_chunks(root);
4529 btrfs_device_set_total_bytes(device, old_size);
4530 if (device->writeable)
4531 device->fs_devices->total_rw_bytes += diff;
4532 spin_lock(&root->fs_info->free_chunk_lock);
4533 root->fs_info->free_chunk_space += diff;
4534 spin_unlock(&root->fs_info->free_chunk_lock);
4535 unlock_chunks(root);
4537 return ret;
4540 static int btrfs_add_system_chunk(struct btrfs_root *root,
4541 struct btrfs_key *key,
4542 struct btrfs_chunk *chunk, int item_size)
4544 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4545 struct btrfs_disk_key disk_key;
4546 u32 array_size;
4547 u8 *ptr;
4549 lock_chunks(root);
4550 array_size = btrfs_super_sys_array_size(super_copy);
4551 if (array_size + item_size + sizeof(disk_key)
4552 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4553 unlock_chunks(root);
4554 return -EFBIG;
4557 ptr = super_copy->sys_chunk_array + array_size;
4558 btrfs_cpu_key_to_disk(&disk_key, key);
4559 memcpy(ptr, &disk_key, sizeof(disk_key));
4560 ptr += sizeof(disk_key);
4561 memcpy(ptr, chunk, item_size);
4562 item_size += sizeof(disk_key);
4563 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4564 unlock_chunks(root);
4566 return 0;
4570 * sort the devices in descending order by max_avail, total_avail
4572 static int btrfs_cmp_device_info(const void *a, const void *b)
4574 const struct btrfs_device_info *di_a = a;
4575 const struct btrfs_device_info *di_b = b;
4577 if (di_a->max_avail > di_b->max_avail)
4578 return -1;
4579 if (di_a->max_avail < di_b->max_avail)
4580 return 1;
4581 if (di_a->total_avail > di_b->total_avail)
4582 return -1;
4583 if (di_a->total_avail < di_b->total_avail)
4584 return 1;
4585 return 0;
4588 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4590 /* TODO allow them to set a preferred stripe size */
4591 return SZ_64K;
4594 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4596 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4597 return;
4599 btrfs_set_fs_incompat(info, RAID56);
4602 #define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r) \
4603 - sizeof(struct btrfs_chunk)) \
4604 / sizeof(struct btrfs_stripe) + 1)
4606 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
4607 - 2 * sizeof(struct btrfs_disk_key) \
4608 - 2 * sizeof(struct btrfs_chunk)) \
4609 / sizeof(struct btrfs_stripe) + 1)
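/*
 * Both limits follow the same shape: take the space available for one
 * chunk item, subtract the fixed btrfs_chunk header (which already
 * embeds one stripe), divide the rest by the per-stripe size, and add
 * the embedded stripe back.  The SYS_CHUNK variant bounds what fits in
 * the superblock's sys_chunk_array, reserving room for two key+chunk
 * headers.
 */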
4611 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4612 struct btrfs_root *extent_root, u64 start,
4613 u64 type)
4615 struct btrfs_fs_info *info = extent_root->fs_info;
4616 struct btrfs_fs_devices *fs_devices = info->fs_devices;
4617 struct list_head *cur;
4618 struct map_lookup *map = NULL;
4619 struct extent_map_tree *em_tree;
4620 struct extent_map *em;
4621 struct btrfs_device_info *devices_info = NULL;
4622 u64 total_avail;
4623 int num_stripes; /* total number of stripes to allocate */
4624 int data_stripes; /* number of stripes that count for
4625 block group size */
4626 int sub_stripes; /* sub_stripes info for map */
4627 int dev_stripes; /* stripes per dev */
4628 int devs_max; /* max devs to use */
4629 int devs_min; /* min devs needed */
4630 int devs_increment; /* ndevs has to be a multiple of this */
4631	int ncopies;		/* how many copies the data has */
4632 int ret;
4633 u64 max_stripe_size;
4634 u64 max_chunk_size;
4635 u64 stripe_size;
4636 u64 num_bytes;
4637 u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4638 int ndevs;
4639 int i;
4640 int j;
4641 int index;
4643 BUG_ON(!alloc_profile_is_valid(type, 0));
4645 if (list_empty(&fs_devices->alloc_list))
4646 return -ENOSPC;
4648 index = __get_raid_index(type);
4650 sub_stripes = btrfs_raid_array[index].sub_stripes;
4651 dev_stripes = btrfs_raid_array[index].dev_stripes;
4652 devs_max = btrfs_raid_array[index].devs_max;
4653 devs_min = btrfs_raid_array[index].devs_min;
4654 devs_increment = btrfs_raid_array[index].devs_increment;
4655 ncopies = btrfs_raid_array[index].ncopies;
4657 if (type & BTRFS_BLOCK_GROUP_DATA) {
4658 max_stripe_size = SZ_1G;
4659 max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4660 if (!devs_max)
4661 devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4662 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4663 /* for larger filesystems, use larger metadata chunks */
4664 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4665 max_stripe_size = SZ_1G;
4666 else
4667 max_stripe_size = SZ_256M;
4668 max_chunk_size = max_stripe_size;
4669 if (!devs_max)
4670 devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4671 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4672 max_stripe_size = SZ_32M;
4673 max_chunk_size = 2 * max_stripe_size;
4674 if (!devs_max)
4675 devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4676 } else {
4677 btrfs_err(info, "invalid chunk type 0x%llx requested",
4678 type);
4679 BUG_ON(1);
4682 /* we don't want a chunk larger than 10% of writeable space */
4683 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4684 max_chunk_size);
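/* div_factor() scales by factor/10, so div_factor(x, 1) is the 10% cap above */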
4686 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4687 GFP_NOFS);
4688 if (!devices_info)
4689 return -ENOMEM;
4691 cur = fs_devices->alloc_list.next;
4694 * in the first pass through the devices list, we gather information
4695 * about the available holes on each device.
4697 ndevs = 0;
4698 while (cur != &fs_devices->alloc_list) {
4699 struct btrfs_device *device;
4700 u64 max_avail;
4701 u64 dev_offset;
4703 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4705 cur = cur->next;
4707 if (!device->writeable) {
4708 WARN(1, KERN_ERR
4709 "BTRFS: read-only device in alloc_list\n");
4710 continue;
4713 if (!device->in_fs_metadata ||
4714 device->is_tgtdev_for_dev_replace)
4715 continue;
4717 if (device->total_bytes > device->bytes_used)
4718 total_avail = device->total_bytes - device->bytes_used;
4719 else
4720 total_avail = 0;
4722 /* If there is no space on this device, skip it. */
4723 if (total_avail == 0)
4724 continue;
4726 ret = find_free_dev_extent(trans, device,
4727 max_stripe_size * dev_stripes,
4728 &dev_offset, &max_avail);
4729 if (ret && ret != -ENOSPC)
4730 goto error;
4732 if (ret == 0)
4733 max_avail = max_stripe_size * dev_stripes;
4735 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4736 continue;
4738 if (ndevs == fs_devices->rw_devices) {
4739 WARN(1, "%s: found more than %llu devices\n",
4740 __func__, fs_devices->rw_devices);
4741 break;
4743 devices_info[ndevs].dev_offset = dev_offset;
4744 devices_info[ndevs].max_avail = max_avail;
4745 devices_info[ndevs].total_avail = total_avail;
4746 devices_info[ndevs].dev = device;
4747 ++ndevs;
4751 * now sort the devices by hole size / available space
4753 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4754 btrfs_cmp_device_info, NULL);
4756 /* round down to number of usable stripes */
4757 ndevs -= ndevs % devs_increment;
4759 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4760 ret = -ENOSPC;
4761 goto error;
4764 if (devs_max && ndevs > devs_max)
4765 ndevs = devs_max;
4767 * The primary goal is to maximize the number of stripes, so use as
4768 * many devices as possible, even if the stripes are not maximum sized.
4770	 * The DUP profile stores more than one stripe per device; the
4771	 * max_avail is the total size, so we have to adjust.
4773 stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
4774 num_stripes = ndevs * dev_stripes;
4777 * this will have to be fixed for RAID1 and RAID10 over
4778 * more drives
4780 data_stripes = num_stripes / ncopies;
4782 if (type & BTRFS_BLOCK_GROUP_RAID5) {
4783 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4784 extent_root->stripesize);
4785 data_stripes = num_stripes - 1;
4787 if (type & BTRFS_BLOCK_GROUP_RAID6) {
4788 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4789 extent_root->stripesize);
4790 data_stripes = num_stripes - 2;
4794 * Use the number of data stripes to figure out how big this chunk
4795 * is really going to be in terms of logical address space,
4796 * and compare that answer with the max chunk size
4798 if (stripe_size * data_stripes > max_chunk_size) {
4799 u64 mask = (1ULL << 24) - 1;
4801 stripe_size = div_u64(max_chunk_size, data_stripes);
4803 /* bump the answer up to a 16MB boundary */
4804 stripe_size = (stripe_size + mask) & ~mask;
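/*
 * mask is (1 << 24) - 1, so (x + mask) & ~mask rounds x up to the
 * next 16MiB multiple, e.g. a 20MiB answer becomes 32MiB.
 */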
4806 /* but don't go higher than the limits we found
4807 * while searching for free extents
4809 if (stripe_size > devices_info[ndevs-1].max_avail)
4810 stripe_size = devices_info[ndevs-1].max_avail;
4813 /* align to BTRFS_STRIPE_LEN */
4814 stripe_size = div_u64(stripe_size, raid_stripe_len);
4815 stripe_size *= raid_stripe_len;
4817 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4818 if (!map) {
4819 ret = -ENOMEM;
4820 goto error;
4822 map->num_stripes = num_stripes;
4824 for (i = 0; i < ndevs; ++i) {
4825 for (j = 0; j < dev_stripes; ++j) {
4826 int s = i * dev_stripes + j;
4827 map->stripes[s].dev = devices_info[i].dev;
4828 map->stripes[s].physical = devices_info[i].dev_offset +
4829 j * stripe_size;
4832 map->sector_size = extent_root->sectorsize;
4833 map->stripe_len = raid_stripe_len;
4834 map->io_align = raid_stripe_len;
4835 map->io_width = raid_stripe_len;
4836 map->type = type;
4837 map->sub_stripes = sub_stripes;
4839 num_bytes = stripe_size * data_stripes;
4841 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4843 em = alloc_extent_map();
4844 if (!em) {
4845 kfree(map);
4846 ret = -ENOMEM;
4847 goto error;
4849 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4850 em->map_lookup = map;
4851 em->start = start;
4852 em->len = num_bytes;
4853 em->block_start = 0;
4854 em->block_len = em->len;
4855 em->orig_block_len = stripe_size;
4857 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4858 write_lock(&em_tree->lock);
4859 ret = add_extent_mapping(em_tree, em, 0);
4860 if (!ret) {
4861 list_add_tail(&em->list, &trans->transaction->pending_chunks);
4862 atomic_inc(&em->refs);
4864 write_unlock(&em_tree->lock);
4865 if (ret) {
4866 free_extent_map(em);
4867 goto error;
4870 ret = btrfs_make_block_group(trans, extent_root, 0, type,
4871 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4872 start, num_bytes);
4873 if (ret)
4874 goto error_del_extent;
4876 for (i = 0; i < map->num_stripes; i++) {
4877 num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4878 btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4881 spin_lock(&extent_root->fs_info->free_chunk_lock);
4882 extent_root->fs_info->free_chunk_space -= (stripe_size *
4883 map->num_stripes);
4884 spin_unlock(&extent_root->fs_info->free_chunk_lock);
4886 free_extent_map(em);
4887 check_raid56_incompat_flag(extent_root->fs_info, type);
4889 kfree(devices_info);
4890 return 0;
4892 error_del_extent:
4893 write_lock(&em_tree->lock);
4894 remove_extent_mapping(em_tree, em);
4895 write_unlock(&em_tree->lock);
4897 /* One for our allocation */
4898 free_extent_map(em);
4899 /* One for the tree reference */
4900 free_extent_map(em);
4901 /* One for the pending_chunks list reference */
4902 free_extent_map(em);
4903 error:
4904 kfree(devices_info);
4905 return ret;
4908 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4909 struct btrfs_root *extent_root,
4910 u64 chunk_offset, u64 chunk_size)
4912 struct btrfs_key key;
4913 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4914 struct btrfs_device *device;
4915 struct btrfs_chunk *chunk;
4916 struct btrfs_stripe *stripe;
4917 struct extent_map_tree *em_tree;
4918 struct extent_map *em;
4919 struct map_lookup *map;
4920 size_t item_size;
4921 u64 dev_offset;
4922 u64 stripe_size;
4923 int i = 0;
4924 int ret = 0;
4926 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4927 read_lock(&em_tree->lock);
4928 em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4929 read_unlock(&em_tree->lock);
4931 if (!em) {
4932 btrfs_crit(extent_root->fs_info,
4933 "unable to find logical %Lu len %Lu",
4934 chunk_offset, chunk_size);
4935 return -EINVAL;
4938 if (em->start != chunk_offset || em->len != chunk_size) {
4939 btrfs_crit(extent_root->fs_info,
4940 "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
4941 chunk_offset, chunk_size, em->start, em->len);
4942 free_extent_map(em);
4943 return -EINVAL;
4946 map = em->map_lookup;
4947 item_size = btrfs_chunk_item_size(map->num_stripes);
4948 stripe_size = em->orig_block_len;
4950 chunk = kzalloc(item_size, GFP_NOFS);
4951 if (!chunk) {
4952 ret = -ENOMEM;
4953 goto out;
4957 * Take the device list mutex to prevent races with the final phase of
4958 * a device replace operation that replaces the device object associated
4959 * with the map's stripes, because the device object's id can change
4960 * at any time during that final phase of the device replace operation
4961 * (dev-replace.c:btrfs_dev_replace_finishing()).
4963 mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4964 for (i = 0; i < map->num_stripes; i++) {
4965 device = map->stripes[i].dev;
4966 dev_offset = map->stripes[i].physical;
4968 ret = btrfs_update_device(trans, device);
4969 if (ret)
4970 break;
4971 ret = btrfs_alloc_dev_extent(trans, device,
4972 chunk_root->root_key.objectid,
4973 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4974 chunk_offset, dev_offset,
4975 stripe_size);
4976 if (ret)
4977 break;
4979 if (ret) {
4980 mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4981 goto out;
4984 stripe = &chunk->stripe;
4985 for (i = 0; i < map->num_stripes; i++) {
4986 device = map->stripes[i].dev;
4987 dev_offset = map->stripes[i].physical;
4989 btrfs_set_stack_stripe_devid(stripe, device->devid);
4990 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4991 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4992 stripe++;
4994 mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4996 btrfs_set_stack_chunk_length(chunk, chunk_size);
4997 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4998 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4999 btrfs_set_stack_chunk_type(chunk, map->type);
5000 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5001 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5002 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5003 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
5004 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5006 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5007 key.type = BTRFS_CHUNK_ITEM_KEY;
5008 key.offset = chunk_offset;
5010 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5011 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5013 * TODO: Cleanup of inserted chunk root in case of
5014 * failure.
5016 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
5017 item_size);
5020 out:
5021 kfree(chunk);
5022 free_extent_map(em);
5023 return ret;
5027 * Chunk allocation falls into two parts. The first part does the work
5028 * that makes the newly allocated chunk usable, but does not do any
5029 * operation that modifies the chunk tree. The second part does the work
5030 * that requires modifying the chunk tree. This division is important for
5031 * the bootstrap process of adding storage to a seed btrfs.
5033 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
5034 struct btrfs_root *extent_root, u64 type)
5036 u64 chunk_offset;
5038 ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
5039 chunk_offset = find_next_chunk(extent_root->fs_info);
5040 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
5043 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
5044 struct btrfs_root *root,
5045 struct btrfs_device *device)
5047 u64 chunk_offset;
5048 u64 sys_chunk_offset;
5049 u64 alloc_profile;
5050 struct btrfs_fs_info *fs_info = root->fs_info;
5051 struct btrfs_root *extent_root = fs_info->extent_root;
5052 int ret;
5054 chunk_offset = find_next_chunk(fs_info);
5055 alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
5056 ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
5057 alloc_profile);
5058 if (ret)
5059 return ret;
5061 sys_chunk_offset = find_next_chunk(root->fs_info);
5062 alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
5063 ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
5064 alloc_profile);
5065 return ret;
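/*
 * Number of stripe failures a chunk of the given type can tolerate
 * before writes to it must fail: one for RAID1/RAID10/RAID5/DUP, two
 * for RAID6, zero for the remaining profiles.
 */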
5068 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5070 int max_errors;
5072 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5073 BTRFS_BLOCK_GROUP_RAID10 |
5074 BTRFS_BLOCK_GROUP_RAID5 |
5075 BTRFS_BLOCK_GROUP_DUP)) {
5076 max_errors = 1;
5077 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5078 max_errors = 2;
5079 } else {
5080 max_errors = 0;
5083 return max_errors;
5086 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
5088 struct extent_map *em;
5089 struct map_lookup *map;
5090 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5091 int readonly = 0;
5092 int miss_ndevs = 0;
5093 int i;
5095 read_lock(&map_tree->map_tree.lock);
5096 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
5097 read_unlock(&map_tree->map_tree.lock);
5098 if (!em)
5099 return 1;
5101 map = em->map_lookup;
5102 for (i = 0; i < map->num_stripes; i++) {
5103 if (map->stripes[i].dev->missing) {
5104 miss_ndevs++;
5105 continue;
5108 if (!map->stripes[i].dev->writeable) {
5109 readonly = 1;
5110 goto end;
5115 * If the number of missing devices is larger than max errors,
5116	 * we cannot write the data into that chunk successfully, so
5117 * set it readonly.
5119 if (miss_ndevs > btrfs_chunk_max_errors(map))
5120 readonly = 1;
5121 end:
5122 free_extent_map(em);
5123 return readonly;
5126 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
5128 extent_map_tree_init(&tree->map_tree);
5131 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5133 struct extent_map *em;
5135 while (1) {
5136 write_lock(&tree->map_tree.lock);
5137 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5138 if (em)
5139 remove_extent_mapping(&tree->map_tree, em);
5140 write_unlock(&tree->map_tree.lock);
5141 if (!em)
5142 break;
5143 /* once for us */
5144 free_extent_map(em);
5145 /* once for the tree */
5146 free_extent_map(em);
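/*
 * Return how many copies exist of the blocks in [logical, logical + len).
 * While a device replace is running, one extra copy is reported, since
 * the target drive can serve reads as well.
 */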
5150 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5152 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5153 struct extent_map *em;
5154 struct map_lookup *map;
5155 struct extent_map_tree *em_tree = &map_tree->map_tree;
5156 int ret;
5158 read_lock(&em_tree->lock);
5159 em = lookup_extent_mapping(em_tree, logical, len);
5160 read_unlock(&em_tree->lock);
5163	 * We could return errors for these cases, but that could get ugly
5164	 * and we'd probably end up doing the same thing anyway: nothing else,
5165	 * just exit. So return 1 so the callers don't try to use other copies.
5167 if (!em) {
5168 btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
5169 logical+len);
5170 return 1;
5173 if (em->start > logical || em->start + em->len < logical) {
5174 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
5175 logical, logical+len, em->start,
5176 em->start + em->len);
5177 free_extent_map(em);
5178 return 1;
5181 map = em->map_lookup;
5182 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5183 ret = map->num_stripes;
5184 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5185 ret = map->sub_stripes;
5186 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5187 ret = 2;
5188 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5190		 * There could be two corrupted data stripes, so we need
5191		 * to retry in a loop in order to rebuild the correct data.
5193 * Fail a stripe at a time on every retry except the
5194 * stripe under reconstruction.
5196 ret = map->num_stripes;
5197 else
5198 ret = 1;
5199 free_extent_map(em);
5201 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
5202 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
5203 ret++;
5204 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
5206 return ret;
5209 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
5210 struct btrfs_mapping_tree *map_tree,
5211 u64 logical)
5213 struct extent_map *em;
5214 struct map_lookup *map;
5215 struct extent_map_tree *em_tree = &map_tree->map_tree;
5216 unsigned long len = root->sectorsize;
5218 read_lock(&em_tree->lock);
5219 em = lookup_extent_mapping(em_tree, logical, len);
5220 read_unlock(&em_tree->lock);
5221 BUG_ON(!em);
5223 BUG_ON(em->start > logical || em->start + em->len < logical);
5224 map = em->map_lookup;
5225 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5226 len = map->stripe_len * nr_data_stripes(map);
5227 free_extent_map(em);
5228 return len;
5231 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
5232 u64 logical, u64 len, int mirror_num)
5234 struct extent_map *em;
5235 struct map_lookup *map;
5236 struct extent_map_tree *em_tree = &map_tree->map_tree;
5237 int ret = 0;
5239 read_lock(&em_tree->lock);
5240 em = lookup_extent_mapping(em_tree, logical, len);
5241 read_unlock(&em_tree->lock);
5242 BUG_ON(!em);
5244 BUG_ON(em->start > logical || em->start + em->len < logical);
5245 map = em->map_lookup;
5246 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5247 ret = 1;
5248 free_extent_map(em);
5249 return ret;
5252 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5253 struct map_lookup *map, int first, int num,
5254 int optimal, int dev_replace_is_ongoing)
5256 int i;
5257 int tolerance;
5258 struct btrfs_device *srcdev;
5260 if (dev_replace_is_ongoing &&
5261 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5262 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5263 srcdev = fs_info->dev_replace.srcdev;
5264 else
5265 srcdev = NULL;
5268 * try to avoid the drive that is the source drive for a
5269 * dev-replace procedure, only choose it if no other non-missing
5270 * mirror is available
5272 for (tolerance = 0; tolerance < 2; tolerance++) {
5273 if (map->stripes[optimal].dev->bdev &&
5274 (tolerance || map->stripes[optimal].dev != srcdev))
5275 return optimal;
5276 for (i = first; i < first + num; i++) {
5277 if (map->stripes[i].dev->bdev &&
5278 (tolerance || map->stripes[i].dev != srcdev))
5279 return i;
5283 /* we couldn't find one that doesn't fail. Just return something
5284 * and the io error handling code will clean up eventually
5286 return optimal;
5289 static inline int parity_smaller(u64 a, u64 b)
5291 return a > b;
5294 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5295 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5297 struct btrfs_bio_stripe s;
5298 int i;
5299 u64 l;
5300 int again = 1;
5302 while (again) {
5303 again = 0;
5304 for (i = 0; i < num_stripes - 1; i++) {
5305 if (parity_smaller(bbio->raid_map[i],
5306 bbio->raid_map[i+1])) {
5307 s = bbio->stripes[i];
5308 l = bbio->raid_map[i];
5309 bbio->stripes[i] = bbio->stripes[i+1];
5310 bbio->raid_map[i] = bbio->raid_map[i+1];
5311 bbio->stripes[i+1] = s;
5312 bbio->raid_map[i+1] = l;
5314 again = 1;
5320 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5322 struct btrfs_bio *bbio = kzalloc(
5323 /* the size of the btrfs_bio */
5324 sizeof(struct btrfs_bio) +
5325 /* plus the variable array for the stripes */
5326 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5327 /* plus the variable array for the tgt dev */
5328 sizeof(int) * (real_stripes) +
5330 * plus the raid_map, which includes both the tgt dev
5331 * and the stripes
5333 sizeof(u64) * (total_stripes),
5334 GFP_NOFS|__GFP_NOFAIL);
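/* __GFP_NOFAIL blocks until the allocation succeeds, so no NULL check */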
5336 atomic_set(&bbio->error, 0);
5337 atomic_set(&bbio->refs, 1);
5339 return bbio;
5342 void btrfs_get_bbio(struct btrfs_bio *bbio)
5344 WARN_ON(!atomic_read(&bbio->refs));
5345 atomic_inc(&bbio->refs);
5348 void btrfs_put_bbio(struct btrfs_bio *bbio)
5350 if (!bbio)
5351 return;
5352 if (atomic_dec_and_test(&bbio->refs))
5353 kfree(bbio);
5356 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
5357 u64 logical, u64 *length,
5358 struct btrfs_bio **bbio_ret,
5359 int mirror_num, int need_raid_map)
5361 struct extent_map *em;
5362 struct map_lookup *map;
5363 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5364 struct extent_map_tree *em_tree = &map_tree->map_tree;
5365 u64 offset;
5366 u64 stripe_offset;
5367 u64 stripe_end_offset;
5368 u64 stripe_nr;
5369 u64 stripe_nr_orig;
5370 u64 stripe_nr_end;
5371 u64 stripe_len;
5372 u32 stripe_index;
5373 int i;
5374 int ret = 0;
5375 int num_stripes;
5376 int max_errors = 0;
5377 int tgtdev_indexes = 0;
5378 struct btrfs_bio *bbio = NULL;
5379 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5380 int dev_replace_is_ongoing = 0;
5381 int num_alloc_stripes;
5382 int patch_the_first_stripe_for_dev_replace = 0;
5383 u64 physical_to_patch_in_first_stripe = 0;
5384 u64 raid56_full_stripe_start = (u64)-1;
5386 read_lock(&em_tree->lock);
5387 em = lookup_extent_mapping(em_tree, logical, *length);
5388 read_unlock(&em_tree->lock);
5390 if (!em) {
5391 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
5392 logical, *length);
5393 return -EINVAL;
5396 if (em->start > logical || em->start + em->len < logical) {
5397 btrfs_crit(fs_info,
5398 "found a bad mapping, wanted %Lu, found %Lu-%Lu",
5399 logical, em->start, em->start + em->len);
5400 free_extent_map(em);
5401 return -EINVAL;
5404 map = em->map_lookup;
5405 offset = logical - em->start;
5407 stripe_len = map->stripe_len;
5408 stripe_nr = offset;
5410 * stripe_nr counts the total number of stripes we have to stride
5411 * to get to this block
5413 stripe_nr = div64_u64(stripe_nr, stripe_len);
5415 stripe_offset = stripe_nr * stripe_len;
5416 if (offset < stripe_offset) {
5417 btrfs_crit(fs_info,
5418 "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
5419 stripe_offset, offset, em->start, logical,
5420 stripe_len);
5421 free_extent_map(em);
5422 return -EINVAL;
5425	/* stripe_offset is the offset of this block in its stripe */
5426 stripe_offset = offset - stripe_offset;
5428 /* if we're here for raid56, we need to know the stripe aligned start */
5429 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5430 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5431 raid56_full_stripe_start = offset;
5433 /* allow a write of a full stripe, but make sure we don't
5434 * allow straddling of stripes
5436 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5437 full_stripe_len);
5438 raid56_full_stripe_start *= full_stripe_len;
5441 if (op == REQ_OP_DISCARD) {
5442 /* we don't discard raid56 yet */
5443 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5444 ret = -EOPNOTSUPP;
5445 goto out;
5447 *length = min_t(u64, em->len - offset, *length);
5448 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5449 u64 max_len;
5450 /* For writes to RAID[56], allow a full stripeset across all disks.
5451 For other RAID types and for RAID[56] reads, just allow a single
5452 stripe (on a single disk). */
5453 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5454 (op == REQ_OP_WRITE)) {
5455 max_len = stripe_len * nr_data_stripes(map) -
5456 (offset - raid56_full_stripe_start);
5457 } else {
5458 /* we limit the length of each bio to what fits in a stripe */
5459 max_len = stripe_len - stripe_offset;
5461 *length = min_t(u64, em->len - offset, max_len);
5462 } else {
5463 *length = em->len - offset;
5466 /* This is for when we're called from btrfs_merge_bio_hook() and all
5467 it cares about is the length */
5468 if (!bbio_ret)
5469 goto out;
5471 btrfs_dev_replace_lock(dev_replace, 0);
5472 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5473 if (!dev_replace_is_ongoing)
5474 btrfs_dev_replace_unlock(dev_replace, 0);
5475 else
5476 btrfs_dev_replace_set_lock_blocking(dev_replace);
5478 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5479 op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
5480 op != REQ_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
5482 * in dev-replace case, for repair case (that's the only
5483 * case where the mirror is selected explicitly when
5484 * calling btrfs_map_block), blocks left of the left cursor
5485 * can also be read from the target drive.
5486 * For REQ_GET_READ_MIRRORS, the target drive is added as
5487 * the last one to the array of stripes. For READ, it also
5488 * needs to be supported using the same mirror number.
5489 * If the requested block is not left of the left cursor,
5490 * EIO is returned. This can happen because btrfs_num_copies()
5491 * returns one more in the dev-replace case.
5493 u64 tmp_length = *length;
5494 struct btrfs_bio *tmp_bbio = NULL;
5495 int tmp_num_stripes;
5496 u64 srcdev_devid = dev_replace->srcdev->devid;
5497 int index_srcdev = 0;
5498 int found = 0;
5499 u64 physical_of_found = 0;
5501 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5502 logical, &tmp_length, &tmp_bbio, 0, 0);
5503 if (ret) {
5504 WARN_ON(tmp_bbio != NULL);
5505 goto out;
5508 tmp_num_stripes = tmp_bbio->num_stripes;
5509 if (mirror_num > tmp_num_stripes) {
5511 * REQ_GET_READ_MIRRORS does not contain this
5512 * mirror, that means that the requested area
5513 * is not left of the left cursor
5515 ret = -EIO;
5516 btrfs_put_bbio(tmp_bbio);
5517 goto out;
5521 * process the rest of the function using the mirror_num
5522 * of the source drive. Therefore look it up first.
5523		 * At the end, patch the device pointer to that of the
5524		 * target drive.
5526 for (i = 0; i < tmp_num_stripes; i++) {
5527 if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
5528 continue;
5531 * In case of DUP, in order to keep it simple, only add
5532 * the mirror with the lowest physical address
5534 if (found &&
5535 physical_of_found <= tmp_bbio->stripes[i].physical)
5536 continue;
5538 index_srcdev = i;
5539 found = 1;
5540 physical_of_found = tmp_bbio->stripes[i].physical;
5543 btrfs_put_bbio(tmp_bbio);
5545 if (!found) {
5546 WARN_ON(1);
5547 ret = -EIO;
5548 goto out;
5551 mirror_num = index_srcdev + 1;
5552 patch_the_first_stripe_for_dev_replace = 1;
5553 physical_to_patch_in_first_stripe = physical_of_found;
5554 } else if (mirror_num > map->num_stripes) {
5555 mirror_num = 0;
5558 num_stripes = 1;
5559 stripe_index = 0;
5560 stripe_nr_orig = stripe_nr;
5561 stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5562 stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5563 stripe_end_offset = stripe_nr_end * map->stripe_len -
5564 (offset + *length);
5566 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5567 if (op == REQ_OP_DISCARD)
5568 num_stripes = min_t(u64, map->num_stripes,
5569 stripe_nr_end - stripe_nr_orig);
5570 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5571 &stripe_index);
5572 if (op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
5573 op != REQ_GET_READ_MIRRORS)
5574 mirror_num = 1;
5575 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5576 if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
5577 op == REQ_GET_READ_MIRRORS)
5578 num_stripes = map->num_stripes;
5579 else if (mirror_num)
5580 stripe_index = mirror_num - 1;
5581 else {
5582 stripe_index = find_live_mirror(fs_info, map, 0,
5583 map->num_stripes,
5584 current->pid % map->num_stripes,
5585 dev_replace_is_ongoing);
5586 mirror_num = stripe_index + 1;
5589 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5590 if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
5591 op == REQ_GET_READ_MIRRORS) {
5592 num_stripes = map->num_stripes;
5593 } else if (mirror_num) {
5594 stripe_index = mirror_num - 1;
5595 } else {
5596 mirror_num = 1;
5599 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5600 u32 factor = map->num_stripes / map->sub_stripes;
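/*
 * RAID10 consists of num_stripes / sub_stripes mirror groups;
 * stripe_nr picks the group, stripe_index the first stripe in it.
 */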
5602 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5603 stripe_index *= map->sub_stripes;
5605 if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
5606 num_stripes = map->sub_stripes;
5607 else if (op == REQ_OP_DISCARD)
5608 num_stripes = min_t(u64, map->sub_stripes *
5609 (stripe_nr_end - stripe_nr_orig),
5610 map->num_stripes);
5611 else if (mirror_num)
5612 stripe_index += mirror_num - 1;
5613 else {
5614 int old_stripe_index = stripe_index;
5615 stripe_index = find_live_mirror(fs_info, map,
5616 stripe_index,
5617 map->sub_stripes, stripe_index +
5618 current->pid % map->sub_stripes,
5619 dev_replace_is_ongoing);
5620 mirror_num = stripe_index - old_stripe_index + 1;
5623 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5624 if (need_raid_map &&
5625 (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS ||
5626 mirror_num > 1)) {
5627 /* push stripe_nr back to the start of the full stripe */
5628 stripe_nr = div_u64(raid56_full_stripe_start,
5629 stripe_len * nr_data_stripes(map));
5631 /* RAID[56] write or recovery. Return all stripes */
5632 num_stripes = map->num_stripes;
5633 max_errors = nr_parity_stripes(map);
5635 *length = map->stripe_len;
5636 stripe_index = 0;
5637 stripe_offset = 0;
5638 } else {
5640 * Mirror #0 or #1 means the original data block.
5641 * Mirror #2 is RAID5 parity block.
5642 * Mirror #3 is RAID6 Q block.
5644 stripe_nr = div_u64_rem(stripe_nr,
5645 nr_data_stripes(map), &stripe_index);
5646 if (mirror_num > 1)
5647 stripe_index = nr_data_stripes(map) +
5648 mirror_num - 2;
5650 /* We distribute the parity blocks across stripes */
5651 div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5652 &stripe_index);
5653 if ((op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
5654 op != REQ_GET_READ_MIRRORS) && mirror_num <= 1)
5655 mirror_num = 1;
5657 } else {
5659 * after this, stripe_nr is the number of stripes on this
5660 * device we have to walk to find the data, and stripe_index is
5661 * the number of our device in the stripe array
5663 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5664 &stripe_index);
5665 mirror_num = stripe_index + 1;
5667 if (stripe_index >= map->num_stripes) {
5668 btrfs_crit(fs_info,
5669 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
5670 stripe_index, map->num_stripes);
5671 ret = -EINVAL;
5672 goto out;
5675 num_alloc_stripes = num_stripes;
5676 if (dev_replace_is_ongoing) {
5677 if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD)
5678 num_alloc_stripes <<= 1;
5679 if (op == REQ_GET_READ_MIRRORS)
5680 num_alloc_stripes++;
5681 tgtdev_indexes = num_stripes;
5684 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5685 if (!bbio) {
5686 ret = -ENOMEM;
5687 goto out;
5689 if (dev_replace_is_ongoing)
5690 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5692 /* build raid_map */
5693 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5694 need_raid_map &&
5695 ((op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) ||
5696 mirror_num > 1)) {
5697 u64 tmp;
5698 unsigned rot;
5700 bbio->raid_map = (u64 *)((void *)bbio->stripes +
5701 sizeof(struct btrfs_bio_stripe) *
5702 num_alloc_stripes +
5703 sizeof(int) * tgtdev_indexes);
5705 /* Work out the disk rotation on this stripe-set */
5706 div_u64_rem(stripe_nr, num_stripes, &rot);
5708 /* Fill in the logical address of each stripe */
5709 tmp = stripe_nr * nr_data_stripes(map);
5710 for (i = 0; i < nr_data_stripes(map); i++)
5711 bbio->raid_map[(i+rot) % num_stripes] =
5712 em->start + (tmp + i) * map->stripe_len;
5714 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
5715 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5716 bbio->raid_map[(i+rot+1) % num_stripes] =
5717 RAID6_Q_STRIPE;
5720 if (op == REQ_OP_DISCARD) {
5721 u32 factor = 0;
5722 u32 sub_stripes = 0;
5723 u64 stripes_per_dev = 0;
5724 u32 remaining_stripes = 0;
5725 u32 last_stripe = 0;
5727 if (map->type &
5728 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5729 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5730 sub_stripes = 1;
5731 else
5732 sub_stripes = map->sub_stripes;
5734 factor = map->num_stripes / sub_stripes;
5735 stripes_per_dev = div_u64_rem(stripe_nr_end -
5736 stripe_nr_orig,
5737 factor,
5738 &remaining_stripes);
5739 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5740 last_stripe *= sub_stripes;
5743 for (i = 0; i < num_stripes; i++) {
5744 bbio->stripes[i].physical =
5745 map->stripes[stripe_index].physical +
5746 stripe_offset + stripe_nr * map->stripe_len;
5747 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5749 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5750 BTRFS_BLOCK_GROUP_RAID10)) {
5751 bbio->stripes[i].length = stripes_per_dev *
5752 map->stripe_len;
5754 if (i / sub_stripes < remaining_stripes)
5755 bbio->stripes[i].length +=
5756 map->stripe_len;
5759 * Special for the first stripe and
5760 * the last stripe:
5762				 * |-------|...|-------|
5763				 *     |----------|
5764				 *    off     end_off
5766 if (i < sub_stripes)
5767 bbio->stripes[i].length -=
5768 stripe_offset;
5770 if (stripe_index >= last_stripe &&
5771 stripe_index <= (last_stripe +
5772 sub_stripes - 1))
5773 bbio->stripes[i].length -=
5774 stripe_end_offset;
5776 if (i == sub_stripes - 1)
5777 stripe_offset = 0;
5778 } else
5779 bbio->stripes[i].length = *length;
5781 stripe_index++;
5782 if (stripe_index == map->num_stripes) {
5783 /* This could only happen for RAID0/10 */
5784 stripe_index = 0;
5785 stripe_nr++;
5788 } else {
5789 for (i = 0; i < num_stripes; i++) {
5790 bbio->stripes[i].physical =
5791 map->stripes[stripe_index].physical +
5792 stripe_offset +
5793 stripe_nr * map->stripe_len;
5794 bbio->stripes[i].dev =
5795 map->stripes[stripe_index].dev;
5796 stripe_index++;
5800 if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
5801 max_errors = btrfs_chunk_max_errors(map);
5803 if (bbio->raid_map)
5804 sort_parity_stripes(bbio, num_stripes);
5806 tgtdev_indexes = 0;
5807 if (dev_replace_is_ongoing &&
5808 (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) &&
5809 dev_replace->tgtdev != NULL) {
5810 int index_where_to_add;
5811 u64 srcdev_devid = dev_replace->srcdev->devid;
5814 * duplicate the write operations while the dev replace
5815 * procedure is running. Since the copying of the old disk
5816 * to the new disk takes place at run time while the
5817 * filesystem is mounted writable, the regular write
5818 * operations to the old disk have to be duplicated to go
5819 * to the new disk as well.
5820 * Note that device->missing is handled by the caller, and
5821 * that the write to the old disk is already set up in the
5822 * stripes array.
5824 index_where_to_add = num_stripes;
5825 for (i = 0; i < num_stripes; i++) {
5826 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5827 /* write to new disk, too */
5828 struct btrfs_bio_stripe *new =
5829 bbio->stripes + index_where_to_add;
5830 struct btrfs_bio_stripe *old =
5831 bbio->stripes + i;
5833 new->physical = old->physical;
5834 new->length = old->length;
5835 new->dev = dev_replace->tgtdev;
5836 bbio->tgtdev_map[i] = index_where_to_add;
5837 index_where_to_add++;
5838 max_errors++;
5839 tgtdev_indexes++;
5842 num_stripes = index_where_to_add;
5843 } else if (dev_replace_is_ongoing && (op == REQ_GET_READ_MIRRORS) &&
5844 dev_replace->tgtdev != NULL) {
5845 u64 srcdev_devid = dev_replace->srcdev->devid;
5846 int index_srcdev = 0;
5847 int found = 0;
5848 u64 physical_of_found = 0;
5851 * During the dev-replace procedure, the target drive can
5852 * also be used to read data in case it is needed to repair
5853 * a corrupt block elsewhere. This is possible if the
5854 * requested area is left of the left cursor. In this area,
5855 * the target drive is a full copy of the source drive.
5857 for (i = 0; i < num_stripes; i++) {
5858 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5860 * In case of DUP, in order to keep it
5861 * simple, only add the mirror with the
5862 * lowest physical address
5864 if (found &&
5865 physical_of_found <=
5866 bbio->stripes[i].physical)
5867 continue;
5868 index_srcdev = i;
5869 found = 1;
5870 physical_of_found = bbio->stripes[i].physical;
5873 if (found) {
5874 struct btrfs_bio_stripe *tgtdev_stripe =
5875 bbio->stripes + num_stripes;
5877 tgtdev_stripe->physical = physical_of_found;
5878 tgtdev_stripe->length =
5879 bbio->stripes[index_srcdev].length;
5880 tgtdev_stripe->dev = dev_replace->tgtdev;
5881 bbio->tgtdev_map[index_srcdev] = num_stripes;
5883 tgtdev_indexes++;
5884 num_stripes++;
5888 *bbio_ret = bbio;
5889 bbio->map_type = map->type;
5890 bbio->num_stripes = num_stripes;
5891 bbio->max_errors = max_errors;
5892 bbio->mirror_num = mirror_num;
5893 bbio->num_tgtdevs = tgtdev_indexes;
5896 * this is the case that REQ_READ && dev_replace_is_ongoing &&
5897 * mirror_num == num_stripes + 1 && dev_replace target drive is
5898 * available as a mirror
5900 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5901 WARN_ON(num_stripes > 1);
5902 bbio->stripes[0].dev = dev_replace->tgtdev;
5903 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5904 bbio->mirror_num = map->num_stripes + 1;
5906 out:
5907 if (dev_replace_is_ongoing) {
5908 btrfs_dev_replace_clear_lock_blocking(dev_replace);
5909 btrfs_dev_replace_unlock(dev_replace, 0);
5911 free_extent_map(em);
5912 return ret;
5915 int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
5916 u64 logical, u64 *length,
5917 struct btrfs_bio **bbio_ret, int mirror_num)
5919 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
5920 mirror_num, 0);
5923 /* For Scrub/replace */
5924 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
5925 u64 logical, u64 *length,
5926 struct btrfs_bio **bbio_ret, int mirror_num,
5927 int need_raid_map)
5929 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
5930 mirror_num, need_raid_map);
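/*
 * Reverse mapping: compute every logical address in the chunk at
 * chunk_start that maps to the given physical offset on a device.
 * *logical is set to a kcalloc'd array of bytenrs and *naddrs to its
 * length; the caller is responsible for freeing the array.
 */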
5933 int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
5934 u64 chunk_start, u64 physical, u64 devid,
5935 u64 **logical, int *naddrs, int *stripe_len)
5937 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5938 struct extent_map_tree *em_tree = &map_tree->map_tree;
5939 struct extent_map *em;
5940 struct map_lookup *map;
5941 u64 *buf;
5942 u64 bytenr;
5943 u64 length;
5944 u64 stripe_nr;
5945 u64 rmap_len;
5946 int i, j, nr = 0;
5948 read_lock(&em_tree->lock);
5949 em = lookup_extent_mapping(em_tree, chunk_start, 1);
5950 read_unlock(&em_tree->lock);
5952 if (!em) {
5953 btrfs_err(fs_info, "couldn't find em for chunk %Lu",
5954 chunk_start);
5955 return -EIO;
5958 if (em->start != chunk_start) {
5959 btrfs_err(fs_info, "bad chunk start, em=%Lu, wanted=%Lu",
5960 em->start, chunk_start);
5961 free_extent_map(em);
5962 return -EIO;
5964 map = em->map_lookup;
5966 length = em->len;
5967 rmap_len = map->stripe_len;
5969 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5970 length = div_u64(length, map->num_stripes / map->sub_stripes);
5971 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5972 length = div_u64(length, map->num_stripes);
5973 else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5974 length = div_u64(length, nr_data_stripes(map));
5975 rmap_len = map->stripe_len * nr_data_stripes(map);
5978 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5979 BUG_ON(!buf); /* -ENOMEM */
5981 for (i = 0; i < map->num_stripes; i++) {
5982 if (devid && map->stripes[i].dev->devid != devid)
5983 continue;
5984 if (map->stripes[i].physical > physical ||
5985 map->stripes[i].physical + length <= physical)
5986 continue;
5988 stripe_nr = physical - map->stripes[i].physical;
5989 stripe_nr = div_u64(stripe_nr, map->stripe_len);
5991 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5992 stripe_nr = stripe_nr * map->num_stripes + i;
5993 stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5994 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5995 stripe_nr = stripe_nr * map->num_stripes + i;
5996 } /* else if RAID[56], multiply by nr_data_stripes().
5997 * Alternatively, just use rmap_len below instead of
5998 * map->stripe_len */
6000 bytenr = chunk_start + stripe_nr * rmap_len;
6001 WARN_ON(nr >= map->num_stripes);
6002 for (j = 0; j < nr; j++) {
6003 if (buf[j] == bytenr)
6004 break;
6006 if (j == nr) {
6007 WARN_ON(nr >= map->num_stripes);
6008 buf[nr++] = bytenr;
6012 *logical = buf;
6013 *naddrs = nr;
6014 *stripe_len = rmap_len;
6016 free_extent_map(em);
6017 return 0;
6020 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6022 bio->bi_private = bbio->private;
6023 bio->bi_end_io = bbio->end_io;
6024 bio_endio(bio);
6026 btrfs_put_bbio(bbio);
6029 static void btrfs_end_bio(struct bio *bio)
6031 struct btrfs_bio *bbio = bio->bi_private;
6032 int is_orig_bio = 0;
6034 if (bio->bi_error) {
6035 atomic_inc(&bbio->error);
6036 if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
6037 unsigned int stripe_index =
6038 btrfs_io_bio(bio)->stripe_index;
6039 struct btrfs_device *dev;
6041 BUG_ON(stripe_index >= bbio->num_stripes);
6042 dev = bbio->stripes[stripe_index].dev;
6043 if (dev->bdev) {
6044 if (bio_op(bio) == REQ_OP_WRITE)
6045 btrfs_dev_stat_inc(dev,
6046 BTRFS_DEV_STAT_WRITE_ERRS);
6047 else
6048 btrfs_dev_stat_inc(dev,
6049 BTRFS_DEV_STAT_READ_ERRS);
6050 if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
6051 btrfs_dev_stat_inc(dev,
6052 BTRFS_DEV_STAT_FLUSH_ERRS);
6053 btrfs_dev_stat_print_on_error(dev);
6058 if (bio == bbio->orig_bio)
6059 is_orig_bio = 1;
6061 btrfs_bio_counter_dec(bbio->fs_info);
6063 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6064 if (!is_orig_bio) {
6065 bio_put(bio);
6066 bio = bbio->orig_bio;
6069 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6070 /* only send an error to the higher layers if it is
6071 * beyond the tolerance of the btrfs bio
6073 if (atomic_read(&bbio->error) > bbio->max_errors) {
6074 bio->bi_error = -EIO;
6075 } else {
6077 * this bio is actually up to date, we didn't
6078 * go over the max number of errors
6080 bio->bi_error = 0;
6083 btrfs_end_bbio(bbio, bio);
6084 } else if (!is_orig_bio) {
6085 bio_put(bio);
6090 * see run_scheduled_bios for a description of why bios are collected for
6091 * async submit.
6093 * This will add one bio to the pending list for a device and make sure
6094 * the work struct is scheduled.
6096 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
6097 struct btrfs_device *device,
6098 struct bio *bio)
6100 int should_queue = 1;
6101 struct btrfs_pending_bios *pending_bios;
6103 if (device->missing || !device->bdev) {
6104 bio_io_error(bio);
6105 return;
6108 /* don't bother with additional async steps for reads, right now */
6109 if (bio_op(bio) == REQ_OP_READ) {
6110 bio_get(bio);
6111 btrfsic_submit_bio(bio);
6112 bio_put(bio);
6113 return;
6116 /*
6117 * nr_async_bios allows us to reliably return congestion to the
6118 * higher layers. Otherwise, the async bio makes it appear we have
6119 * made progress against dirty pages when we've really just put it
6120 * on a queue for later
6121 */
6122 atomic_inc(&root->fs_info->nr_async_bios);
6123 WARN_ON(bio->bi_next);
6124 bio->bi_next = NULL;
6126 spin_lock(&device->io_lock);
6127 if (bio->bi_opf & REQ_SYNC)
6128 pending_bios = &device->pending_sync_bios;
6129 else
6130 pending_bios = &device->pending_bios;
6132 if (pending_bios->tail)
6133 pending_bios->tail->bi_next = bio;
6135 pending_bios->tail = bio;
6136 if (!pending_bios->head)
6137 pending_bios->head = bio;
6138 if (device->running_pending)
6139 should_queue = 0;
6141 spin_unlock(&device->io_lock);
6143 if (should_queue)
6144 btrfs_queue_work(root->fs_info->submit_workers,
6145 &device->work);
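
The pending-list manipulation above is a plain head/tail FIFO append on the bio's bi_next link, done under device->io_lock. The same pattern in isolation (generic nodes, hypothetical names):

#include <stddef.h>
#include <stdio.h>

/* Editor's sketch of the head/tail append used in btrfs_schedule_bio. */
struct node { struct node *next; int id; };
struct pending { struct node *head, *tail; };

static void pending_append(struct pending *p, struct node *n)
{
	n->next = NULL;
	if (p->tail)
		p->tail->next = n;	/* link behind the current tail */
	p->tail = n;
	if (!p->head)
		p->head = n;		/* first element: head == tail */
}

int main(void)
{
	struct pending p = { NULL, NULL };
	struct node a = { NULL, 1 }, b = { NULL, 2 };

	pending_append(&p, &a);
	pending_append(&p, &b);
	for (struct node *n = p.head; n; n = n->next)
		printf("%d\n", n->id);	/* prints 1 then 2: FIFO order */
	return 0;
}
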
6148 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
6149 struct bio *bio, u64 physical, int dev_nr,
6150 int async)
6152 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6154 bio->bi_private = bbio;
6155 btrfs_io_bio(bio)->stripe_index = dev_nr;
6156 bio->bi_end_io = btrfs_end_bio;
6157 bio->bi_iter.bi_sector = physical >> 9;
6158 #ifdef DEBUG
6160 struct rcu_string *name;
6162 rcu_read_lock();
6163 name = rcu_dereference(dev->name);
6164 btrfs_debug(fs_info,
6165 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6166 bio_op(bio), bio->bi_opf,
6167 (u64)bio->bi_iter.bi_sector,
6168 (u_long)dev->bdev->bd_dev, name->str, dev->devid,
6169 bio->bi_iter.bi_size);
6170 rcu_read_unlock();
6172 #endif
6173 bio->bi_bdev = dev->bdev;
6175 btrfs_bio_counter_inc_noblocked(root->fs_info);
6177 if (async)
6178 btrfs_schedule_bio(root, dev, bio);
6179 else
6180 btrfsic_submit_bio(bio);
6183 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6185 atomic_inc(&bbio->error);
6186 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6187 /* Should be the original bio. */
6188 WARN_ON(bio != bbio->orig_bio);
6190 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6191 bio->bi_iter.bi_sector = logical >> 9;
6192 bio->bi_error = -EIO;
6193 btrfs_end_bbio(bbio, bio);
6197 int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
6198 int mirror_num, int async_submit)
6200 struct btrfs_device *dev;
6201 struct bio *first_bio = bio;
6202 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6203 u64 length = 0;
6204 u64 map_length;
6205 int ret;
6206 int dev_nr;
6207 int total_devs;
6208 struct btrfs_bio *bbio = NULL;
6210 length = bio->bi_iter.bi_size;
6211 map_length = length;
6213 btrfs_bio_counter_inc_blocked(root->fs_info);
6214 ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical,
6215 &map_length, &bbio, mirror_num, 1);
6216 if (ret) {
6217 btrfs_bio_counter_dec(root->fs_info);
6218 return ret;
6221 total_devs = bbio->num_stripes;
6222 bbio->orig_bio = first_bio;
6223 bbio->private = first_bio->bi_private;
6224 bbio->end_io = first_bio->bi_end_io;
6225 bbio->fs_info = root->fs_info;
6226 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6228 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6229 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6230 /* In this case, map_length has been set to the length of
6231 * a single stripe; not the whole write */
6232 if (bio_op(bio) == REQ_OP_WRITE) {
6233 ret = raid56_parity_write(root, bio, bbio, map_length);
6234 } else {
6235 ret = raid56_parity_recover(root, bio, bbio, map_length,
6236 mirror_num, 1);
6239 btrfs_bio_counter_dec(root->fs_info);
6240 return ret;
6243 if (map_length < length) {
6244 btrfs_crit(root->fs_info,
6245 "mapping failed logical %llu bio len %llu len %llu",
6246 logical, length, map_length);
6247 BUG();
6250 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6251 dev = bbio->stripes[dev_nr].dev;
6252 if (!dev || !dev->bdev ||
6253 (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
6254 bbio_error(bbio, first_bio, logical);
6255 continue;
6258 if (dev_nr < total_devs - 1) {
6259 bio = btrfs_bio_clone(first_bio, GFP_NOFS);
6260 BUG_ON(!bio); /* -ENOMEM */
6261 } else
6262 bio = first_bio;
6264 submit_stripe_bio(root, bbio, bio,
6265 bbio->stripes[dev_nr].physical, dev_nr,
6266 async_submit);
6268 btrfs_bio_counter_dec(root->fs_info);
6269 return 0;
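
The submission loop above fans one logical request out to every stripe: the first total_devs - 1 stripes get clones of the original bio and the last stripe consumes the original itself, so the original is submitted exactly once. Stripped to its shape (illustrative only):

#include <stdio.h>

/* Editor's sketch of the fan-out in btrfs_map_bio: clone for all but
 * the last stripe so the original request is consumed exactly once. */
static void submit_one(int dev_nr, int is_clone)
{
	printf("stripe %d: %s\n", dev_nr, is_clone ? "clone" : "original");
}

int main(void)
{
	int total_devs = 3;	/* hypothetical stripe count */

	for (int dev_nr = 0; dev_nr < total_devs; dev_nr++)
		submit_one(dev_nr, dev_nr < total_devs - 1);
	return 0;
}
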
6272 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
6273 u8 *uuid, u8 *fsid)
6275 struct btrfs_device *device;
6276 struct btrfs_fs_devices *cur_devices;
6278 cur_devices = fs_info->fs_devices;
6279 while (cur_devices) {
6280 if (!fsid ||
6281 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
6282 device = __find_device(&cur_devices->devices,
6283 devid, uuid);
6284 if (device)
6285 return device;
6287 cur_devices = cur_devices->seed;
6289 return NULL;
6292 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
6293 struct btrfs_fs_devices *fs_devices,
6294 u64 devid, u8 *dev_uuid)
6296 struct btrfs_device *device;
6298 device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6299 if (IS_ERR(device))
6300 return NULL;
6302 list_add(&device->dev_list, &fs_devices->devices);
6303 device->fs_devices = fs_devices;
6304 fs_devices->num_devices++;
6306 device->missing = 1;
6307 fs_devices->missing_devices++;
6309 return device;
6312 /**
6313 * btrfs_alloc_device - allocate struct btrfs_device
6314 * @fs_info: used only for generating a new devid, can be NULL if
6315 * devid is provided (i.e. @devid != NULL).
6316 * @devid: a pointer to devid for this device. If NULL a new devid
6317 * is generated.
6318 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6319 * is generated.
6320 *
6321 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6322 * on error. Returned struct is not linked onto any lists and can be
6323 * destroyed with kfree() right away.
6324 */
6325 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6326 const u64 *devid,
6327 const u8 *uuid)
6329 struct btrfs_device *dev;
6330 u64 tmp;
6332 if (WARN_ON(!devid && !fs_info))
6333 return ERR_PTR(-EINVAL);
6335 dev = __alloc_device();
6336 if (IS_ERR(dev))
6337 return dev;
6339 if (devid)
6340 tmp = *devid;
6341 else {
6342 int ret;
6344 ret = find_next_devid(fs_info, &tmp);
6345 if (ret) {
6346 kfree(dev);
6347 return ERR_PTR(ret);
6350 dev->devid = tmp;
6352 if (uuid)
6353 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6354 else
6355 generate_random_uuid(dev->uuid);
6357 btrfs_init_work(&dev->work, btrfs_submit_helper,
6358 pending_bios_fn, NULL, NULL);
6360 return dev;
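
A hypothetical caller exercising the contract documented above: with an explicit devid, fs_info may be NULL, and the returned object can be undone with a bare kfree() because it is not yet on any list. A sketch assuming kernel context (linux/err.h, linux/slab.h); example_alloc_device() is not a kernel symbol:

static int example_alloc_device(void)
{
	u64 devid = 42;		/* hypothetical explicit devid */
	struct btrfs_device *dev;

	/* fs_info may be NULL because an explicit devid is passed;
	 * the NULL uuid asks btrfs_alloc_device() to generate one. */
	dev = btrfs_alloc_device(NULL, &devid, NULL);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	/* Not linked onto any list yet, so a bare kfree() is a valid undo. */
	kfree(dev);
	return 0;
}
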
6363 /* Return -EIO if any error, otherwise return 0. */
6364 static int btrfs_check_chunk_valid(struct btrfs_root *root,
6365 struct extent_buffer *leaf,
6366 struct btrfs_chunk *chunk, u64 logical)
6368 u64 length;
6369 u64 stripe_len;
6370 u16 num_stripes;
6371 u16 sub_stripes;
6372 u64 type;
6373 u64 features;
6374 bool mixed = false;
6376 length = btrfs_chunk_length(leaf, chunk);
6377 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6378 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6379 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6380 type = btrfs_chunk_type(leaf, chunk);
6382 if (!num_stripes) {
6383 btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
6384 num_stripes);
6385 return -EIO;
6387 if (!IS_ALIGNED(logical, root->sectorsize)) {
6388 btrfs_err(root->fs_info,
6389 "invalid chunk logical %llu", logical);
6390 return -EIO;
6392 if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
6393 btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
6394 btrfs_chunk_sector_size(leaf, chunk));
6395 return -EIO;
6397 if (!length || !IS_ALIGNED(length, root->sectorsize)) {
6398 btrfs_err(root->fs_info,
6399 "invalid chunk length %llu", length);
6400 return -EIO;
6402 if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
6403 btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
6404 stripe_len);
6405 return -EIO;
6407 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6408 type) {
6409 btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
6410 ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6411 BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6412 btrfs_chunk_type(leaf, chunk));
6413 return -EIO;
6416 if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
6417 btrfs_err(root->fs_info, "missing chunk type flag: 0x%llx", type);
6418 return -EIO;
6421 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
6422 (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
6423 btrfs_err(root->fs_info,
6424 "system chunk with data or metadata type: 0x%llx", type);
6425 return -EIO;
6428 features = btrfs_super_incompat_flags(root->fs_info->super_copy);
6429 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
6430 mixed = true;
6432 if (!mixed) {
6433 if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
6434 (type & BTRFS_BLOCK_GROUP_DATA)) {
6435 btrfs_err(root->fs_info,
6436 "mixed chunk type in non-mixed mode: 0x%llx", type);
6437 return -EIO;
6441 if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
6442 (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
6443 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
6444 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
6445 (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
6446 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
6447 num_stripes != 1)) {
6448 btrfs_err(root->fs_info,
6449 "invalid num_stripes:sub_stripes %u:%u for profile %llu",
6450 num_stripes, sub_stripes,
6451 type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
6452 return -EIO;
6455 return 0;
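
The final stripe-count check above encodes one rule per profile. The same predicate restated as a self-contained function, with hypothetical SK_* flag values standing in for the BTRFS_BLOCK_GROUP_* bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SK_RAID10 (1u << 0)
#define SK_RAID1  (1u << 1)
#define SK_RAID5  (1u << 2)
#define SK_RAID6  (1u << 3)
#define SK_DUP    (1u << 4)
#define SK_PROFILE_MASK (SK_RAID10 | SK_RAID1 | SK_RAID5 | SK_RAID6 | SK_DUP)

/* Editor's sketch: the same per-profile predicate as the check above. */
static bool stripes_valid(uint32_t type, int num_stripes, int sub_stripes)
{
	return !((type & SK_RAID10 && sub_stripes != 2) ||
		 (type & SK_RAID1 && num_stripes != 2) ||
		 (type & SK_RAID5 && num_stripes < 2) ||
		 (type & SK_RAID6 && num_stripes < 3) ||
		 (type & SK_DUP && num_stripes != 2) ||
		 ((type & SK_PROFILE_MASK) == 0 && num_stripes != 1));
}

int main(void)
{
	printf("%d\n", stripes_valid(SK_RAID1, 2, 1));	/* 1: valid */
	printf("%d\n", stripes_valid(SK_RAID6, 2, 1));	/* 0: too few stripes */
	return 0;
}
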
6458 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6459 struct extent_buffer *leaf,
6460 struct btrfs_chunk *chunk)
6462 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6463 struct map_lookup *map;
6464 struct extent_map *em;
6465 u64 logical;
6466 u64 length;
6467 u64 stripe_len;
6468 u64 devid;
6469 u8 uuid[BTRFS_UUID_SIZE];
6470 int num_stripes;
6471 int ret;
6472 int i;
6474 logical = key->offset;
6475 length = btrfs_chunk_length(leaf, chunk);
6476 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6477 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6479 ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
6480 if (ret)
6481 return ret;
6483 read_lock(&map_tree->map_tree.lock);
6484 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6485 read_unlock(&map_tree->map_tree.lock);
6487 /* already mapped? */
6488 if (em && em->start <= logical && em->start + em->len > logical) {
6489 free_extent_map(em);
6490 return 0;
6491 } else if (em) {
6492 free_extent_map(em);
6495 em = alloc_extent_map();
6496 if (!em)
6497 return -ENOMEM;
6498 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6499 if (!map) {
6500 free_extent_map(em);
6501 return -ENOMEM;
6504 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6505 em->map_lookup = map;
6506 em->start = logical;
6507 em->len = length;
6508 em->orig_start = 0;
6509 em->block_start = 0;
6510 em->block_len = em->len;
6512 map->num_stripes = num_stripes;
6513 map->io_width = btrfs_chunk_io_width(leaf, chunk);
6514 map->io_align = btrfs_chunk_io_align(leaf, chunk);
6515 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
6516 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6517 map->type = btrfs_chunk_type(leaf, chunk);
6518 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6519 for (i = 0; i < num_stripes; i++) {
6520 map->stripes[i].physical =
6521 btrfs_stripe_offset_nr(leaf, chunk, i);
6522 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6523 read_extent_buffer(leaf, uuid, (unsigned long)
6524 btrfs_stripe_dev_uuid_nr(chunk, i),
6525 BTRFS_UUID_SIZE);
6526 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
6527 uuid, NULL);
6528 if (!map->stripes[i].dev &&
6529 !btrfs_test_opt(root->fs_info, DEGRADED)) {
6530 free_extent_map(em);
6531 return -EIO;
6533 if (!map->stripes[i].dev) {
6534 map->stripes[i].dev =
6535 add_missing_dev(root, root->fs_info->fs_devices,
6536 devid, uuid);
6537 if (!map->stripes[i].dev) {
6538 free_extent_map(em);
6539 return -EIO;
6541 btrfs_warn(root->fs_info,
6542 "devid %llu uuid %pU is missing",
6543 devid, uuid);
6545 map->stripes[i].dev->in_fs_metadata = 1;
6548 write_lock(&map_tree->map_tree.lock);
6549 ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6550 write_unlock(&map_tree->map_tree.lock);
6551 BUG_ON(ret); /* Tree corruption */
6552 free_extent_map(em);
6554 return 0;
6557 static void fill_device_from_item(struct extent_buffer *leaf,
6558 struct btrfs_dev_item *dev_item,
6559 struct btrfs_device *device)
6561 unsigned long ptr;
6563 device->devid = btrfs_device_id(leaf, dev_item);
6564 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6565 device->total_bytes = device->disk_total_bytes;
6566 device->commit_total_bytes = device->disk_total_bytes;
6567 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6568 device->commit_bytes_used = device->bytes_used;
6569 device->type = btrfs_device_type(leaf, dev_item);
6570 device->io_align = btrfs_device_io_align(leaf, dev_item);
6571 device->io_width = btrfs_device_io_width(leaf, dev_item);
6572 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6573 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6574 device->is_tgtdev_for_dev_replace = 0;
6576 ptr = btrfs_device_uuid(dev_item);
6577 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6580 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
6581 u8 *fsid)
6583 struct btrfs_fs_devices *fs_devices;
6584 int ret;
6586 BUG_ON(!mutex_is_locked(&uuid_mutex));
6588 fs_devices = root->fs_info->fs_devices->seed;
6589 while (fs_devices) {
6590 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
6591 return fs_devices;
6593 fs_devices = fs_devices->seed;
6596 fs_devices = find_fsid(fsid);
6597 if (!fs_devices) {
6598 if (!btrfs_test_opt(root->fs_info, DEGRADED))
6599 return ERR_PTR(-ENOENT);
6601 fs_devices = alloc_fs_devices(fsid);
6602 if (IS_ERR(fs_devices))
6603 return fs_devices;
6605 fs_devices->seeding = 1;
6606 fs_devices->opened = 1;
6607 return fs_devices;
6610 fs_devices = clone_fs_devices(fs_devices);
6611 if (IS_ERR(fs_devices))
6612 return fs_devices;
6614 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6615 root->fs_info->bdev_holder);
6616 if (ret) {
6617 free_fs_devices(fs_devices);
6618 fs_devices = ERR_PTR(ret);
6619 goto out;
6622 if (!fs_devices->seeding) {
6623 __btrfs_close_devices(fs_devices);
6624 free_fs_devices(fs_devices);
6625 fs_devices = ERR_PTR(-EINVAL);
6626 goto out;
6629 fs_devices->seed = root->fs_info->fs_devices->seed;
6630 root->fs_info->fs_devices->seed = fs_devices;
6631 out:
6632 return fs_devices;
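
Seed devices form a singly linked chain hanging off the sprouted filesystem's fs_devices, and the lookup above walks that chain before falling back to find_fsid(). A miniature model of the chain walk (hypothetical types):

#include <stdio.h>
#include <string.h>

/* Editor's sketch: sprouted filesystems keep their seeds reachable
 * through a singly linked ->seed chain. */
struct devs { char fsid[8]; struct devs *seed; };

static struct devs *find_seed(struct devs *fs, const char *fsid)
{
	for (struct devs *d = fs->seed; d; d = d->seed)
		if (!strncmp(d->fsid, fsid, sizeof(d->fsid)))
			return d;
	return NULL;	/* caller must then locate, open and link it */
}

int main(void)
{
	struct devs seed2 = { "s2", NULL };
	struct devs seed1 = { "s1", &seed2 };
	struct devs top = { "top", &seed1 };

	printf("%s\n", find_seed(&top, "s2") ? "found" : "missing");
	return 0;
}
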
6635 static int read_one_dev(struct btrfs_root *root,
6636 struct extent_buffer *leaf,
6637 struct btrfs_dev_item *dev_item)
6639 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6640 struct btrfs_device *device;
6641 u64 devid;
6642 int ret;
6643 u8 fs_uuid[BTRFS_UUID_SIZE];
6644 u8 dev_uuid[BTRFS_UUID_SIZE];
6646 devid = btrfs_device_id(leaf, dev_item);
6647 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6648 BTRFS_UUID_SIZE);
6649 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6650 BTRFS_UUID_SIZE);
6652 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6653 fs_devices = open_seed_devices(root, fs_uuid);
6654 if (IS_ERR(fs_devices))
6655 return PTR_ERR(fs_devices);
6658 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6659 if (!device) {
6660 if (!btrfs_test_opt(root->fs_info, DEGRADED))
6661 return -EIO;
6663 device = add_missing_dev(root, fs_devices, devid, dev_uuid);
6664 if (!device)
6665 return -ENOMEM;
6666 btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
6667 devid, dev_uuid);
6668 } else {
6669 if (!device->bdev && !btrfs_test_opt(root->fs_info, DEGRADED))
6670 return -EIO;
6672 if (!device->bdev && !device->missing) {
6673 /*
6674 * this happens when a device that was properly set up
6675 * in the device info lists suddenly goes bad.
6676 * device->bdev is NULL, and so we have to set
6677 * device->missing to one here
6678 */
6679 device->fs_devices->missing_devices++;
6680 device->missing = 1;
6683 /* Move the device to its own fs_devices */
6684 if (device->fs_devices != fs_devices) {
6685 ASSERT(device->missing);
6687 list_move(&device->dev_list, &fs_devices->devices);
6688 device->fs_devices->num_devices--;
6689 fs_devices->num_devices++;
6691 device->fs_devices->missing_devices--;
6692 fs_devices->missing_devices++;
6694 device->fs_devices = fs_devices;
6698 if (device->fs_devices != root->fs_info->fs_devices) {
6699 BUG_ON(device->writeable);
6700 if (device->generation !=
6701 btrfs_device_generation(leaf, dev_item))
6702 return -EINVAL;
6705 fill_device_from_item(leaf, dev_item, device);
6706 device->in_fs_metadata = 1;
6707 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6708 device->fs_devices->total_rw_bytes += device->total_bytes;
6709 spin_lock(&root->fs_info->free_chunk_lock);
6710 root->fs_info->free_chunk_space += device->total_bytes -
6711 device->bytes_used;
6712 spin_unlock(&root->fs_info->free_chunk_lock);
6714 ret = 0;
6715 return ret;
6718 int btrfs_read_sys_array(struct btrfs_root *root)
6720 struct btrfs_fs_info *fs_info = root->fs_info;
6721 struct btrfs_super_block *super_copy = fs_info->super_copy;
6722 struct extent_buffer *sb;
6723 struct btrfs_disk_key *disk_key;
6724 struct btrfs_chunk *chunk;
6725 u8 *array_ptr;
6726 unsigned long sb_array_offset;
6727 int ret = 0;
6728 u32 num_stripes;
6729 u32 array_size;
6730 u32 len = 0;
6731 u32 cur_offset;
6732 u64 type;
6733 struct btrfs_key key;
6735 ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
6736 /*
6737 * This will create an extent buffer of nodesize; the superblock size is
6738 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6739 * overallocate, but we can keep it as-is, only the first page is used.
6740 */
6741 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6742 if (IS_ERR(sb))
6743 return PTR_ERR(sb);
6744 set_extent_buffer_uptodate(sb);
6745 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6746 /*
6747 * The sb extent buffer is artificial and just used to read the system array.
6748 * The set_extent_buffer_uptodate() call does not properly mark all its
6749 * pages up-to-date when the page is larger: extent does not cover the
6750 * whole page and consequently check_page_uptodate does not find all
6751 * the page's extents up-to-date (the hole beyond sb),
6752 * write_extent_buffer then triggers a WARN_ON.
6753 *
6754 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6755 * but sb spans only this function. Add an explicit SetPageUptodate call
6756 * to silence the warning e.g. on PowerPC 64.
6757 */
6758 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6759 SetPageUptodate(sb->pages[0]);
6761 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6762 array_size = btrfs_super_sys_array_size(super_copy);
6764 array_ptr = super_copy->sys_chunk_array;
6765 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6766 cur_offset = 0;
6768 while (cur_offset < array_size) {
6769 disk_key = (struct btrfs_disk_key *)array_ptr;
6770 len = sizeof(*disk_key);
6771 if (cur_offset + len > array_size)
6772 goto out_short_read;
6774 btrfs_disk_key_to_cpu(&key, disk_key);
6776 array_ptr += len;
6777 sb_array_offset += len;
6778 cur_offset += len;
6780 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6781 chunk = (struct btrfs_chunk *)sb_array_offset;
6782 /*
6783 * At least one btrfs_chunk with one stripe must be
6784 * present; the exact stripe count check comes afterwards
6785 */
6786 len = btrfs_chunk_item_size(1);
6787 if (cur_offset + len > array_size)
6788 goto out_short_read;
6790 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6791 if (!num_stripes) {
6792 btrfs_err(fs_info,
6793 "invalid number of stripes %u in sys_array at offset %u",
6794 num_stripes, cur_offset);
6795 ret = -EIO;
6796 break;
6799 type = btrfs_chunk_type(sb, chunk);
6800 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6801 btrfs_err(fs_info,
6802 "invalid chunk type %llu in sys_array at offset %u",
6803 type, cur_offset);
6804 ret = -EIO;
6805 break;
6808 len = btrfs_chunk_item_size(num_stripes);
6809 if (cur_offset + len > array_size)
6810 goto out_short_read;
6812 ret = read_one_chunk(root, &key, sb, chunk);
6813 if (ret)
6814 break;
6815 } else {
6816 btrfs_err(fs_info,
6817 "unexpected item type %u in sys_array at offset %u",
6818 (u32)key.type, cur_offset);
6819 ret = -EIO;
6820 break;
6822 array_ptr += len;
6823 sb_array_offset += len;
6824 cur_offset += len;
6826 clear_extent_buffer_uptodate(sb);
6827 free_extent_buffer_stale(sb);
6828 return ret;
6830 out_short_read:
6831 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
6832 len, cur_offset);
6833 clear_extent_buffer_uptodate(sb);
6834 free_extent_buffer_stale(sb);
6835 return -EIO;
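
The parser above sizes each sys_chunk_array entry in two steps: first a disk key, then a chunk item whose length depends on its stripe count (probed with a one-stripe size before the real count is trusted). The on-disk sizes make the arithmetic concrete (editor's sketch; the constants restate the btrfs on-disk format):

#include <stdint.h>
#include <stdio.h>

/* A disk key is 17 bytes; struct btrfs_chunk carries a 48-byte header
 * plus one 32-byte btrfs_stripe per stripe (the first is embedded). */
#define DISK_KEY_SIZE	17	/* objectid(8) + type(1) + offset(8) */
#define CHUNK_HDR_SIZE	48	/* btrfs_chunk without its stripes */
#define STRIPE_SIZE	32	/* devid(8) + offset(8) + dev_uuid(16) */

static uint32_t chunk_item_size(uint32_t num_stripes)
{
	return CHUNK_HDR_SIZE + num_stripes * STRIPE_SIZE;
}

int main(void)
{
	/* Probe size first (one stripe assumed), then the real size. */
	printf("probe=%u\n", chunk_item_size(1));		  /* 80 */
	printf("entry=%u\n", DISK_KEY_SIZE + chunk_item_size(2)); /* 129 */
	return 0;
}
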
6838 int btrfs_read_chunk_tree(struct btrfs_root *root)
6840 struct btrfs_path *path;
6841 struct extent_buffer *leaf;
6842 struct btrfs_key key;
6843 struct btrfs_key found_key;
6844 int ret;
6845 int slot;
6846 u64 total_dev = 0;
6848 root = root->fs_info->chunk_root;
6850 path = btrfs_alloc_path();
6851 if (!path)
6852 return -ENOMEM;
6854 mutex_lock(&uuid_mutex);
6855 lock_chunks(root);
6857 /*
6858 * Read all device items, and then all the chunk items. All
6859 * device items are found before any chunk item (their object id
6860 * is smaller than the lowest possible object id for a chunk
6861 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6862 */
6863 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6864 key.offset = 0;
6865 key.type = 0;
6866 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6867 if (ret < 0)
6868 goto error;
6869 while (1) {
6870 leaf = path->nodes[0];
6871 slot = path->slots[0];
6872 if (slot >= btrfs_header_nritems(leaf)) {
6873 ret = btrfs_next_leaf(root, path);
6874 if (ret == 0)
6875 continue;
6876 if (ret < 0)
6877 goto error;
6878 break;
6880 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6881 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6882 struct btrfs_dev_item *dev_item;
6883 dev_item = btrfs_item_ptr(leaf, slot,
6884 struct btrfs_dev_item);
6885 ret = read_one_dev(root, leaf, dev_item);
6886 if (ret)
6887 goto error;
6888 total_dev++;
6889 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6890 struct btrfs_chunk *chunk;
6891 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6892 ret = read_one_chunk(root, &found_key, leaf, chunk);
6893 if (ret)
6894 goto error;
6896 path->slots[0]++;
6899 /*
6900 * After loading chunk tree, we've got all device information,
6901 * do another round of validation checks.
6902 */
6903 if (total_dev != root->fs_info->fs_devices->total_devices) {
6904 btrfs_err(root->fs_info,
6905 "super_num_devices %llu mismatch with num_devices %llu found here",
6906 btrfs_super_num_devices(root->fs_info->super_copy),
6907 total_dev);
6908 ret = -EINVAL;
6909 goto error;
6911 if (btrfs_super_total_bytes(root->fs_info->super_copy) <
6912 root->fs_info->fs_devices->total_rw_bytes) {
6913 btrfs_err(root->fs_info,
6914 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
6915 btrfs_super_total_bytes(root->fs_info->super_copy),
6916 root->fs_info->fs_devices->total_rw_bytes);
6917 ret = -EINVAL;
6918 goto error;
6920 ret = 0;
6921 error:
6922 unlock_chunks(root);
6923 mutex_unlock(&uuid_mutex);
6925 btrfs_free_path(path);
6926 return ret;
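
The single forward scan above works because btrfs keys sort by (objectid, type, offset) and BTRFS_DEV_ITEMS_OBJECTID (1) precedes BTRFS_FIRST_CHUNK_TREE_OBJECTID (256). A sketch of the comparator and the two key shapes (the type values restate the on-disk constants: DEV_ITEM is 0xd8, CHUNK_ITEM is 0xe4):

#include <stdint.h>
#include <stdio.h>

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

/* Editor's sketch of lexicographic btrfs key ordering. */
static int key_cmp(const struct key *a, const struct key *b)
{
	if (a->objectid != b->objectid)
		return a->objectid < b->objectid ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return 0;
}

int main(void)
{
	struct key dev = { 1, 0xd8, 1 };	/* DEV_ITEM for devid 1 */
	struct key chunk = { 256, 0xe4, 0 };	/* CHUNK_ITEM at logical 0 */

	printf("%d\n", key_cmp(&dev, &chunk));	/* -1: devices sort first */
	return 0;
}
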
6929 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6931 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6932 struct btrfs_device *device;
6934 while (fs_devices) {
6935 mutex_lock(&fs_devices->device_list_mutex);
6936 list_for_each_entry(device, &fs_devices->devices, dev_list)
6937 device->dev_root = fs_info->dev_root;
6938 mutex_unlock(&fs_devices->device_list_mutex);
6940 fs_devices = fs_devices->seed;
6944 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6946 int i;
6948 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6949 btrfs_dev_stat_reset(dev, i);
6952 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6954 struct btrfs_key key;
6955 struct btrfs_key found_key;
6956 struct btrfs_root *dev_root = fs_info->dev_root;
6957 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6958 struct extent_buffer *eb;
6959 int slot;
6960 int ret = 0;
6961 struct btrfs_device *device;
6962 struct btrfs_path *path = NULL;
6963 int i;
6965 path = btrfs_alloc_path();
6966 if (!path) {
6967 ret = -ENOMEM;
6968 goto out;
6971 mutex_lock(&fs_devices->device_list_mutex);
6972 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6973 int item_size;
6974 struct btrfs_dev_stats_item *ptr;
6976 key.objectid = BTRFS_DEV_STATS_OBJECTID;
6977 key.type = BTRFS_PERSISTENT_ITEM_KEY;
6978 key.offset = device->devid;
6979 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6980 if (ret) {
6981 __btrfs_reset_dev_stats(device);
6982 device->dev_stats_valid = 1;
6983 btrfs_release_path(path);
6984 continue;
6986 slot = path->slots[0];
6987 eb = path->nodes[0];
6988 btrfs_item_key_to_cpu(eb, &found_key, slot);
6989 item_size = btrfs_item_size_nr(eb, slot);
6991 ptr = btrfs_item_ptr(eb, slot,
6992 struct btrfs_dev_stats_item);
6994 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6995 if (item_size >= (1 + i) * sizeof(__le64))
6996 btrfs_dev_stat_set(device, i,
6997 btrfs_dev_stats_value(eb, ptr, i));
6998 else
6999 btrfs_dev_stat_reset(device, i);
7002 device->dev_stats_valid = 1;
7003 btrfs_dev_stat_print_on_load(device);
7004 btrfs_release_path(path);
7006 mutex_unlock(&fs_devices->device_list_mutex);
7008 out:
7009 btrfs_free_path(path);
7010 return ret < 0 ? ret : 0;
7013 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7014 struct btrfs_root *dev_root,
7015 struct btrfs_device *device)
7017 struct btrfs_path *path;
7018 struct btrfs_key key;
7019 struct extent_buffer *eb;
7020 struct btrfs_dev_stats_item *ptr;
7021 int ret;
7022 int i;
7024 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7025 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7026 key.offset = device->devid;
7028 path = btrfs_alloc_path();
7029 BUG_ON(!path);
7030 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7031 if (ret < 0) {
7032 btrfs_warn_in_rcu(dev_root->fs_info,
7033 "error %d while searching for dev_stats item for device %s",
7034 ret, rcu_str_deref(device->name));
7035 goto out;
7038 if (ret == 0 &&
7039 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7040 /* need to delete old one and insert a new one */
7041 ret = btrfs_del_item(trans, dev_root, path);
7042 if (ret != 0) {
7043 btrfs_warn_in_rcu(dev_root->fs_info,
7044 "delete too small dev_stats item for device %s failed %d",
7045 rcu_str_deref(device->name), ret);
7046 goto out;
7048 ret = 1;
7051 if (ret == 1) {
7052 /* need to insert a new item */
7053 btrfs_release_path(path);
7054 ret = btrfs_insert_empty_item(trans, dev_root, path,
7055 &key, sizeof(*ptr));
7056 if (ret < 0) {
7057 btrfs_warn_in_rcu(dev_root->fs_info,
7058 "insert dev_stats item for device %s failed %d",
7059 rcu_str_deref(device->name), ret);
7060 goto out;
7064 eb = path->nodes[0];
7065 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7066 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7067 btrfs_set_dev_stats_value(eb, ptr, i,
7068 btrfs_dev_stat_read(device, i));
7069 btrfs_mark_buffer_dirty(eb);
7071 out:
7072 btrfs_free_path(path);
7073 return ret;
7076 /*
7077 * called from commit_transaction. Writes all changed device stats to disk.
7078 */
7079 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
7080 struct btrfs_fs_info *fs_info)
7082 struct btrfs_root *dev_root = fs_info->dev_root;
7083 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7084 struct btrfs_device *device;
7085 int stats_cnt;
7086 int ret = 0;
7088 mutex_lock(&fs_devices->device_list_mutex);
7089 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7090 if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
7091 continue;
7093 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7094 ret = update_dev_stat_item(trans, dev_root, device);
7095 if (!ret)
7096 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7098 mutex_unlock(&fs_devices->device_list_mutex);
7100 return ret;
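
Note the ordering around dev_stats_ccnt above: the counter is sampled before the item is written and only the sampled amount is subtracted afterwards, so updates racing with the commit leave the device dirty for the next run. The pattern in isolation (hypothetical names):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ccnt;	/* editor's stand-in for dev_stats_ccnt */

static int persist(void) { return 0; /* pretend the item was written */ }

/* Snapshot the change counter, persist, then subtract only the
 * snapshot so concurrent increments keep the device dirty. */
static void flush_stats(void)
{
	int snap = atomic_load(&ccnt);

	if (!persist())
		atomic_fetch_sub(&ccnt, snap);
}

int main(void)
{
	atomic_fetch_add(&ccnt, 3);	/* three stat updates */
	flush_stats();
	atomic_fetch_add(&ccnt, 1);	/* a racing update survives too */
	printf("%d\n", atomic_load(&ccnt));	/* 1: still dirty */
	return 0;
}
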
7103 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7105 btrfs_dev_stat_inc(dev, index);
7106 btrfs_dev_stat_print_on_error(dev);
7109 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7111 if (!dev->dev_stats_valid)
7112 return;
7113 btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
7114 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7115 rcu_str_deref(dev->name),
7116 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7117 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7118 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7119 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7120 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7123 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7125 int i;
7127 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7128 if (btrfs_dev_stat_read(dev, i) != 0)
7129 break;
7130 if (i == BTRFS_DEV_STAT_VALUES_MAX)
7131 return; /* all values == 0, suppress message */
7133 btrfs_info_in_rcu(dev->dev_root->fs_info,
7134 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7135 rcu_str_deref(dev->name),
7136 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7137 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7138 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7139 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7140 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7143 int btrfs_get_dev_stats(struct btrfs_root *root,
7144 struct btrfs_ioctl_get_dev_stats *stats)
7146 struct btrfs_device *dev;
7147 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7148 int i;
7150 mutex_lock(&fs_devices->device_list_mutex);
7151 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
7152 mutex_unlock(&fs_devices->device_list_mutex);
7154 if (!dev) {
7155 btrfs_warn(root->fs_info,
7156 "get dev_stats failed, device not found");
7157 return -ENODEV;
7158 } else if (!dev->dev_stats_valid) {
7159 btrfs_warn(root->fs_info,
7160 "get dev_stats failed, not yet valid");
7161 return -ENODEV;
7162 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7163 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7164 if (stats->nr_items > i)
7165 stats->values[i] =
7166 btrfs_dev_stat_read_and_reset(dev, i);
7167 else
7168 btrfs_dev_stat_reset(dev, i);
7170 } else {
7171 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7172 if (stats->nr_items > i)
7173 stats->values[i] = btrfs_dev_stat_read(dev, i);
7175 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7176 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7177 return 0;
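
This handler backs the BTRFS_IOC_GET_DEV_STATS ioctl. A userspace sketch that reads the counters it serves (the mount path and devid are hypothetical; assumes the uapi <linux/btrfs.h>):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_get_dev_stats args;
	int fd = open("/mnt", O_RDONLY);	/* any file on the fs */

	if (fd < 0)
		return 1;
	memset(&args, 0, sizeof(args));
	args.devid = 1;				/* hypothetical device id */
	args.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) == 0)
		printf("write errs: %llu\n",
		       (unsigned long long)args.values[BTRFS_DEV_STAT_WRITE_ERRS]);
	close(fd);
	return 0;
}
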
7180 void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
7182 struct buffer_head *bh;
7183 struct btrfs_super_block *disk_super;
7184 int copy_num;
7186 if (!bdev)
7187 return;
7189 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
7190 copy_num++) {
7192 if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
7193 continue;
7195 disk_super = (struct btrfs_super_block *)bh->b_data;
7197 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
7198 set_buffer_dirty(bh);
7199 sync_dirty_buffer(bh);
7200 brelse(bh);
7203 /* Notify udev that device has changed */
7204 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
7206 /* Update ctime/mtime for device path for libblkid */
7207 update_dev_time(device_path);
7210 /*
7211 * Update the size of all devices, which is used for writing out the
7212 * super blocks.
7213 */
7214 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
7216 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7217 struct btrfs_device *curr, *next;
7219 if (list_empty(&fs_devices->resized_devices))
7220 return;
7222 mutex_lock(&fs_devices->device_list_mutex);
7223 lock_chunks(fs_info->dev_root);
7224 list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
7225 resized_list) {
7226 list_del_init(&curr->resized_list);
7227 curr->commit_total_bytes = curr->disk_total_bytes;
7229 unlock_chunks(fs_info->dev_root);
7230 mutex_unlock(&fs_devices->device_list_mutex);
7233 /* Must be invoked during the transaction commit */
7234 void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
7235 struct btrfs_transaction *transaction)
7237 struct extent_map *em;
7238 struct map_lookup *map;
7239 struct btrfs_device *dev;
7240 int i;
7242 if (list_empty(&transaction->pending_chunks))
7243 return;
7245 /* In order to kick the device replace finish process */
7246 lock_chunks(root);
7247 list_for_each_entry(em, &transaction->pending_chunks, list) {
7248 map = em->map_lookup;
7250 for (i = 0; i < map->num_stripes; i++) {
7251 dev = map->stripes[i].dev;
7252 dev->commit_bytes_used = dev->bytes_used;
7255 unlock_chunks(root);
7258 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
7260 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7261 while (fs_devices) {
7262 fs_devices->fs_info = fs_info;
7263 fs_devices = fs_devices->seed;
7267 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7269 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7270 while (fs_devices) {
7271 fs_devices->fs_info = NULL;
7272 fs_devices = fs_devices->seed;