/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ordered-data.h"
#include "transaction.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
struct scrub_page {
	struct scrub_block	*sblock;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			physical_for_dev_replace;
	unsigned int		mirror_num:8;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];
};
struct scrub_bio {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	struct btrfs_work	work;
};
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	unsigned int		header_error:1;
	unsigned int		checksum_error:1;
	unsigned int		no_io_error_seen:1;
	unsigned int		generation_error:1; /* also sets header_error */
};
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	struct list_head	csum_list;
	int			pages_per_rd_bio;
	struct scrub_wr_ctx	wr_ctx;
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};
struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct btrfs_root	*root;
	struct btrfs_work	work;
};
struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			physical_for_dev_replace;
	struct btrfs_work	work;
};
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	struct btrfs_device	*dev;
};
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      void *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}
/*
 * used for workers that require transaction commits (i.e., for the
 * device replace code)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_inc(&sctx->workers_pending);
}
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}
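
/* drop any checksums that are still queued on the scrub context's csum_list */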
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;

		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
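
/*
 * free a scrub context: drop the page references still held by a partially
 * filled bio, free the preallocated scrub_bios and any queued checksums
 */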
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}
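
/*
 * allocate and initialize a scrub context for one device, preallocating the
 * read bios; returns an ERR_PTR on allocation or setup failure
 */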
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int i;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
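
/*
 * callback for iterate_extent_inodes(): resolves the paths of one inode that
 * references the errored extent and prints a warning line for each path
 */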
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the bit ipath might have been too small to
	 * hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}
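
/*
 * print a warning that identifies what references the errored block: for
 * metadata, the owning tree and level; for data, every inode path mapping
 * to the damaged range
 */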
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
							&ref_root, &ref_level);
			printk_in_rcu(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}
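
/*
 * callback for iterate_inodes_from_logical(): re-reads one page of a
 * nodatasum file through the regular readpage path so that the generic
 * read-repair code can rewrite the bad copy; returns 1 to stop the
 * iteration once the sector was corrected
 */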
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error
			 * incorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory, then.
			 */
			ret = -EIO;
			goto out;
		}
		fs_info = BTRFS_I(inode)->root->fs_info;
		ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}
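
/*
 * worker that tries to repair an error in a block without checksum
 * (nodatasum) by triggering a regular read of the affected range; updates
 * the corrected/uncorrectable statistics accordingly
 */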
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			(unsigned long long)fixup->logical,
			rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}
768 * scrub_handle_errored_block gets called when either verification of the
769 * pages failed or the bio failed to read, e.g. with EIO. In the latter
770 * case, this function handles all pages in the bio, even though only one
772 * The goal of this function is to repair the errored block by using the
773 * contents of one of the mirrors.
775 static int scrub_handle_errored_block(struct scrub_block
*sblock_to_check
)
777 struct scrub_ctx
*sctx
= sblock_to_check
->sctx
;
778 struct btrfs_device
*dev
;
779 struct btrfs_fs_info
*fs_info
;
783 unsigned int failed_mirror_index
;
784 unsigned int is_metadata
;
785 unsigned int have_csum
;
787 struct scrub_block
*sblocks_for_recheck
; /* holds one for each mirror */
788 struct scrub_block
*sblock_bad
;
793 static DEFINE_RATELIMIT_STATE(_rs
, DEFAULT_RATELIMIT_INTERVAL
,
794 DEFAULT_RATELIMIT_BURST
);
796 BUG_ON(sblock_to_check
->page_count
< 1);
797 fs_info
= sctx
->dev_root
->fs_info
;
798 if (sblock_to_check
->pagev
[0]->flags
& BTRFS_EXTENT_FLAG_SUPER
) {
800 * if we find an error in a super block, we just report it.
801 * They will get written with the next transaction commit
804 spin_lock(&sctx
->stat_lock
);
805 ++sctx
->stat
.super_errors
;
806 spin_unlock(&sctx
->stat_lock
);
809 length
= sblock_to_check
->page_count
* PAGE_SIZE
;
810 logical
= sblock_to_check
->pagev
[0]->logical
;
811 generation
= sblock_to_check
->pagev
[0]->generation
;
812 BUG_ON(sblock_to_check
->pagev
[0]->mirror_num
< 1);
813 failed_mirror_index
= sblock_to_check
->pagev
[0]->mirror_num
- 1;
814 is_metadata
= !(sblock_to_check
->pagev
[0]->flags
&
815 BTRFS_EXTENT_FLAG_DATA
);
816 have_csum
= sblock_to_check
->pagev
[0]->have_csum
;
817 csum
= sblock_to_check
->pagev
[0]->csum
;
818 dev
= sblock_to_check
->pagev
[0]->dev
;
820 if (sctx
->is_dev_replace
&& !is_metadata
&& !have_csum
) {
821 sblocks_for_recheck
= NULL
;
826 * read all mirrors one after the other. This includes to
827 * re-read the extent or metadata block that failed (that was
828 * the cause that this fixup code is called) another time,
829 * page by page this time in order to know which pages
830 * caused I/O errors and which ones are good (for all mirrors).
831 * It is the goal to handle the situation when more than one
832 * mirror contains I/O errors, but the errors do not
833 * overlap, i.e. the data can be repaired by selecting the
834 * pages from those mirrors without I/O error on the
835 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
836 * would be that mirror #1 has an I/O error on the first page,
837 * the second page is good, and mirror #2 has an I/O error on
838 * the second page, but the first page is good.
839 * Then the first page of the first mirror can be repaired by
840 * taking the first page of the second mirror, and the
841 * second page of the second mirror can be repaired by
842 * copying the contents of the 2nd page of the 1st mirror.
843 * One more note: if the pages of one mirror contain I/O
844 * errors, the checksum cannot be verified. In order to get
845 * the best data for repairing, the first attempt is to find
846 * a mirror without I/O errors and with a validated checksum.
847 * Only if this is not possible, the pages are picked from
848 * mirrors with I/O errors without considering the checksum.
849 * If the latter is the case, at the end, the checksum of the
850 * repaired area is verified in order to correctly maintain
854 sblocks_for_recheck
= kzalloc(BTRFS_MAX_MIRRORS
*
855 sizeof(*sblocks_for_recheck
),
857 if (!sblocks_for_recheck
) {
858 spin_lock(&sctx
->stat_lock
);
859 sctx
->stat
.malloc_errors
++;
860 sctx
->stat
.read_errors
++;
861 sctx
->stat
.uncorrectable_errors
++;
862 spin_unlock(&sctx
->stat_lock
);
863 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
867 /* setup the context, map the logical blocks and alloc the pages */
868 ret
= scrub_setup_recheck_block(sctx
, fs_info
, sblock_to_check
, length
,
869 logical
, sblocks_for_recheck
);
871 spin_lock(&sctx
->stat_lock
);
872 sctx
->stat
.read_errors
++;
873 sctx
->stat
.uncorrectable_errors
++;
874 spin_unlock(&sctx
->stat_lock
);
875 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
878 BUG_ON(failed_mirror_index
>= BTRFS_MAX_MIRRORS
);
879 sblock_bad
= sblocks_for_recheck
+ failed_mirror_index
;
881 /* build and submit the bios for the failed mirror, check checksums */
882 scrub_recheck_block(fs_info
, sblock_bad
, is_metadata
, have_csum
,
883 csum
, generation
, sctx
->csum_size
);
885 if (!sblock_bad
->header_error
&& !sblock_bad
->checksum_error
&&
886 sblock_bad
->no_io_error_seen
) {
888 * the error disappeared after reading page by page, or
889 * the area was part of a huge bio and other parts of the
890 * bio caused I/O errors, or the block layer merged several
891 * read requests into one and the error is caused by a
892 * different bio (usually one of the two latter cases is
895 spin_lock(&sctx
->stat_lock
);
896 sctx
->stat
.unverified_errors
++;
897 spin_unlock(&sctx
->stat_lock
);
899 if (sctx
->is_dev_replace
)
900 scrub_write_block_to_dev_replace(sblock_bad
);
904 if (!sblock_bad
->no_io_error_seen
) {
905 spin_lock(&sctx
->stat_lock
);
906 sctx
->stat
.read_errors
++;
907 spin_unlock(&sctx
->stat_lock
);
908 if (__ratelimit(&_rs
))
909 scrub_print_warning("i/o error", sblock_to_check
);
910 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
911 } else if (sblock_bad
->checksum_error
) {
912 spin_lock(&sctx
->stat_lock
);
913 sctx
->stat
.csum_errors
++;
914 spin_unlock(&sctx
->stat_lock
);
915 if (__ratelimit(&_rs
))
916 scrub_print_warning("checksum error", sblock_to_check
);
917 btrfs_dev_stat_inc_and_print(dev
,
918 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
919 } else if (sblock_bad
->header_error
) {
920 spin_lock(&sctx
->stat_lock
);
921 sctx
->stat
.verify_errors
++;
922 spin_unlock(&sctx
->stat_lock
);
923 if (__ratelimit(&_rs
))
924 scrub_print_warning("checksum/header error",
926 if (sblock_bad
->generation_error
)
927 btrfs_dev_stat_inc_and_print(dev
,
928 BTRFS_DEV_STAT_GENERATION_ERRS
);
930 btrfs_dev_stat_inc_and_print(dev
,
931 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
934 if (sctx
->readonly
&& !sctx
->is_dev_replace
)
935 goto did_not_correct_error
;
937 if (!is_metadata
&& !have_csum
) {
938 struct scrub_fixup_nodatasum
*fixup_nodatasum
;
941 WARN_ON(sctx
->is_dev_replace
);
944 * !is_metadata and !have_csum, this means that the data
945 * might not be COW'ed, that it might be modified
946 * concurrently. The general strategy to work on the
947 * commit root does not help in the case when COW is not
950 fixup_nodatasum
= kzalloc(sizeof(*fixup_nodatasum
), GFP_NOFS
);
951 if (!fixup_nodatasum
)
952 goto did_not_correct_error
;
953 fixup_nodatasum
->sctx
= sctx
;
954 fixup_nodatasum
->dev
= dev
;
955 fixup_nodatasum
->logical
= logical
;
956 fixup_nodatasum
->root
= fs_info
->extent_root
;
957 fixup_nodatasum
->mirror_num
= failed_mirror_index
+ 1;
958 scrub_pending_trans_workers_inc(sctx
);
959 fixup_nodatasum
->work
.func
= scrub_fixup_nodatasum
;
960 btrfs_queue_worker(&fs_info
->scrub_workers
,
961 &fixup_nodatasum
->work
);
966 * now build and submit the bios for the other mirrors, check
968 * First try to pick the mirror which is completely without I/O
969 * errors and also does not have a checksum error.
970 * If one is found, and if a checksum is present, the full block
971 * that is known to contain an error is rewritten. Afterwards
972 * the block is known to be corrected.
973 * If a mirror is found which is completely correct, and no
974 * checksum is present, only those pages are rewritten that had
975 * an I/O error in the block to be repaired, since it cannot be
976 * determined, which copy of the other pages is better (and it
977 * could happen otherwise that a correct page would be
978 * overwritten by a bad one).
980 for (mirror_index
= 0;
981 mirror_index
< BTRFS_MAX_MIRRORS
&&
982 sblocks_for_recheck
[mirror_index
].page_count
> 0;
984 struct scrub_block
*sblock_other
;
986 if (mirror_index
== failed_mirror_index
)
988 sblock_other
= sblocks_for_recheck
+ mirror_index
;
990 /* build and submit the bios, check checksums */
991 scrub_recheck_block(fs_info
, sblock_other
, is_metadata
,
992 have_csum
, csum
, generation
,
995 if (!sblock_other
->header_error
&&
996 !sblock_other
->checksum_error
&&
997 sblock_other
->no_io_error_seen
) {
998 if (sctx
->is_dev_replace
) {
999 scrub_write_block_to_dev_replace(sblock_other
);
1001 int force_write
= is_metadata
|| have_csum
;
1003 ret
= scrub_repair_block_from_good_copy(
1004 sblock_bad
, sblock_other
,
1008 goto corrected_error
;
1013 * for dev_replace, pick good pages and write to the target device.
1015 if (sctx
->is_dev_replace
) {
1017 for (page_num
= 0; page_num
< sblock_bad
->page_count
;
1022 for (mirror_index
= 0;
1023 mirror_index
< BTRFS_MAX_MIRRORS
&&
1024 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1026 struct scrub_block
*sblock_other
=
1027 sblocks_for_recheck
+ mirror_index
;
1028 struct scrub_page
*page_other
=
1029 sblock_other
->pagev
[page_num
];
1031 if (!page_other
->io_error
) {
1032 ret
= scrub_write_page_to_dev_replace(
1033 sblock_other
, page_num
);
1035 /* succeeded for this page */
1039 btrfs_dev_replace_stats_inc(
1041 fs_info
->dev_replace
.
1049 * did not find a mirror to fetch the page
1050 * from. scrub_write_page_to_dev_replace()
1051 * handles this case (page->io_error), by
1052 * filling the block with zeros before
1053 * submitting the write request
1056 ret
= scrub_write_page_to_dev_replace(
1057 sblock_bad
, page_num
);
1059 btrfs_dev_replace_stats_inc(
1060 &sctx
->dev_root
->fs_info
->
1061 dev_replace
.num_write_errors
);
1069 * for regular scrub, repair those pages that are errored.
1070 * In case of I/O errors in the area that is supposed to be
1071 * repaired, continue by picking good copies of those pages.
1072 * Select the good pages from mirrors to rewrite bad pages from
1073 * the area to fix. Afterwards verify the checksum of the block
1074 * that is supposed to be repaired. This verification step is
1075 * only done for the purpose of statistic counting and for the
1076 * final scrub report, whether errors remain.
1077 * A perfect algorithm could make use of the checksum and try
1078 * all possible combinations of pages from the different mirrors
1079 * until the checksum verification succeeds. For example, when
1080 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1081 * of mirror #2 is readable but the final checksum test fails,
1082 * then the 2nd page of mirror #3 could be tried, whether now
1083 * the final checksum succeedes. But this would be a rare
1084 * exception and is therefore not implemented. At least it is
1085 * avoided that the good copy is overwritten.
1086 * A more useful improvement would be to pick the sectors
1087 * without I/O error based on sector sizes (512 bytes on legacy
1088 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
1089 * mirror could be repaired by taking 512 byte of a different
1090 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1091 * area are unreadable.
1094 /* can only fix I/O errors from here on */
1095 if (sblock_bad
->no_io_error_seen
)
1096 goto did_not_correct_error
;
1099 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1100 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1102 if (!page_bad
->io_error
)
1105 for (mirror_index
= 0;
1106 mirror_index
< BTRFS_MAX_MIRRORS
&&
1107 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1109 struct scrub_block
*sblock_other
= sblocks_for_recheck
+
1111 struct scrub_page
*page_other
= sblock_other
->pagev
[
1114 if (!page_other
->io_error
) {
1115 ret
= scrub_repair_page_from_good_copy(
1116 sblock_bad
, sblock_other
, page_num
, 0);
1118 page_bad
->io_error
= 0;
1119 break; /* succeeded for this page */
1124 if (page_bad
->io_error
) {
1125 /* did not find a mirror to copy the page from */
1131 if (is_metadata
|| have_csum
) {
1133 * need to verify the checksum now that all
1134 * sectors on disk are repaired (the write
1135 * request for data to be repaired is on its way).
1136 * Just be lazy and use scrub_recheck_block()
1137 * which re-reads the data before the checksum
1138 * is verified, but most likely the data comes out
1139 * of the page cache.
1141 scrub_recheck_block(fs_info
, sblock_bad
,
1142 is_metadata
, have_csum
, csum
,
1143 generation
, sctx
->csum_size
);
1144 if (!sblock_bad
->header_error
&&
1145 !sblock_bad
->checksum_error
&&
1146 sblock_bad
->no_io_error_seen
)
1147 goto corrected_error
;
1149 goto did_not_correct_error
;
1152 spin_lock(&sctx
->stat_lock
);
1153 sctx
->stat
.corrected_errors
++;
1154 spin_unlock(&sctx
->stat_lock
);
1155 printk_ratelimited_in_rcu(KERN_ERR
1156 "btrfs: fixed up error at logical %llu on dev %s\n",
1157 (unsigned long long)logical
,
1158 rcu_str_deref(dev
->name
));
1161 did_not_correct_error
:
1162 spin_lock(&sctx
->stat_lock
);
1163 sctx
->stat
.uncorrectable_errors
++;
1164 spin_unlock(&sctx
->stat_lock
);
1165 printk_ratelimited_in_rcu(KERN_ERR
1166 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
1167 (unsigned long long)logical
,
1168 rcu_str_deref(dev
->name
));
1172 if (sblocks_for_recheck
) {
1173 for (mirror_index
= 0; mirror_index
< BTRFS_MAX_MIRRORS
;
1175 struct scrub_block
*sblock
= sblocks_for_recheck
+
1179 for (page_index
= 0; page_index
< sblock
->page_count
;
1181 sblock
->pagev
[page_index
]->sblock
= NULL
;
1182 scrub_page_put(sblock
->pagev
[page_index
]);
1185 kfree(sblocks_for_recheck
);
1191 static int scrub_setup_recheck_block(struct scrub_ctx
*sctx
,
1192 struct btrfs_fs_info
*fs_info
,
1193 struct scrub_block
*original_sblock
,
1194 u64 length
, u64 logical
,
1195 struct scrub_block
*sblocks_for_recheck
)
1202 * note: the two members ref_count and outstanding_pages
1203 * are not used (and not set) in the blocks that are used for
1204 * the recheck procedure
1208 while (length
> 0) {
1209 u64 sublen
= min_t(u64
, length
, PAGE_SIZE
);
1210 u64 mapped_length
= sublen
;
1211 struct btrfs_bio
*bbio
= NULL
;
1214 * with a length of PAGE_SIZE, each returned stripe
1215 * represents one mirror
1217 ret
= btrfs_map_block(fs_info
, REQ_GET_READ_MIRRORS
, logical
,
1218 &mapped_length
, &bbio
, 0);
1219 if (ret
|| !bbio
|| mapped_length
< sublen
) {
1224 BUG_ON(page_index
>= SCRUB_PAGES_PER_RD_BIO
);
1225 for (mirror_index
= 0; mirror_index
< (int)bbio
->num_stripes
;
1227 struct scrub_block
*sblock
;
1228 struct scrub_page
*page
;
1230 if (mirror_index
>= BTRFS_MAX_MIRRORS
)
1233 sblock
= sblocks_for_recheck
+ mirror_index
;
1234 sblock
->sctx
= sctx
;
1235 page
= kzalloc(sizeof(*page
), GFP_NOFS
);
1238 spin_lock(&sctx
->stat_lock
);
1239 sctx
->stat
.malloc_errors
++;
1240 spin_unlock(&sctx
->stat_lock
);
1244 scrub_page_get(page
);
1245 sblock
->pagev
[page_index
] = page
;
1246 page
->logical
= logical
;
1247 page
->physical
= bbio
->stripes
[mirror_index
].physical
;
1248 BUG_ON(page_index
>= original_sblock
->page_count
);
1249 page
->physical_for_dev_replace
=
1250 original_sblock
->pagev
[page_index
]->
1251 physical_for_dev_replace
;
1252 /* for missing devices, dev->bdev is NULL */
1253 page
->dev
= bbio
->stripes
[mirror_index
].dev
;
1254 page
->mirror_num
= mirror_index
+ 1;
1255 sblock
->page_count
++;
1256 page
->page
= alloc_page(GFP_NOFS
);
1270 * this function will check the on disk data for checksum errors, header
1271 * errors and read I/O errors. If any I/O errors happen, the exact pages
1272 * which are errored are marked as being bad. The goal is to enable scrub
1273 * to take those pages that are not errored from all the mirrors so that
1274 * the pages that are errored in the just handled mirror can be repaired.
1276 static void scrub_recheck_block(struct btrfs_fs_info
*fs_info
,
1277 struct scrub_block
*sblock
, int is_metadata
,
1278 int have_csum
, u8
*csum
, u64 generation
,
1283 sblock
->no_io_error_seen
= 1;
1284 sblock
->header_error
= 0;
1285 sblock
->checksum_error
= 0;
1287 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1289 struct scrub_page
*page
= sblock
->pagev
[page_num
];
1290 DECLARE_COMPLETION_ONSTACK(complete
);
1292 if (page
->dev
->bdev
== NULL
) {
1294 sblock
->no_io_error_seen
= 0;
1298 WARN_ON(!page
->page
);
1299 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 1);
1302 sblock
->no_io_error_seen
= 0;
1305 bio
->bi_bdev
= page
->dev
->bdev
;
1306 bio
->bi_sector
= page
->physical
>> 9;
1307 bio
->bi_end_io
= scrub_complete_bio_end_io
;
1308 bio
->bi_private
= &complete
;
1310 bio_add_page(bio
, page
->page
, PAGE_SIZE
, 0);
1311 btrfsic_submit_bio(READ
, bio
);
1313 /* this will also unplug the queue */
1314 wait_for_completion(&complete
);
1316 page
->io_error
= !test_bit(BIO_UPTODATE
, &bio
->bi_flags
);
1317 if (!test_bit(BIO_UPTODATE
, &bio
->bi_flags
))
1318 sblock
->no_io_error_seen
= 0;
1322 if (sblock
->no_io_error_seen
)
1323 scrub_recheck_block_checksum(fs_info
, sblock
, is_metadata
,
1324 have_csum
, csum
, generation
,
1330 static void scrub_recheck_block_checksum(struct btrfs_fs_info
*fs_info
,
1331 struct scrub_block
*sblock
,
1332 int is_metadata
, int have_csum
,
1333 const u8
*csum
, u64 generation
,
1337 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1339 void *mapped_buffer
;
1341 WARN_ON(!sblock
->pagev
[0]->page
);
1343 struct btrfs_header
*h
;
1345 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1346 h
= (struct btrfs_header
*)mapped_buffer
;
1348 if (sblock
->pagev
[0]->logical
!= le64_to_cpu(h
->bytenr
) ||
1349 memcmp(h
->fsid
, fs_info
->fsid
, BTRFS_UUID_SIZE
) ||
1350 memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1352 sblock
->header_error
= 1;
1353 } else if (generation
!= le64_to_cpu(h
->generation
)) {
1354 sblock
->header_error
= 1;
1355 sblock
->generation_error
= 1;
1362 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1365 for (page_num
= 0;;) {
1366 if (page_num
== 0 && is_metadata
)
1367 crc
= btrfs_csum_data(
1368 ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
,
1369 crc
, PAGE_SIZE
- BTRFS_CSUM_SIZE
);
1371 crc
= btrfs_csum_data(mapped_buffer
, crc
, PAGE_SIZE
);
1373 kunmap_atomic(mapped_buffer
);
1375 if (page_num
>= sblock
->page_count
)
1377 WARN_ON(!sblock
->pagev
[page_num
]->page
);
1379 mapped_buffer
= kmap_atomic(sblock
->pagev
[page_num
]->page
);
1382 btrfs_csum_final(crc
, calculated_csum
);
1383 if (memcmp(calculated_csum
, csum
, csum_size
))
1384 sblock
->checksum_error
= 1;
1387 static void scrub_complete_bio_end_io(struct bio
*bio
, int err
)
1389 complete((struct completion
*)bio
->bi_private
);
1392 static int scrub_repair_block_from_good_copy(struct scrub_block
*sblock_bad
,
1393 struct scrub_block
*sblock_good
,
1399 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1402 ret_sub
= scrub_repair_page_from_good_copy(sblock_bad
,
1413 static int scrub_repair_page_from_good_copy(struct scrub_block
*sblock_bad
,
1414 struct scrub_block
*sblock_good
,
1415 int page_num
, int force_write
)
1417 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1418 struct scrub_page
*page_good
= sblock_good
->pagev
[page_num
];
1420 BUG_ON(page_bad
->page
== NULL
);
1421 BUG_ON(page_good
->page
== NULL
);
1422 if (force_write
|| sblock_bad
->header_error
||
1423 sblock_bad
->checksum_error
|| page_bad
->io_error
) {
1426 DECLARE_COMPLETION_ONSTACK(complete
);
1428 if (!page_bad
->dev
->bdev
) {
1429 printk_ratelimited(KERN_WARNING
1430 "btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
1434 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 1);
1437 bio
->bi_bdev
= page_bad
->dev
->bdev
;
1438 bio
->bi_sector
= page_bad
->physical
>> 9;
1439 bio
->bi_end_io
= scrub_complete_bio_end_io
;
1440 bio
->bi_private
= &complete
;
1442 ret
= bio_add_page(bio
, page_good
->page
, PAGE_SIZE
, 0);
1443 if (PAGE_SIZE
!= ret
) {
1447 btrfsic_submit_bio(WRITE
, bio
);
1449 /* this will also unplug the queue */
1450 wait_for_completion(&complete
);
1451 if (!bio_flagged(bio
, BIO_UPTODATE
)) {
1452 btrfs_dev_stat_inc_and_print(page_bad
->dev
,
1453 BTRFS_DEV_STAT_WRITE_ERRS
);
1454 btrfs_dev_replace_stats_inc(
1455 &sblock_bad
->sctx
->dev_root
->fs_info
->
1456 dev_replace
.num_write_errors
);
1466 static void scrub_write_block_to_dev_replace(struct scrub_block
*sblock
)
1470 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1473 ret
= scrub_write_page_to_dev_replace(sblock
, page_num
);
1475 btrfs_dev_replace_stats_inc(
1476 &sblock
->sctx
->dev_root
->fs_info
->dev_replace
.
1481 static int scrub_write_page_to_dev_replace(struct scrub_block
*sblock
,
1484 struct scrub_page
*spage
= sblock
->pagev
[page_num
];
1486 BUG_ON(spage
->page
== NULL
);
1487 if (spage
->io_error
) {
1488 void *mapped_buffer
= kmap_atomic(spage
->page
);
1490 memset(mapped_buffer
, 0, PAGE_CACHE_SIZE
);
1491 flush_dcache_page(spage
->page
);
1492 kunmap_atomic(mapped_buffer
);
1494 return scrub_add_page_to_wr_bio(sblock
->sctx
, spage
);
1497 static int scrub_add_page_to_wr_bio(struct scrub_ctx
*sctx
,
1498 struct scrub_page
*spage
)
1500 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1501 struct scrub_bio
*sbio
;
1504 mutex_lock(&wr_ctx
->wr_lock
);
1506 if (!wr_ctx
->wr_curr_bio
) {
1507 wr_ctx
->wr_curr_bio
= kzalloc(sizeof(*wr_ctx
->wr_curr_bio
),
1509 if (!wr_ctx
->wr_curr_bio
) {
1510 mutex_unlock(&wr_ctx
->wr_lock
);
1513 wr_ctx
->wr_curr_bio
->sctx
= sctx
;
1514 wr_ctx
->wr_curr_bio
->page_count
= 0;
1516 sbio
= wr_ctx
->wr_curr_bio
;
1517 if (sbio
->page_count
== 0) {
1520 sbio
->physical
= spage
->physical_for_dev_replace
;
1521 sbio
->logical
= spage
->logical
;
1522 sbio
->dev
= wr_ctx
->tgtdev
;
1525 bio
= btrfs_io_bio_alloc(GFP_NOFS
, wr_ctx
->pages_per_wr_bio
);
1527 mutex_unlock(&wr_ctx
->wr_lock
);
1533 bio
->bi_private
= sbio
;
1534 bio
->bi_end_io
= scrub_wr_bio_end_io
;
1535 bio
->bi_bdev
= sbio
->dev
->bdev
;
1536 bio
->bi_sector
= sbio
->physical
>> 9;
1538 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
1539 spage
->physical_for_dev_replace
||
1540 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
1542 scrub_wr_submit(sctx
);
1546 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
1547 if (ret
!= PAGE_SIZE
) {
1548 if (sbio
->page_count
< 1) {
1551 mutex_unlock(&wr_ctx
->wr_lock
);
1554 scrub_wr_submit(sctx
);
1558 sbio
->pagev
[sbio
->page_count
] = spage
;
1559 scrub_page_get(spage
);
1561 if (sbio
->page_count
== wr_ctx
->pages_per_wr_bio
)
1562 scrub_wr_submit(sctx
);
1563 mutex_unlock(&wr_ctx
->wr_lock
);
1568 static void scrub_wr_submit(struct scrub_ctx
*sctx
)
1570 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1571 struct scrub_bio
*sbio
;
1573 if (!wr_ctx
->wr_curr_bio
)
1576 sbio
= wr_ctx
->wr_curr_bio
;
1577 wr_ctx
->wr_curr_bio
= NULL
;
1578 WARN_ON(!sbio
->bio
->bi_bdev
);
1579 scrub_pending_bio_inc(sctx
);
1580 /* process all writes in a single worker thread. Then the block layer
1581 * orders the requests before sending them to the driver which
1582 * doubled the write performance on spinning disks when measured
1584 btrfsic_submit_bio(WRITE
, sbio
->bio
);
1587 static void scrub_wr_bio_end_io(struct bio
*bio
, int err
)
1589 struct scrub_bio
*sbio
= bio
->bi_private
;
1590 struct btrfs_fs_info
*fs_info
= sbio
->dev
->dev_root
->fs_info
;
1595 sbio
->work
.func
= scrub_wr_bio_end_io_worker
;
1596 btrfs_queue_worker(&fs_info
->scrub_wr_completion_workers
, &sbio
->work
);
1599 static void scrub_wr_bio_end_io_worker(struct btrfs_work
*work
)
1601 struct scrub_bio
*sbio
= container_of(work
, struct scrub_bio
, work
);
1602 struct scrub_ctx
*sctx
= sbio
->sctx
;
1605 WARN_ON(sbio
->page_count
> SCRUB_PAGES_PER_WR_BIO
);
1607 struct btrfs_dev_replace
*dev_replace
=
1608 &sbio
->sctx
->dev_root
->fs_info
->dev_replace
;
1610 for (i
= 0; i
< sbio
->page_count
; i
++) {
1611 struct scrub_page
*spage
= sbio
->pagev
[i
];
1613 spage
->io_error
= 1;
1614 btrfs_dev_replace_stats_inc(&dev_replace
->
1619 for (i
= 0; i
< sbio
->page_count
; i
++)
1620 scrub_page_put(sbio
->pagev
[i
]);
1624 scrub_pending_bio_dec(sctx
);
1627 static int scrub_checksum(struct scrub_block
*sblock
)
1632 WARN_ON(sblock
->page_count
< 1);
1633 flags
= sblock
->pagev
[0]->flags
;
1635 if (flags
& BTRFS_EXTENT_FLAG_DATA
)
1636 ret
= scrub_checksum_data(sblock
);
1637 else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
)
1638 ret
= scrub_checksum_tree_block(sblock
);
1639 else if (flags
& BTRFS_EXTENT_FLAG_SUPER
)
1640 (void)scrub_checksum_super(sblock
);
1644 scrub_handle_errored_block(sblock
);
1649 static int scrub_checksum_data(struct scrub_block
*sblock
)
1651 struct scrub_ctx
*sctx
= sblock
->sctx
;
1652 u8 csum
[BTRFS_CSUM_SIZE
];
1661 BUG_ON(sblock
->page_count
< 1);
1662 if (!sblock
->pagev
[0]->have_csum
)
1665 on_disk_csum
= sblock
->pagev
[0]->csum
;
1666 page
= sblock
->pagev
[0]->page
;
1667 buffer
= kmap_atomic(page
);
1669 len
= sctx
->sectorsize
;
1672 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
1674 crc
= btrfs_csum_data(buffer
, crc
, l
);
1675 kunmap_atomic(buffer
);
1680 BUG_ON(index
>= sblock
->page_count
);
1681 BUG_ON(!sblock
->pagev
[index
]->page
);
1682 page
= sblock
->pagev
[index
]->page
;
1683 buffer
= kmap_atomic(page
);
1686 btrfs_csum_final(crc
, csum
);
1687 if (memcmp(csum
, on_disk_csum
, sctx
->csum_size
))
1693 static int scrub_checksum_tree_block(struct scrub_block
*sblock
)
1695 struct scrub_ctx
*sctx
= sblock
->sctx
;
1696 struct btrfs_header
*h
;
1697 struct btrfs_root
*root
= sctx
->dev_root
;
1698 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1699 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1700 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1702 void *mapped_buffer
;
1711 BUG_ON(sblock
->page_count
< 1);
1712 page
= sblock
->pagev
[0]->page
;
1713 mapped_buffer
= kmap_atomic(page
);
1714 h
= (struct btrfs_header
*)mapped_buffer
;
1715 memcpy(on_disk_csum
, h
->csum
, sctx
->csum_size
);
1718 * we don't use the getter functions here, as we
1719 * a) don't have an extent buffer and
1720 * b) the page is already kmapped
1723 if (sblock
->pagev
[0]->logical
!= le64_to_cpu(h
->bytenr
))
1726 if (sblock
->pagev
[0]->generation
!= le64_to_cpu(h
->generation
))
1729 if (memcmp(h
->fsid
, fs_info
->fsid
, BTRFS_UUID_SIZE
))
1732 if (memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1736 WARN_ON(sctx
->nodesize
!= sctx
->leafsize
);
1737 len
= sctx
->nodesize
- BTRFS_CSUM_SIZE
;
1738 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1739 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1742 u64 l
= min_t(u64
, len
, mapped_size
);
1744 crc
= btrfs_csum_data(p
, crc
, l
);
1745 kunmap_atomic(mapped_buffer
);
1750 BUG_ON(index
>= sblock
->page_count
);
1751 BUG_ON(!sblock
->pagev
[index
]->page
);
1752 page
= sblock
->pagev
[index
]->page
;
1753 mapped_buffer
= kmap_atomic(page
);
1754 mapped_size
= PAGE_SIZE
;
1758 btrfs_csum_final(crc
, calculated_csum
);
1759 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1762 return fail
|| crc_fail
;
1765 static int scrub_checksum_super(struct scrub_block
*sblock
)
1767 struct btrfs_super_block
*s
;
1768 struct scrub_ctx
*sctx
= sblock
->sctx
;
1769 struct btrfs_root
*root
= sctx
->dev_root
;
1770 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1771 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1772 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1774 void *mapped_buffer
;
1783 BUG_ON(sblock
->page_count
< 1);
1784 page
= sblock
->pagev
[0]->page
;
1785 mapped_buffer
= kmap_atomic(page
);
1786 s
= (struct btrfs_super_block
*)mapped_buffer
;
1787 memcpy(on_disk_csum
, s
->csum
, sctx
->csum_size
);
1789 if (sblock
->pagev
[0]->logical
!= le64_to_cpu(s
->bytenr
))
1792 if (sblock
->pagev
[0]->generation
!= le64_to_cpu(s
->generation
))
1795 if (memcmp(s
->fsid
, fs_info
->fsid
, BTRFS_UUID_SIZE
))
1798 len
= BTRFS_SUPER_INFO_SIZE
- BTRFS_CSUM_SIZE
;
1799 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1800 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1803 u64 l
= min_t(u64
, len
, mapped_size
);
1805 crc
= btrfs_csum_data(p
, crc
, l
);
1806 kunmap_atomic(mapped_buffer
);
1811 BUG_ON(index
>= sblock
->page_count
);
1812 BUG_ON(!sblock
->pagev
[index
]->page
);
1813 page
= sblock
->pagev
[index
]->page
;
1814 mapped_buffer
= kmap_atomic(page
);
1815 mapped_size
= PAGE_SIZE
;
1819 btrfs_csum_final(crc
, calculated_csum
);
1820 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1823 if (fail_cor
+ fail_gen
) {
1825 * if we find an error in a super block, we just report it.
1826 * They will get written with the next transaction commit
1829 spin_lock(&sctx
->stat_lock
);
1830 ++sctx
->stat
.super_errors
;
1831 spin_unlock(&sctx
->stat_lock
);
1833 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
1834 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
1836 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
1837 BTRFS_DEV_STAT_GENERATION_ERRS
);
1840 return fail_cor
+ fail_gen
;
1843 static void scrub_block_get(struct scrub_block
*sblock
)
1845 atomic_inc(&sblock
->ref_count
);
1848 static void scrub_block_put(struct scrub_block
*sblock
)
1850 if (atomic_dec_and_test(&sblock
->ref_count
)) {
1853 for (i
= 0; i
< sblock
->page_count
; i
++)
1854 scrub_page_put(sblock
->pagev
[i
]);
1859 static void scrub_page_get(struct scrub_page
*spage
)
1861 atomic_inc(&spage
->ref_count
);
1864 static void scrub_page_put(struct scrub_page
*spage
)
1866 if (atomic_dec_and_test(&spage
->ref_count
)) {
1868 __free_page(spage
->page
);
1873 static void scrub_submit(struct scrub_ctx
*sctx
)
1875 struct scrub_bio
*sbio
;
1877 if (sctx
->curr
== -1)
1880 sbio
= sctx
->bios
[sctx
->curr
];
1882 scrub_pending_bio_inc(sctx
);
1884 if (!sbio
->bio
->bi_bdev
) {
1886 * this case should not happen. If btrfs_map_block() is
1887 * wrong, it could happen for dev-replace operations on
1888 * missing devices when no mirrors are available, but in
1889 * this case it should already fail the mount.
1890 * This case is handled correctly (but _very_ slowly).
1892 printk_ratelimited(KERN_WARNING
1893 "btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
1894 bio_endio(sbio
->bio
, -EIO
);
1896 btrfsic_submit_bio(READ
, sbio
->bio
);
1900 static int scrub_add_page_to_rd_bio(struct scrub_ctx
*sctx
,
1901 struct scrub_page
*spage
)
1903 struct scrub_block
*sblock
= spage
->sblock
;
1904 struct scrub_bio
*sbio
;
1909 * grab a fresh bio or wait for one to become available
1911 while (sctx
->curr
== -1) {
1912 spin_lock(&sctx
->list_lock
);
1913 sctx
->curr
= sctx
->first_free
;
1914 if (sctx
->curr
!= -1) {
1915 sctx
->first_free
= sctx
->bios
[sctx
->curr
]->next_free
;
1916 sctx
->bios
[sctx
->curr
]->next_free
= -1;
1917 sctx
->bios
[sctx
->curr
]->page_count
= 0;
1918 spin_unlock(&sctx
->list_lock
);
1920 spin_unlock(&sctx
->list_lock
);
1921 wait_event(sctx
->list_wait
, sctx
->first_free
!= -1);
1924 sbio
= sctx
->bios
[sctx
->curr
];
1925 if (sbio
->page_count
== 0) {
1928 sbio
->physical
= spage
->physical
;
1929 sbio
->logical
= spage
->logical
;
1930 sbio
->dev
= spage
->dev
;
1933 bio
= btrfs_io_bio_alloc(GFP_NOFS
, sctx
->pages_per_rd_bio
);
1939 bio
->bi_private
= sbio
;
1940 bio
->bi_end_io
= scrub_bio_end_io
;
1941 bio
->bi_bdev
= sbio
->dev
->bdev
;
1942 bio
->bi_sector
= sbio
->physical
>> 9;
1944 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
1946 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
1948 sbio
->dev
!= spage
->dev
) {
1953 sbio
->pagev
[sbio
->page_count
] = spage
;
1954 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
1955 if (ret
!= PAGE_SIZE
) {
1956 if (sbio
->page_count
< 1) {
1965 scrub_block_get(sblock
); /* one for the page added to the bio */
1966 atomic_inc(&sblock
->outstanding_pages
);
1968 if (sbio
->page_count
== sctx
->pages_per_rd_bio
)
1974 static int scrub_pages(struct scrub_ctx
*sctx
, u64 logical
, u64 len
,
1975 u64 physical
, struct btrfs_device
*dev
, u64 flags
,
1976 u64 gen
, int mirror_num
, u8
*csum
, int force
,
1977 u64 physical_for_dev_replace
)
1979 struct scrub_block
*sblock
;
1982 sblock
= kzalloc(sizeof(*sblock
), GFP_NOFS
);
1984 spin_lock(&sctx
->stat_lock
);
1985 sctx
->stat
.malloc_errors
++;
1986 spin_unlock(&sctx
->stat_lock
);
1990 /* one ref inside this function, plus one for each page added to
1992 atomic_set(&sblock
->ref_count
, 1);
1993 sblock
->sctx
= sctx
;
1994 sblock
->no_io_error_seen
= 1;
1996 for (index
= 0; len
> 0; index
++) {
1997 struct scrub_page
*spage
;
1998 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
2000 spage
= kzalloc(sizeof(*spage
), GFP_NOFS
);
2003 spin_lock(&sctx
->stat_lock
);
2004 sctx
->stat
.malloc_errors
++;
2005 spin_unlock(&sctx
->stat_lock
);
2006 scrub_block_put(sblock
);
2009 BUG_ON(index
>= SCRUB_MAX_PAGES_PER_BLOCK
);
2010 scrub_page_get(spage
);
2011 sblock
->pagev
[index
] = spage
;
2012 spage
->sblock
= sblock
;
2014 spage
->flags
= flags
;
2015 spage
->generation
= gen
;
2016 spage
->logical
= logical
;
2017 spage
->physical
= physical
;
2018 spage
->physical_for_dev_replace
= physical_for_dev_replace
;
2019 spage
->mirror_num
= mirror_num
;
2021 spage
->have_csum
= 1;
2022 memcpy(spage
->csum
, csum
, sctx
->csum_size
);
2024 spage
->have_csum
= 0;
2026 sblock
->page_count
++;
2027 spage
->page
= alloc_page(GFP_NOFS
);
2033 physical_for_dev_replace
+= l
;
2036 WARN_ON(sblock
->page_count
== 0);
2037 for (index
= 0; index
< sblock
->page_count
; index
++) {
2038 struct scrub_page
*spage
= sblock
->pagev
[index
];
2041 ret
= scrub_add_page_to_rd_bio(sctx
, spage
);
2043 scrub_block_put(sblock
);
2051 /* last one frees, either here or in bio completion for last page */
2052 scrub_block_put(sblock
);
2056 static void scrub_bio_end_io(struct bio
*bio
, int err
)
2058 struct scrub_bio
*sbio
= bio
->bi_private
;
2059 struct btrfs_fs_info
*fs_info
= sbio
->dev
->dev_root
->fs_info
;
2064 btrfs_queue_worker(&fs_info
->scrub_workers
, &sbio
->work
);
2067 static void scrub_bio_end_io_worker(struct btrfs_work
*work
)
2069 struct scrub_bio
*sbio
= container_of(work
, struct scrub_bio
, work
);
2070 struct scrub_ctx
*sctx
= sbio
->sctx
;
2073 BUG_ON(sbio
->page_count
> SCRUB_PAGES_PER_RD_BIO
);
2075 for (i
= 0; i
< sbio
->page_count
; i
++) {
2076 struct scrub_page
*spage
= sbio
->pagev
[i
];
2078 spage
->io_error
= 1;
2079 spage
->sblock
->no_io_error_seen
= 0;
2083 /* now complete the scrub_block items that have all pages completed */
2084 for (i
= 0; i
< sbio
->page_count
; i
++) {
2085 struct scrub_page
*spage
= sbio
->pagev
[i
];
2086 struct scrub_block
*sblock
= spage
->sblock
;
2088 if (atomic_dec_and_test(&sblock
->outstanding_pages
))
2089 scrub_block_complete(sblock
);
2090 scrub_block_put(sblock
);
2095 spin_lock(&sctx
->list_lock
);
2096 sbio
->next_free
= sctx
->first_free
;
2097 sctx
->first_free
= sbio
->index
;
2098 spin_unlock(&sctx
->list_lock
);
2100 if (sctx
->is_dev_replace
&&
2101 atomic_read(&sctx
->wr_ctx
.flush_all_writes
)) {
2102 mutex_lock(&sctx
->wr_ctx
.wr_lock
);
2103 scrub_wr_submit(sctx
);
2104 mutex_unlock(&sctx
->wr_ctx
.wr_lock
);
2107 scrub_pending_bio_dec(sctx
);
2110 static void scrub_block_complete(struct scrub_block
*sblock
)
2112 if (!sblock
->no_io_error_seen
) {
2113 scrub_handle_errored_block(sblock
);
2116 * if has checksum error, write via repair mechanism in
2117 * dev replace case, otherwise write here in dev replace
2120 if (!scrub_checksum(sblock
) && sblock
->sctx
->is_dev_replace
)
2121 scrub_write_block_to_dev_replace(sblock
);
2125 static int scrub_find_csum(struct scrub_ctx
*sctx
, u64 logical
, u64 len
,
2128 struct btrfs_ordered_sum
*sum
= NULL
;
2129 unsigned long index
;
2130 unsigned long num_sectors
;
2132 while (!list_empty(&sctx
->csum_list
)) {
2133 sum
= list_first_entry(&sctx
->csum_list
,
2134 struct btrfs_ordered_sum
, list
);
2135 if (sum
->bytenr
> logical
)
2137 if (sum
->bytenr
+ sum
->len
> logical
)
2140 ++sctx
->stat
.csum_discards
;
2141 list_del(&sum
->list
);
2148 index
= ((u32
)(logical
- sum
->bytenr
)) / sctx
->sectorsize
;
2149 num_sectors
= sum
->len
/ sctx
->sectorsize
;
2150 memcpy(csum
, sum
->sums
+ index
, sctx
->csum_size
);
2151 if (index
== num_sectors
- 1) {
2152 list_del(&sum
->list
);
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		WARN_ON(sctx->nodesize != sctx->leafsize);
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_RAID6)) {
		if (num >= nr_data_stripes(map)) {
			return 0;
		}
	}

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}
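	/*
	 * Worked example for the RAID10 case above (illustrative, not from
	 * the original source): with num_stripes = 4 and sub_stripes = 2,
	 * factor is 2, so for stripe index num = 3 the scrub starts at
	 * offset = stripe_len * 1, advances by increment = stripe_len * 2 per
	 * iteration and reads the second copy (mirror_num = 3 % 2 + 1 = 2).
	 * Each device therefore only walks the stripe elements that actually
	 * live on it.
	 */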
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite
	 * them to repair disk errors without any race conditions.
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and the csum tree and
	 * wait for completion. During readahead, the scrub is officially
	 * paused to not hold off transaction commits.
	 */
	logical = base + offset;

	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB.
	 */
	blk_start_plug(&plug);
	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	logic_end = logical + increment * nstripes;
	ret = 0;
	while (logical < logic_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
					   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}
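		/*
		 * Ordering of the pause path above: queued read and (for
		 * dev-replace) write bios are flushed first, then the code
		 * waits for everything in flight before accounting itself in
		 * scrubs_paused, so the waiter never has to wait on bios that
		 * a paused scrub still holds back.
		 */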
		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->leafsize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}
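			/*
			 * What follows processes one extent item: the extent
			 * is clamped to the current stripe, optionally
			 * remapped for dev-replace, its data csums are pulled
			 * into sctx->csum_list and it is handed to
			 * scrub_extent(); a large extent that crosses stripe
			 * boundaries is revisited stripe by stripe via the
			 * again label below.
			 */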
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root, logical,
						logical + map->stripe_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);
			if (ret)
				goto out;

			scrub_free_csums(sctx);
			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logical += increment;
				physical += map->stripe_len;

				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (logical >= logic_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
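	/*
	 * last_physical is what progress reporting hands back to userspace:
	 * once the device extent has been fully walked (stop_loop), it points
	 * at the end of the scrubbed device extent, otherwise at the position
	 * the next iteration will continue from.
	 */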
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
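/*
 * A chunk can have more than one stripe on the same device (e.g. DUP), which
 * is why scrub_chunk() matches both the bdev and the exact physical offset of
 * the device extent before handing the stripe index to scrub_stripe().
 */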
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset,
				  is_dev_replace);
		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
		atomic_inc(&fs_info->scrubs_paused);
		wake_up(&fs_info->scrub_pause_wait);
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);

		mutex_lock(&fs_info->scrub_lock);
		while (atomic_read(&fs_info->scrub_pause_req)) {
			mutex_unlock(&fs_info->scrub_lock);
			wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
			mutex_lock(&fs_info->scrub_lock);
		}
		atomic_dec(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		wake_up(&fs_info->scrub_pause_wait);

		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
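/*
 * For dev-replace, cursor_right/cursor_left bracket the device extent being
 * copied: cursor_right is advanced before a chunk is scrubbed and cursor_left
 * only catches up after the flush/wait above, so the recorded progress never
 * runs ahead of data that actually reached the target device.
 */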
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_root *root = sctx->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
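/*
 * Superblock copies live at the fixed offsets returned by btrfs_sb_offset();
 * the loop stops at the first copy that would not fit on the device, and the
 * final wait_event() makes the super scrub fully synchronous before the
 * caller moves on to the chunk scrub.
 */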
/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	int ret = 0;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
					   &fs_info->generic_worker);
		else
			btrfs_init_workers(&fs_info->scrub_workers, "scrub",
					   fs_info->thread_pool_size,
					   &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
				   "scrubwrc",
				   fs_info->thread_pool_size,
				   &fs_info->generic_worker);
		fs_info->scrub_wr_completion_workers.idle_thresh = 2;
		ret = btrfs_start_workers(
				&fs_info->scrub_wr_completion_workers);
		if (ret)
			goto out;
		btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
				   &fs_info->generic_worker);
		ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	mutex_unlock(&fs_info->scrub_lock);

	return ret;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_stop_workers(&fs_info->scrub_workers);
		btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
		btrfs_stop_workers(&fs_info->scrub_nocow_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       fs_info->chunk_root->leafsize);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
		       fs_info->chunk_root->sectorsize,
		       (unsigned long long)PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->chunk_root->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret)
		return ret;
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);
	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	if (!is_dev_replace) {
		down_read(&fs_info->scrub_super_lock);
		ret = scrub_supers(sctx, dev);
		up_read(&fs_info->scrub_super_lock);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_ctx(sctx);
	scrub_workers_put(fs_info);

	return ret;
}
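/*
 * btrfs_scrub_dev() is the common entry point for both plain scrub and the
 * scrub-based device replace: it validates the size assumptions, pins the
 * device, attaches a scrub_ctx to it, optionally scrubs the superblocks and
 * then walks all device extents. scrubs_running together with
 * dev->scrub_device is what the pause/cancel helpers below operate on.
 */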
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
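/*
 * Pause protocol: a caller (typically the transaction commit path, which must
 * not be held off by the scrub) raises scrub_pause_req and waits in
 * btrfs_scrub_pause() until every running scrub has accounted itself in
 * scrubs_paused (see the pause checks in scrub_stripe()); the scrubs then
 * block until btrfs_scrub_continue() drops the request again.
 */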
void btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
}

void btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
}
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		kfree(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	kfree(bbio);
}
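/*
 * scrub_remap_extent() is only used in dev-replace mode: btrfs_map_block()
 * resolves the logical extent to the first stripe of the mapping, and the
 * scrub then reads from that source device/offset while still writing to the
 * fixed target position. If the mapping cannot be resolved, the caller's
 * defaults (the scrubbed device itself) are left untouched.
 */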
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	nocow_ctx->work.func = copy_nocow_pages_worker;
	btrfs_queue_worker(&fs_info->scrub_nocow_workers,
			   &nocow_ctx->work);

	return 0;
}
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->dev_root->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  copy_nocow_pages_for_inode,
					  nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %llu, ret %d\n",
			(unsigned long long)logical,
			(unsigned long long)physical_for_dev_replace,
			(unsigned long long)len,
			(unsigned long long)mirror_num, ret);
		not_written = 1;
		goto out;
	}

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	u64 physical_for_dev_replace;
	u64 len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	if (btrfs_root_refs(&local_root->root_item) == 0) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return -ENOENT;
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	/* Avoid truncate/dio/punch hole.. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	ret = 0;
	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	len = nocow_ctx->len;
	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			pr_err("find_or_create_page() failed\n");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(&BTRFS_I(inode)->
						    io_tree,
						    page, btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless, because it may be
			 * an old one, the new data may be written into the
			 * new page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}
		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}
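/*
 * The nocow copy above goes through the page cache on purpose: for data
 * without checksums the on-disk bytes cannot be verified, so each page is
 * read via the file's mapping (or reused if already uptodate) and then
 * written 1:1 to the replace target with write_page_nocow(). The
 * page->mapping re-check guards against the page having been recycled while
 * it was being read.
 */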
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;
	DECLARE_COMPLETION_ONSTACK(compl);

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_private = &compl;
	bio->bi_end_io = scrub_complete_bio_end_io;
	bio->bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}
	btrfsic_submit_bio(WRITE_SYNC, bio);
	wait_for_completion(&compl);

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		goto leave_with_eio;