// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ordered-data.h"
#include "transaction.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "block-group.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
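/*
 * Sizing implied by the comments above (assuming 4KiB pages):
 *	SCRUB_PAGES_PER_RD_BIO * PAGE_SIZE    = 32 * 4KiB  = 128KiB per read bio
 *	SCRUB_PAGES_PER_WR_BIO * PAGE_SIZE    = 32 * 4KiB  = 128KiB per write bio
 *	SCRUB_BIOS_PER_SCTX * 128KiB          = 64 * 128KiB = 8MiB in flight per device
 *	SCRUB_MAX_PAGES_PER_BLOCK * PAGE_SIZE = 16 * 4KiB  = 64KiB max node/leaf/sector
 */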
struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;	/* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	unsigned int		mirror_num:8;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs;	/* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	unsigned int		header_error:1;
	unsigned int		checksum_error:1;
	unsigned int		no_io_error_seen:1;
	unsigned int		generation_error:1;	/* also sets header_error */

	/* The following is for the data used to check parity */
	/* It is for the data with checksum */
	unsigned int		data_corrected:1;
	struct btrfs_work	work;
};
/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happen when
	 * reading data or checking data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;

	int			is_dev_replace;

	struct scrub_bio	*wr_curr_bio;
	struct mutex		wr_lock;
	int			pages_per_wr_bio;	/* <= SCRUB_PAGES_PER_WR_BIO */
	struct btrfs_device	*wr_tgtdev;
	bool			flush_all_writes;

	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};
struct full_stripe_lock {
	struct rb_node	node;
	u64		logical;
	u64		refs;
	struct mutex	mutex;
};
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}
static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}
static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
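/*
 * Note on the pair above: calling scrub_pause_on() and then immediately
 * scrub_pause_off() acts as a cooperative pause point. If a pause has been
 * requested (scrub_pause_req is non-zero), __scrub_blocked_if_needed() inside
 * scrub_pause_off() blocks this scrub task until the request is dropped.
 */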
/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function.
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/* Insert new lock */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}
/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}
/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
			cache->full_stripe_len + cache->start;
	return ret;
}
/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
 * So caller must call unlock_full_stripe() at the same context.
 *
 * Return <0 if encounters error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlock full stripe without problem.
 * Return <0 for error
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
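/*
 * Sketch of how the error-handling path pairs the two functions above
 * (illustrative only, not a verbatim caller):
 *
 *	bool full_stripe_locked;
 *
 *	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck / repair the blocks covered by this full stripe ...
 *	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
 */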
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}
static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}
static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
				NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
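/*
 * Callback for iterate_extent_inodes(): resolves one (root, inode, offset)
 * owner of an errored extent to its file path(s) via an inode_fs_paths and
 * prints one warning line per resolved path, or a fallback line when path
 * resolution fails.
 */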
636 static int scrub_print_warning_inode(u64 inum
, u64 offset
, u64 root
,
644 struct extent_buffer
*eb
;
645 struct btrfs_inode_item
*inode_item
;
646 struct scrub_warning
*swarn
= warn_ctx
;
647 struct btrfs_fs_info
*fs_info
= swarn
->dev
->fs_info
;
648 struct inode_fs_paths
*ipath
= NULL
;
649 struct btrfs_root
*local_root
;
650 struct btrfs_key root_key
;
651 struct btrfs_key key
;
653 root_key
.objectid
= root
;
654 root_key
.type
= BTRFS_ROOT_ITEM_KEY
;
655 root_key
.offset
= (u64
)-1;
656 local_root
= btrfs_read_fs_root_no_name(fs_info
, &root_key
);
657 if (IS_ERR(local_root
)) {
658 ret
= PTR_ERR(local_root
);
663 * this makes the path point to (inum INODE_ITEM ioff)
666 key
.type
= BTRFS_INODE_ITEM_KEY
;
669 ret
= btrfs_search_slot(NULL
, local_root
, &key
, swarn
->path
, 0, 0);
671 btrfs_release_path(swarn
->path
);
675 eb
= swarn
->path
->nodes
[0];
676 inode_item
= btrfs_item_ptr(eb
, swarn
->path
->slots
[0],
677 struct btrfs_inode_item
);
678 isize
= btrfs_inode_size(eb
, inode_item
);
679 nlink
= btrfs_inode_nlink(eb
, inode_item
);
680 btrfs_release_path(swarn
->path
);
683 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
684 * uses GFP_NOFS in this context, so we keep it consistent but it does
685 * not seem to be strictly necessary.
687 nofs_flag
= memalloc_nofs_save();
688 ipath
= init_ipath(4096, local_root
, swarn
->path
);
689 memalloc_nofs_restore(nofs_flag
);
691 ret
= PTR_ERR(ipath
);
695 ret
= paths_from_inode(inum
, ipath
);
* we deliberately ignore the fact that ipath might have been too small to
* hold all of the paths here
704 for (i
= 0; i
< ipath
->fspath
->elem_cnt
; ++i
)
705 btrfs_warn_in_rcu(fs_info
,
706 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
707 swarn
->errstr
, swarn
->logical
,
708 rcu_str_deref(swarn
->dev
->name
),
711 min(isize
- offset
, (u64
)PAGE_SIZE
), nlink
,
712 (char *)(unsigned long)ipath
->fspath
->val
[i
]);
718 btrfs_warn_in_rcu(fs_info
,
719 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
720 swarn
->errstr
, swarn
->logical
,
721 rcu_str_deref(swarn
->dev
->name
),
723 root
, inum
, offset
, ret
);
729 static void scrub_print_warning(const char *errstr
, struct scrub_block
*sblock
)
731 struct btrfs_device
*dev
;
732 struct btrfs_fs_info
*fs_info
;
733 struct btrfs_path
*path
;
734 struct btrfs_key found_key
;
735 struct extent_buffer
*eb
;
736 struct btrfs_extent_item
*ei
;
737 struct scrub_warning swarn
;
738 unsigned long ptr
= 0;
746 WARN_ON(sblock
->page_count
< 1);
747 dev
= sblock
->pagev
[0]->dev
;
748 fs_info
= sblock
->sctx
->fs_info
;
750 path
= btrfs_alloc_path();
754 swarn
.physical
= sblock
->pagev
[0]->physical
;
755 swarn
.logical
= sblock
->pagev
[0]->logical
;
756 swarn
.errstr
= errstr
;
759 ret
= extent_from_logical(fs_info
, swarn
.logical
, path
, &found_key
,
764 extent_item_pos
= swarn
.logical
- found_key
.objectid
;
765 swarn
.extent_item_size
= found_key
.offset
;
768 ei
= btrfs_item_ptr(eb
, path
->slots
[0], struct btrfs_extent_item
);
769 item_size
= btrfs_item_size_nr(eb
, path
->slots
[0]);
771 if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
773 ret
= tree_backref_for_extent(&ptr
, eb
, &found_key
, ei
,
774 item_size
, &ref_root
,
776 btrfs_warn_in_rcu(fs_info
,
777 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
778 errstr
, swarn
.logical
,
779 rcu_str_deref(dev
->name
),
781 ref_level
? "node" : "leaf",
782 ret
< 0 ? -1 : ref_level
,
783 ret
< 0 ? -1 : ref_root
);
785 btrfs_release_path(path
);
787 btrfs_release_path(path
);
790 iterate_extent_inodes(fs_info
, found_key
.objectid
,
792 scrub_print_warning_inode
, &swarn
, false);
796 btrfs_free_path(path
);
799 static inline void scrub_get_recover(struct scrub_recover
*recover
)
801 refcount_inc(&recover
->refs
);
804 static inline void scrub_put_recover(struct btrfs_fs_info
*fs_info
,
805 struct scrub_recover
*recover
)
807 if (refcount_dec_and_test(&recover
->refs
)) {
808 btrfs_bio_counter_dec(fs_info
);
809 btrfs_put_bbio(recover
->bbio
);
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
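/*
 * High-level flow of the repair path below (a summary of the code that
 * follows, not additional behaviour):
 *
 *	1. Take the RAID5/6 full stripe lock (lock_full_stripe) so parity and
 *	   data scrub threads do not race on the same stripe.
 *	2. Re-read the failed mirror page by page (scrub_recheck_block) and
 *	   re-verify checksums; a transient error may already have cleared.
 *	3. Otherwise read the other mirrors and pick one without I/O or
 *	   checksum errors, or combine error-free pages from several mirrors.
 *	4. Write the good copy over the bad one (or to the dev-replace
 *	   target) and re-verify, updating the scrub statistics either way.
 */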
822 static int scrub_handle_errored_block(struct scrub_block
*sblock_to_check
)
824 struct scrub_ctx
*sctx
= sblock_to_check
->sctx
;
825 struct btrfs_device
*dev
;
826 struct btrfs_fs_info
*fs_info
;
828 unsigned int failed_mirror_index
;
829 unsigned int is_metadata
;
830 unsigned int have_csum
;
831 struct scrub_block
*sblocks_for_recheck
; /* holds one for each mirror */
832 struct scrub_block
*sblock_bad
;
837 bool full_stripe_locked
;
838 unsigned int nofs_flag
;
839 static DEFINE_RATELIMIT_STATE(_rs
, DEFAULT_RATELIMIT_INTERVAL
,
840 DEFAULT_RATELIMIT_BURST
);
842 BUG_ON(sblock_to_check
->page_count
< 1);
843 fs_info
= sctx
->fs_info
;
844 if (sblock_to_check
->pagev
[0]->flags
& BTRFS_EXTENT_FLAG_SUPER
) {
846 * if we find an error in a super block, we just report it.
847 * They will get written with the next transaction commit
850 spin_lock(&sctx
->stat_lock
);
851 ++sctx
->stat
.super_errors
;
852 spin_unlock(&sctx
->stat_lock
);
855 logical
= sblock_to_check
->pagev
[0]->logical
;
856 BUG_ON(sblock_to_check
->pagev
[0]->mirror_num
< 1);
857 failed_mirror_index
= sblock_to_check
->pagev
[0]->mirror_num
- 1;
858 is_metadata
= !(sblock_to_check
->pagev
[0]->flags
&
859 BTRFS_EXTENT_FLAG_DATA
);
860 have_csum
= sblock_to_check
->pagev
[0]->have_csum
;
861 dev
= sblock_to_check
->pagev
[0]->dev
;
864 * We must use GFP_NOFS because the scrub task might be waiting for a
865 * worker task executing this function and in turn a transaction commit
866 * might be waiting the scrub task to pause (which needs to wait for all
867 * the worker tasks to complete before pausing).
868 * We do allocations in the workers through insert_full_stripe_lock()
869 * and scrub_add_page_to_wr_bio(), which happens down the call chain of
872 nofs_flag
= memalloc_nofs_save();
/*
 * For RAID5/6, races can happen with a different device's scrub thread.
 * For data corruption, the parity and data threads will both try
 * to recover the data.
 * Such a race can lead to a doubly added csum error, or even an
 * unrecoverable error.
 */
880 ret
= lock_full_stripe(fs_info
, logical
, &full_stripe_locked
);
882 memalloc_nofs_restore(nofs_flag
);
883 spin_lock(&sctx
->stat_lock
);
885 sctx
->stat
.malloc_errors
++;
886 sctx
->stat
.read_errors
++;
887 sctx
->stat
.uncorrectable_errors
++;
888 spin_unlock(&sctx
->stat_lock
);
* read all mirrors one after the other. This includes re-reading
* the extent or metadata block that failed (that was
895 * the cause that this fixup code is called) another time,
896 * page by page this time in order to know which pages
897 * caused I/O errors and which ones are good (for all mirrors).
898 * It is the goal to handle the situation when more than one
899 * mirror contains I/O errors, but the errors do not
900 * overlap, i.e. the data can be repaired by selecting the
901 * pages from those mirrors without I/O error on the
902 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
903 * would be that mirror #1 has an I/O error on the first page,
904 * the second page is good, and mirror #2 has an I/O error on
905 * the second page, but the first page is good.
906 * Then the first page of the first mirror can be repaired by
907 * taking the first page of the second mirror, and the
908 * second page of the second mirror can be repaired by
909 * copying the contents of the 2nd page of the 1st mirror.
910 * One more note: if the pages of one mirror contain I/O
911 * errors, the checksum cannot be verified. In order to get
912 * the best data for repairing, the first attempt is to find
913 * a mirror without I/O errors and with a validated checksum.
914 * Only if this is not possible, the pages are picked from
915 * mirrors with I/O errors without considering the checksum.
916 * If the latter is the case, at the end, the checksum of the
917 * repaired area is verified in order to correctly maintain
921 sblocks_for_recheck
= kcalloc(BTRFS_MAX_MIRRORS
,
922 sizeof(*sblocks_for_recheck
), GFP_KERNEL
);
923 if (!sblocks_for_recheck
) {
924 spin_lock(&sctx
->stat_lock
);
925 sctx
->stat
.malloc_errors
++;
926 sctx
->stat
.read_errors
++;
927 sctx
->stat
.uncorrectable_errors
++;
928 spin_unlock(&sctx
->stat_lock
);
929 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
933 /* setup the context, map the logical blocks and alloc the pages */
934 ret
= scrub_setup_recheck_block(sblock_to_check
, sblocks_for_recheck
);
936 spin_lock(&sctx
->stat_lock
);
937 sctx
->stat
.read_errors
++;
938 sctx
->stat
.uncorrectable_errors
++;
939 spin_unlock(&sctx
->stat_lock
);
940 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
943 BUG_ON(failed_mirror_index
>= BTRFS_MAX_MIRRORS
);
944 sblock_bad
= sblocks_for_recheck
+ failed_mirror_index
;
946 /* build and submit the bios for the failed mirror, check checksums */
947 scrub_recheck_block(fs_info
, sblock_bad
, 1);
949 if (!sblock_bad
->header_error
&& !sblock_bad
->checksum_error
&&
950 sblock_bad
->no_io_error_seen
) {
952 * the error disappeared after reading page by page, or
953 * the area was part of a huge bio and other parts of the
954 * bio caused I/O errors, or the block layer merged several
955 * read requests into one and the error is caused by a
956 * different bio (usually one of the two latter cases is
959 spin_lock(&sctx
->stat_lock
);
960 sctx
->stat
.unverified_errors
++;
961 sblock_to_check
->data_corrected
= 1;
962 spin_unlock(&sctx
->stat_lock
);
964 if (sctx
->is_dev_replace
)
965 scrub_write_block_to_dev_replace(sblock_bad
);
969 if (!sblock_bad
->no_io_error_seen
) {
970 spin_lock(&sctx
->stat_lock
);
971 sctx
->stat
.read_errors
++;
972 spin_unlock(&sctx
->stat_lock
);
973 if (__ratelimit(&_rs
))
974 scrub_print_warning("i/o error", sblock_to_check
);
975 btrfs_dev_stat_inc_and_print(dev
, BTRFS_DEV_STAT_READ_ERRS
);
976 } else if (sblock_bad
->checksum_error
) {
977 spin_lock(&sctx
->stat_lock
);
978 sctx
->stat
.csum_errors
++;
979 spin_unlock(&sctx
->stat_lock
);
980 if (__ratelimit(&_rs
))
981 scrub_print_warning("checksum error", sblock_to_check
);
982 btrfs_dev_stat_inc_and_print(dev
,
983 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
984 } else if (sblock_bad
->header_error
) {
985 spin_lock(&sctx
->stat_lock
);
986 sctx
->stat
.verify_errors
++;
987 spin_unlock(&sctx
->stat_lock
);
988 if (__ratelimit(&_rs
))
989 scrub_print_warning("checksum/header error",
991 if (sblock_bad
->generation_error
)
992 btrfs_dev_stat_inc_and_print(dev
,
993 BTRFS_DEV_STAT_GENERATION_ERRS
);
995 btrfs_dev_stat_inc_and_print(dev
,
996 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
999 if (sctx
->readonly
) {
1000 ASSERT(!sctx
->is_dev_replace
);
1005 * now build and submit the bios for the other mirrors, check
1007 * First try to pick the mirror which is completely without I/O
1008 * errors and also does not have a checksum error.
1009 * If one is found, and if a checksum is present, the full block
1010 * that is known to contain an error is rewritten. Afterwards
1011 * the block is known to be corrected.
1012 * If a mirror is found which is completely correct, and no
1013 * checksum is present, only those pages are rewritten that had
1014 * an I/O error in the block to be repaired, since it cannot be
1015 * determined, which copy of the other pages is better (and it
1016 * could happen otherwise that a correct page would be
1017 * overwritten by a bad one).
1019 for (mirror_index
= 0; ;mirror_index
++) {
1020 struct scrub_block
*sblock_other
;
1022 if (mirror_index
== failed_mirror_index
)
1025 /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
1026 if (!scrub_is_page_on_raid56(sblock_bad
->pagev
[0])) {
1027 if (mirror_index
>= BTRFS_MAX_MIRRORS
)
1029 if (!sblocks_for_recheck
[mirror_index
].page_count
)
1032 sblock_other
= sblocks_for_recheck
+ mirror_index
;
1034 struct scrub_recover
*r
= sblock_bad
->pagev
[0]->recover
;
1035 int max_allowed
= r
->bbio
->num_stripes
-
1036 r
->bbio
->num_tgtdevs
;
1038 if (mirror_index
>= max_allowed
)
1040 if (!sblocks_for_recheck
[1].page_count
)
1043 ASSERT(failed_mirror_index
== 0);
1044 sblock_other
= sblocks_for_recheck
+ 1;
1045 sblock_other
->pagev
[0]->mirror_num
= 1 + mirror_index
;
1048 /* build and submit the bios, check checksums */
1049 scrub_recheck_block(fs_info
, sblock_other
, 0);
1051 if (!sblock_other
->header_error
&&
1052 !sblock_other
->checksum_error
&&
1053 sblock_other
->no_io_error_seen
) {
1054 if (sctx
->is_dev_replace
) {
1055 scrub_write_block_to_dev_replace(sblock_other
);
1056 goto corrected_error
;
1058 ret
= scrub_repair_block_from_good_copy(
1059 sblock_bad
, sblock_other
);
1061 goto corrected_error
;
1066 if (sblock_bad
->no_io_error_seen
&& !sctx
->is_dev_replace
)
1067 goto did_not_correct_error
;
1070 * In case of I/O errors in the area that is supposed to be
1071 * repaired, continue by picking good copies of those pages.
1072 * Select the good pages from mirrors to rewrite bad pages from
1073 * the area to fix. Afterwards verify the checksum of the block
1074 * that is supposed to be repaired. This verification step is
1075 * only done for the purpose of statistic counting and for the
1076 * final scrub report, whether errors remain.
1077 * A perfect algorithm could make use of the checksum and try
1078 * all possible combinations of pages from the different mirrors
1079 * until the checksum verification succeeds. For example, when
1080 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1081 * of mirror #2 is readable but the final checksum test fails,
1082 * then the 2nd page of mirror #3 could be tried, whether now
1083 * the final checksum succeeds. But this would be a rare
1084 * exception and is therefore not implemented. At least it is
1085 * avoided that the good copy is overwritten.
1086 * A more useful improvement would be to pick the sectors
1087 * without I/O error based on sector sizes (512 bytes on legacy
1088 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
1089 * mirror could be repaired by taking 512 byte of a different
1090 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1091 * area are unreadable.
1094 for (page_num
= 0; page_num
< sblock_bad
->page_count
;
1096 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1097 struct scrub_block
*sblock_other
= NULL
;
1099 /* skip no-io-error page in scrub */
1100 if (!page_bad
->io_error
&& !sctx
->is_dev_replace
)
1103 if (scrub_is_page_on_raid56(sblock_bad
->pagev
[0])) {
1105 * In case of dev replace, if raid56 rebuild process
1106 * didn't work out correct data, then copy the content
1107 * in sblock_bad to make sure target device is identical
1108 * to source device, instead of writing garbage data in
1109 * sblock_for_recheck array to target device.
1111 sblock_other
= NULL
;
1112 } else if (page_bad
->io_error
) {
1113 /* try to find no-io-error page in mirrors */
1114 for (mirror_index
= 0;
1115 mirror_index
< BTRFS_MAX_MIRRORS
&&
1116 sblocks_for_recheck
[mirror_index
].page_count
> 0;
1118 if (!sblocks_for_recheck
[mirror_index
].
1119 pagev
[page_num
]->io_error
) {
1120 sblock_other
= sblocks_for_recheck
+
1129 if (sctx
->is_dev_replace
) {
1131 * did not find a mirror to fetch the page
1132 * from. scrub_write_page_to_dev_replace()
1133 * handles this case (page->io_error), by
1134 * filling the block with zeros before
1135 * submitting the write request
1138 sblock_other
= sblock_bad
;
1140 if (scrub_write_page_to_dev_replace(sblock_other
,
1143 &fs_info
->dev_replace
.num_write_errors
);
1146 } else if (sblock_other
) {
1147 ret
= scrub_repair_page_from_good_copy(sblock_bad
,
1151 page_bad
->io_error
= 0;
1157 if (success
&& !sctx
->is_dev_replace
) {
1158 if (is_metadata
|| have_csum
) {
1160 * need to verify the checksum now that all
1161 * sectors on disk are repaired (the write
1162 * request for data to be repaired is on its way).
1163 * Just be lazy and use scrub_recheck_block()
1164 * which re-reads the data before the checksum
1165 * is verified, but most likely the data comes out
1166 * of the page cache.
1168 scrub_recheck_block(fs_info
, sblock_bad
, 1);
1169 if (!sblock_bad
->header_error
&&
1170 !sblock_bad
->checksum_error
&&
1171 sblock_bad
->no_io_error_seen
)
1172 goto corrected_error
;
1174 goto did_not_correct_error
;
1177 spin_lock(&sctx
->stat_lock
);
1178 sctx
->stat
.corrected_errors
++;
1179 sblock_to_check
->data_corrected
= 1;
1180 spin_unlock(&sctx
->stat_lock
);
1181 btrfs_err_rl_in_rcu(fs_info
,
1182 "fixed up error at logical %llu on dev %s",
1183 logical
, rcu_str_deref(dev
->name
));
1186 did_not_correct_error
:
1187 spin_lock(&sctx
->stat_lock
);
1188 sctx
->stat
.uncorrectable_errors
++;
1189 spin_unlock(&sctx
->stat_lock
);
1190 btrfs_err_rl_in_rcu(fs_info
,
1191 "unable to fixup (regular) error at logical %llu on dev %s",
1192 logical
, rcu_str_deref(dev
->name
));
1196 if (sblocks_for_recheck
) {
1197 for (mirror_index
= 0; mirror_index
< BTRFS_MAX_MIRRORS
;
1199 struct scrub_block
*sblock
= sblocks_for_recheck
+
1201 struct scrub_recover
*recover
;
1204 for (page_index
= 0; page_index
< sblock
->page_count
;
1206 sblock
->pagev
[page_index
]->sblock
= NULL
;
1207 recover
= sblock
->pagev
[page_index
]->recover
;
1209 scrub_put_recover(fs_info
, recover
);
1210 sblock
->pagev
[page_index
]->recover
=
1213 scrub_page_put(sblock
->pagev
[page_index
]);
1216 kfree(sblocks_for_recheck
);
1219 ret
= unlock_full_stripe(fs_info
, logical
, full_stripe_locked
);
1220 memalloc_nofs_restore(nofs_flag
);
1226 static inline int scrub_nr_raid_mirrors(struct btrfs_bio
*bbio
)
1228 if (bbio
->map_type
& BTRFS_BLOCK_GROUP_RAID5
)
1230 else if (bbio
->map_type
& BTRFS_BLOCK_GROUP_RAID6
)
1233 return (int)bbio
->num_stripes
;
1236 static inline void scrub_stripe_index_and_offset(u64 logical
, u64 map_type
,
1239 int nstripes
, int mirror
,
1245 if (map_type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
1247 for (i
= 0; i
< nstripes
; i
++) {
1248 if (raid_map
[i
] == RAID6_Q_STRIPE
||
1249 raid_map
[i
] == RAID5_P_STRIPE
)
1252 if (logical
>= raid_map
[i
] &&
1253 logical
< raid_map
[i
] + mapped_length
)
1258 *stripe_offset
= logical
- raid_map
[i
];
1260 /* The other RAID type */
1261 *stripe_index
= mirror
;
1266 static int scrub_setup_recheck_block(struct scrub_block
*original_sblock
,
1267 struct scrub_block
*sblocks_for_recheck
)
1269 struct scrub_ctx
*sctx
= original_sblock
->sctx
;
1270 struct btrfs_fs_info
*fs_info
= sctx
->fs_info
;
1271 u64 length
= original_sblock
->page_count
* PAGE_SIZE
;
1272 u64 logical
= original_sblock
->pagev
[0]->logical
;
1273 u64 generation
= original_sblock
->pagev
[0]->generation
;
1274 u64 flags
= original_sblock
->pagev
[0]->flags
;
1275 u64 have_csum
= original_sblock
->pagev
[0]->have_csum
;
1276 struct scrub_recover
*recover
;
1277 struct btrfs_bio
*bbio
;
1288 * note: the two members refs and outstanding_pages
1289 * are not used (and not set) in the blocks that are used for
1290 * the recheck procedure
1293 while (length
> 0) {
1294 sublen
= min_t(u64
, length
, PAGE_SIZE
);
1295 mapped_length
= sublen
;
1299 * with a length of PAGE_SIZE, each returned stripe
1300 * represents one mirror
1302 btrfs_bio_counter_inc_blocked(fs_info
);
1303 ret
= btrfs_map_sblock(fs_info
, BTRFS_MAP_GET_READ_MIRRORS
,
1304 logical
, &mapped_length
, &bbio
);
1305 if (ret
|| !bbio
|| mapped_length
< sublen
) {
1306 btrfs_put_bbio(bbio
);
1307 btrfs_bio_counter_dec(fs_info
);
1311 recover
= kzalloc(sizeof(struct scrub_recover
), GFP_NOFS
);
1313 btrfs_put_bbio(bbio
);
1314 btrfs_bio_counter_dec(fs_info
);
1318 refcount_set(&recover
->refs
, 1);
1319 recover
->bbio
= bbio
;
1320 recover
->map_length
= mapped_length
;
1322 BUG_ON(page_index
>= SCRUB_MAX_PAGES_PER_BLOCK
);
1324 nmirrors
= min(scrub_nr_raid_mirrors(bbio
), BTRFS_MAX_MIRRORS
);
1326 for (mirror_index
= 0; mirror_index
< nmirrors
;
1328 struct scrub_block
*sblock
;
1329 struct scrub_page
*page
;
1331 sblock
= sblocks_for_recheck
+ mirror_index
;
1332 sblock
->sctx
= sctx
;
1334 page
= kzalloc(sizeof(*page
), GFP_NOFS
);
1337 spin_lock(&sctx
->stat_lock
);
1338 sctx
->stat
.malloc_errors
++;
1339 spin_unlock(&sctx
->stat_lock
);
1340 scrub_put_recover(fs_info
, recover
);
1343 scrub_page_get(page
);
1344 sblock
->pagev
[page_index
] = page
;
1345 page
->sblock
= sblock
;
1346 page
->flags
= flags
;
1347 page
->generation
= generation
;
1348 page
->logical
= logical
;
1349 page
->have_csum
= have_csum
;
1352 original_sblock
->pagev
[0]->csum
,
1355 scrub_stripe_index_and_offset(logical
,
1364 page
->physical
= bbio
->stripes
[stripe_index
].physical
+
1366 page
->dev
= bbio
->stripes
[stripe_index
].dev
;
1368 BUG_ON(page_index
>= original_sblock
->page_count
);
1369 page
->physical_for_dev_replace
=
1370 original_sblock
->pagev
[page_index
]->
1371 physical_for_dev_replace
;
1372 /* for missing devices, dev->bdev is NULL */
1373 page
->mirror_num
= mirror_index
+ 1;
1374 sblock
->page_count
++;
1375 page
->page
= alloc_page(GFP_NOFS
);
1379 scrub_get_recover(recover
);
1380 page
->recover
= recover
;
1382 scrub_put_recover(fs_info
, recover
);
1391 static void scrub_bio_wait_endio(struct bio
*bio
)
1393 complete(bio
->bi_private
);
1396 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info
*fs_info
,
1398 struct scrub_page
*page
)
1400 DECLARE_COMPLETION_ONSTACK(done
);
1404 bio
->bi_iter
.bi_sector
= page
->logical
>> 9;
1405 bio
->bi_private
= &done
;
1406 bio
->bi_end_io
= scrub_bio_wait_endio
;
1408 mirror_num
= page
->sblock
->pagev
[0]->mirror_num
;
1409 ret
= raid56_parity_recover(fs_info
, bio
, page
->recover
->bbio
,
1410 page
->recover
->map_length
,
1415 wait_for_completion_io(&done
);
1416 return blk_status_to_errno(bio
->bi_status
);
1419 static void scrub_recheck_block_on_raid56(struct btrfs_fs_info
*fs_info
,
1420 struct scrub_block
*sblock
)
1422 struct scrub_page
*first_page
= sblock
->pagev
[0];
1426 /* All pages in sblock belong to the same stripe on the same device. */
1427 ASSERT(first_page
->dev
);
1428 if (!first_page
->dev
->bdev
)
1431 bio
= btrfs_io_bio_alloc(BIO_MAX_PAGES
);
1432 bio_set_dev(bio
, first_page
->dev
->bdev
);
1434 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1435 struct scrub_page
*page
= sblock
->pagev
[page_num
];
1437 WARN_ON(!page
->page
);
1438 bio_add_page(bio
, page
->page
, PAGE_SIZE
, 0);
1441 if (scrub_submit_raid56_bio_wait(fs_info
, bio
, first_page
)) {
1448 scrub_recheck_block_checksum(sblock
);
1452 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++)
1453 sblock
->pagev
[page_num
]->io_error
= 1;
1455 sblock
->no_io_error_seen
= 0;
1459 * this function will check the on disk data for checksum errors, header
1460 * errors and read I/O errors. If any I/O errors happen, the exact pages
1461 * which are errored are marked as being bad. The goal is to enable scrub
1462 * to take those pages that are not errored from all the mirrors so that
1463 * the pages that are errored in the just handled mirror can be repaired.
1465 static void scrub_recheck_block(struct btrfs_fs_info
*fs_info
,
1466 struct scrub_block
*sblock
,
1467 int retry_failed_mirror
)
1471 sblock
->no_io_error_seen
= 1;
1473 /* short cut for raid56 */
1474 if (!retry_failed_mirror
&& scrub_is_page_on_raid56(sblock
->pagev
[0]))
1475 return scrub_recheck_block_on_raid56(fs_info
, sblock
);
1477 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1479 struct scrub_page
*page
= sblock
->pagev
[page_num
];
1481 if (page
->dev
->bdev
== NULL
) {
1483 sblock
->no_io_error_seen
= 0;
1487 WARN_ON(!page
->page
);
1488 bio
= btrfs_io_bio_alloc(1);
1489 bio_set_dev(bio
, page
->dev
->bdev
);
1491 bio_add_page(bio
, page
->page
, PAGE_SIZE
, 0);
1492 bio
->bi_iter
.bi_sector
= page
->physical
>> 9;
1493 bio
->bi_opf
= REQ_OP_READ
;
1495 if (btrfsic_submit_bio_wait(bio
)) {
1497 sblock
->no_io_error_seen
= 0;
1503 if (sblock
->no_io_error_seen
)
1504 scrub_recheck_block_checksum(sblock
);
1507 static inline int scrub_check_fsid(u8 fsid
[],
1508 struct scrub_page
*spage
)
1510 struct btrfs_fs_devices
*fs_devices
= spage
->dev
->fs_devices
;
1513 ret
= memcmp(fsid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
);
1517 static void scrub_recheck_block_checksum(struct scrub_block
*sblock
)
1519 sblock
->header_error
= 0;
1520 sblock
->checksum_error
= 0;
1521 sblock
->generation_error
= 0;
1523 if (sblock
->pagev
[0]->flags
& BTRFS_EXTENT_FLAG_DATA
)
1524 scrub_checksum_data(sblock
);
1526 scrub_checksum_tree_block(sblock
);
1529 static int scrub_repair_block_from_good_copy(struct scrub_block
*sblock_bad
,
1530 struct scrub_block
*sblock_good
)
1535 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1538 ret_sub
= scrub_repair_page_from_good_copy(sblock_bad
,
1548 static int scrub_repair_page_from_good_copy(struct scrub_block
*sblock_bad
,
1549 struct scrub_block
*sblock_good
,
1550 int page_num
, int force_write
)
1552 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1553 struct scrub_page
*page_good
= sblock_good
->pagev
[page_num
];
1554 struct btrfs_fs_info
*fs_info
= sblock_bad
->sctx
->fs_info
;
1556 BUG_ON(page_bad
->page
== NULL
);
1557 BUG_ON(page_good
->page
== NULL
);
1558 if (force_write
|| sblock_bad
->header_error
||
1559 sblock_bad
->checksum_error
|| page_bad
->io_error
) {
1563 if (!page_bad
->dev
->bdev
) {
1564 btrfs_warn_rl(fs_info
,
1565 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1569 bio
= btrfs_io_bio_alloc(1);
1570 bio_set_dev(bio
, page_bad
->dev
->bdev
);
1571 bio
->bi_iter
.bi_sector
= page_bad
->physical
>> 9;
1572 bio
->bi_opf
= REQ_OP_WRITE
;
1574 ret
= bio_add_page(bio
, page_good
->page
, PAGE_SIZE
, 0);
1575 if (PAGE_SIZE
!= ret
) {
1580 if (btrfsic_submit_bio_wait(bio
)) {
1581 btrfs_dev_stat_inc_and_print(page_bad
->dev
,
1582 BTRFS_DEV_STAT_WRITE_ERRS
);
1583 atomic64_inc(&fs_info
->dev_replace
.num_write_errors
);
1593 static void scrub_write_block_to_dev_replace(struct scrub_block
*sblock
)
1595 struct btrfs_fs_info
*fs_info
= sblock
->sctx
->fs_info
;
1599 * This block is used for the check of the parity on the source device,
1600 * so the data needn't be written into the destination device.
1602 if (sblock
->sparity
)
1605 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1608 ret
= scrub_write_page_to_dev_replace(sblock
, page_num
);
1610 atomic64_inc(&fs_info
->dev_replace
.num_write_errors
);
1614 static int scrub_write_page_to_dev_replace(struct scrub_block
*sblock
,
1617 struct scrub_page
*spage
= sblock
->pagev
[page_num
];
1619 BUG_ON(spage
->page
== NULL
);
1620 if (spage
->io_error
) {
1621 void *mapped_buffer
= kmap_atomic(spage
->page
);
1623 clear_page(mapped_buffer
);
1624 flush_dcache_page(spage
->page
);
1625 kunmap_atomic(mapped_buffer
);
1627 return scrub_add_page_to_wr_bio(sblock
->sctx
, spage
);
1630 static int scrub_add_page_to_wr_bio(struct scrub_ctx
*sctx
,
1631 struct scrub_page
*spage
)
1633 struct scrub_bio
*sbio
;
1636 mutex_lock(&sctx
->wr_lock
);
1638 if (!sctx
->wr_curr_bio
) {
1639 sctx
->wr_curr_bio
= kzalloc(sizeof(*sctx
->wr_curr_bio
),
1641 if (!sctx
->wr_curr_bio
) {
1642 mutex_unlock(&sctx
->wr_lock
);
1645 sctx
->wr_curr_bio
->sctx
= sctx
;
1646 sctx
->wr_curr_bio
->page_count
= 0;
1648 sbio
= sctx
->wr_curr_bio
;
1649 if (sbio
->page_count
== 0) {
1652 sbio
->physical
= spage
->physical_for_dev_replace
;
1653 sbio
->logical
= spage
->logical
;
1654 sbio
->dev
= sctx
->wr_tgtdev
;
1657 bio
= btrfs_io_bio_alloc(sctx
->pages_per_wr_bio
);
1661 bio
->bi_private
= sbio
;
1662 bio
->bi_end_io
= scrub_wr_bio_end_io
;
1663 bio_set_dev(bio
, sbio
->dev
->bdev
);
1664 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
1665 bio
->bi_opf
= REQ_OP_WRITE
;
1667 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
1668 spage
->physical_for_dev_replace
||
1669 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
1671 scrub_wr_submit(sctx
);
1675 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
1676 if (ret
!= PAGE_SIZE
) {
1677 if (sbio
->page_count
< 1) {
1680 mutex_unlock(&sctx
->wr_lock
);
1683 scrub_wr_submit(sctx
);
1687 sbio
->pagev
[sbio
->page_count
] = spage
;
1688 scrub_page_get(spage
);
1690 if (sbio
->page_count
== sctx
->pages_per_wr_bio
)
1691 scrub_wr_submit(sctx
);
1692 mutex_unlock(&sctx
->wr_lock
);
1697 static void scrub_wr_submit(struct scrub_ctx
*sctx
)
1699 struct scrub_bio
*sbio
;
1701 if (!sctx
->wr_curr_bio
)
1704 sbio
= sctx
->wr_curr_bio
;
1705 sctx
->wr_curr_bio
= NULL
;
1706 WARN_ON(!sbio
->bio
->bi_disk
);
1707 scrub_pending_bio_inc(sctx
);
/* process all writes in a single worker thread. Then the block layer
 * orders the requests before sending them to the driver which
 * doubled the write performance on spinning disks when measured
 * with Linux 3.5 */
1712 btrfsic_submit_bio(sbio
->bio
);
1715 static void scrub_wr_bio_end_io(struct bio
*bio
)
1717 struct scrub_bio
*sbio
= bio
->bi_private
;
1718 struct btrfs_fs_info
*fs_info
= sbio
->dev
->fs_info
;
1720 sbio
->status
= bio
->bi_status
;
1723 btrfs_init_work(&sbio
->work
, scrub_wr_bio_end_io_worker
, NULL
, NULL
);
1724 btrfs_queue_work(fs_info
->scrub_wr_completion_workers
, &sbio
->work
);
1727 static void scrub_wr_bio_end_io_worker(struct btrfs_work
*work
)
1729 struct scrub_bio
*sbio
= container_of(work
, struct scrub_bio
, work
);
1730 struct scrub_ctx
*sctx
= sbio
->sctx
;
1733 WARN_ON(sbio
->page_count
> SCRUB_PAGES_PER_WR_BIO
);
1735 struct btrfs_dev_replace
*dev_replace
=
1736 &sbio
->sctx
->fs_info
->dev_replace
;
1738 for (i
= 0; i
< sbio
->page_count
; i
++) {
1739 struct scrub_page
*spage
= sbio
->pagev
[i
];
1741 spage
->io_error
= 1;
1742 atomic64_inc(&dev_replace
->num_write_errors
);
1746 for (i
= 0; i
< sbio
->page_count
; i
++)
1747 scrub_page_put(sbio
->pagev
[i
]);
1751 scrub_pending_bio_dec(sctx
);
1754 static int scrub_checksum(struct scrub_block
*sblock
)
1760 * No need to initialize these stats currently,
* because this function only uses the return value
* instead of these stat values.
1767 sblock
->header_error
= 0;
1768 sblock
->generation_error
= 0;
1769 sblock
->checksum_error
= 0;
1771 WARN_ON(sblock
->page_count
< 1);
1772 flags
= sblock
->pagev
[0]->flags
;
1774 if (flags
& BTRFS_EXTENT_FLAG_DATA
)
1775 ret
= scrub_checksum_data(sblock
);
1776 else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
)
1777 ret
= scrub_checksum_tree_block(sblock
);
1778 else if (flags
& BTRFS_EXTENT_FLAG_SUPER
)
1779 (void)scrub_checksum_super(sblock
);
1783 scrub_handle_errored_block(sblock
);
1788 static int scrub_checksum_data(struct scrub_block
*sblock
)
1790 struct scrub_ctx
*sctx
= sblock
->sctx
;
1791 struct btrfs_fs_info
*fs_info
= sctx
->fs_info
;
1792 SHASH_DESC_ON_STACK(shash
, fs_info
->csum_shash
);
1793 u8 csum
[BTRFS_CSUM_SIZE
];
1800 BUG_ON(sblock
->page_count
< 1);
1801 if (!sblock
->pagev
[0]->have_csum
)
1804 shash
->tfm
= fs_info
->csum_shash
;
1805 crypto_shash_init(shash
);
1807 on_disk_csum
= sblock
->pagev
[0]->csum
;
1808 page
= sblock
->pagev
[0]->page
;
1809 buffer
= kmap_atomic(page
);
1811 len
= sctx
->fs_info
->sectorsize
;
1814 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
1816 crypto_shash_update(shash
, buffer
, l
);
1817 kunmap_atomic(buffer
);
1822 BUG_ON(index
>= sblock
->page_count
);
1823 BUG_ON(!sblock
->pagev
[index
]->page
);
1824 page
= sblock
->pagev
[index
]->page
;
1825 buffer
= kmap_atomic(page
);
1828 crypto_shash_final(shash
, csum
);
1829 if (memcmp(csum
, on_disk_csum
, sctx
->csum_size
))
1830 sblock
->checksum_error
= 1;
1832 return sblock
->checksum_error
;
1835 static int scrub_checksum_tree_block(struct scrub_block
*sblock
)
1837 struct scrub_ctx
*sctx
= sblock
->sctx
;
1838 struct btrfs_header
*h
;
1839 struct btrfs_fs_info
*fs_info
= sctx
->fs_info
;
1840 SHASH_DESC_ON_STACK(shash
, fs_info
->csum_shash
);
1841 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1842 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1844 void *mapped_buffer
;
1850 shash
->tfm
= fs_info
->csum_shash
;
1851 crypto_shash_init(shash
);
1853 BUG_ON(sblock
->page_count
< 1);
1854 page
= sblock
->pagev
[0]->page
;
1855 mapped_buffer
= kmap_atomic(page
);
1856 h
= (struct btrfs_header
*)mapped_buffer
;
1857 memcpy(on_disk_csum
, h
->csum
, sctx
->csum_size
);
1860 * we don't use the getter functions here, as we
1861 * a) don't have an extent buffer and
1862 * b) the page is already kmapped
1864 if (sblock
->pagev
[0]->logical
!= btrfs_stack_header_bytenr(h
))
1865 sblock
->header_error
= 1;
1867 if (sblock
->pagev
[0]->generation
!= btrfs_stack_header_generation(h
)) {
1868 sblock
->header_error
= 1;
1869 sblock
->generation_error
= 1;
1872 if (!scrub_check_fsid(h
->fsid
, sblock
->pagev
[0]))
1873 sblock
->header_error
= 1;
1875 if (memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1877 sblock
->header_error
= 1;
1879 len
= sctx
->fs_info
->nodesize
- BTRFS_CSUM_SIZE
;
1880 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1881 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1884 u64 l
= min_t(u64
, len
, mapped_size
);
1886 crypto_shash_update(shash
, p
, l
);
1887 kunmap_atomic(mapped_buffer
);
1892 BUG_ON(index
>= sblock
->page_count
);
1893 BUG_ON(!sblock
->pagev
[index
]->page
);
1894 page
= sblock
->pagev
[index
]->page
;
1895 mapped_buffer
= kmap_atomic(page
);
1896 mapped_size
= PAGE_SIZE
;
1900 crypto_shash_final(shash
, calculated_csum
);
1901 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1902 sblock
->checksum_error
= 1;
1904 return sblock
->header_error
|| sblock
->checksum_error
;
1907 static int scrub_checksum_super(struct scrub_block
*sblock
)
1909 struct btrfs_super_block
*s
;
1910 struct scrub_ctx
*sctx
= sblock
->sctx
;
1911 struct btrfs_fs_info
*fs_info
= sctx
->fs_info
;
1912 SHASH_DESC_ON_STACK(shash
, fs_info
->csum_shash
);
1913 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1914 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1916 void *mapped_buffer
;
1924 shash
->tfm
= fs_info
->csum_shash
;
1925 crypto_shash_init(shash
);
1927 BUG_ON(sblock
->page_count
< 1);
1928 page
= sblock
->pagev
[0]->page
;
1929 mapped_buffer
= kmap_atomic(page
);
1930 s
= (struct btrfs_super_block
*)mapped_buffer
;
1931 memcpy(on_disk_csum
, s
->csum
, sctx
->csum_size
);
1933 if (sblock
->pagev
[0]->logical
!= btrfs_super_bytenr(s
))
1936 if (sblock
->pagev
[0]->generation
!= btrfs_super_generation(s
))
1939 if (!scrub_check_fsid(s
->fsid
, sblock
->pagev
[0]))
1942 len
= BTRFS_SUPER_INFO_SIZE
- BTRFS_CSUM_SIZE
;
1943 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1944 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1947 u64 l
= min_t(u64
, len
, mapped_size
);
1949 crypto_shash_update(shash
, p
, l
);
1950 kunmap_atomic(mapped_buffer
);
1955 BUG_ON(index
>= sblock
->page_count
);
1956 BUG_ON(!sblock
->pagev
[index
]->page
);
1957 page
= sblock
->pagev
[index
]->page
;
1958 mapped_buffer
= kmap_atomic(page
);
1959 mapped_size
= PAGE_SIZE
;
1963 crypto_shash_final(shash
, calculated_csum
);
1964 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1967 if (fail_cor
+ fail_gen
) {
1969 * if we find an error in a super block, we just report it.
1970 * They will get written with the next transaction commit
1973 spin_lock(&sctx
->stat_lock
);
1974 ++sctx
->stat
.super_errors
;
1975 spin_unlock(&sctx
->stat_lock
);
1977 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
1978 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
1980 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
1981 BTRFS_DEV_STAT_GENERATION_ERRS
);
1984 return fail_cor
+ fail_gen
;
1987 static void scrub_block_get(struct scrub_block
*sblock
)
1989 refcount_inc(&sblock
->refs
);
1992 static void scrub_block_put(struct scrub_block
*sblock
)
1994 if (refcount_dec_and_test(&sblock
->refs
)) {
1997 if (sblock
->sparity
)
1998 scrub_parity_put(sblock
->sparity
);
2000 for (i
= 0; i
< sblock
->page_count
; i
++)
2001 scrub_page_put(sblock
->pagev
[i
]);
2006 static void scrub_page_get(struct scrub_page
*spage
)
2008 atomic_inc(&spage
->refs
);
2011 static void scrub_page_put(struct scrub_page
*spage
)
2013 if (atomic_dec_and_test(&spage
->refs
)) {
2015 __free_page(spage
->page
);
2020 static void scrub_submit(struct scrub_ctx
*sctx
)
2022 struct scrub_bio
*sbio
;
2024 if (sctx
->curr
== -1)
2027 sbio
= sctx
->bios
[sctx
->curr
];
2029 scrub_pending_bio_inc(sctx
);
2030 btrfsic_submit_bio(sbio
->bio
);
2033 static int scrub_add_page_to_rd_bio(struct scrub_ctx
*sctx
,
2034 struct scrub_page
*spage
)
2036 struct scrub_block
*sblock
= spage
->sblock
;
2037 struct scrub_bio
*sbio
;
2042 * grab a fresh bio or wait for one to become available
2044 while (sctx
->curr
== -1) {
2045 spin_lock(&sctx
->list_lock
);
2046 sctx
->curr
= sctx
->first_free
;
2047 if (sctx
->curr
!= -1) {
2048 sctx
->first_free
= sctx
->bios
[sctx
->curr
]->next_free
;
2049 sctx
->bios
[sctx
->curr
]->next_free
= -1;
2050 sctx
->bios
[sctx
->curr
]->page_count
= 0;
2051 spin_unlock(&sctx
->list_lock
);
2053 spin_unlock(&sctx
->list_lock
);
2054 wait_event(sctx
->list_wait
, sctx
->first_free
!= -1);
2057 sbio
= sctx
->bios
[sctx
->curr
];
2058 if (sbio
->page_count
== 0) {
2061 sbio
->physical
= spage
->physical
;
2062 sbio
->logical
= spage
->logical
;
2063 sbio
->dev
= spage
->dev
;
2066 bio
= btrfs_io_bio_alloc(sctx
->pages_per_rd_bio
);
2070 bio
->bi_private
= sbio
;
2071 bio
->bi_end_io
= scrub_bio_end_io
;
2072 bio_set_dev(bio
, sbio
->dev
->bdev
);
2073 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
2074 bio
->bi_opf
= REQ_OP_READ
;
2076 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
2078 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
2080 sbio
->dev
!= spage
->dev
) {
2085 sbio
->pagev
[sbio
->page_count
] = spage
;
2086 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
2087 if (ret
!= PAGE_SIZE
) {
2088 if (sbio
->page_count
< 1) {
2097 scrub_block_get(sblock
); /* one for the page added to the bio */
2098 atomic_inc(&sblock
->outstanding_pages
);
2100 if (sbio
->page_count
== sctx
->pages_per_rd_bio
)
2106 static void scrub_missing_raid56_end_io(struct bio
*bio
)
2108 struct scrub_block
*sblock
= bio
->bi_private
;
2109 struct btrfs_fs_info
*fs_info
= sblock
->sctx
->fs_info
;
2112 sblock
->no_io_error_seen
= 0;
2116 btrfs_queue_work(fs_info
->scrub_workers
, &sblock
->work
);
2119 static void scrub_missing_raid56_worker(struct btrfs_work
*work
)
2121 struct scrub_block
*sblock
= container_of(work
, struct scrub_block
, work
);
2122 struct scrub_ctx
*sctx
= sblock
->sctx
;
2123 struct btrfs_fs_info
*fs_info
= sctx
->fs_info
;
2125 struct btrfs_device
*dev
;
2127 logical
= sblock
->pagev
[0]->logical
;
2128 dev
= sblock
->pagev
[0]->dev
;
2130 if (sblock
->no_io_error_seen
)
2131 scrub_recheck_block_checksum(sblock
);
2133 if (!sblock
->no_io_error_seen
) {
2134 spin_lock(&sctx
->stat_lock
);
2135 sctx
->stat
.read_errors
++;
2136 spin_unlock(&sctx
->stat_lock
);
2137 btrfs_err_rl_in_rcu(fs_info
,
2138 "IO error rebuilding logical %llu for dev %s",
2139 logical
, rcu_str_deref(dev
->name
));
2140 } else if (sblock
->header_error
|| sblock
->checksum_error
) {
2141 spin_lock(&sctx
->stat_lock
);
2142 sctx
->stat
.uncorrectable_errors
++;
2143 spin_unlock(&sctx
->stat_lock
);
2144 btrfs_err_rl_in_rcu(fs_info
,
2145 "failed to rebuild valid logical %llu for dev %s",
2146 logical
, rcu_str_deref(dev
->name
));
2148 scrub_write_block_to_dev_replace(sblock
);
2151 if (sctx
->is_dev_replace
&& sctx
->flush_all_writes
) {
2152 mutex_lock(&sctx
->wr_lock
);
2153 scrub_wr_submit(sctx
);
2154 mutex_unlock(&sctx
->wr_lock
);
2157 scrub_block_put(sblock
);
2158 scrub_pending_bio_dec(sctx
);
static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = sblock->page_count * PAGE_SIZE;
	u64 logical = sblock->pagev[0]->logical;
	struct btrfs_bio *bbio = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bbio_out;
	}

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->page_count; i++) {
		struct scrub_page *spage = sblock->pagev[i];

		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
	}

	btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}
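
/*
 * Allocate a scrub_block for the range [logical, logical + len), attach one
 * scrub_page per PAGE_SIZE chunk and queue the pages for reading. The extra
 * block reference taken for each page added to a bio is dropped in the read
 * bio completion worker.
 */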
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (force)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->status) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
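
/*
 * Mark the sectors covered by [start, start + len) in the given parity
 * bitmap. The offset is taken relative to sparity->logic_start and may wrap
 * around the end of the bitmap, hence the two bitmap_set() calls at the end.
 */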
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u64 offset;
	u64 nsectors64;
	u32 nsectors;
	int sectorsize = sparity->sctx->fs_info->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
	offset = div_u64(offset, sectorsize);
	nsectors64 = div_u64(len, sectorsize);

	ASSERT(nsectors64 < UINT_MAX);
	nsectors = (u32)nsectors64;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}
static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * If there is a checksum error, write via the repair
		 * mechanism in the dev replace case, otherwise write here
		 * in the dev replace case.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
	ASSERT(index < UINT_MAX);

	num_sectors = sum->len / sctx->fs_info->sectorsize;
	memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
			u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			blocksize = map->stripe_len;
		else
			blocksize = sctx->fs_info->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
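
/*
 * Same as scrub_pages(), but the pages are additionally referenced on
 * sparity->spages so they stay around until the parity check of the whole
 * stripe is finished; they are released again in scrub_free_parity().
 */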
static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sparity->stripe_len;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sparity->stripe_len;
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}
/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the left-most data stripe's
 * logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;
	const int data_stripes = nr_data_stripes(map);

	last_offset = (physical - map->stripes[num].physical) * data_stripes;
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < data_stripes; i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div64_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, data_stripes);

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}
static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}
static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;

	if (bio->bi_status)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
			NULL);
	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}
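
/*
 * Called once the last reference to a scrub_parity is dropped: rebuild and
 * rewrite the parity for all sectors that carry data and saw no read error
 * (dbitmap minus ebitmap) using a scrub rbio. Sectors that could not be read
 * are accounted as uncorrectable in scrub_free_parity().
 */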
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
			       &length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}
static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
}
static void scrub_parity_get(struct scrub_parity *sparity)
{
	refcount_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!refcount_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}
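
/*
 * Scrub one full parity stripe: walk the extent tree for the range
 * [logic_start, logic_end), mark the data sectors in dbitmap, read and
 * verify the data, and finally let scrub_parity_put() trigger the parity
 * check and repair once all reads have completed.
 */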
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_bio *bbio = NULL;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	refcount_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			bbio = NULL;
			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
					      extent_logical, &mapped_length, &bbio,
					      0);
			if (!ret) {
				if (!bbio || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bbio(bbio);
				goto out;
			}
			extent_physical = bbio->stripes[0].physical;
			extent_mirror_num = bbio->mirror_num;
			extent_dev = bbio->stripes[0].dev;
			btrfs_put_bbio(bbio);

			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}
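
/*
 * Scrub the portion of a chunk that lives on one device stripe. The mapping
 * from the physical stripe to logical addresses depends on the RAID profile;
 * for RAID5/6 the parity stripes encountered on the way are handed over to
 * scrub_raid56_parity().
 */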
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = div64_u64(length, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;
	/*
	 * trigger the readahead for extent tree and csum tree and wait for
	 * completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key.objectid = logical;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key, &key_end);

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);


	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up to be about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			sctx->flush_all_writes = true;
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			sctx->flush_all_writes = false;
			scrub_blocked_if_needed(fs_info);
		}

		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				/* it is parity strip */
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = fs_info->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logical);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (sctx->is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical +
						       extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, map, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}
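
/*
 * Scrub every stripe of a chunk that is located on @scrub_dev at
 * @dev_offset, using the cached extent mapping of the chunk.
 */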
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset,
					  struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, chunk_offset, 1);
	read_unlock(&map_tree->lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed)
			ret = -EINVAL;
		spin_unlock(&cache->lock);

		return ret;
	}

	map = em->map_lookup;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
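
/*
 * Walk the dev extents of @scrub_dev between @start and @end and scrub the
 * corresponding block groups one by one. Each block group is set read-only
 * while it is being scrubbed, see the comments below for why.
 */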
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/* some chunks are removed but not committed to disk yet,
		 * continue scrubbing */
		if (!cache)
			goto skip;

		/*
		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);

		/*
		 * Don't do chunk preallocation for scrub.
		 *
		 * This is especially important for SYSTEM bgs, or we can hit
		 * -EFBIG from btrfs_finish_chunk_alloc() like:
		 * 1. The only SYSTEM bg is marked RO.
		 *    Since SYSTEM bg is small, that's pretty common.
		 * 2. New SYSTEM bg will be allocated
		 *    Due to regular version will allocate new chunk.
		 * 3. New SYSTEM bg is empty and will get cleaned up
		 *    Before cleanup really happens, it's marked RO again.
		 * 4. Empty SYSTEM bg get scrubbed
		 *    We go back to 2.
		 *
		 * This can easily boost the amount of SYSTEM chunks if cleaner
		 * thread can't be triggered fast enough, and use up all space
		 * of btrfs_super_block::sys_chunk_array
		 *
		 * While for dev replace, we need to try our best to mark block
		 * group RO, to prevent race between:
		 * - Write duplication
		 *   Contains latest data
		 * - Scrub copy
		 *   Contains data from commit tree
		 *
		 * If target block group is not marked RO, nocow writes can
		 * be overwritten by scrub copy, causing data corruption.
		 * So for dev-replace, it's not allowed to continue if a block
		 * group is not RO.
		 */
		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
			/*
			 * btrfs_inc_block_group_ro return -ENOSPC when it
			 * failed in creating new chunk for metadata.
			 * It is not a problem for scrub, because
			 * metadata are always cowed, and our scrub paused
			 * commit_transactions.
			 */
			ro_set = 0;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_put_block_group(cache);
			scrub_pause_off(fs_info);
			break;
		}

		/*
		 * Now the target block is marked RO, wait for nocow writes to
		 * finish before dev-replace.
		 * COW is fine, as COW never overwrites extents in commit tree.
		 */
		if (sctx->is_dev_replace) {
			btrfs_wait_nocow_writers(cache);
			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
					cache->length);
		}

		scrub_pause_off(fs_info);
		down_write(&dev_replace->rwsem);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * must be called before we decrease @scrub_paused.
		 * make sure we don't block transaction commit while
		 * we are waiting pending workers finished.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		down_write(&dev_replace->rwsem);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    cache->used == 0) {
			spin_unlock(&cache->lock);
			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
				btrfs_discard_queue_work(&fs_info->discard_ctl,
							 cache);
			else
				btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}
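
/*
 * Scrub all super block copies that fit within the committed size of the
 * device, passing the generation the copies are expected to carry.
 */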
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	lockdep_assert_held(&fs_info->scrub_lock);

	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
		ASSERT(fs_info->scrub_workers == NULL);
		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
				flags, is_dev_replace ? 1 : max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		ASSERT(fs_info->scrub_wr_completion_workers == NULL);
		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		ASSERT(fs_info->scrub_parity_workers == NULL);
		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;

		refcount_set(&fs_info->scrub_workers_refcnt, 1);
	} else {
		refcount_inc(&fs_info->scrub_workers_refcnt);
	}
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}
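
/*
 * Entry point for both scrub and dev-replace. Validates the size assumptions
 * the page based scrub code relies on, sets up the scrub context and worker
 * threads, then scrubs the super blocks and all chunks of the given device.
 */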
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;
	struct btrfs_workqueue *scrub_workers = NULL;
	struct btrfs_workqueue *scrub_wr_comp = NULL;
	struct btrfs_workqueue *scrub_parity = NULL;

	if (btrfs_fs_closing(fs_info))
		return -EAGAIN;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			     "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
			     fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->nodesize,
			  SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->sectorsize,
			  SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	/* Allocate outside of device_list_mutex */
	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
	if (IS_ERR(sctx))
		return PTR_ERR(sctx);

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -ENODEV;
		goto out_free_ctx;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
				 rcu_str_deref(dev->name));
		ret = -EROFS;
		goto out_free_ctx;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EIO;
		goto out_free_ctx;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EINPROGRESS;
		goto out_free_ctx;
	}
	up_read(&fs_info->dev_replace.rwsem);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out_free_ctx;
	}

	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * checking @scrub_pause_req here, we can avoid
	 * race between committing transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
	 * invoked by our callees. The pausing request is done when the
	 * transaction commit starts, and it blocks the transaction until scrub
	 * is paused (done at specific points at scrub_stripe() or right above
	 * before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	if (!is_dev_replace)
		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
			   ret ? "not finished" : "finished", devid, ret);

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
		scrub_workers = fs_info->scrub_workers;
		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
		scrub_parity = fs_info->scrub_parity_workers;

		fs_info->scrub_workers = NULL;
		fs_info->scrub_wr_completion_workers = NULL;
		fs_info->scrub_parity_workers = NULL;
	}
	mutex_unlock(&fs_info->scrub_lock);

	btrfs_destroy_workqueue(scrub_workers);
	btrfs_destroy_workqueue(scrub_wr_comp);
	btrfs_destroy_workqueue(scrub_parity);
	scrub_put_ctx(sctx);

	return ret;

out_free_ctx:
	scrub_free_ctx(sctx);

	return ret;
}
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}