/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024
enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};
struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kvzalloc(table_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
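
/*
 * Worked example for rbio_bucket() above (illustrative numbers): two nearby
 * full-stripe start addresses such as 0x40000000 and 0x40010000 differ only
 * above bit 16, so hashing the raw byte addresses would make many buckets
 * collide; after the ">> 16" they become 0x4000 and 0x4001 and hash_64()
 * spreads them across different buckets.
 */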
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s)) {
			continue;
		}

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}
/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}
/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
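
/*
 * Note on run_xor(): callers place the destination buffer at pages[src_cnt]
 * and the sources in pages[0..src_cnt - 1], so after the loop the destination
 * has been xored with every source page, in chunks of at most MAX_XOR_BLOCKS
 * sources per xor_blocks() call.  finish_rmw(), for example, first copies D0
 * into the P page and then xors D1..Dn-1 into it, giving the usual RAID5
 * parity P = D0 ^ D1 ^ ... ^ Dn-1.
 */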
/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	else
		BUG_ON(size > rbio->nr_data * rbio->stripe_len);

	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * We've read the full stripe from the drive, and will check and
	 * repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				refcount_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}

			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started with the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}
static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}
/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}
/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}
/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
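
/*
 * Example (illustrative numbers): a 64K stripe_len with 4K pages and five
 * stripes in total (three data plus P and Q) needs
 * DIV_ROUND_UP(64K, 4K) * 5 = 80 pages.
 */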
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
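
/*
 * The single kzalloc() in alloc_rbio() above lays the rbio out as one
 * contiguous block:
 *
 *   [struct btrfs_raid_bio][stripe_pages pointers][bio_pages pointers][dbitmap]
 *
 * which is why stripe_pages, bio_pages and dbitmap are simply offsets from
 * (rbio + 1) rather than separate allocations.
 */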
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
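
/*
 * Stripe numbering used by finish_rmw() above (illustrative): for a RAID6
 * bbio with real_stripes == 5 and nr_data == 3, the data stripes are 0-2,
 * p_stripe is 3 and q_stripe is 4; for RAID5 only p_stripe is set and the
 * raid5 branch xors the data pages into it instead of calling gen_syndrome().
 */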
/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}
/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}
/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}
/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, i)
		SetPageUptodate(bvec->bv_page);
}
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come into create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};
/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
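
/*
 * plug_cmp() is the comparison callback handed to list_sort() in run_plug()
 * below: sorting the plugged rbios by the starting sector of their first bio
 * makes rbios that touch the same full stripe adjacent, so the merge loop can
 * combine them before parity is calculated.
 */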
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}
/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		/*
		 * - In case of two failures, where rbio->failb != -1:
		 *
		 *   Do not cache this rbio since the above read reconstruction
		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
		 *   changed some content of stripes which are not identical to
		 *   on-disk content any more, otherwise, a later write/recover
		 *   may steal stripe_pages from this rbio and end up with
		 *   corruptions or rebuild failures.
		 *
		 * - In case of single failure, where rbio->failb == -1:
		 *
		 *   Cache this rbio iff the above read reconstruction is
		 *   executed without problems.
		 */
		if (err == BLK_STS_OK && rbio->failb < 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
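
/*
 * Worked example for the pstripe path above (illustrative): with three data
 * stripes D0 D1 D2 and parity P = D0 ^ D1 ^ D2, losing D1 (faila == 1) is
 * repaired by copying P into the D1 slot, rotating the failed slot to the end
 * of the pointer array, and xoring the surviving data pages into it:
 * D1 = P ^ D0 ^ D2.
 */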
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity.  endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 *
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe.  'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * currently lock owner is done
	 */
	return 0;
}
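
/*
 * Example for the mirror_num handling above (illustrative): on a RAID6 bbio
 * with real_stripes == 6, mirror_num == 3 gives failb = 6 - 2 = 4, i.e. the
 * P stripe, forcing reconstruction from Q; mirror_num == 4 gives failb = 3,
 * additionally failing a data stripe so recovery exercises the p+q path.
 * The "failb <= faila" adjustment keeps failb from landing on the stripe
 * already recorded in faila.
 */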
static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure all the pages added to the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace,
 * i.e. those pages just hold metadata or file data with checksum.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* Now we just support the sectorsize equals to page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
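
/*
 * Index math above (illustrative): for a full stripe starting at raid_map[0],
 * a page at logical raid_map[0] + 24K lands at bio_pages[24K >> PAGE_SHIFT]
 * == bio_pages[6] with 4K pages, i.e. the same "data stripes first" indexing
 * that index_rbio_pages() uses for pages coming from the bio list.
 */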
/*
 * We just scrub the parity that we have correct data on the same horizontal,
 * so we needn't allocate all pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * Because the higher layers(scrubber) are unlikely to
	 * use this area of the disk again soon, so don't cache
	 * it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes.
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we cannot use the parity that is being scrubbed to
		 * repair data, our repair capability is reduced by one.
		 * (In the case of RAID5 we cannot repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good, only the parity is bad; just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one being
		 * scrubbed, we can luckily use the other parity to repair the
		 * data; otherwise we cannot repair the data stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*
 * end io for the read phase of the parity scrub.  All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate the
 * parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way.
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally start the write via finish_parity_scrub,
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

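/*
 * Read phase of the parity scrub: queue reads for every page covered by
 * dbitmap that the rbio doesn't already hold, then let
 * raid56_parity_scrub_end_io kick off the parity check and writeback.
 */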
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;

			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with us, it
		 * means there is nothing left to read.  But if there
		 * are missing devices it may not be safe to do the
		 * full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that.
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

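/*
 * Entry point for the parity scrub: grab the stripe lock and start the scrub
 * work.  If another rbio already holds the lock, we are queued behind it and
 * the scrub is started when the lock is handed over.
 */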
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and to make the rebuild rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

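/*
 * Entry point for rebuilding a missing device's stripe: grab the stripe lock
 * and run the normal RAID5/6 recovery path in a worker, or wait for the
 * current lock holder to hand the stripe over.
 */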
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}