/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024
enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};
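
/* one btrfs_raid_bio tracks all the IO for a single full stripe (RMW, rebuild or scrub) */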
struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	atomic_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!table) {
		table = vzalloc(table_size);
		if (!table)
			return -ENOMEM;
	}

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s)) {
			continue;
		}

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}
/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				atomic_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}
/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		atomic_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}
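
/* locked helper: take the bio_list_lock and check whether this rbio already covers a full stripe */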
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * A parity scrub reads the full stripe from the drive, then
	 * checks and repairs the parity and writes the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	return 1;
}
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}
/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}
/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				atomic_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}


			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started with the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	atomic_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		atomic_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			atomic_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
			/*
			 * The barrier for this waitqueue_active is not needed,
			 * we're protected by h->lock and can't miss a wakeup.
			 */
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	WARN_ON(atomic_read(&rbio->refs) < 0);
	if (!atomic_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);

	kfree(rbio);
}
static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	unlock_stripe(rbio);
	__free_raid_bio(rbio);
}
/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *next;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	free_raid_bio(rbio);

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_error = err;
		bio_endio(cur);
		cur = next;
	}
}
/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	int err = bio->bi_error;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = -EIO;

	rbio_orig_end_io(rbio, err);
}
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}
/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
					 struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = root->fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	atomic_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_error &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_size = 0;
	bio->bi_bdev = stripe->dev->bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;
	struct page *p;
	int i;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		for (i = 0; i < bio->bi_vcnt; i++) {
			p = bio->bi_io_vec[i].bv_page;
			rbio->bio_pages[page_index + i] = p;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}


		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_bdev == stripe->dev->bdev) {
			return i;
		}
	}
	return -1;
}
/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}
/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}
/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	int i;
	struct page *p;

	for (i = 0; i < bio->bi_vcnt; i++) {
		p = bio->bi_io_vec[i].bv_page;
		SetPageUptodate(p);
	}
}
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
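
/*
 * the async helpers below just hand the rbio off to the rmw_workers
 * workqueue so the heavy lifting runs in helper threads instead of
 * the end_io or submission context
 */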
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			rmw_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}
/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}
/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}
/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};
/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}

			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last)
		__raid56_parity_write(last);
	kfree(plug);
}
/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}
static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(root->fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
			       sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = root->fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
	}
	return ret;
}
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	int err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = -ENOMEM;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = -EIO;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = -EIO;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = 0;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == 0) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, -EIO);
	else
		__raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, -EIO);
	return -EIO;
}
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(root->fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(root->fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe.  'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * currently lock owner is done
	 */
	return 0;
}
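
/*
 * work queue entry points: pull the rbio back out of the btrfs_work
 * and run the next phase of the rmw or rebuild
 */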
static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Note: We need to make sure that all the pages added into the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace, i.e.
 * those pages just hold metadata or file data with checksums.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* Now we just support the sectorsize equals to page size */
	ASSERT(root->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	return rbio;
}
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
/*
 * We just scrub the parity that we have correct data on the same horizontal,
 * so we needn't allocate all pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * Because the higher layers(scrubber) are unlikely to
	 * use this area of the disk again soon, so don't cache
	 * it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
		kunmap(p_page);
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
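
/* a stripe index counts as data if it falls below nr_data; everything else is P or Q */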
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we can not use the scrubbing parity to repair
		 * the data, our repair capability is reduced.
		 * (In the case of RAID5, we can not repair anything)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is bad,
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here means we got one corrupted data stripe and one
		 * corrupted parity on RAID6, if the corrupted parity
		 * is scrubbing parity, luckily, use the other one to repair
		 * the data, or we can not repair the data stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}
static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}
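
/*
 * scrub entry point: lock_stripe_add() returns 0 when we got the stripe
 * lock, in which case we queue the scrub work; otherwise the rbio was
 * put on the current lock owner's plug list and will be started when
 * that owner unlocks the stripe
 */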
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(root, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	return rbio;
}
static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}