// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md-bitmap.h"
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))
static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define RAID_1_10_NAME "raid1"
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
		     START, LAST, static inline, raid1_rb);
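
/*
 * Note on the macro above: INTERVAL_TREE_DEFINE() with the "raid1_rb" prefix
 * generates the static inline helpers used by the write-serialization code
 * below -- raid1_rb_insert(), raid1_rb_remove(), raid1_rb_iter_first() and
 * raid1_rb_iter_next() -- each keyed on the [start, last] sector range held
 * in a struct serial_info.
 */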
static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
				struct serial_info *si, int idx)
{
	unsigned long flags;
	int ret = 0;
	sector_t lo = r1_bio->sector;
	sector_t hi = lo + r1_bio->sectors;
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	/* collision happened */
	if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
		ret = -EBUSY;
	else {
		si->start = lo;
		si->last = hi;
		raid1_rb_insert(si, &serial->serial_rb);
	}
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	return ret;
}
static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	struct mddev *mddev = rdev->mddev;
	struct serial_info *si;
	int idx = sector_to_idx(r1_bio->sector);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	if (WARN_ON(!mddev->serial_info_pool))
		return;
	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
	wait_event(serial->serial_io_wait,
		   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
}
static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct serial_info *si;
	unsigned long flags;
	int found = 0;
	struct mddev *mddev = rdev->mddev;
	int idx = sector_to_idx(lo);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
	     si; si = raid1_rb_iter_next(si, lo, hi)) {
		if (si->start == lo && si->last == hi) {
			raid1_rb_remove(si, &serial->serial_rb);
			mempool_free(si, mddev->serial_info_pool);
			found = 1;
			break;
		}
	}
	if (!found)
		WARN(1, "The write IO is not recorded for serialization\n");
	spin_unlock_irqrestore(&serial->serial_lock, flags);
	wake_up(&serial->serial_io_wait);
}
/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}
static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
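
/*
 * Sizing sketch for the macros above (assuming RESYNC_BLOCK_SIZE is 64KiB,
 * as defined in raid1-10.c): RESYNC_SECTORS = 128, RESYNC_WINDOW = 32 * 64KiB
 * = 2MiB, RESYNC_WINDOW_SECTORS = 4096, and CLUSTER_RESYNC_WINDOW = 32MiB
 * (65536 sectors) for clustered arrays.
 */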
static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
			    gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
		if (!bio)
			goto out_free_bio;
		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks) {
		bio_uninit(r1_bio->bios[j]);
		kfree(r1_bio->bios[j]);
	}
	kfree(rps);

out_free_r1bio:
	rbio_pool_free(r1_bio, data);
	return NULL;
}
static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_uninit(r1bio->bios[i]);
		kfree(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r1bio, data);
}
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}
static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, &conf->r1bio_pool);
}
static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}
static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}
/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
}
static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sector = r1_bio->sector;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}

	free_r1bio(r1_bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle. All I/Os, even write-behind writes, are done.
	 */
	allow_barrier(conf, sector);
}
/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}
/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}
static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;

		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		pr_err_ratelimited("md/raid1:%s: %pg: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   rdev->bdev,
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}
static void close_write(struct r1bio *r1_bio)
{
	struct mddev *mddev = r1_bio->mddev;

	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}

	/* clear the bitmap if all writes complete successfully */
	mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,
				    !test_bit(R1BIO_Degraded, &r1_bio->state),
				    test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(mddev);
}
static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}
446 static void raid1_end_write_request(struct bio
*bio
)
448 struct r1bio
*r1_bio
= bio
->bi_private
;
449 int behind
= test_bit(R1BIO_BehindIO
, &r1_bio
->state
);
450 struct r1conf
*conf
= r1_bio
->mddev
->private;
451 struct bio
*to_put
= NULL
;
452 int mirror
= find_bio_disk(r1_bio
, bio
);
453 struct md_rdev
*rdev
= conf
->mirrors
[mirror
].rdev
;
455 sector_t lo
= r1_bio
->sector
;
456 sector_t hi
= r1_bio
->sector
+ r1_bio
->sectors
;
458 discard_error
= bio
->bi_status
&& bio_op(bio
) == REQ_OP_DISCARD
;
461 * 'one mirror IO has finished' event handler:
463 if (bio
->bi_status
&& !discard_error
) {
464 set_bit(WriteErrorSeen
, &rdev
->flags
);
465 if (!test_and_set_bit(WantReplacement
, &rdev
->flags
))
466 set_bit(MD_RECOVERY_NEEDED
, &
467 conf
->mddev
->recovery
);
469 if (test_bit(FailFast
, &rdev
->flags
) &&
470 (bio
->bi_opf
& MD_FAILFAST
) &&
471 /* We never try FailFast to WriteMostly devices */
472 !test_bit(WriteMostly
, &rdev
->flags
)) {
473 md_error(r1_bio
->mddev
, rdev
);
477 * When the device is faulty, it is not necessary to
478 * handle write error.
480 if (!test_bit(Faulty
, &rdev
->flags
))
481 set_bit(R1BIO_WriteError
, &r1_bio
->state
);
483 /* Fail the request */
484 set_bit(R1BIO_Degraded
, &r1_bio
->state
);
485 /* Finished with this branch */
486 r1_bio
->bios
[mirror
] = NULL
;
491 * Set R1BIO_Uptodate in our master bio, so that we
492 * will return a good error code for to the higher
493 * levels even if IO on some other mirrored buffer
496 * The 'master' represents the composite IO operation
497 * to user-side. So if something waits for IO, then it
498 * will wait for the 'master' bio.
500 r1_bio
->bios
[mirror
] = NULL
;
503 * Do not set R1BIO_Uptodate if the current device is
504 * rebuilding or Faulty. This is because we cannot use
505 * such device for properly reading the data back (we could
506 * potentially use it, if the current write would have felt
507 * before rdev->recovery_offset, but for simplicity we don't
510 if (test_bit(In_sync
, &rdev
->flags
) &&
511 !test_bit(Faulty
, &rdev
->flags
))
512 set_bit(R1BIO_Uptodate
, &r1_bio
->state
);
514 /* Maybe we can clear some bad blocks. */
515 if (rdev_has_badblock(rdev
, r1_bio
->sector
, r1_bio
->sectors
) &&
517 r1_bio
->bios
[mirror
] = IO_MADE_GOOD
;
518 set_bit(R1BIO_MadeGood
, &r1_bio
->state
);
523 if (test_bit(CollisionCheck
, &rdev
->flags
))
524 remove_serial(rdev
, lo
, hi
);
525 if (test_bit(WriteMostly
, &rdev
->flags
))
526 atomic_dec(&r1_bio
->behind_remaining
);
529 * In behind mode, we ACK the master bio once the I/O
530 * has safely reached all non-writemostly
531 * disks. Setting the Returned bit ensures that this
532 * gets done only once -- we don't ever want to return
533 * -EIO here, instead we'll wait
535 if (atomic_read(&r1_bio
->behind_remaining
) >= (atomic_read(&r1_bio
->remaining
)-1) &&
536 test_bit(R1BIO_Uptodate
, &r1_bio
->state
)) {
537 /* Maybe we can return now */
538 if (!test_and_set_bit(R1BIO_Returned
, &r1_bio
->state
)) {
539 struct bio
*mbio
= r1_bio
->master_bio
;
540 pr_debug("raid1: behind end write sectors"
542 (unsigned long long) mbio
->bi_iter
.bi_sector
,
543 (unsigned long long) bio_end_sector(mbio
) - 1);
544 call_bio_endio(r1_bio
);
547 } else if (rdev
->mddev
->serialize_policy
)
548 remove_serial(rdev
, lo
, hi
);
549 if (r1_bio
->bios
[mirror
] == NULL
)
550 rdev_dec_pending(rdev
, conf
->mddev
);
553 * Let's see if all mirrored write operations have finished
556 r1_bio_write_done(r1_bio
);
static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}
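
/*
 * Worked example for the helper above, as a sketch (assuming
 * BARRIER_UNIT_SECTOR_SIZE is 1 << 17 sectors, i.e. 64MiB, per raid1.h):
 * a request starting at sector 131000 for 1024 sectors gets
 * len = round_up(131001, 131072) - 131000 = 72 sectors, so the request is
 * clipped at the barrier-unit boundary and the remainder is resubmitted by
 * the caller.
 */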
static void update_read_sectors(struct r1conf *conf, int disk,
				sector_t this_sector, int len)
{
	struct raid1_info *info = &conf->mirrors[disk];

	atomic_inc(&info->rdev->nr_pending);
	if (info->next_seq_sect != this_sector)
		info->seq_start = this_sector;
	info->next_seq_sect = this_sector + len;
}
static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
			     int *max_sectors)
{
	sector_t this_sector = r1_bio->sector;
	int len = r1_bio->sectors;
	int disk;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		struct md_rdev *rdev;
		int read_len;

		if (r1_bio->bios[disk] == IO_BLOCKED)
			continue;

		rdev = conf->mirrors[disk].rdev;
		if (!rdev || test_bit(Faulty, &rdev->flags))
			continue;

		/* choose the first disk even if it has some bad blocks. */
		read_len = raid1_check_read_range(rdev, this_sector, &len);
		if (read_len > 0) {
			update_read_sectors(conf, disk, this_sector, read_len);
			*max_sectors = read_len;
			return disk;
		}
	}

	return -1;
}
static bool rdev_in_recovery(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	return !test_bit(In_sync, &rdev->flags) &&
	       rdev->recovery_offset < r1_bio->sector + r1_bio->sectors;
}
628 static int choose_bb_rdev(struct r1conf
*conf
, struct r1bio
*r1_bio
,
631 sector_t this_sector
= r1_bio
->sector
;
636 for (disk
= 0 ; disk
< conf
->raid_disks
* 2 ; disk
++) {
637 struct md_rdev
*rdev
;
641 if (r1_bio
->bios
[disk
] == IO_BLOCKED
)
644 rdev
= conf
->mirrors
[disk
].rdev
;
645 if (!rdev
|| test_bit(Faulty
, &rdev
->flags
) ||
646 rdev_in_recovery(rdev
, r1_bio
) ||
647 test_bit(WriteMostly
, &rdev
->flags
))
650 /* keep track of the disk with the most readable sectors. */
651 len
= r1_bio
->sectors
;
652 read_len
= raid1_check_read_range(rdev
, this_sector
, &len
);
653 if (read_len
> best_len
) {
659 if (best_disk
!= -1) {
660 *max_sectors
= best_len
;
661 update_read_sectors(conf
, best_disk
, this_sector
, best_len
);
667 static int choose_slow_rdev(struct r1conf
*conf
, struct r1bio
*r1_bio
,
670 sector_t this_sector
= r1_bio
->sector
;
675 for (disk
= 0 ; disk
< conf
->raid_disks
* 2 ; disk
++) {
676 struct md_rdev
*rdev
;
680 if (r1_bio
->bios
[disk
] == IO_BLOCKED
)
683 rdev
= conf
->mirrors
[disk
].rdev
;
684 if (!rdev
|| test_bit(Faulty
, &rdev
->flags
) ||
685 !test_bit(WriteMostly
, &rdev
->flags
) ||
686 rdev_in_recovery(rdev
, r1_bio
))
689 /* there are no bad blocks, we can use this disk */
690 len
= r1_bio
->sectors
;
691 read_len
= raid1_check_read_range(rdev
, this_sector
, &len
);
692 if (read_len
== r1_bio
->sectors
) {
693 *max_sectors
= read_len
;
694 update_read_sectors(conf
, disk
, this_sector
, read_len
);
699 * there are partial bad blocks, choose the rdev with largest
702 if (read_len
> bb_read_len
) {
704 bb_read_len
= read_len
;
709 *max_sectors
= bb_read_len
;
710 update_read_sectors(conf
, bb_disk
, this_sector
, bb_read_len
);
static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio)
{
	/* TODO: address issues with this check and concurrency. */
	return conf->mirrors[disk].next_seq_sect == r1_bio->sector ||
	       conf->mirrors[disk].head_position == r1_bio->sector;
}
/*
 * If buffered sequential IO size exceeds optimal iosize, check if there is
 * an idle disk. If yes, choose the idle disk.
 */
static bool should_choose_next(struct r1conf *conf, int disk)
{
	struct raid1_info *mirror = &conf->mirrors[disk];
	int opt_iosize;

	if (!test_bit(Nonrot, &mirror->rdev->flags))
		return false;

	opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9;
	return opt_iosize > 0 && mirror->seq_start != MaxSector &&
	       mirror->next_seq_sect > opt_iosize &&
	       mirror->next_seq_sect - opt_iosize >= mirror->seq_start;
}
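
/*
 * Illustrative numbers for the check above (a sketch, not taken from the
 * code): for an SSD reporting a 1MiB optimal I/O size, bdev_io_opt() >> 9
 * gives opt_iosize = 2048 sectors, so once a sequential stream has covered
 * at least 2048 sectors past seq_start, should_choose_next() allows
 * switching to an idle mirror instead of sticking with the current one.
 */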
static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	if (!rdev || test_bit(Faulty, &rdev->flags))
		return false;

	if (rdev_in_recovery(rdev, r1_bio))
		return false;

	/* don't read from slow disk unless have to */
	if (test_bit(WriteMostly, &rdev->flags))
		return false;

	/* don't split IO for bad blocks unless have to */
	if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors))
		return false;

	return true;
}
struct read_balance_ctl {
	sector_t closest_dist;
	int closest_dist_disk;
	int min_pending;
	int min_pending_disk;
	int readable_disks;
	int sequential_disk;
};
769 static int choose_best_rdev(struct r1conf
*conf
, struct r1bio
*r1_bio
)
772 struct read_balance_ctl ctl
= {
773 .closest_dist_disk
= -1,
774 .closest_dist
= MaxSector
,
775 .min_pending_disk
= -1,
776 .min_pending
= UINT_MAX
,
777 .sequential_disk
= -1,
780 for (disk
= 0 ; disk
< conf
->raid_disks
* 2 ; disk
++) {
781 struct md_rdev
*rdev
;
783 unsigned int pending
;
785 if (r1_bio
->bios
[disk
] == IO_BLOCKED
)
788 rdev
= conf
->mirrors
[disk
].rdev
;
789 if (!rdev_readable(rdev
, r1_bio
))
792 /* At least two disks to choose from so failfast is OK */
793 if (ctl
.readable_disks
++ == 1)
794 set_bit(R1BIO_FailFast
, &r1_bio
->state
);
796 pending
= atomic_read(&rdev
->nr_pending
);
797 dist
= abs(r1_bio
->sector
- conf
->mirrors
[disk
].head_position
);
799 /* Don't change to another disk for sequential reads */
800 if (is_sequential(conf
, disk
, r1_bio
)) {
801 if (!should_choose_next(conf
, disk
))
805 * Add 'pending' to avoid choosing this disk if
806 * there is other idle disk.
810 * If there is no other idle disk, this disk
813 ctl
.sequential_disk
= disk
;
816 if (ctl
.min_pending
> pending
) {
817 ctl
.min_pending
= pending
;
818 ctl
.min_pending_disk
= disk
;
821 if (ctl
.closest_dist
> dist
) {
822 ctl
.closest_dist
= dist
;
823 ctl
.closest_dist_disk
= disk
;
828 * sequential IO size exceeds optimal iosize, however, there is no other
829 * idle disk, so choose the sequential disk.
831 if (ctl
.sequential_disk
!= -1 && ctl
.min_pending
!= 0)
832 return ctl
.sequential_disk
;
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with less pending requests even if
	 * the disk is rotational, which might/might not be optimal for raids
	 * with mixed rotational/non-rotational disks depending on workload.
840 if (ctl
.min_pending_disk
!= -1 &&
841 (READ_ONCE(conf
->nonrot_disks
) || ctl
.min_pending
== 0))
842 return ctl
.min_pending_disk
;
844 return ctl
.closest_dist_disk
;
/*
 * This routine returns the disk from which the requested read should be done.
 *
 * 1) If resync is in progress, find the first usable disk and use it even if
 *    it has some bad blocks.
 *
 * 2) Now that there is no resync, loop through all disks, skipping slow disks
 *    and disks with bad blocks for now. Only pay attention to key disk
 *    choice.
 *
 * 3) If we've made it this far, now look for disks with bad blocks and choose
 *    the one with the most readable sectors.
 *
 * 4) If we are all the way at the end, we have no choice but to use a disk
 *    even if it is write mostly.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio,
			int *max_sectors)
{
	int disk;

	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if (raid1_should_read_first(conf->mddev, r1_bio->sector,
				    r1_bio->sectors))
		return choose_first_rdev(conf, r1_bio, max_sectors);

	disk = choose_best_rdev(conf, r1_bio);
	if (disk >= 0) {
		*max_sectors = r1_bio->sectors;
		update_read_sectors(conf, disk, r1_bio->sector,
				    r1_bio->sectors);
		return disk;
	}

	/*
	 * If we are here it means we didn't find a perfectly good disk so
	 * now spend a bit more time trying to find one with the most good
	 * sectors.
	 */
	disk = choose_bb_rdev(conf, r1_bio, max_sectors);
	if (disk >= 0)
		return disk;

	return choose_slow_rdev(conf, r1_bio, max_sectors);
}
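
/*
 * Usage sketch (illustrative, not a quote of the callers):
 * raid1_read_request() is the main consumer and pairs the selection with a
 * reference drop once the read completes, roughly:
 *
 *	rdisk = read_balance(conf, r1_bio, &max_sectors);
 *	if (rdisk < 0)
 *		// no usable mirror: fail the bio
 *	// submit at most max_sectors to conf->mirrors[rdisk].rdev;
 *	// rdev_dec_pending() happens from the read end_io path.
 */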
static void wake_up_barrier(struct r1conf *conf)
{
	if (wq_has_sleeper(&conf->wait_barrier))
		wake_up(&conf->wait_barrier);
}
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	raid1_prepare_flush_writes(conf->mddev);
	wake_up_barrier(conf);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;
		raid1_submit_write(bio);
		bio = next;
		cond_resched();
	}
}
static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again. As it is
		 * rare that this path is taken, it is perfectly safe to force
		 * us to go around the wait_event() loop again, so the warning
		 * is a false-positive. Silence the warning by resetting
		 * thread state.
		 */
		__set_current_state(TASK_RUNNING);
		blk_start_plug(&plug);
		flush_bio_list(conf, bio);
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}
/* Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO,
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 *
 * If resync/recovery is interrupted, returns -EINTR;
 * Otherwise, returns 0.
 */
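
/*
 * Pairing sketch (illustrative only): regular I/O brackets itself per
 * barrier bucket, resync brackets itself per resync window, e.g.
 *
 *	if (!wait_barrier(conf, bio->bi_iter.bi_sector, nowait))
 *		return;				// nowait and a barrier is up
 *	// ... issue regular I/O ...
 *	allow_barrier(conf, sector);
 *
 *	if (raise_barrier(conf, sector_nr) < 0)
 *		return 0;			// resync was interrupted
 *	// ... issue resync I/O ...
 *	lower_barrier(conf, sector_nr);		// e.g. from put_buf()
 */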
static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx] then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
	 * A memory barrier here to make sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    existing in corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches
	 *    max resync count which allowed on current I/O barrier bucket.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    (!conf->array_frozen &&
			     !atomic_read(&conf->nr_pending[idx]) &&
			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
			    test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
			    conf->resync_lock);

	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		atomic_dec(&conf->barrier[idx]);
		spin_unlock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		return -EINTR;
	}

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);

	return 0;
}
static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}
1033 static bool _wait_barrier(struct r1conf
*conf
, int idx
, bool nowait
)
1038 * We need to increase conf->nr_pending[idx] very early here,
1039 * then raise_barrier() can be blocked when it waits for
1040 * conf->nr_pending[idx] to be 0. Then we can avoid holding
1041 * conf->resync_lock when there is no barrier raised in same
1042 * barrier unit bucket. Also if the array is frozen, I/O
1043 * should be blocked until array is unfrozen.
1045 atomic_inc(&conf
->nr_pending
[idx
]);
1047 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
1048 * check conf->barrier[idx]. In raise_barrier() we firstly increase
1049 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
1050 * barrier is necessary here to make sure conf->barrier[idx] won't be
1051 * fetched before conf->nr_pending[idx] is increased. Otherwise there
1052 * will be a race between _wait_barrier() and raise_barrier().
1054 smp_mb__after_atomic();
1057 * Don't worry about checking two atomic_t variables at same time
1058 * here. If during we check conf->barrier[idx], the array is
	 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is
1060 * 0, it is safe to return and make the I/O continue. Because the
1061 * array is frozen, all I/O returned here will eventually complete
1062 * or be queued, no race will happen. See code comment in
1065 if (!READ_ONCE(conf
->array_frozen
) &&
1066 !atomic_read(&conf
->barrier
[idx
]))
1070 * After holding conf->resync_lock, conf->nr_pending[idx]
1071 * should be decreased before waiting for barrier to drop.
1072 * Otherwise, we may encounter a race condition because
1073 * raise_barrer() might be waiting for conf->nr_pending[idx]
1074 * to be 0 at same time.
1076 spin_lock_irq(&conf
->resync_lock
);
1077 atomic_inc(&conf
->nr_waiting
[idx
]);
1078 atomic_dec(&conf
->nr_pending
[idx
]);
1080 * In case freeze_array() is waiting for
1081 * get_unqueued_pending() == extra
1083 wake_up_barrier(conf
);
1084 /* Wait for the barrier in same barrier unit bucket to drop. */
1086 /* Return false when nowait flag is set */
1090 wait_event_lock_irq(conf
->wait_barrier
,
1091 !conf
->array_frozen
&&
1092 !atomic_read(&conf
->barrier
[idx
]),
1094 atomic_inc(&conf
->nr_pending
[idx
]);
1097 atomic_dec(&conf
->nr_waiting
[idx
]);
1098 spin_unlock_irq(&conf
->resync_lock
);
1102 static bool wait_read_barrier(struct r1conf
*conf
, sector_t sector_nr
, bool nowait
)
1104 int idx
= sector_to_idx(sector_nr
);
1108 * Very similar to _wait_barrier(). The difference is, for read
1109 * I/O we don't need wait for sync I/O, but if the whole array
1110 * is frozen, the read I/O still has to wait until the array is
1111 * unfrozen. Since there is no ordering requirement with
1112 * conf->barrier[idx] here, memory barrier is unnecessary as well.
1114 atomic_inc(&conf
->nr_pending
[idx
]);
1116 if (!READ_ONCE(conf
->array_frozen
))
1119 spin_lock_irq(&conf
->resync_lock
);
1120 atomic_inc(&conf
->nr_waiting
[idx
]);
1121 atomic_dec(&conf
->nr_pending
[idx
]);
1123 * In case freeze_array() is waiting for
1124 * get_unqueued_pending() == extra
1126 wake_up_barrier(conf
);
1127 /* Wait for array to be unfrozen */
1129 /* Return false when nowait flag is set */
1131 /* Return false when nowait flag is set */
1134 wait_event_lock_irq(conf
->wait_barrier
,
1135 !conf
->array_frozen
,
1137 atomic_inc(&conf
->nr_pending
[idx
]);
1140 atomic_dec(&conf
->nr_waiting
[idx
]);
1141 spin_unlock_irq(&conf
->resync_lock
);
static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
	int idx = sector_to_idx(sector_nr);

	return _wait_barrier(conf, idx, nowait);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up_barrier(conf);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}
/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}
static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() or wait_read_barrier(). The flying I/Os will either
	 * complete or be queued. When everything goes quiet, there are only
	 * queued I/Os left.
	 *
	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
	 * barrier bucket index which this I/O request hits. When all sync and
	 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
	 * of all conf->nr_queued[]. But normal I/O failure is an exception,
	 * in handle_read_error(), we may call freeze_array() before trying to
	 * fix the read error. In this case, the error read I/O is not queued,
	 * so get_unqueued_pending() == 1.
	 *
	 * Therefore before this function returns, we need to wait until
	 * get_unqueued_pending(conf) gets equal to extra. For normal I/O
	 * context, extra is 1; in all other situations extra is 0.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	mddev_add_trace_msg(conf->mddev, "raid1 wait freeze");
	wait_event_lock_irq_cmd(conf->wait_barrier,
				get_unqueued_pending(conf) == extra,
				conf->resync_lock,
				flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}
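
/*
 * Typical pairing, as a sketch (mirroring how handle_read_error() and the
 * management paths use these): extra is 1 when the caller itself still holds
 * one un-queued I/O, otherwise 0.
 *
 *	freeze_array(conf, 1);
 *	// ... fix up the failed read while the array is quiescent ...
 *	unfreeze_array(conf);
 */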
1222 static void alloc_behind_master_bio(struct r1bio
*r1_bio
,
1225 int size
= bio
->bi_iter
.bi_size
;
1226 unsigned vcnt
= (size
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
1228 struct bio
*behind_bio
= NULL
;
1230 behind_bio
= bio_alloc_bioset(NULL
, vcnt
, 0, GFP_NOIO
,
1231 &r1_bio
->mddev
->bio_set
);
1233 /* discard op, we don't support writezero/writesame yet */
1234 if (!bio_has_data(bio
)) {
1235 behind_bio
->bi_iter
.bi_size
= size
;
1239 while (i
< vcnt
&& size
) {
1241 int len
= min_t(int, PAGE_SIZE
, size
);
1243 page
= alloc_page(GFP_NOIO
);
1244 if (unlikely(!page
))
1247 if (!bio_add_page(behind_bio
, page
, len
, 0)) {
1256 bio_copy_data(behind_bio
, bio
);
1258 r1_bio
->behind_master_bio
= behind_bio
;
1259 set_bit(R1BIO_BehindIO
, &r1_bio
->state
);
1264 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1265 bio
->bi_iter
.bi_size
);
1266 bio_free_pages(behind_bio
);
1267 bio_put(behind_bio
);
1270 static void raid1_unplug(struct blk_plug_cb
*cb
, bool from_schedule
)
1272 struct raid1_plug_cb
*plug
= container_of(cb
, struct raid1_plug_cb
,
1274 struct mddev
*mddev
= plug
->cb
.data
;
1275 struct r1conf
*conf
= mddev
->private;
1278 if (from_schedule
) {
1279 spin_lock_irq(&conf
->device_lock
);
1280 bio_list_merge(&conf
->pending_bio_list
, &plug
->pending
);
1281 spin_unlock_irq(&conf
->device_lock
);
1282 wake_up_barrier(conf
);
1283 md_wakeup_thread(mddev
->thread
);
1288 /* we aren't scheduling, so we can do the write-out directly. */
1289 bio
= bio_list_get(&plug
->pending
);
1290 flush_bio_list(conf
, bio
);
1294 static void init_r1bio(struct r1bio
*r1_bio
, struct mddev
*mddev
, struct bio
*bio
)
1296 r1_bio
->master_bio
= bio
;
1297 r1_bio
->sectors
= bio_sectors(bio
);
1299 r1_bio
->mddev
= mddev
;
1300 r1_bio
->sector
= bio
->bi_iter
.bi_sector
;
1303 static inline struct r1bio
*
1304 alloc_r1bio(struct mddev
*mddev
, struct bio
*bio
)
1306 struct r1conf
*conf
= mddev
->private;
1307 struct r1bio
*r1_bio
;
1309 r1_bio
= mempool_alloc(&conf
->r1bio_pool
, GFP_NOIO
);
1310 /* Ensure no bio records IO_BLOCKED */
1311 memset(r1_bio
->bios
, 0, conf
->raid_disks
* sizeof(r1_bio
->bios
[0]));
1312 init_r1bio(r1_bio
, mddev
, bio
);
1316 static void raid1_read_request(struct mddev
*mddev
, struct bio
*bio
,
1317 int max_read_sectors
, struct r1bio
*r1_bio
)
1319 struct r1conf
*conf
= mddev
->private;
1320 struct raid1_info
*mirror
;
1321 struct bio
*read_bio
;
1322 const enum req_op op
= bio_op(bio
);
1323 const blk_opf_t do_sync
= bio
->bi_opf
& REQ_SYNC
;
1326 bool r1bio_existed
= !!r1_bio
;
1329 * If r1_bio is set, we are blocking the raid1d thread
1330 * so there is a tiny risk of deadlock. So ask for
1331 * emergency memory if needed.
1333 gfp_t gfp
= r1_bio
? (GFP_NOIO
| __GFP_HIGH
) : GFP_NOIO
;
1336 * Still need barrier for READ in case that whole
1339 if (!wait_read_barrier(conf
, bio
->bi_iter
.bi_sector
,
1340 bio
->bi_opf
& REQ_NOWAIT
)) {
1341 bio_wouldblock_error(bio
);
1346 r1_bio
= alloc_r1bio(mddev
, bio
);
1348 init_r1bio(r1_bio
, mddev
, bio
);
1349 r1_bio
->sectors
= max_read_sectors
;
1352 * make_request() can abort the operation when read-ahead is being
1353 * used and no empty request is available.
1355 rdisk
= read_balance(conf
, r1_bio
, &max_sectors
);
1357 /* couldn't find anywhere to read from */
1359 pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
1361 conf
->mirrors
[r1_bio
->read_disk
].rdev
->bdev
,
1363 raid_end_bio_io(r1_bio
);
1366 mirror
= conf
->mirrors
+ rdisk
;
1369 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %pg\n",
1371 (unsigned long long)r1_bio
->sector
,
1372 mirror
->rdev
->bdev
);
1374 if (test_bit(WriteMostly
, &mirror
->rdev
->flags
)) {
1376 * Reading from a write-mostly device must take care not to
1377 * over-take any writes that are 'behind'
1379 mddev_add_trace_msg(mddev
, "raid1 wait behind writes");
1380 mddev
->bitmap_ops
->wait_behind_writes(mddev
);
1383 if (max_sectors
< bio_sectors(bio
)) {
1384 struct bio
*split
= bio_split(bio
, max_sectors
,
1385 gfp
, &conf
->bio_split
);
1387 if (IS_ERR(split
)) {
1388 error
= PTR_ERR(split
);
1391 bio_chain(split
, bio
);
1392 submit_bio_noacct(bio
);
1394 r1_bio
->master_bio
= bio
;
1395 r1_bio
->sectors
= max_sectors
;
1398 r1_bio
->read_disk
= rdisk
;
1399 if (!r1bio_existed
) {
1400 md_account_bio(mddev
, &bio
);
1401 r1_bio
->master_bio
= bio
;
1403 read_bio
= bio_alloc_clone(mirror
->rdev
->bdev
, bio
, gfp
,
1406 r1_bio
->bios
[rdisk
] = read_bio
;
1408 read_bio
->bi_iter
.bi_sector
= r1_bio
->sector
+
1409 mirror
->rdev
->data_offset
;
1410 read_bio
->bi_end_io
= raid1_end_read_request
;
1411 read_bio
->bi_opf
= op
| do_sync
;
1412 if (test_bit(FailFast
, &mirror
->rdev
->flags
) &&
1413 test_bit(R1BIO_FailFast
, &r1_bio
->state
))
1414 read_bio
->bi_opf
|= MD_FAILFAST
;
1415 read_bio
->bi_private
= r1_bio
;
1416 mddev_trace_remap(mddev
, read_bio
, r1_bio
->sector
);
1417 submit_bio_noacct(read_bio
);
1421 atomic_dec(&mirror
->rdev
->nr_pending
);
1422 bio
->bi_status
= errno_to_blk_status(error
);
1423 set_bit(R1BIO_Uptodate
, &r1_bio
->state
);
1424 raid_end_bio_io(r1_bio
);
1427 static bool wait_blocked_rdev(struct mddev
*mddev
, struct bio
*bio
)
1429 struct r1conf
*conf
= mddev
->private;
1430 int disks
= conf
->raid_disks
* 2;
1434 for (i
= 0; i
< disks
; i
++) {
1435 struct md_rdev
*rdev
= conf
->mirrors
[i
].rdev
;
1440 /* don't write here until the bad block is acknowledged */
1441 if (test_bit(WriteErrorSeen
, &rdev
->flags
) &&
1442 rdev_has_badblock(rdev
, bio
->bi_iter
.bi_sector
,
1443 bio_sectors(bio
)) < 0)
1444 set_bit(BlockedBadBlocks
, &rdev
->flags
);
1446 if (rdev_blocked(rdev
)) {
1447 if (bio
->bi_opf
& REQ_NOWAIT
)
1450 mddev_add_trace_msg(rdev
->mddev
, "raid1 wait rdev %d blocked",
1452 atomic_inc(&rdev
->nr_pending
);
1453 md_wait_for_blocked_rdev(rdev
, rdev
->mddev
);
1461 static void raid1_write_request(struct mddev
*mddev
, struct bio
*bio
,
1462 int max_write_sectors
)
1464 struct r1conf
*conf
= mddev
->private;
1465 struct r1bio
*r1_bio
;
1466 int i
, disks
, k
, error
;
1467 unsigned long flags
;
1470 bool write_behind
= false;
1471 bool is_discard
= (bio_op(bio
) == REQ_OP_DISCARD
);
1473 if (mddev_is_clustered(mddev
) &&
1474 md_cluster_ops
->area_resyncing(mddev
, WRITE
,
1475 bio
->bi_iter
.bi_sector
, bio_end_sector(bio
))) {
1478 if (bio
->bi_opf
& REQ_NOWAIT
) {
1479 bio_wouldblock_error(bio
);
1483 prepare_to_wait(&conf
->wait_barrier
,
1485 if (!md_cluster_ops
->area_resyncing(mddev
, WRITE
,
1486 bio
->bi_iter
.bi_sector
,
1487 bio_end_sector(bio
)))
1491 finish_wait(&conf
->wait_barrier
, &w
);
1495 * Register the new request and wait if the reconstruction
1496 * thread has put up a bar for new requests.
1497 * Continue immediately if no resync is active currently.
1499 if (!wait_barrier(conf
, bio
->bi_iter
.bi_sector
,
1500 bio
->bi_opf
& REQ_NOWAIT
)) {
1501 bio_wouldblock_error(bio
);
1505 if (!wait_blocked_rdev(mddev
, bio
)) {
1506 bio_wouldblock_error(bio
);
1510 r1_bio
= alloc_r1bio(mddev
, bio
);
1511 r1_bio
->sectors
= max_write_sectors
;
1513 /* first select target devices under rcu_lock and
1514 * inc refcount on their rdev. Record them by setting
1516 * If there are known/acknowledged bad blocks on any device on
1517 * which we have seen a write error, we want to avoid writing those
1519 * This potentially requires several writes to write around
1520 * the bad blocks. Each set of writes gets it's own r1bio
1521 * with a set of bios attached.
1524 disks
= conf
->raid_disks
* 2;
1525 max_sectors
= r1_bio
->sectors
;
1526 for (i
= 0; i
< disks
; i
++) {
1527 struct md_rdev
*rdev
= conf
->mirrors
[i
].rdev
;
1530 * The write-behind io is only attempted on drives marked as
1531 * write-mostly, which means we could allocate write behind
1534 if (!is_discard
&& rdev
&& test_bit(WriteMostly
, &rdev
->flags
))
1535 write_behind
= true;
1537 r1_bio
->bios
[i
] = NULL
;
1538 if (!rdev
|| test_bit(Faulty
, &rdev
->flags
)) {
1539 if (i
< conf
->raid_disks
)
1540 set_bit(R1BIO_Degraded
, &r1_bio
->state
);
1544 atomic_inc(&rdev
->nr_pending
);
1545 if (test_bit(WriteErrorSeen
, &rdev
->flags
)) {
1550 is_bad
= is_badblock(rdev
, r1_bio
->sector
, max_sectors
,
1551 &first_bad
, &bad_sectors
);
1552 if (is_bad
&& first_bad
<= r1_bio
->sector
) {
1553 /* Cannot write here at all */
1554 bad_sectors
-= (r1_bio
->sector
- first_bad
);
1555 if (bad_sectors
< max_sectors
)
1556 /* mustn't write more than bad_sectors
1557 * to other devices yet
1559 max_sectors
= bad_sectors
;
1560 rdev_dec_pending(rdev
, mddev
);
1561 /* We don't set R1BIO_Degraded as that
1562 * only applies if the disk is
1563 * missing, so it might be re-added,
1564 * and we want to know to recover this
1566 * In this case the device is here,
1567 * and the fact that this chunk is not
1568 * in-sync is recorded in the bad
1574 int good_sectors
= first_bad
- r1_bio
->sector
;
1575 if (good_sectors
< max_sectors
)
1576 max_sectors
= good_sectors
;
1579 r1_bio
->bios
[i
] = bio
;
	/*
	 * When using a bitmap, we may call alloc_behind_master_bio below.
	 * alloc_behind_master_bio allocates a copy of the data payload a page
	 * at a time and thus needs a new bio that can fit the whole payload
	 * this bio in page sized chunks.
	 */
	if (write_behind && mddev->bitmap)
		max_sectors = min_t(int, max_sectors,
				    BIO_MAX_VECS * (PAGE_SIZE >> 9));
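	/*
	 * Worked sizing for the clamp above, as a sketch (assuming 4KiB
	 * pages): BIO_MAX_VECS is 256 and PAGE_SIZE >> 9 is 8 sectors per
	 * page, so a write-behind request is limited to 256 * 8 = 2048
	 * sectors (1MiB), the largest payload the behind master bio can
	 * carry with one page per bvec.
	 */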
1591 if (max_sectors
< bio_sectors(bio
)) {
1592 struct bio
*split
= bio_split(bio
, max_sectors
,
1593 GFP_NOIO
, &conf
->bio_split
);
1595 if (IS_ERR(split
)) {
1596 error
= PTR_ERR(split
);
1599 bio_chain(split
, bio
);
1600 submit_bio_noacct(bio
);
1602 r1_bio
->master_bio
= bio
;
1603 r1_bio
->sectors
= max_sectors
;
1606 md_account_bio(mddev
, &bio
);
1607 r1_bio
->master_bio
= bio
;
1608 atomic_set(&r1_bio
->remaining
, 1);
1609 atomic_set(&r1_bio
->behind_remaining
, 0);
1613 for (i
= 0; i
< disks
; i
++) {
1614 struct bio
*mbio
= NULL
;
1615 struct md_rdev
*rdev
= conf
->mirrors
[i
].rdev
;
1616 if (!r1_bio
->bios
[i
])
1620 unsigned long max_write_behind
=
1621 mddev
->bitmap_info
.max_write_behind
;
1622 struct md_bitmap_stats stats
;
1626 * Not if there are too many, or cannot
1627 * allocate memory, or a reader on WriteMostly
1628 * is waiting for behind writes to flush */
1629 err
= mddev
->bitmap_ops
->get_stats(mddev
->bitmap
, &stats
);
1630 if (!err
&& write_behind
&& !stats
.behind_wait
&&
1631 stats
.behind_writes
< max_write_behind
)
1632 alloc_behind_master_bio(r1_bio
, bio
);
1634 mddev
->bitmap_ops
->startwrite(
1635 mddev
, r1_bio
->sector
, r1_bio
->sectors
,
1636 test_bit(R1BIO_BehindIO
, &r1_bio
->state
));
1640 if (r1_bio
->behind_master_bio
) {
1641 mbio
= bio_alloc_clone(rdev
->bdev
,
1642 r1_bio
->behind_master_bio
,
1643 GFP_NOIO
, &mddev
->bio_set
);
1644 if (test_bit(CollisionCheck
, &rdev
->flags
))
1645 wait_for_serialization(rdev
, r1_bio
);
1646 if (test_bit(WriteMostly
, &rdev
->flags
))
1647 atomic_inc(&r1_bio
->behind_remaining
);
1649 mbio
= bio_alloc_clone(rdev
->bdev
, bio
, GFP_NOIO
,
1652 if (mddev
->serialize_policy
)
1653 wait_for_serialization(rdev
, r1_bio
);
1656 r1_bio
->bios
[i
] = mbio
;
1658 mbio
->bi_iter
.bi_sector
= (r1_bio
->sector
+ rdev
->data_offset
);
1659 mbio
->bi_end_io
= raid1_end_write_request
;
1660 mbio
->bi_opf
= bio_op(bio
) | (bio
->bi_opf
& (REQ_SYNC
| REQ_FUA
));
1661 if (test_bit(FailFast
, &rdev
->flags
) &&
1662 !test_bit(WriteMostly
, &rdev
->flags
) &&
1663 conf
->raid_disks
- mddev
->degraded
> 1)
1664 mbio
->bi_opf
|= MD_FAILFAST
;
1665 mbio
->bi_private
= r1_bio
;
1667 atomic_inc(&r1_bio
->remaining
);
1668 mddev_trace_remap(mddev
, mbio
, r1_bio
->sector
);
1669 /* flush_pending_writes() needs access to the rdev so...*/
1670 mbio
->bi_bdev
= (void *)rdev
;
1671 if (!raid1_add_bio_to_plug(mddev
, mbio
, raid1_unplug
, disks
)) {
1672 spin_lock_irqsave(&conf
->device_lock
, flags
);
1673 bio_list_add(&conf
->pending_bio_list
, mbio
);
1674 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1675 md_wakeup_thread(mddev
->thread
);
1679 r1_bio_write_done(r1_bio
);
1681 /* In case raid1d snuck in to freeze_array */
1682 wake_up_barrier(conf
);
1685 for (k
= 0; k
< i
; k
++) {
1686 if (r1_bio
->bios
[k
]) {
1687 rdev_dec_pending(conf
->mirrors
[k
].rdev
, mddev
);
1688 r1_bio
->bios
[k
] = NULL
;
1692 bio
->bi_status
= errno_to_blk_status(error
);
1693 set_bit(R1BIO_Uptodate
, &r1_bio
->state
);
1694 raid_end_bio_io(r1_bio
);
1697 static bool raid1_make_request(struct mddev
*mddev
, struct bio
*bio
)
1701 if (unlikely(bio
->bi_opf
& REQ_PREFLUSH
)
1702 && md_flush_request(mddev
, bio
))
1706 * There is a limit to the maximum size, but
1707 * the read/write handler might find a lower limit
1708 * due to bad blocks. To avoid multiple splits,
1709 * we pass the maximum number of sectors down
1710 * and let the lower level perform the split.
1712 sectors
= align_to_barrier_unit_end(
1713 bio
->bi_iter
.bi_sector
, bio_sectors(bio
));
1715 if (bio_data_dir(bio
) == READ
)
1716 raid1_read_request(mddev
, bio
, sectors
, NULL
);
1718 md_write_start(mddev
,bio
);
1719 raid1_write_request(mddev
, bio
, sectors
);
1724 static void raid1_status(struct seq_file
*seq
, struct mddev
*mddev
)
1726 struct r1conf
*conf
= mddev
->private;
1729 lockdep_assert_held(&mddev
->lock
);
1731 seq_printf(seq
, " [%d/%d] [", conf
->raid_disks
,
1732 conf
->raid_disks
- mddev
->degraded
);
1733 for (i
= 0; i
< conf
->raid_disks
; i
++) {
1734 struct md_rdev
*rdev
= READ_ONCE(conf
->mirrors
[i
].rdev
);
1736 seq_printf(seq
, "%s",
1737 rdev
&& test_bit(In_sync
, &rdev
->flags
) ? "U" : "_");
1739 seq_printf(seq
, "]");
/**
 * raid1_error() - RAID1 error handler.
 * @mddev: affected md device.
 * @rdev: member device to fail.
 *
 * The routine acknowledges &rdev failure and determines new @mddev state.
 * If it failed, then:
 *	- &MD_BROKEN flag is set in &mddev->flags.
 *	- recovery is disabled.
 * Otherwise, it must be degraded:
 *	- recovery is interrupted.
 *	- &mddev->degraded is bumped.
 *
 * @rdev is marked as &Faulty excluding case when array is failed and
 * &mddev->fail_last_dev is off.
 */
1758 static void raid1_error(struct mddev
*mddev
, struct md_rdev
*rdev
)
1760 struct r1conf
*conf
= mddev
->private;
1761 unsigned long flags
;
1763 spin_lock_irqsave(&conf
->device_lock
, flags
);
1765 if (test_bit(In_sync
, &rdev
->flags
) &&
1766 (conf
->raid_disks
- mddev
->degraded
) == 1) {
1767 set_bit(MD_BROKEN
, &mddev
->flags
);
1769 if (!mddev
->fail_last_dev
) {
1770 conf
->recovery_disabled
= mddev
->recovery_disabled
;
1771 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1775 set_bit(Blocked
, &rdev
->flags
);
1776 if (test_and_clear_bit(In_sync
, &rdev
->flags
))
1778 set_bit(Faulty
, &rdev
->flags
);
1779 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1781 * if recovery is running, make sure it aborts.
1783 set_bit(MD_RECOVERY_INTR
, &mddev
->recovery
);
1784 set_mask_bits(&mddev
->sb_flags
, 0,
1785 BIT(MD_SB_CHANGE_DEVS
) | BIT(MD_SB_CHANGE_PENDING
));
1786 pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
1787 "md/raid1:%s: Operation continuing on %d devices.\n",
1788 mdname(mddev
), rdev
->bdev
,
1789 mdname(mddev
), conf
->raid_disks
- mddev
->degraded
);
1792 static void print_conf(struct r1conf
*conf
)
1796 pr_debug("RAID1 conf printout:\n");
1798 pr_debug("(!conf)\n");
1801 pr_debug(" --- wd:%d rd:%d\n", conf
->raid_disks
- conf
->mddev
->degraded
,
1804 lockdep_assert_held(&conf
->mddev
->reconfig_mutex
);
1805 for (i
= 0; i
< conf
->raid_disks
; i
++) {
1806 struct md_rdev
*rdev
= conf
->mirrors
[i
].rdev
;
1808 pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
1809 i
, !test_bit(In_sync
, &rdev
->flags
),
1810 !test_bit(Faulty
, &rdev
->flags
),
1815 static void close_sync(struct r1conf
*conf
)
1819 for (idx
= 0; idx
< BARRIER_BUCKETS_NR
; idx
++) {
1820 _wait_barrier(conf
, idx
, false);
1821 _allow_barrier(conf
, idx
);
1824 mempool_exit(&conf
->r1buf_pool
);
1827 static int raid1_spare_active(struct mddev
*mddev
)
1830 struct r1conf
*conf
= mddev
->private;
1832 unsigned long flags
;
1835 * Find all failed disks within the RAID1 configuration
1836 * and mark them readable.
1837 * Called under mddev lock, so rcu protection not needed.
1838 * device_lock used to avoid races with raid1_end_read_request
1839 * which expects 'In_sync' flags and ->degraded to be consistent.
1841 spin_lock_irqsave(&conf
->device_lock
, flags
);
1842 for (i
= 0; i
< conf
->raid_disks
; i
++) {
1843 struct md_rdev
*rdev
= conf
->mirrors
[i
].rdev
;
1844 struct md_rdev
*repl
= conf
->mirrors
[conf
->raid_disks
+ i
].rdev
;
1846 && !test_bit(Candidate
, &repl
->flags
)
1847 && repl
->recovery_offset
== MaxSector
1848 && !test_bit(Faulty
, &repl
->flags
)
1849 && !test_and_set_bit(In_sync
, &repl
->flags
)) {
1850 /* replacement has just become active */
1852 !test_and_clear_bit(In_sync
, &rdev
->flags
))
1855 /* Replaced device not technically
1856 * faulty, but we need to be sure
1857 * it gets removed and never re-added
1859 set_bit(Faulty
, &rdev
->flags
);
1860 sysfs_notify_dirent_safe(
1865 && rdev
->recovery_offset
== MaxSector
1866 && !test_bit(Faulty
, &rdev
->flags
)
1867 && !test_and_set_bit(In_sync
, &rdev
->flags
)) {
1869 sysfs_notify_dirent_safe(rdev
->sysfs_state
);
1872 mddev
->degraded
-= count
;
1873 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1879 static bool raid1_add_conf(struct r1conf
*conf
, struct md_rdev
*rdev
, int disk
,
1882 struct raid1_info
*info
= conf
->mirrors
+ disk
;
1885 info
+= conf
->raid_disks
;
1890 if (bdev_nonrot(rdev
->bdev
)) {
1891 set_bit(Nonrot
, &rdev
->flags
);
1892 WRITE_ONCE(conf
->nonrot_disks
, conf
->nonrot_disks
+ 1);
1895 rdev
->raid_disk
= disk
;
1896 info
->head_position
= 0;
1897 info
->seq_start
= MaxSector
;
1898 WRITE_ONCE(info
->rdev
, rdev
);
1903 static bool raid1_remove_conf(struct r1conf
*conf
, int disk
)
1905 struct raid1_info
*info
= conf
->mirrors
+ disk
;
1906 struct md_rdev
*rdev
= info
->rdev
;
1908 if (!rdev
|| test_bit(In_sync
, &rdev
->flags
) ||
1909 atomic_read(&rdev
->nr_pending
))
1912 /* Only remove non-faulty devices if recovery is not possible. */
1913 if (!test_bit(Faulty
, &rdev
->flags
) &&
1914 rdev
->mddev
->recovery_disabled
!= conf
->recovery_disabled
&&
1915 rdev
->mddev
->degraded
< conf
->raid_disks
)
1918 if (test_and_clear_bit(Nonrot
, &rdev
->flags
))
1919 WRITE_ONCE(conf
->nonrot_disks
, conf
->nonrot_disks
- 1);
1921 WRITE_ONCE(info
->rdev
, NULL
);
1925 static int raid1_add_disk(struct mddev
*mddev
, struct md_rdev
*rdev
)
1927 struct r1conf
*conf
= mddev
->private;
1929 int mirror
= 0, repl_slot
= -1;
1930 struct raid1_info
*p
;
1932 int last
= conf
->raid_disks
- 1;
1934 if (mddev
->recovery_disabled
== conf
->recovery_disabled
)
1937 if (rdev
->raid_disk
>= 0)
1938 first
= last
= rdev
->raid_disk
;
1941 * find the disk ... but prefer rdev->saved_raid_disk
1944 if (rdev
->saved_raid_disk
>= 0 &&
1945 rdev
->saved_raid_disk
>= first
&&
1946 rdev
->saved_raid_disk
< conf
->raid_disks
&&
1947 conf
->mirrors
[rdev
->saved_raid_disk
].rdev
== NULL
)
1948 first
= last
= rdev
->saved_raid_disk
;
1950 for (mirror
= first
; mirror
<= last
; mirror
++) {
1951 p
= conf
->mirrors
+ mirror
;
1953 err
= mddev_stack_new_rdev(mddev
, rdev
);
1957 raid1_add_conf(conf
, rdev
, mirror
, false);
1958 /* As all devices are equivalent, we don't need a full recovery
1959 * if this was recently any drive of the array
1961 if (rdev
->saved_raid_disk
< 0)
1965 if (test_bit(WantReplacement
, &p
->rdev
->flags
) &&
1966 p
[conf
->raid_disks
].rdev
== NULL
&& repl_slot
< 0)
1970 if (err
&& repl_slot
>= 0) {
1971 /* Add this device as a replacement */
1972 clear_bit(In_sync
, &rdev
->flags
);
1973 set_bit(Replacement
, &rdev
->flags
);
1974 raid1_add_conf(conf
, rdev
, repl_slot
, true);
1983 static int raid1_remove_disk(struct mddev
*mddev
, struct md_rdev
*rdev
)
1985 struct r1conf
*conf
= mddev
->private;
1987 int number
= rdev
->raid_disk
;
1988 struct raid1_info
*p
= conf
->mirrors
+ number
;
1990 if (unlikely(number
>= conf
->raid_disks
))
1993 if (rdev
!= p
->rdev
) {
1994 number
+= conf
->raid_disks
;
1995 p
= conf
->mirrors
+ number
;
1999 if (rdev
== p
->rdev
) {
2000 if (!raid1_remove_conf(conf
, number
)) {
2005 if (number
< conf
->raid_disks
&&
2006 conf
->mirrors
[conf
->raid_disks
+ number
].rdev
) {
2007 /* We just removed a device that is being replaced.
2008 * Move down the replacement. We drain all IO before
2009 * doing this to avoid confusion.
2011 struct md_rdev
*repl
=
2012 conf
->mirrors
[conf
->raid_disks
+ number
].rdev
;
2013 freeze_array(conf
, 0);
2014 if (atomic_read(&repl
->nr_pending
)) {
2015 /* It means that some queued IO of retry_list
2016 * hold repl. Thus, we cannot set replacement
2017 * as NULL, avoiding rdev NULL pointer
2018 * dereference in sync_request_write and
2019 * handle_write_finished.
2022 unfreeze_array(conf
);
2025 clear_bit(Replacement
, &repl
->flags
);
2026 WRITE_ONCE(p
->rdev
, repl
);
2027 conf
->mirrors
[conf
->raid_disks
+ number
].rdev
= NULL
;
2028 unfreeze_array(conf
);
2031 clear_bit(WantReplacement
, &rdev
->flags
);
2032 err
= md_integrity_register(mddev
);
2040 static void end_sync_read(struct bio
*bio
)
2042 struct r1bio
*r1_bio
= get_resync_r1bio(bio
);
2044 update_head_pos(r1_bio
->read_disk
, r1_bio
);
2047 * we have read a block, now it needs to be re-written,
2048 * or re-read if the read failed.
2049 * We don't do much here, just schedule handling by raid1d
2051 if (!bio
->bi_status
)
2052 set_bit(R1BIO_Uptodate
, &r1_bio
->state
);
2054 if (atomic_dec_and_test(&r1_bio
->remaining
))
2055 reschedule_retry(r1_bio
);
2058 static void abort_sync_write(struct mddev
*mddev
, struct r1bio
*r1_bio
)
2060 sector_t sync_blocks
= 0;
2061 sector_t s
= r1_bio
->sector
;
2062 long sectors_to_go
= r1_bio
->sectors
;
2064 /* make sure these bits don't get cleared. */
2066 mddev
->bitmap_ops
->end_sync(mddev
, s
, &sync_blocks
);
2068 sectors_to_go
-= sync_blocks
;
2069 } while (sectors_to_go
> 0);
2072 static void put_sync_write_buf(struct r1bio
*r1_bio
, int uptodate
)
2074 if (atomic_dec_and_test(&r1_bio
->remaining
)) {
2075 struct mddev
*mddev
= r1_bio
->mddev
;
2076 int s
= r1_bio
->sectors
;
2078 if (test_bit(R1BIO_MadeGood
, &r1_bio
->state
) ||
2079 test_bit(R1BIO_WriteError
, &r1_bio
->state
))
2080 reschedule_retry(r1_bio
);
2083 md_done_sync(mddev
, s
, uptodate
);
2088 static void end_sync_write(struct bio
*bio
)
2090 int uptodate
= !bio
->bi_status
;
2091 struct r1bio
*r1_bio
= get_resync_r1bio(bio
);
2092 struct mddev
*mddev
= r1_bio
->mddev
;
2093 struct r1conf
*conf
= mddev
->private;
2094 struct md_rdev
*rdev
= conf
->mirrors
[find_bio_disk(r1_bio
, bio
)].rdev
;
2097 abort_sync_write(mddev
, r1_bio
);
2098 set_bit(WriteErrorSeen
, &rdev
->flags
);
2099 if (!test_and_set_bit(WantReplacement
, &rdev
->flags
))
2100 set_bit(MD_RECOVERY_NEEDED
, &
2102 set_bit(R1BIO_WriteError
, &r1_bio
->state
);
2103 } else if (rdev_has_badblock(rdev
, r1_bio
->sector
, r1_bio
->sectors
) &&
2104 !rdev_has_badblock(conf
->mirrors
[r1_bio
->read_disk
].rdev
,
2105 r1_bio
->sector
, r1_bio
->sectors
)) {
2106 set_bit(R1BIO_MadeGood
, &r1_bio
->state
);
2109 put_sync_write_buf(r1_bio
, uptodate
);
2112 static int r1_sync_page_io(struct md_rdev
*rdev
, sector_t sector
,
2113 int sectors
, struct page
*page
, blk_opf_t rw
)
2115 if (sync_page_io(rdev
, sector
, sectors
<< 9, page
, rw
, false))
2118 if (rw
== REQ_OP_WRITE
) {
2119 set_bit(WriteErrorSeen
, &rdev
->flags
);
2120 if (!test_and_set_bit(WantReplacement
,
2122 set_bit(MD_RECOVERY_NEEDED
, &
2123 rdev
->mddev
->recovery
);
2125 /* need to record an error - either for the block or the device */
2126 if (!rdev_set_badblocks(rdev
, sector
, sectors
, 0))
2127 md_error(rdev
->mddev
, rdev
);
static int fix_sync_read_error(struct r1bio *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	struct page **pages = get_resync_pages(bio)->pages;
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int idx = 0;
	struct md_rdev *rdev;

	rdev = conf->mirrors[r1_bio->read_disk].rdev;
	if (test_bit(FailFast, &rdev->flags)) {
		/* Don't try recovering from here - just fail it
		 * ... unless it is the last working device of course */
		md_error(mddev, rdev);
		if (test_bit(Faulty, &rdev->flags))
			/* Don't try to read from here, but make sure
			 * put_buf does it's thing
			 */
			bio->bi_end_io = end_sync_write;
	}

	while (sectors) {
		int s = sectors;
		int d = r1_bio->read_disk;
		int success = 0;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;
		do {
			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
				/* No rcu protection needed here devices
				 * can only be removed when no resync is
				 * active, and resync is currently active
				 */
				rdev = conf->mirrors[d].rdev;
				if (sync_page_io(rdev, sect, s<<9,
						 pages[idx],
						 REQ_OP_READ, false)) {
					success = 1;
					break;
				}
			}
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != r1_bio->read_disk);

		if (!success) {
			int abort = 0;
			/* Cannot read from anywhere, this block is lost.
			 * Record a bad block on each device.  If that doesn't
			 * work just disable and interrupt the recovery.
			 * Don't fail devices as that won't really help.
			 */
			pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev), bio->bi_bdev,
					    (unsigned long long)r1_bio->sector);
			for (d = 0; d < conf->raid_disks * 2; d++) {
				rdev = conf->mirrors[d].rdev;
				if (!rdev || test_bit(Faulty, &rdev->flags))
					continue;
				if (!rdev_set_badblocks(rdev, sect, s, 0))
					abort = 1;
			}
			if (abort) {
				conf->recovery_disabled =
					mddev->recovery_disabled;
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return 0;
			}
			/* Try next page */
			sectors -= s;
			sect += s;
			idx++;
			continue;
		}

		start = d;
		/* write it back and re-read */
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    pages[idx],
					    REQ_OP_WRITE) == 0) {
				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
			}
		}
		d = start;
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    pages[idx],
					    REQ_OP_READ) != 0)
				atomic_add(s, &rdev->corrected_errors);
		}
		sectors -= s;
		sect += s;
		idx++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	bio->bi_status = 0;
	return 1;
}

static void process_checks(struct r1bio *r1_bio)
{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int primary;
	int i;
	int vcnt;

	/* Fix variable parts of all bios */
	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	for (i = 0; i < conf->raid_disks * 2; i++) {
		blk_status_t status;
		struct bio *b = r1_bio->bios[i];
		struct resync_pages *rp = get_resync_pages(b);
		if (b->bi_end_io != end_sync_read)
			continue;
		/* fixup the bio for reuse, but preserve errno */
		status = b->bi_status;
		bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
		b->bi_status = status;
		b->bi_iter.bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		b->bi_end_io = end_sync_read;
		rp->raid_bio = r1_bio;
		b->bi_private = rp;

		/* initialize bvec table again */
		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
	}
	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    !r1_bio->bios[primary]->bi_status) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j = 0;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		blk_status_t status = sbio->bi_status;
		struct page **ppages = get_resync_pages(pbio)->pages;
		struct page **spages = get_resync_pages(sbio)->pages;
		struct bio_vec *bi;
		int page_len[RESYNC_PAGES] = { 0 };
		struct bvec_iter_all iter_all;

		if (sbio->bi_end_io != end_sync_read)
			continue;
		/* Now we can 'fixup' the error value */
		sbio->bi_status = 0;

		bio_for_each_segment_all(bi, sbio, iter_all)
			page_len[j++] = bi->bv_len;

		if (!status) {
			for (j = vcnt; j-- ; ) {
				if (memcmp(page_address(ppages[j]),
					   page_address(spages[j]),
					   page_len[j]))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && !status)) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}

		bio_copy_data(sbio, pbio);
	}
}
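
/*
 * Worked example for the page comparison above: vcnt is the number of whole
 * pages that cover the resync range.  With 4 KiB pages (PAGE_SHIFT - 9 == 3)
 * and, say, r1_bio->sectors == 17, vcnt = (17 + 8 - 1) >> 3 = 3, so the
 * memcmp() loop compares three page-sized chunks against the primary for
 * each remaining read target.
 */
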
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i;
	int disks = conf->raid_disks * 2;
	struct bio *wbio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		process_checks(r1_bio);

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;
		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
			abort_sync_write(mddev, r1_bio);
			continue;
		}

		wbio->bi_opf = REQ_OP_WRITE;
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
			wbio->bi_opf |= MD_FAILFAST;

		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));

		submit_bio_noacct(wbio);
	}

	put_sync_write_buf(r1_bio, 1);
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int read_disk = r1_bio->read_disk;
	struct mddev *mddev = conf->mddev;
	struct md_rdev *rdev = conf->mirrors[read_disk].rdev;

	if (exceed_read_errors(mddev, rdev)) {
		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
		return;
	}

	while (sectors) {
		int s = sectors;
		int d = read_disk;
		bool success = false;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    rdev_has_badblock(rdev, sect, s) == 0) {
				atomic_inc(&rdev->nr_pending);
				if (sync_page_io(rdev, sect, s<<9,
						 conf->tmppage, REQ_OP_READ, false))
					success = true;
				rdev_dec_pending(rdev, mddev);
				if (success)
					break;
			}

			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, REQ_OP_WRITE);
				rdev_dec_pending(rdev, mddev);
			}
		}
		d = start;
		while (d != read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, REQ_OP_READ)) {
					atomic_add(s, &rdev->corrected_errors);
					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
						mdname(mddev), s,
						(unsigned long long)(sect +
								     rdev->data_offset),
						rdev->bdev);
				}
				rdev_dec_pending(rdev, mddev);
			}
		}
		sectors -= s;
		sect += s;
	}
}

static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;

	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this somehow.
	 *
	 * We currently own a reference on the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = roundup(1 << rdev->badblocks.shift,
				bdev_logical_block_size(rdev->bdev) >> 9);
	sector = r1_bio->sector;
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors'*/

		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			wbio = bio_alloc_clone(rdev->bdev,
					       r1_bio->behind_master_bio,
					       GFP_NOIO, &mddev->bio_set);
		} else {
			wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
					       GFP_NOIO, &mddev->bio_set);
		}

		wbio->bi_opf = REQ_OP_WRITE;
		wbio->bi_iter.bi_sector = r1_bio->sector;
		wbio->bi_iter.bi_size = r1_bio->sectors << 9;

		bio_trim(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_iter.bi_sector += rdev->data_offset;

		if (submit_bio_wait(wbio) < 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}
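
/*
 * Alignment sketch for the first trimmed write above: assuming
 * rdev->badblocks.shift == 3 and a 512-byte logical block size,
 * block_sectors is 8.  If the failed range starts at sector 1003, the first
 * clone covers ((1003 + 8) & ~7) - 1003 = 5 sectors, which brings the next
 * write onto an 8-sector boundary; every later clone then spans a full
 * block_sectors (or whatever remains of sect_to_write).
 */
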
static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (!bio->bi_status &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (bio->bi_status &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}

static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m, idx;
	bool fail = false;

	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			fail = true;
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (fail) {
		spin_lock_irq(&conf->device_lock);
		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
		idx = sector_to_idx(r1_bio->sector);
		atomic_inc(&conf->nr_queued[idx]);
		spin_unlock_irq(&conf->device_lock);
		/*
		 * In case freeze_array() is waiting for condition
		 * get_unqueued_pending() == extra to be true.
		 */
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(conf->mddev->thread);
	} else {
		if (test_bit(R1BIO_WriteError, &r1_bio->state))
			close_write(r1_bio);
		raid_end_bio_io(r1_bio);
	}
}

static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	struct mddev *mddev = conf->mddev;
	struct bio *bio;
	struct md_rdev *rdev;
	sector_t sector;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write
	 * and check it that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen
	 */

	bio = r1_bio->bios[r1_bio->read_disk];
	bio_put(bio);
	r1_bio->bios[r1_bio->read_disk] = NULL;

	rdev = conf->mirrors[r1_bio->read_disk].rdev;
	if (mddev->ro == 0
	    && !test_bit(FailFast, &rdev->flags)) {
		freeze_array(conf, 1);
		fix_read_error(conf, r1_bio);
		unfreeze_array(conf);
	} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
		md_error(mddev, rdev);
	} else {
		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
	}

	rdev_dec_pending(rdev, conf->mddev);
	sector = r1_bio->sector;
	bio = r1_bio->master_bio;

	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
	r1_bio->state = 0;
	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
	allow_barrier(conf, sector);
}

static void raid1d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;
	int idx;

	md_check_recovery(mddev);

	if (!list_empty_careful(&conf->bio_end_io_list) &&
	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		LIST_HEAD(tmp);
		spin_lock_irqsave(&conf->device_lock, flags);
		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
			list_splice_init(&conf->bio_end_io_list, &tmp);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		while (!list_empty(&tmp)) {
			r1_bio = list_first_entry(&tmp, struct r1bio,
						  retry_list);
			list_del(&r1_bio->retry_list);
			idx = sector_to_idx(r1_bio->sector);
			atomic_dec(&conf->nr_queued[idx]);
			if (mddev->degraded)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			if (test_bit(R1BIO_WriteError, &r1_bio->state))
				close_write(r1_bio);
			raid_end_bio_io(r1_bio);
		}
	}

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		idx = sector_to_idx(r1_bio->sector);
		atomic_dec(&conf->nr_queued[idx]);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			WARN_ON_ONCE(1);

		cond_resched();
		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}

static int init_resync(struct r1conf *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(mempool_initialized(&conf->r1buf_pool));

	return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
			    r1buf_pool_free, conf->poolinfo);
}
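
/*
 * Sizing note (illustrative): buffs is simply how many RESYNC_BLOCK_SIZE
 * buffers fit in the resync window.  With the usual 64 KiB resync block and
 * a 32 MiB window that works out to roughly 512 pre-allocated r1buf entries,
 * though the exact figure depends on how those constants are defined for
 * this build.
 */
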
static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
{
	struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
	struct resync_pages *rps;
	struct bio *bio;
	int i;

	for (i = conf->poolinfo->raid_disks; i--; ) {
		bio = r1bio->bios[i];
		rps = bio->bi_private;
		bio_reset(bio, NULL, 0);
		bio->bi_private = rps;
	}
	r1bio->master_bio = NULL;
	return r1bio;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
				   sector_t max_sector, int *skipped)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	bool still_degraded = false;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0; /* number of sectors that are bad in all devices */
	int idx = sector_to_idx(sector_nr);
	int page_idx = 0;

	if (!mempool_initialized(&conf->r1buf_pool))
		if (init_resync(conf))
			return 0;

	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync.
		 * We can find the current address in mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
						    &sync_blocks);
		else /* completed sync */
			conf->fullsync = 0;

		mddev->bitmap_ops->close_sync(mddev);
		close_sync(conf);

		if (mddev_is_clustered(mddev)) {
			conf->cluster_sync_low = 0;
			conf->cluster_sync_high = 0;
		}
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call the bitmap_start_sync doesn't actually record anything
	 */
	if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, true) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}

	/*
	 * If there is non-resync activity waiting for a turn, then let it
	 * though before starting on this new sync request.
	 */
	if (atomic_read(&conf->nr_waiting[idx]))
		schedule_timeout_uninterruptible(1);

	/* we are incrementing sector_nr below. To be safe, we check against
	 * sector_nr + two times RESYNC_SECTORS
	 */

	mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
		mddev_is_clustered(mddev) &&
		(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));

	if (raise_barrier(conf, sector_nr))
		return 0;

	r1_bio = raid1_alloc_init_r1buf(conf);

	/*
	 * If we get a correctably read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);
	/* make sure good_sectors won't go across barrier unit boundary */
	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;
		bio = r1_bio->bios[i];

		rdev = conf->mirrors[i].rdev;
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = true;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_opf = REQ_OP_WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio->bi_opf = REQ_OP_READ;
				bio->bi_end_io = end_sync_read;
				read_targets++;
			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
				   test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
				   !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (InSync),
				 * but has bad block(s) here. Let's try to correct them,
				 * if we are doing resync or repair. Otherwise, leave
				 * this device alone for this sync request.
				 */
				bio->bi_opf = REQ_OP_WRITE;
				bio->bi_end_io = end_sync_write;
				write_targets++;
			}
		}
		if (rdev && bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
			bio_set_dev(bio, rdev->bdev);
			if (test_bit(FailFast, &rdev->flags))
				bio->bi_opf |= MD_FAILFAST;
		}
	}
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0
					) && ok;
			}
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;
	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;
		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
						&sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			struct resync_pages *rp;

			bio = r1_bio->bios[i];
			rp = get_resync_pages(bio);
			if (bio->bi_end_io) {
				page = resync_fetch_page(rp, page_idx);

				/*
				 * won't fail because the vec table is big
				 * enough to hold all these pages
				 */
				__bio_add_page(bio, page, len, 0);
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (++page_idx < RESYNC_PAGES);

	r1_bio->sectors = nr_sectors;

	if (mddev_is_clustered(mddev) &&
	    conf->cluster_sync_high < sector_nr + nr_sectors) {
		conf->cluster_sync_low = mddev->curr_resync_completed;
		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
		/* Send resync message */
		md_cluster_ops->resync_info_update(mddev,
				conf->cluster_sync_low,
				conf->cluster_sync_high);
	}

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct_bio(bio, nr_sectors);
				if (read_targets == 1)
					bio->bi_opf &= ~MD_FAILFAST;
				submit_bio_noacct(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct_bio(bio, nr_sectors);
		if (read_targets == 1)
			bio->bi_opf &= ~MD_FAILFAST;
		submit_bio_noacct(bio);
	}
	return nr_sectors;
}
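
/*
 * Buffer-filling sketch for the do/while loop above: with 4 KiB pages each
 * pass adds at most one page (8 sectors) to every bio that has bi_end_io
 * set, and the loop runs for up to RESYNC_PAGES passes, so a full window
 * submits RESYNC_PAGES * 8 sectors per device (64 KiB with the usual
 * RESYNC_BLOCK_SIZE).  Shorter runs stop early when max_sector or the
 * bitmap's sync_blocks limit is reached.
 */
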
static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}

static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_pending)
		goto abort;

	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_waiting)
		goto abort;

	conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
				  sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_queued)
		goto abort;

	conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
				sizeof(atomic_t), GFP_KERNEL);
	if (!conf->barrier)
		goto abort;

	conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
					    mddev->raid_disks, 2),
				GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
			   rbio_pool_free, conf->poolinfo);
	if (err)
		goto abort;

	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
	if (err)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	conf->raid_disks = mddev->raid_disks;
	rdev_for_each(rdev, mddev) {
		int disk_idx = rdev->raid_disk;

		if (disk_idx >= conf->raid_disks || disk_idx < 0)
			continue;

		if (!raid1_add_conf(conf, rdev, disk_idx,
				    test_bit(Replacement, &rdev->flags)))
			goto abort;
	}
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);
	INIT_LIST_HEAD(&conf->bio_end_io_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	err = -EIO;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		}
	}

	err = -ENOMEM;
	rcu_assign_pointer(conf->thread,
			   md_register_thread(raid1d, mddev, "raid1"));
	if (!conf->thread)
		goto abort;

	return conf;

 abort:
	if (conf) {
		mempool_exit(&conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf->nr_pending);
		kfree(conf->nr_waiting);
		kfree(conf->nr_queued);
		kfree(conf->barrier);
		bioset_exit(&conf->bio_split);
		kfree(conf);
	}
	return ERR_PTR(err);
}

static int raid1_set_limits(struct mddev *mddev)
{
	struct queue_limits lim;
	int err;

	md_init_stacking_limits(&lim);
	lim.max_write_zeroes_sectors = 0;
	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
	if (err) {
		queue_limits_cancel_update(mddev->gendisk->queue);
		return err;
	}
	return queue_limits_set(mddev->gendisk->queue, &lim);
}

static int raid1_run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	int ret;

	if (mddev->level != 1) {
		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
			mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
			mdname(mddev));
		return -EIO;
	}

	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in raid1_free()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (!mddev_is_dm(mddev)) {
		ret = raid1_set_limits(mddev);
		if (ret)
			return ret;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;
	/*
	 * RAID1 needs at least one disk in active
	 */
	if (conf->raid_disks - mddev->degraded < 1) {
		md_unregister_thread(mddev, &conf->thread);
		return -EINVAL;
	}

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
			mdname(mddev));
	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded,
		mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	rcu_assign_pointer(mddev->thread, conf->thread);
	rcu_assign_pointer(conf->thread, NULL);
	mddev->private = conf;
	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	ret = md_integrity_register(mddev);
	if (ret)
		md_unregister_thread(mddev, &mddev->thread);
	return ret;
}

static void raid1_free(struct mddev *mddev, void *priv)
{
	struct r1conf *conf = priv;

	mempool_exit(&conf->r1bio_pool);
	kfree(conf->mirrors);
	safe_put_page(conf->tmppage);
	kfree(conf->poolinfo);
	kfree(conf->nr_pending);
	kfree(conf->nr_waiting);
	kfree(conf->nr_queued);
	kfree(conf->barrier);
	bioset_exit(&conf->bio_split);
	kfree(conf);
}

static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize = raid1_size(mddev, sectors, 0);
	int ret;

	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;

	ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
	if (ret)
		return ret;

	md_set_array_sectors(mddev, newsize);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(struct mddev *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t newpool, oldpool;
	struct pool_info *newpoolinfo;
	struct raid1_info *newmirrors;
	struct r1conf *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2;
	int ret;

	memset(&newpool, 0, sizeof(newpool));
	memset(&oldpool, 0, sizeof(oldpool));

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	if (!mddev_is_clustered(mddev))
		md_allow_write(mddev);

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks * 2;

	ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
			   rbio_pool_free, newpoolinfo);
	if (ret) {
		kfree(newpoolinfo);
		return ret;
	}
	newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
					 raid_disks, 2),
			     GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_exit(&newpool);
		return -ENOMEM;
	}

	freeze_array(conf, 0);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		struct md_rdev *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				pr_warn("md/raid1:%s: cannot register rd%d\n",
					mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	unfreeze_array(conf);

	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_exit(&oldpool);
	return 0;
}
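
/*
 * Packing sketch: if a 4-disk array has rdevs only in slots 0 and 2, the
 * d2 loop above relinks them as raid_disk 0 and 1, so after a shrink to
 * raid_disks == 2 the surviving devices occupy the low-numbered slots and
 * the vacated high slots can simply disappear.
 */
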
static void raid1_quiesce(struct mddev *mddev, int quiesce)
{
	struct r1conf *conf = mddev->private;

	if (quiesce)
		freeze_array(conf, 0);
	else
		unfreeze_array(conf);
}

static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf)) {
			/* Array must appear to be quiesced */
			conf->array_frozen = 1;
			mddev_clear_unsupported_flags(mddev,
				UNSUPPORTED_MDDEV_FLAGS);
		}
		return conf;
	}
	return ERR_PTR(-EINVAL);
}
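
/*
 * Takeover note: a 2-device raid5 is effectively a mirror (each stripe is
 * one data block plus its parity, and parity over a single block equals the
 * block itself), so the conversion above only rewrites the metadata fields;
 * no data movement is needed.  The array is left frozen so the caller can
 * finish the level change before any I/O is allowed.
 */
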
static struct md_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= raid1_make_request,
	.run		= raid1_run,
	.free		= raid1_free,
	.status		= raid1_status,
	.error_handler	= raid1_error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= raid1_sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");