// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "md-bitmap.h"
#include "raid5-log.h"
/*
 * Metadata and data are stored on disk in 4k units (blocks), regardless of
 * the underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)
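
/*
 * Worked example (illustrative): with 512-byte sectors, one 4k block
 * covers BLOCK_SECTORS = 8 sectors, and a sector offset converts to a
 * block index by shifting right by BLOCK_SECTOR_SHIFT, e.g.
 * 24 sectors >> BLOCK_SECTOR_SHIFT == block 3.
 */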
/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write through mode, the reclaim runs every log->max_free_space.
 * This prevents the recovery scan from taking too long.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE	4
static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */
struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, round to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	mempool_t meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* number of space that need to be
					 * reclaimed.  if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (eg, reclaim
					 * doesn't wait for specific io_unit
					 * switching to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};
/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. These data are tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This look up is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags are set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of the data pointer to be 2b'00,
 * so it is necessary to left shift the counter by 2 bits before using it
 * as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2
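
/*
 * Illustrative sketch (not part of the driver): a count of 3 would be
 * stored in a tree slot as (void *)(3UL << R5C_RADIX_COUNT_SHIFT) and
 * read back as (unsigned long)slot >> R5C_RADIX_COUNT_SHIFT, keeping
 * the low two bits zero as the radix tree requires.
 */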
/*
 * calculate key for big_stripe_tree
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_t offset;

	offset = sector_div(sect, conf->chunk_sectors);
	return sect;
}
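
/*
 * Worked example (illustrative): with chunk_sectors == 512 and
 * sect == 1234, sector_div() divides sect in place and returns the
 * remainder, leaving sect == 2 (the chunk index used as the tree key)
 * while the returned offset of 210 inside the chunk is discarded.
 */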
/*
 * An IO range starts at a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity that follows
 * it. The io unit is written to the log disk with a normal write; since we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no requirement to write the io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;		/* include flush request */
	unsigned int has_fua:1;			/* include fua request */
	unsigned int has_null_flush:1;		/* include null flush request */
	unsigned int has_flush_payload:1;	/* include flush payload */
	/*
	 * io isn't sent yet, flush/fua request can only be submitted till it's
	 * the first IO in running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;   /* size == 0 flush bios */
};
/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio start writing to log,
				 * doesn't accept new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finish writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};
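
/*
 * Illustrative lifecycle (derived from the states above): an io_unit
 * only moves forward, IO_UNIT_RUNNING -> IO_UNIT_IO_START ->
 * IO_UNIT_IO_END -> IO_UNIT_STRIPE_END; __r5l_set_io_unit_state()
 * below warns if a transition would repeat a state or go backwards.
 */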
bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}
static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
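
/*
 * Worked example (illustrative): with device_size == 1024 sectors,
 * r5l_ring_add(log, 1020, 8) wraps around to 4, and
 * r5l_ring_distance(log, 1020, 4) == 4 + 1024 - 1020 == 8, i.e. the
 * distance is always measured forward around the ring.
 */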
static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
					log->log_start);

	return log->device_size > used_size + size;
}
static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}
static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + STRIPE_SECTORS) {
		wbi2 = r5_next_bio(wbi, dev->sector);
		md_write_end(conf->mddev);
		bio_endio(wbi);
		wbi = wbi2;
	}
}
void r5c_handle_cached_data_endio(struct r5conf *conf,
				  struct stripe_head *sh, int disks)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
			md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					   STRIPE_SECTORS,
					   !test_bit(STRIPE_DEGRADED, &sh->state),
					   0);
		}
	}
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;

	if (!r5c_is_writeback(conf->log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 *   - stripe cache pressure high:
	 *          total_cached > 3/4 min_nr_stripes ||
	 *          empty_inactive_list_nr > 0
	 *   - stripe cache pressure moderate:
	 *          total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(conf->log, 0);
}
/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	if (!r5c_is_writeback(conf->log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> STRIPE_SHIFT))
		r5l_wake_reclaim(conf->log, 0);
}
/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes that occupy log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flush higher priorities:
 *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *       stripes ALREADY in journal can be flushed w/o pending writes;
 *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *       can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that passed 1. So total journal space
 * required to flush all cached stripes (in pages) is:
 *
 *     (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *     (group_cnt + 1) * (raid_disks + 1)
 *
 * which is equivalent to
 *
 *     (stripe_in_journal_count) * (max_degraded + 1) +
 *     (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
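
/*
 * Worked example (illustrative): a 10-disk RAID 6 array
 * (raid_disks == 10, max_degraded == 2) with group_cnt == 3 and 100
 * stripes in journal needs at most
 * BLOCK_SECTORS * ((2 + 1) * 100 + (10 - 2) * (3 + 1)) ==
 * 8 * 332 == 2656 sectors of log space to flush the cache.
 */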
/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}
/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = conf->log;

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}
static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}
/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}
/*
 * Setting proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = sh->raid_conf->log;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}
static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}
static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}
static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}
static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;
	bool has_null_flush;
	bool has_flush_payload;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, &log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);

	/*
	 * if the io doesn't have null_flush or flush payload,
	 * it is not safe to access it after releasing io_list_lock.
	 * Therefore, it is necessary to check the condition with
	 * the lock held.
	 */
	has_null_flush = io->has_null_flush;
	has_flush_payload = io->has_flush_payload;

	if (log->need_cache_flush && !list_empty(&io->stripe_list))
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
	if (has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			if (atomic_dec_and_test(&io->pending_stripe)) {
				__r5l_stripe_write_finished(io);
				return;
			}
		}
	}
	/* decrease pending_stripe for flush payload */
	if (has_flush_payload)
		if (atomic_dec_and_test(&io->pending_stripe))
			__r5l_stripe_write_finished(io);
}
static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	/*
	 * In case of journal device failures, submit_bio will get error
	 * and calls endio, then active stripes will continue write
	 * process. Therefore, it is not necessary to check Faulty bit
	 * of journal device here.
	 *
	 * We can't check split_bio after current_bio is submitted. If
	 * io->split_bio is null, after current_bio is submitted, current_bio
	 * might already be completed and the io_unit is freed. We submit
	 * split_bio first to avoid the issue.
	 */
	if (io->split_bio) {
		if (io->has_flush)
			io->split_bio->bi_opf |= REQ_PREFLUSH;
		if (io->has_fua)
			io->split_bio->bi_opf |= REQ_FUA;
		submit_bio(io->split_bio);
	}

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);
}
/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}
static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int locked = 0;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));

	/* wait superblock change before suspend */
	wait_event(mddev->sb_wait,
		   conf->log == NULL ||
		   (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
		    (locked = mddev_trylock(mddev))));
	if (locked) {
		mddev_suspend(mddev);
		log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}
static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs);

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio_set_dev(bio, log->rdev->bdev);
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}
static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}
static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}
static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}
*log
, u16 type
,
826 u32 checksum1
, u32 checksum2
,
827 bool checksum2_valid
)
829 struct r5l_io_unit
*io
= log
->current_io
;
830 struct r5l_payload_data_parity
*payload
;
832 payload
= page_address(io
->meta_page
) + io
->meta_offset
;
833 payload
->header
.type
= cpu_to_le16(type
);
834 payload
->header
.flags
= cpu_to_le16(0);
835 payload
->size
= cpu_to_le32((1 + !!checksum2_valid
) <<
837 payload
->location
= cpu_to_le64(location
);
838 payload
->checksum
[0] = cpu_to_le32(checksum1
);
840 payload
->checksum
[1] = cpu_to_le32(checksum2
);
842 io
->meta_offset
+= sizeof(struct r5l_payload_data_parity
) +
843 sizeof(__le32
) * (1 + !!checksum2_valid
);
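
/*
 * Illustrative layout note (derived from the code above): each payload
 * descriptor in the meta page is a struct r5l_payload_data_parity
 * followed by one __le32 checksum per page it covers, so a data
 * payload advances meta_offset by
 * sizeof(struct r5l_payload_data_parity) + sizeof(__le32), and a
 * RAID 6 parity payload (checksum2_valid == true) by
 * sizeof(struct r5l_payload_data_parity) + 2 * sizeof(__le32).
 */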
static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}
static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_io_unit *io;
	struct r5l_payload_flush *payload;
	int meta_size;

	/*
	 * payload_flush requires extra writes to the journal.
	 * To avoid handling the extra IO in quiesce, just skip
	 * flush_payload
	 */
	if (conf->quiesce)
		return;

	mutex_lock(&log->io_mutex);
	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

	if (r5l_get_meta(log, meta_size)) {
		mutex_unlock(&log->io_mutex);
		return;
	}

	/* current implementation is one stripe per flush payload */
	io = log->current_io;
	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32(sizeof(__le64));
	payload->flush_stripes[0] = cpu_to_le64(sect);
	io->meta_offset += meta_size;
	/* multiple flush payloads count as one pending_stripe */
	if (!io->has_flush_payload) {
		io->has_flush_payload = 1;
		atomic_inc(&io->pending_stripe);
	}
	mutex_unlock(&log->io_mutex);
}
static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
		io->has_flush = 1;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
			io->has_fua = 1;
			/*
			 * we need to flush journal to make sure recovery can
			 * reach the data with fua flag
			 */
			io->has_flush = 1;
		}
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (parity_pages == 2) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else if (parity_pages == 1) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	} else /* Just writing data, not parity, in caching phase */
		BUG_ON(parity_pages != 0);

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return 0;

	if (sh->log_start == MaxSector) {
		BUG_ON(!list_empty(&sh->r5c));
		sh->log_start = io->log_start;
		spin_lock_irq(&log->stripe_in_journal_lock);
		list_add_tail(&sh->r5c,
			      &log->stripe_in_journal_list);
		spin_unlock_irq(&log->stripe_in_journal_lock);
		atomic_inc(&log->stripe_in_journal_count);
	}

	return 0;
}
/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
					   struct stripe_head *sh)
{
	spin_lock(&log->no_space_stripes_lock);
	list_add_tail(&sh->log_list, &log->no_space_stripes);
	spin_unlock(&log->no_space_stripes_lock);
}
/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int write_disks = 0;
	int data_pages, parity_pages;
	int reserve;
	int i;
	int ret = 0;
	bool wake_reclaim = false;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;

		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		if (!r5l_has_free_space(log, reserve)) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	} else {  /* R5C_JOURNAL_MODE_WRITE_BACK */
		/*
		 * log space critical, do not process stripes that are
		 * not in cache yet (sh->log_start == MaxSector).
		 */
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
		    sh->log_start == MaxSector) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
			reserve = 0;
		} else if (!r5l_has_free_space(log, reserve)) {
			if (sh->log_start == log->last_checkpoint)
				BUG();
			else
				r5l_add_no_space_stripe(log, sh);
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	}

	mutex_unlock(&log->io_mutex);
	if (wake_reclaim)
		r5l_wake_reclaim(log, reserve);
	return 0;
}
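
/*
 * Worked example (illustrative): a stripe writing 3 data pages plus
 * both parity pages on RAID 6 has write_disks == 5, so with 4k pages
 * reserve == (1 + 5) << (PAGE_SHIFT - 9) == 48 sectors: one block for
 * the meta page plus one block per data/parity page.
 */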
void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}
int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		/*
		 * in write through (journal only)
		 * we flush log disk cache first, then write stripe data to
		 * raid disks. So if bio is finished, the log disk cache is
		 * flushed already. The recovery guarantees we can recover
		 * the bio from log disk, so we don't need to flush again
		 */
		if (bio->bi_iter.bi_size == 0) {
			bio_endio(bio);
			return 0;
		}
		bio->bi_opf &= ~REQ_PREFLUSH;
	} else {
		/* write back (with cache) */
		if (bio->bi_iter.bi_size == 0) {
			mutex_lock(&log->io_mutex);
			r5l_get_meta(log, 0);
			bio_list_add(&log->current_io->flush_barriers, bio);
			log->current_io->has_flush = 1;
			log->current_io->has_null_flush = 1;
			atomic_inc(&log->current_io->pending_stripe);
			r5l_submit_current_io(log);
			mutex_unlock(&log->io_mutex);
			return 0;
		}
	}
	return -EAGAIN;
}
/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}
/*
 * calculate new last_checkpoint
 * for write through mode, returns log->next_checkpoint
 * for write back, returns log_start of first sh in stripe_in_journal_list
 */
static sector_t r5c_calculate_new_cp(struct r5conf *conf)
{
	struct stripe_head *sh;
	struct r5l_log *log = conf->log;
	sector_t new_cp;
	unsigned long flags;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return log->next_checkpoint;

	spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
	if (list_empty(&conf->log->stripe_in_journal_list)) {
		/* all stripes flushed */
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
		return log->next_checkpoint;
	}
	sh = list_first_entry(&conf->log->stripe_in_journal_list,
			      struct stripe_head, r5c);
	new_cp = sh->log_start;
	spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	return new_cp;
}
static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;

	return r5l_ring_distance(log, log->last_checkpoint,
				 r5c_calculate_new_cp(conf));
}
static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
	struct stripe_head *sh;

	lockdep_assert_held(&log->io_list_lock);

	if (!list_empty(&log->no_mem_stripes)) {
		sh = list_first_entry(&log->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}
static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;

		list_del(&io->log_sibling);
		mempool_free(io, &log->io_pool);
		r5l_run_no_mem_stripe(log);

		found = true;
	}

	return found;
}
static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	struct r5conf *conf = log->rdev->mddev->private;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space ||
	    test_bit(R5C_LOG_TIGHT, &conf->cache_state))
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}
void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}
static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
		flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}
/*
 * Starting dispatch IO to raid.
 * io_unit(meta) consists of a log. There is one situation we want to avoid. A
 * broken meta in the middle of a log causes recovery to be unable to find the
 * meta at the head of the log. If an operation requires meta at the head to be
 * persistent in the log, we must make sure the meta before it is persistent in
 * the log too. A case is:
 *
 * stripe data/parity is in log, we start write stripe to raid disks. stripe
 * data/parity must be persistent in log before we do the write to raid disks.
 *
 * The solution is we strictly maintain io_unit list order. In this case, we
 * only write stripes of an io_unit to raid disks till the io_unit is the first
 * one whose data/parity is in log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	bio_set_dev(&log->flush_bio, log->rdev->bdev);
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	submit_bio(&log->flush_bio);
}
static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_write_super_and_discard_space(struct r5l_log *log,
	sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return;

	mddev = log->rdev->mddev;
	/*
	 * Discard could zero data, so before discard we must make sure
	 * superblock is updated to new log tail. Updating superblock (either
	 * directly call md_update_sb() or depend on md thread) must hold
	 * reconfig mutex. On the other hand, raid5_quiesce is called with
	 * reconfig_mutex held. The first step of raid5_quiesce() is waiting
	 * for all IO to finish, hence waiting for the reclaim thread, while
	 * the reclaim thread is calling this function and waiting for the
	 * reconfig mutex. So there is a deadlock. We workaround this issue
	 * with a trylock.
	 * FIXME: we could miss discard if we can't take reconfig mutex
	 */
	set_mask_bits(&mddev->sb_flags, 0,
		BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	if (!mddev_trylock(mddev))
		return;
	md_update_sb(mddev, 1);
	mddev_unlock(mddev);

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
}
/*
 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
 *
 * must hold conf->device_lock
 */
static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(list_empty(&sh->lru));
	BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));

	/*
	 * The stripe is not ON_RELEASE_LIST, so it is safe to call
	 * raid5_release_stripe() while holding conf->device_lock
	 */
	BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
	lockdep_assert_held(&conf->device_lock);

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);

	set_bit(STRIPE_HANDLE, &sh->state);
	atomic_inc(&conf->active_stripes);
	r5c_make_stripe_write_out(sh);

	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
		atomic_inc(&conf->r5c_flushing_partial_stripes);
	else
		atomic_inc(&conf->r5c_flushing_full_stripes);
	raid5_release_stripe(sh);
}
/*
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If less than num full stripes are
 *             flushed, flush some partial stripes until totally num stripes are
 *             flushed or there are no more cached stripes.
 */
void r5c_flush_cache(struct r5conf *conf, int num)
{
	int count;
	struct stripe_head *sh, *next;

	lockdep_assert_held(&conf->device_lock);
	if (!conf->log)
		return;

	count = 0;
	list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		count++;
	}

	if (count >= num)
		return;
	list_for_each_entry_safe(sh, next,
				 &conf->r5c_partial_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		if (++count >= num)
			break;
	}
}
static void r5c_do_reclaim(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;
	struct stripe_head *sh;
	int count = 0;
	unsigned long flags;
	int total_cached;
	int stripes_to_flush;
	int flushing_partial, flushing_full;

	if (!r5c_is_writeback(log))
		return;

	flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
	flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes) -
		flushing_full - flushing_partial;

	if (total_cached > conf->min_nr_stripes * 3 / 4 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		/*
		 * if stripe cache pressure high, flush all full stripes and
		 * some partial stripes
		 */
		stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
	else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
		 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
		 R5C_FULL_STRIPE_FLUSH_BATCH(conf))
		/*
		 * if stripe cache pressure moderate, or if there are many
		 * full stripes, flush all full stripes
		 */
		stripes_to_flush = 0;
	else
		/* no need to flush */
		stripes_to_flush = -1;

	if (stripes_to_flush >= 0) {
		spin_lock_irqsave(&conf->device_lock, flags);
		r5c_flush_cache(conf, stripes_to_flush);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	/* if log space is tight, flush stripes on stripe_in_journal_list */
	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
		spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
		spin_lock(&conf->device_lock);
		list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
			/*
			 * stripes on stripe_in_journal_list could be in any
			 * state of the stripe_cache state machine. In this
			 * case, we only want to flush stripe on
			 * r5c_cached_full/partial_stripes. The following
			 * condition makes sure the stripe is on one of the
			 * two lists.
			 */
			if (!list_empty(&sh->lru) &&
			    !test_bit(STRIPE_HANDLE, &sh->state) &&
			    atomic_read(&sh->count) == 0) {
				r5c_flush_stripe(conf, sh);
				if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
					break;
			}
		}
		spin_unlock(&conf->device_lock);
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	}

	if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
		r5l_run_no_space_stripes(log);

	md_wakeup_thread(conf->mddev->thread);
}
static void r5l_do_reclaim(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	bool write_super;

	spin_lock_irq(&log->io_list_lock);
	write_super = r5l_reclaimable_space(log) > log->max_free_space ||
		reclaim_target != 0 || !list_empty(&log->no_space_stripes);
	/*
	 * move proper io_unit to reclaim list. We should not change the order.
	 * reclaimable/unreclaimable io_unit can be mixed in the list, we
	 * shouldn't reuse space of an unreclaimable io_unit
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = r5c_calculate_new_cp(conf);
	spin_unlock_irq(&log->io_list_lock);

	if (reclaimable == 0 || !write_super)
		return;

	/*
	 * write_super will flush cache of each raid disk. We must write super
	 * here, because the log area might be reused soon and we don't want to
	 * confuse recovery
	 */
	r5l_write_super_and_discard_space(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	r5c_update_log_state(log);
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}
*thread
)
1553 struct mddev
*mddev
= thread
->mddev
;
1554 struct r5conf
*conf
= mddev
->private;
1555 struct r5l_log
*log
= conf
->log
;
1559 r5c_do_reclaim(conf
);
1560 r5l_do_reclaim(log
);
void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	if (!log)
		return;
	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}
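
/*
 * Note (illustrative): the cmpxchg() loop above implements an atomic
 * "raise reclaim_target to at least space": concurrent callers may
 * race, but the field only ever moves toward the largest requested
 * value, and a caller whose request is already covered returns
 * without waking the reclaim thread again.
 */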
void r5l_quiesce(struct r5l_log *log, int quiesce)
{
	struct mddev *mddev;

	if (quiesce) {
		/* make sure r5l_write_super_and_discard_space exits */
		mddev = log->rdev->mddev;
		wake_up(&mddev->sb_wait);
		kthread_park(log->reclaim_thread->tsk);
		r5l_wake_reclaim(log, MaxSector);
		r5l_do_reclaim(log);
	} else
		kthread_unpark(log->reclaim_thread->tsk);
}
bool r5l_log_disk_error(struct r5conf *conf)
{
	struct r5l_log *log;
	bool ret;

	/* don't allow write if journal disk is missing */
	rcu_read_lock();
	log = rcu_dereference(conf->log);

	if (!log)
		ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	else
		ret = test_bit(Faulty, &log->rdev->flags);
	rcu_read_unlock();
	return ret;
}
#define R5L_RECOVERY_PAGE_POOL_SIZE 256

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
	int data_parity_stripes;	/* number of data_parity stripes */
	int data_only_stripes;		/* number of data_only stripes */
	struct list_head cached_list;

	/*
	 * read ahead page pool (ra_pool)
	 * in recovery, log is read sequentially. It is not efficient to
	 * read every page with sync_page_io(). The read ahead page pool
	 * reads multiple pages with one IO, so further log read can
	 * just copy data from the pool.
	 */
	struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
	sector_t pool_offset;	/* offset of first page in the pool */
	int total_pages;	/* total allocated pages */
	int valid_pages;	/* pages with valid data */
	struct bio *ra_bio;	/* bio to do the read ahead */
};
static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx)
{
	struct page *page;

	ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs);
	if (!ctx->ra_bio)
		return -ENOMEM;

	ctx->valid_pages = 0;
	ctx->total_pages = 0;
	while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
		page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		ctx->ra_pool[ctx->total_pages] = page;
		ctx->total_pages += 1;
	}

	if (ctx->total_pages == 0) {
		bio_put(ctx->ra_bio);
		return -ENOMEM;
	}

	ctx->pool_offset = 0;
	return 0;
}
static void r5l_recovery_free_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->total_pages; ++i)
		put_page(ctx->ra_pool[i]);
	bio_put(ctx->ra_bio);
}
/*
 * fetch ctx->valid_pages pages from offset
 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
 * However, if the offset is close to the end of the journal device,
 * ctx->valid_pages could be smaller than ctx->total_pages
 */
static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx,
				      sector_t offset)
{
	bio_reset(ctx->ra_bio);
	bio_set_dev(ctx->ra_bio, log->rdev->bdev);
	bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
	ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;

	ctx->valid_pages = 0;
	ctx->pool_offset = offset;

	while (ctx->valid_pages < ctx->total_pages) {
		bio_add_page(ctx->ra_bio,
			     ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
		ctx->valid_pages += 1;

		offset = r5l_ring_add(log, offset, BLOCK_SECTORS);

		if (offset == 0)  /* reached end of the device */
			break;
	}

	return submit_bio_wait(ctx->ra_bio);
}
/*
 * try read a page from the read ahead page pool, if the page is not in the
 * pool, call r5l_recovery_fetch_ra_pool
 */
static int r5l_recovery_read_page(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t offset)
{
	int ret;

	if (offset < ctx->pool_offset ||
	    offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
		ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
		if (ret)
			return ret;
	}

	BUG_ON(offset < ctx->pool_offset ||
	       offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);

	memcpy(page_address(page),
	       page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
					 BLOCK_SECTOR_SHIFT]),
	       PAGE_SIZE);
	return 0;
}
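
/*
 * Worked example (illustrative): with pool_offset == 800 and a request
 * for offset == 824, the data is copied from pool index
 * (824 - 800) >> BLOCK_SECTOR_SHIFT == 3 (the fourth read-ahead page);
 * no new fetch is needed as long as
 * offset < pool_offset + valid_pages * BLOCK_SECTORS.
 */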
static int r5l_recovery_read_meta_block(struct r5l_log *log,
					struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;
	int ret;

	ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
	if (ret != 0)
		return ret;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}
static void
r5l_recovery_create_empty_meta_block(struct r5l_log *log,
				     struct page *page,
				     sector_t pos, u64 seq)
{
	struct r5l_meta_block *mb;

	mb = page_address(page);
	clear_page(mb);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
}
static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	r5l_recovery_create_empty_meta_block(log, page, pos, seq);
	mb = page_address(page);
	mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
					     mb, PAGE_SIZE));
	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
			  REQ_SYNC | REQ_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}
/*
 * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
 * to mark valid (potentially not flushed) data in the journal.
 *
 * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
 * so there should not be any mismatch here.
 */
static void r5l_recovery_load_data(struct r5l_log *log,
				   struct stripe_head *sh,
				   struct r5l_recovery_ctx *ctx,
				   struct r5l_payload_data_parity *payload,
				   sector_t log_offset)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int dd_idx;

	raid5_compute_sector(conf,
			     le64_to_cpu(payload->location), 0,
			     &dd_idx, sh);
	r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
	sh->dev[dd_idx].log_checksum =
		le32_to_cpu(payload->checksum[0]);
	ctx->meta_total_blocks += BLOCK_SECTORS;

	set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
	set_bit(STRIPE_R5C_CACHING, &sh->state);
}
static void r5l_recovery_load_parity(struct r5l_log *log,
				     struct stripe_head *sh,
				     struct r5l_recovery_ctx *ctx,
				     struct r5l_payload_data_parity *payload,
				     sector_t log_offset)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;

	ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
	r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
	sh->dev[sh->pd_idx].log_checksum =
		le32_to_cpu(payload->checksum[0]);
	set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);

	if (sh->qd_idx >= 0) {
		r5l_recovery_read_page(
			log, ctx, sh->dev[sh->qd_idx].page,
			r5l_ring_add(log, log_offset, BLOCK_SECTORS));
		sh->dev[sh->qd_idx].log_checksum =
			le32_to_cpu(payload->checksum[1]);
		set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
	}
	clear_bit(STRIPE_R5C_CACHING, &sh->state);
}
static void r5l_recovery_reset_stripe(struct stripe_head *sh)
{
	int i;

	sh->state = 0;
	sh->log_start = MaxSector;
	for (i = sh->disks; i--; )
		sh->dev[i].flags = 0;
}
static void
r5l_recovery_replay_one_stripe(struct r5conf *conf,
			       struct stripe_head *sh,
			       struct r5l_recovery_ctx *ctx)
{
	struct md_rdev *rdev, *rrdev;
	int disk_index;
	int data_count = 0;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
			continue;
		data_count++;
	}

	/*
	 * stripes that only have parity must have been flushed
	 * before the crash that we are now recovering from, so
	 * there is nothing more to recover.
	 */
	if (data_count == 0)
		goto out;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			sync_page_io(rdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
				     false);
			rdev_dec_pending(rdev, rdev->mddev);
			rcu_read_lock();
		}
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev) {
			atomic_inc(&rrdev->nr_pending);
			rcu_read_unlock();
			sync_page_io(rrdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
				     false);
			rdev_dec_pending(rrdev, rrdev->mddev);
			rcu_read_lock();
		}
		rcu_read_unlock();
	}
	ctx->data_parity_stripes++;
out:
	r5l_recovery_reset_stripe(sh);
}
static struct stripe_head *
r5c_recovery_alloc_stripe(
		struct r5conf *conf,
		sector_t stripe_sect,
		int noblock)
{
	struct stripe_head *sh;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
	if (!sh)
		return NULL;  /* no more stripe available */

	r5l_recovery_reset_stripe(sh);

	return sh;
}
static struct stripe_head *
r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
{
	struct stripe_head *sh;

	list_for_each_entry(sh, list, lru)
		if (sh->sector == sect)
			return sh;
	return NULL;
}
static void
r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
			  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
		r5l_recovery_reset_stripe(sh);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}
}
static void
r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
			    struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
		if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
			r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
			list_del_init(&sh->lru);
			raid5_release_stripe(sh);
		}
}
/* if matches return 0; otherwise return -EINVAL */
static int
r5l_recovery_verify_data_checksum(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t log_offset, __le32 log_checksum)
{
	void *addr;
	u32 checksum;

	r5l_recovery_read_page(log, ctx, page, log_offset);
	addr = kmap_atomic(page);
	checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
	kunmap_atomic(addr);
	return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}
2001  * before loading data to the stripe cache, we need to verify checksums for
2002  * all data; if there is a mismatch for any data page, we drop all data in the meta block
2005 r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
2006                                          struct r5l_recovery_ctx *ctx)
2008     struct mddev *mddev = log->rdev->mddev;
2009     struct r5conf *conf = mddev->private;
2010     struct r5l_meta_block *mb = page_address(ctx->meta_page);
2011     sector_t mb_offset = sizeof(struct r5l_meta_block);
2012     sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2014     struct r5l_payload_data_parity *payload;
2015     struct r5l_payload_flush *payload_flush;
2017     page = alloc_page(GFP_KERNEL);
2021     while (mb_offset < le32_to_cpu(mb->meta_size)) {
2022         payload = (void *)mb + mb_offset;
2023         payload_flush = (void *)mb + mb_offset;
2025         if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2026             if (r5l_recovery_verify_data_checksum(
2027                     log, ctx, page, log_offset,
2028                     payload->checksum[0]) < 0)
2030         } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
2031             if (r5l_recovery_verify_data_checksum(
2032                     log, ctx, page, log_offset,
2033                     payload->checksum[0]) < 0)
2035             if (conf->max_degraded == 2 && /* q for RAID 6 */
2036                 r5l_recovery_verify_data_checksum(
2038                     r5l_ring_add(log, log_offset,
2040                     payload->checksum[1]) < 0)
2042         } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2043             /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
2044         } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2047         if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2048             mb_offset += sizeof(struct r5l_payload_flush) +
2049                 le32_to_cpu(payload_flush->size);
2051             /* DATA or PARITY payload */
2052             log_offset = r5l_ring_add(log, log_offset,
2053                                       le32_to_cpu(payload->size));
2054             mb_offset += sizeof(struct r5l_payload_data_parity) +
2056                 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
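/*
 * Layout sketch (hedged; the helper below is hypothetical and not part of
 * this file): a meta block is one 4K page holding an r5l_meta_block header
 * followed by packed payloads. A FLUSH payload carries its own byte size,
 * while a DATA/PARITY payload is followed by one __le32 checksum per 4K
 * page it covers, which is exactly the stride the loop above advances by:
 */
static inline size_t r5l_data_parity_stride(u32 payload_sectors)
{
    /* header plus one checksum word per 4K page covered */
    return sizeof(struct r5l_payload_data_parity) +
        sizeof(__le32) * (payload_sectors >> (PAGE_SHIFT - 9));
}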
2070  * Analyze all data/parity pages in one meta block
2073  * -EINVAL for an unknown payload type
2074  * -EAGAIN for a checksum mismatch in a data page
2075  * -ENOMEM when out of memory (alloc_page() failed or out of stripes)
2078 r5c_recovery_analyze_meta_block(struct r5l_log *log,
2079                                 struct r5l_recovery_ctx *ctx,
2080                                 struct list_head *cached_stripe_list)
2082     struct mddev *mddev = log->rdev->mddev;
2083     struct r5conf *conf = mddev->private;
2084     struct r5l_meta_block *mb;
2085     struct r5l_payload_data_parity *payload;
2086     struct r5l_payload_flush *payload_flush;
2088     sector_t log_offset;
2089     sector_t stripe_sect;
2090     struct stripe_head *sh;
2094      * for a mismatch in data blocks, we will drop all data in this mb, but
2095      * we will still read the next mb for other data with the FLUSH flag, as
2096      * io_units could finish out of order.
2098     ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2102         return ret;  /* -ENOMEM due to alloc_page() failure */
2104     mb = page_address(ctx->meta_page);
2105     mb_offset = sizeof(struct r5l_meta_block);
2106     log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2108     while (mb_offset < le32_to_cpu(mb->meta_size)) {
2111         payload = (void *)mb + mb_offset;
2112         payload_flush = (void *)mb + mb_offset;
2114         if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2117             count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
2118             for (i = 0; i < count; ++i) {
2119                 stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
2120                 sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2123                     WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2124                     r5l_recovery_reset_stripe(sh);
2125                     list_del_init(&sh->lru);
2126                     raid5_release_stripe(sh);
2130             mb_offset += sizeof(struct r5l_payload_flush) +
2131                 le32_to_cpu(payload_flush->size);
2135         /* DATA or PARITY payload */
2136         stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
2137             raid5_compute_sector(
2138                 conf, le64_to_cpu(payload->location), 0, &dd,
2140             : le64_to_cpu(payload->location);
2142         sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2146             sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2148              * cannot get a stripe from raid5_get_active_stripe();
2149              * try to replay some stripes
2152                 r5c_recovery_replay_stripes(
2153                     cached_stripe_list, ctx);
2154                 sh = r5c_recovery_alloc_stripe(
2155                     conf, stripe_sect, 1);
2158                 int new_size = conf->min_nr_stripes * 2;
2159                 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on the journal.\n",
2162                 ret = raid5_set_cache_size(mddev, new_size);
2163                 if (conf->min_nr_stripes <= new_size / 2) {
2164                     pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2168                            conf->min_nr_stripes,
2169                            conf->max_nr_stripes);
2172                 sh = r5c_recovery_alloc_stripe(
2173                     conf, stripe_sect, 0);
2176                 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2180             list_add_tail(&sh->lru, cached_stripe_list);
2183         if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2184             if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
2185                 test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
2186                 r5l_recovery_replay_one_stripe(conf, sh, ctx);
2187                 list_move_tail(&sh->lru, cached_stripe_list);
2189             r5l_recovery_load_data(log, sh, ctx, payload,
2191         } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
2192             r5l_recovery_load_parity(log, sh, ctx, payload,
2197         log_offset = r5l_ring_add(log, log_offset,
2198                                   le32_to_cpu(payload->size));
2200         mb_offset += sizeof(struct r5l_payload_data_parity) +
2202             (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
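/*
 * Note on the allocation fallback above (hedged summary): when recovery
 * runs out of stripe_heads it first replays and releases cached
 * data-parity stripes, retries the allocation, then doubles the stripe
 * cache via raid5_set_cache_size() and retries once more in blocking
 * mode; recovery fails only if all of these come up empty.
 */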
2209  * Load the stripe into cache. The stripe will be written out later by
2210  * the stripe cache state machine.
2212 static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2213                                          struct stripe_head *sh)
2218     for (i = sh->disks; i--; ) {
2220         if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
2221             set_bit(R5_InJournal, &dev->flags);
2222             set_bit(R5_UPTODATE, &dev->flags);
2228  * Scan through the log for all to-be-flushed data.
2230  * For stripes with data and parity, namely Data-Parity stripes
2231  * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
2233  * For stripes with only data, namely Data-Only stripes
2234  * (STRIPE_R5C_CACHING == 1), we load them into the stripe cache state machine.
2236  * For a stripe, if we see data after parity, we should discard all previous
2237  * data and parity for this stripe, as that data has already been flushed to
2240  * At the end of the scan, we return the new journal_tail, which points to the
2241  * first data-only stripe on the journal device, or to the next invalid meta block.
2243 static int r5c_recovery_flush_log(struct r5l_log *log,
2244                                   struct r5l_recovery_ctx *ctx)
2246     struct stripe_head *sh;
2249     /* scan through the log */
2251         if (r5l_recovery_read_meta_block(log, ctx))
2254         ret = r5c_recovery_analyze_meta_block(log, ctx,
2257          * -EAGAIN means a mismatch in a data block; in this case, we still
2258          * try to scan the next meta block
2260         if (ret && ret != -EAGAIN)
2261             break;  /* ret == -EINVAL or -ENOMEM */
2263         ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2266     if (ret == -ENOMEM) {
2267         r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2271     /* replay data-parity stripes */
2272     r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2274     /* load data-only stripes to stripe cache */
2275     list_for_each_entry(sh, &ctx->cached_list, lru) {
2276         WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2277         r5c_recovery_load_one_stripe(log, sh);
2278         ctx->data_only_stripes++;
2285  * We did a recovery. Now ctx.pos points to an invalid meta block. The new
2286  * log will start here, but we can't let the superblock point to the last valid
2287  * meta block. The log might look like:
2288  * | meta 1| meta 2| meta 3|
2289  * meta 1 is valid, meta 2 is invalid, meta 3 could be valid. If the
2290  * superblock points to meta 1, we write a new valid meta 2n. If a crash
2291  * happens again, the new recovery will start from meta 1. Since meta 2n is
2292  * valid now, recovery will think meta 3 is valid, which is wrong.
2293  * The solution is to create a new meta block in meta 2's slot with its seq ==
2294  * meta 1's seq + 10000 and let the superblock point to it. The same recovery
2295  * will not take meta 3 for a valid meta block, because its seq doesn't match.
2299  * Before recovery, the log looks like the following
2301  * ---------------------------------------------
2302  * |           valid log        | invalid log  |
2303  * ---------------------------------------------
2305  * |- log->last_checkpoint
2306  * |- log->last_cp_seq
2308  * Now we scan through the log until we see an invalid entry
2310  * ---------------------------------------------
2311  * |           valid log        | invalid log  |
2312  * ---------------------------------------------
2314  * |- log->last_checkpoint      |- ctx->pos
2315  * |- log->last_cp_seq          |- ctx->seq
2317  * From this point, we need to increase the seq number by 10000 to avoid
2318  * confusing the next recovery.
2320  * ---------------------------------------------
2321  * |           valid log        | invalid log  |
2322  * ---------------------------------------------
2324  * |- log->last_checkpoint      |- ctx->pos+1
2325  * |- log->last_cp_seq          |- ctx->seq+10001
2327  * However, it is not safe to start the state machine yet, because data-only
2328  * stripes are not yet secured in RAID. To save these data-only stripes, we
2329  * rewrite them from seq+10001 on.
2331  * -----------------------------------------------------------------
2332  * |           valid log        | data only stripes | invalid log  |
2333  * -----------------------------------------------------------------
2335  * |- log->last_checkpoint                          |- ctx->pos+n
2336  * |- log->last_cp_seq                              |- ctx->seq+10000+n
2338  * If a failure happens again during this process, the recovery can safely
2339  * start again from log->last_checkpoint.
2341  * Once the data-only stripes are rewritten to the journal, we move the log tail:
2343  * -----------------------------------------------------------------
2344  * |       old log        |    data only stripes    | invalid log  |
2345  * -----------------------------------------------------------------
2347  *                        |- log->last_checkpoint   |- ctx->pos+n
2348  *                        |- log->last_cp_seq       |- ctx->seq+10000+n
2350  * Then we can safely start the state machine. If a failure happens from this
2351  * point on, the recovery will start from the new log->last_checkpoint.
2354 r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2355                                        struct r5l_recovery_ctx *ctx)
2357     struct stripe_head *sh;
2358     struct mddev *mddev = log->rdev->mddev;
2360     sector_t next_checkpoint = MaxSector;
2362     page = alloc_page(GFP_KERNEL);
2364         pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
2369     WARN_ON(list_empty(&ctx->cached_list));
2371     list_for_each_entry(sh, &ctx->cached_list, lru) {
2372         struct r5l_meta_block *mb;
2377         WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2378         r5l_recovery_create_empty_meta_block(log, page,
2379                                              ctx->pos, ctx->seq);
2380         mb = page_address(page);
2381         offset = le32_to_cpu(mb->meta_size);
2382         write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2384         for (i = sh->disks; i--; ) {
2385             struct r5dev *dev = &sh->dev[i];
2386             struct r5l_payload_data_parity *payload;
2389             if (test_bit(R5_InJournal, &dev->flags)) {
2390                 payload = (void *)mb + offset;
2391                 payload->header.type = cpu_to_le16(
2392                     R5LOG_PAYLOAD_DATA);
2393                 payload->size = cpu_to_le32(BLOCK_SECTORS);
2394                 payload->location = cpu_to_le64(
2395                     raid5_compute_blocknr(sh, i, 0));
2396                 addr = kmap_atomic(dev->page);
2397                 payload->checksum[0] = cpu_to_le32(
2398                     crc32c_le(log->uuid_checksum, addr,
2400                 kunmap_atomic(addr);
2401                 sync_page_io(log->rdev, write_pos, PAGE_SIZE,
2402                              dev->page, REQ_OP_WRITE, 0, false);
2403                 write_pos = r5l_ring_add(log, write_pos,
2405                 offset += sizeof(__le32) +
2406                     sizeof(struct r5l_payload_data_parity);
2410         mb->meta_size = cpu_to_le32(offset);
2411         mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
2413         sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2414                      REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
2415         sh->log_start = ctx->pos;
2416         list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2417         atomic_inc(&log->stripe_in_journal_count);
2418         ctx->pos = write_pos;
2420         next_checkpoint = sh->log_start;
2422     log->next_checkpoint = next_checkpoint;
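/*
 * Ordering note (hedged reading of the code above): the data pages are
 * written with plain REQ_OP_WRITE, while the meta block that makes them
 * reachable is written last with REQ_SYNC | REQ_FUA. A crash in the
 * middle leaves at worst unreachable data pages, and the next recovery
 * simply restarts from log->last_checkpoint, as described above.
 */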
2427 static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2428                                                  struct r5l_recovery_ctx *ctx)
2430     struct mddev *mddev = log->rdev->mddev;
2431     struct r5conf *conf = mddev->private;
2432     struct stripe_head *sh, *next;
2434     if (ctx->data_only_stripes == 0)
2437     log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2439     list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2440         r5c_make_stripe_write_out(sh);
2441         set_bit(STRIPE_HANDLE, &sh->state);
2442         list_del_init(&sh->lru);
2443         raid5_release_stripe(sh);
2446     /* reuse conf->wait_for_quiescent in recovery */
2447     wait_event(conf->wait_for_quiescent,
2448                atomic_read(&conf->active_stripes) == 0);
2450     log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2453 static int r5l_recovery_log(struct r5l_log *log)
2455     struct mddev *mddev = log->rdev->mddev;
2456     struct r5l_recovery_ctx *ctx;
2460     ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2464     ctx->pos = log->last_checkpoint;
2465     ctx->seq = log->last_cp_seq;
2466     INIT_LIST_HEAD(&ctx->cached_list);
2467     ctx->meta_page = alloc_page(GFP_KERNEL);
2469     if (!ctx->meta_page) {
2474     if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2479     ret = r5c_recovery_flush_log(log, ctx);
2487     if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
2488         pr_info("md/raid:%s: starting from clean shutdown\n",
2491         pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
2492                 mdname(mddev), ctx->data_only_stripes,
2493                 ctx->data_parity_stripes);
2495     if (ctx->data_only_stripes == 0) {
2496         log->next_checkpoint = ctx->pos;
2497         r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2498         ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2499     } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2500         pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
2506     log->log_start = ctx->pos;
2507     log->seq = ctx->seq;
2508     log->last_checkpoint = pos;
2509     r5l_write_super(log, pos);
2511     r5c_recovery_flush_data_only_stripes(log, ctx);
2514     r5l_recovery_free_ra_pool(log, ctx);
2516     __free_page(ctx->meta_page);
2522 static void r5l_write_super(struct r5l_log *log, sector_t cp)
2524     struct mddev *mddev = log->rdev->mddev;
2526     log->rdev->journal_tail = cp;
2527     set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
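/*
 * Note (hedged): r5l_write_super() only records the new journal tail in
 * the rdev and marks the superblock dirty; the superblock itself is
 * written out later by the md thread when it handles MD_SB_CHANGE_DEVS.
 */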
2530 static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
2532     struct r5conf *conf;
2535     ret = mddev_lock(mddev);
2539     conf = mddev->private;
2540     if (!conf || !conf->log) {
2541         mddev_unlock(mddev);
2545     switch (conf->log->r5c_journal_mode) {
2546     case R5C_JOURNAL_MODE_WRITE_THROUGH:
2548             page, PAGE_SIZE, "[%s] %s\n",
2549             r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2550             r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2552     case R5C_JOURNAL_MODE_WRITE_BACK:
2554             page, PAGE_SIZE, "%s [%s]\n",
2555             r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2556             r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2561     mddev_unlock(mddev);
2566  * Set journal cache mode on @mddev (external API initially needed by dm-raid).
2568  * @mode as defined in 'enum r5c_journal_mode'.
2571 int r5c_journal_mode_set(struct mddev *mddev, int mode)
2573     struct r5conf *conf;
2575     if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
2576         mode > R5C_JOURNAL_MODE_WRITE_BACK)
2579     conf = mddev->private;
2580     if (!conf || !conf->log)
2583     if (raid5_calc_degraded(conf) > 0 &&
2584         mode == R5C_JOURNAL_MODE_WRITE_BACK)
2587     mddev_suspend(mddev);
2588     conf->log->r5c_journal_mode = mode;
2589     mddev_resume(mddev);
2591     pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
2592              mdname(mddev), mode, r5c_journal_mode_str[mode]);
2595 EXPORT_SYMBOL(r5c_journal_mode_set);
2597 static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2598                                       const char *page, size_t length)
2600     int mode = ARRAY_SIZE(r5c_journal_mode_str);
2601     size_t len = length;
2607     if (page[len - 1] == '\n')
2611         if (strlen(r5c_journal_mode_str[mode]) == len &&
2612             !strncmp(page, r5c_journal_mode_str[mode], len))
2614     ret = mddev_lock(mddev);
2617     ret = r5c_journal_mode_set(mddev, mode);
2618     mddev_unlock(mddev);
2619     return ret ?: length;
2622 struct md_sysfs_entry
2623 r5c_journal_mode = __ATTR(journal_mode, 0644,
2624                           r5c_journal_mode_show, r5c_journal_mode_store);
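/*
 * Example usage (hedged; "md0" is illustrative):
 *
 *     # cat /sys/block/md0/md/journal_mode
 *     [write-through] write-back
 *     # echo write-back > /sys/block/md0/md/journal_mode
 *
 * The bracketed entry is the active mode; the store accepts either string.
 */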
2627  * Try to handle a write operation in the caching phase. This function
2628  * should only be called in write-back mode.
2630  * If all outstanding writes can be handled in the caching phase, returns 0.
2631  * If some writes require the write-out phase, calls r5c_make_stripe_write_out()
2632  * and returns -EAGAIN.
2634 int r5c_try_caching_write(struct r5conf *conf,
2635                           struct stripe_head *sh,
2636                           struct stripe_head_state *s,
2639     struct r5l_log *log = conf->log;
2644     sector_t tree_index;
2648     BUG_ON(!r5c_is_writeback(log));
2650     if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
2652          * There are two different scenarios here:
2653          *  1. The stripe has some data cached, and it is sent to
2654          *     the write-out phase for reclaim
2655          *  2. The stripe is clean, and this is the first write
2657          * For 1, return -EAGAIN, so we continue with
2658          * handle_stripe_dirtying().
2660          * For 2, set STRIPE_R5C_CACHING and continue with caching
2664         /* case 1: anything injournal or anything in written */
2665         if (s->injournal > 0 || s->written > 0)
2668         set_bit(STRIPE_R5C_CACHING, &sh->state);
2672      * When run in degraded mode, the array is set to write-through mode.
2673      * This check helps drain pending writes safely in the transition to
2674      * write-through mode.
2676      * When a stripe is syncing, the write is also handled in write
2679     if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
2680         r5c_make_stripe_write_out(sh);
2684     for (i = disks; i--; ) {
2686         /* if non-overwrite, use the writing-out phase */
2687         if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
2688             !test_bit(R5_InJournal, &dev->flags)) {
2689             r5c_make_stripe_write_out(sh);
2694     /* if the stripe is not counted in big_stripe_tree, add it now */
2695     if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
2696         !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2697         tree_index = r5c_tree_index(conf, sh->sector);
2698         spin_lock(&log->tree_lock);
2699         pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2702             refcount = (uintptr_t)radix_tree_deref_slot_protected(
2703                 pslot, &log->tree_lock) >>
2704                 R5C_RADIX_COUNT_SHIFT;
2705             radix_tree_replace_slot(
2706                 &log->big_stripe_tree, pslot,
2707                 (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
2710              * this radix_tree_insert can fail safely, so there is no
2711              * need to call radix_tree_preload()
2713             ret = radix_tree_insert(
2714                 &log->big_stripe_tree, tree_index,
2715                 (void *)(1 << R5C_RADIX_COUNT_SHIFT));
2717                 spin_unlock(&log->tree_lock);
2718                 r5c_make_stripe_write_out(sh);
2722         spin_unlock(&log->tree_lock);
2725          * set STRIPE_R5C_PARTIAL_STRIPE; this shows the stripe is
2726          * counted in the radix tree
2728         set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
2729         atomic_inc(&conf->r5c_cached_partial_stripes);
2732     for (i = disks; i--; ) {
2735             set_bit(R5_Wantwrite, &dev->flags);
2736             set_bit(R5_Wantdrain, &dev->flags);
2737             set_bit(R5_LOCKED, &dev->flags);
2743         set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2745      * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
2746      * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
2747      * r5c_handle_data_cached()
2749     set_bit(STRIPE_LOG_TRAPPED, &sh->state);
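/*
 * Encoding sketch (hedged; these helpers are hypothetical and only
 * illustrate the convention used above and in r5c_finish_stripe_write_out()):
 * big_stripe_tree stores a bare refcount in the slot pointer itself,
 * shifted up by R5C_RADIX_COUNT_SHIFT so the low bits the radix tree
 * reserves for internal use stay clear:
 */
static inline void *r5c_refcount_to_slot(uintptr_t count)
{
    return (void *)(count << R5C_RADIX_COUNT_SHIFT);
}

static inline uintptr_t r5c_slot_to_refcount(void *slot)
{
    return (uintptr_t)slot >> R5C_RADIX_COUNT_SHIFT;
}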
2756  * free extra pages (orig_page) we allocated for prexor
2758 void r5c_release_extra_page(struct stripe_head *sh)
2760     struct r5conf *conf = sh->raid_conf;
2762     bool using_disk_info_extra_page;
2764     using_disk_info_extra_page =
2765         sh->dev[0].orig_page == conf->disks[0].extra_page;
2767     for (i = sh->disks; i--; )
2768         if (sh->dev[i].page != sh->dev[i].orig_page) {
2769             struct page *p = sh->dev[i].orig_page;
2771             sh->dev[i].orig_page = sh->dev[i].page;
2772             clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2774             if (!using_disk_info_extra_page)
2778     if (using_disk_info_extra_page) {
2779         clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
2780         md_wakeup_thread(conf->mddev->thread);
2784 void r5c_use_extra_page(struct stripe_head *sh)
2786     struct r5conf *conf = sh->raid_conf;
2790     for (i = sh->disks; i--; ) {
2792         if (dev->orig_page != dev->page)
2793             put_page(dev->orig_page);
2794         dev->orig_page = conf->disks[i].extra_page;
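/*
 * Background (hedged): with write-back caching, prexor needs the old data
 * from disk while dev->page may still hold newer cached data. The two
 * helpers above therefore borrow the shared per-disk extra_page as
 * orig_page for the read-old-data phase and hand it back afterwards, with
 * R5C_EXTRA_PAGE_IN_USE serializing users of the shared pages.
 */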
2799  * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
2800  * stripe is committed to RAID disks.
2802 void r5c_finish_stripe_write_out(struct r5conf *conf,
2803                                  struct stripe_head *sh,
2804                                  struct stripe_head_state *s)
2806     struct r5l_log *log = conf->log;
2809     sector_t tree_index;
2813     if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
2816     WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2817     clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
2819     if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
2822     for (i = sh->disks; i--; ) {
2823         clear_bit(R5_InJournal, &sh->dev[i].flags);
2824         if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2829      * analyse_stripe() runs before r5c_finish_stripe_write_out();
2830      * we updated R5_InJournal, so we also update s->injournal.
2834     if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2835         if (atomic_dec_and_test(&conf->pending_full_writes))
2836             md_wakeup_thread(conf->mddev->thread);
2839         wake_up(&conf->wait_for_overlap);
2841     spin_lock_irq(&log->stripe_in_journal_lock);
2842     list_del_init(&sh->r5c);
2843     spin_unlock_irq(&log->stripe_in_journal_lock);
2844     sh->log_start = MaxSector;
2846     atomic_dec(&log->stripe_in_journal_count);
2847     r5c_update_log_state(log);
2849     /* stop counting this stripe in big_stripe_tree */
2850     if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
2851         test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2852         tree_index = r5c_tree_index(conf, sh->sector);
2853         spin_lock(&log->tree_lock);
2854         pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2856         BUG_ON(pslot == NULL);
2857         refcount = (uintptr_t)radix_tree_deref_slot_protected(
2858             pslot, &log->tree_lock) >>
2859             R5C_RADIX_COUNT_SHIFT;
2861             radix_tree_delete(&log->big_stripe_tree, tree_index);
2863             radix_tree_replace_slot(
2864                 &log->big_stripe_tree, pslot,
2865                 (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
2866         spin_unlock(&log->tree_lock);
2869     if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
2870         BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
2871         atomic_dec(&conf->r5c_flushing_partial_stripes);
2872         atomic_dec(&conf->r5c_cached_partial_stripes);
2875     if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2876         BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
2877         atomic_dec(&conf->r5c_flushing_full_stripes);
2878         atomic_dec(&conf->r5c_cached_full_stripes);
2881     r5l_append_flush_payload(log, sh->sector);
2882     /* the stripe is flushed to raid disks, we can do resync now */
2883     if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2884         set_bit(STRIPE_HANDLE, &sh->state);
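/*
 * Note (hedged): the refcount update above is the inverse of the insert in
 * r5c_try_caching_write(); once the count for a tree_index drops to zero
 * the slot is deleted, so r5c_big_stripe_cached() never sees stale entries.
 */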
2887 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
2889     struct r5conf *conf = sh->raid_conf;
2897     for (i = 0; i < sh->disks; i++) {
2900         if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
2902         addr = kmap_atomic(sh->dev[i].page);
2903         sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
2905         kunmap_atomic(addr);
2908     WARN_ON(pages == 0);
2911      * The stripe must enter state machine again to call endio, so
2914     clear_bit(STRIPE_DELAYED, &sh->state);
2915     atomic_inc(&sh->count);
2917     mutex_lock(&log->io_mutex);
2919     reserve = (1 + pages) << (PAGE_SHIFT - 9);
2921     if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2922         sh->log_start == MaxSector)
2923         r5l_add_no_space_stripe(log, sh);
2924     else if (!r5l_has_free_space(log, reserve)) {
2925         if (sh->log_start == log->last_checkpoint)
2928             r5l_add_no_space_stripe(log, sh);
2930         ret = r5l_log_stripe(log, sh, pages, 0);
2932             spin_lock_irq(&log->io_list_lock);
2933             list_add_tail(&sh->log_list, &log->no_mem_stripes);
2934             spin_unlock_irq(&log->io_list_lock);
2938     mutex_unlock(&log->io_mutex);
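/*
 * Space accounting (hedged reading): the reservation above is (1 + pages)
 * 4K blocks expressed in 512-byte sectors: one meta block plus one block
 * per data page being cached; (PAGE_SHIFT - 9) converts pages to sectors.
 */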
2942 /* check whether this big stripe is in write back cache. */
2943 bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
2945     struct r5l_log *log = conf->log;
2946     sector_t tree_index;
2952     WARN_ON_ONCE(!rcu_read_lock_held());
2953     tree_index = r5c_tree_index(conf, sect);
2954     slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
2955     return slot != NULL;
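/*
 * Usage sketch (hedged; the caller shown is hypothetical): the lookup is
 * lock-free and must run under rcu_read_lock():
 *
 *     rcu_read_lock();
 *     cached = r5c_big_stripe_cached(conf, sect);
 *     rcu_read_unlock();
 *
 * raid5 read paths use this to steer reads of cached big stripes through
 * the stripe cache instead of reading possibly stale data from the disks.
 */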
2958 static int r5l_load_log(struct r5l_log *log)
2960     struct md_rdev *rdev = log->rdev;
2962     struct r5l_meta_block *mb;
2963     sector_t cp = log->rdev->journal_tail;
2964     u32 stored_crc, expected_crc;
2965     bool create_super = false;
2968     /* Make sure it's valid */
2969     if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2971     page = alloc_page(GFP_KERNEL);
2975     if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
2979     mb = page_address(page);
2981     if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2982         mb->version != R5LOG_VERSION) {
2983         create_super = true;
2986     stored_crc = le32_to_cpu(mb->checksum);
2988     expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2989     if (stored_crc != expected_crc) {
2990         create_super = true;
2993     if (le64_to_cpu(mb->position) != cp) {
2994         create_super = true;
2999         log->last_cp_seq = prandom_u32();
3001         r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
3003          * Make sure the superblock points to the correct address. The log
3004          * might get data very soon. If the superblock doesn't have the
3005          * correct log tail address, recovery can't find the log.
3007         r5l_write_super(log, cp);
3009         log->last_cp_seq = le64_to_cpu(mb->seq);
3011     log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
3012     log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
3013     if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
3014         log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3015     log->last_checkpoint = cp;
3020     log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3021     log->seq = log->last_cp_seq + 1;
3022     log->next_checkpoint = cp;
3024     ret = r5l_recovery_log(log);
3026     r5c_update_log_state(log);
3033 int r5l_start(struct r5l_log *log)
3040     ret = r5l_load_log(log);
3042         struct mddev *mddev = log->rdev->mddev;
3043         struct r5conf *conf = mddev->private;
3050 void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
3052     struct r5conf *conf = mddev->private;
3053     struct r5l_log *log = conf->log;
3058     if ((raid5_calc_degraded(conf) > 0 ||
3059          test_bit(Journal, &rdev->flags)) &&
3060         conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
3061         schedule_work(&log->disable_writeback_work);
3064 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
3066     struct request_queue *q = bdev_get_queue(rdev->bdev);
3067     struct r5l_log *log;
3068     char b[BDEVNAME_SIZE];
3071     pr_debug("md/raid:%s: using device %s as journal\n",
3072              mdname(conf->mddev), bdevname(rdev->bdev, b));
3074     if (PAGE_SIZE != 4096)
3078      * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
3079      * raid_disks r5l_payload_data_parity structures.
3081      * The write journal and cache do not work for very big arrays
3082      * (raid_disks > 203)
3084     if (sizeof(struct r5l_meta_block) +
3085         ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
3086          conf->raid_disks) > PAGE_SIZE) {
3087         pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
3088                mdname(conf->mddev), conf->raid_disks);
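/*
 * Worked bound (hedged arithmetic, assuming the md_p.h layouts: a 32-byte
 * r5l_meta_block header and a 16-byte r5l_payload_data_parity plus one
 * 4-byte checksum per member): (4096 - 32) / 20 = 203.2, hence the
 * raid_disks > 203 limit quoted above.
 */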
3092     log = kzalloc(sizeof(*log), GFP_KERNEL);
3097     log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
3099     log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3100                                    sizeof(rdev->mddev->uuid));
3102     mutex_init(&log->io_mutex);
3104     spin_lock_init(&log->io_list_lock);
3105     INIT_LIST_HEAD(&log->running_ios);
3106     INIT_LIST_HEAD(&log->io_end_ios);
3107     INIT_LIST_HEAD(&log->flushing_ios);
3108     INIT_LIST_HEAD(&log->finished_ios);
3109     bio_init(&log->flush_bio, NULL, 0);
3111     log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3115     ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3119     ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3123     ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3127     spin_lock_init(&log->tree_lock);
3128     INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
3130     log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
3131                                              log->rdev->mddev, "reclaim");
3132     if (!log->reclaim_thread)
3133         goto reclaim_thread;
3134     log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
3136     init_waitqueue_head(&log->iounit_wait);
3138     INIT_LIST_HEAD(&log->no_mem_stripes);
3140     INIT_LIST_HEAD(&log->no_space_stripes);
3141     spin_lock_init(&log->no_space_stripes_lock);
3143     INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
3144     INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
3146     log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3147     INIT_LIST_HEAD(&log->stripe_in_journal_list);
3148     spin_lock_init(&log->stripe_in_journal_lock);
3149     atomic_set(&log->stripe_in_journal_count, 0);
3151     rcu_assign_pointer(conf->log, log);
3153     set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
3157     mempool_exit(&log->meta_pool);
3159     bioset_exit(&log->bs);
3161     mempool_exit(&log->io_pool);
3163     kmem_cache_destroy(log->io_kc);
*conf
)
3171 struct r5l_log
*log
= conf
->log
;
3176 /* Ensure disable_writeback_work wakes up and exits */
3177 wake_up(&conf
->mddev
->sb_wait
);
3178 flush_work(&log
->disable_writeback_work
);
3179 md_unregister_thread(&log
->reclaim_thread
);
3180 mempool_exit(&log
->meta_pool
);
3181 bioset_exit(&log
->bs
);
3182 mempool_exit(&log
->io_pool
);
3183 kmem_cache_destroy(log
->io_kc
);