/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "raid5-log.h"
/*
 * Metadata/data are stored on disk in 4k units (blocks), regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)
/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write through mode, the reclaim runs every log->max_free_space.
 * This prevents recovery from having to scan the log for too long.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
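/*
 * For reference: RECLAIM_MAX_FREE_SPACE is expressed in 512-byte sectors,
 * so 10 * 1024 * 1024 * 2 sectors == 10GiB, matching the "10G reclaimable
 * space" bound above, while RECLAIM_MAX_FREE_SPACE_SHIFT corresponds to the
 * "1/4 disk size" bound (device_size >> 2).
 */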
/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE 4
static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */
struct r5l_log {
	struct md_rdev *rdev;

	sector_t device_size;		/* log device size, round to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;
	mempool_t *meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed.  if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (eg, reclaim
					 * doesn't wait for a specific io_unit
					 * switching to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back while in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};
/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. These data are tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This lookup is protected by
 * tree_lock.
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving the clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 *
 * The radix tree requires the lowest 2 bits of the data pointer to be 2b'00.
 * So it is necessary to left shift the counter by 2 bits before using it
 * as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2
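/*
 * Illustration of the encoding described above: a big_stripe with 3 cached
 * stripes is stored in big_stripe_tree as the pointer value
 * (3 << R5C_RADIX_COUNT_SHIFT) == 0xc, which keeps the low two bits zero as
 * the radix tree requires; the count is read back by shifting the pointer
 * value right by R5C_RADIX_COUNT_SHIFT.
 */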
/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_t offset;

	offset = sector_div(sect, conf->chunk_sectors);
	return sect;
}
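/*
 * Worked example (illustrative numbers): with conf->chunk_sectors == 512
 * (256kB chunks), a request at sector 1536 gives sector_div(sect, 512) ->
 * offset == 0 and sect == 3, so chunk number 3 is used as the radix tree
 * key for that big_stripe.
 */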
/*
 * an IO range starts from a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity that follows
 * it. The io unit is written to the log disk with a normal write; as we
 * always flush the log disk first and then start moving data to the raid
 * disks, there is no requirement to write the io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;		/* include flush request */
	unsigned int has_fua:1;			/* include fua request */
	unsigned int has_null_flush:1;		/* include null flush request */
	unsigned int has_flush_payload:1;	/* include flush payload */
	/*
	 * the io isn't sent yet; a flush/fua request can only be submitted
	 * once it is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;   /* size == 0 flush bios */
};
/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio start writing to log,
				 * don't accept new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finish writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};
bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}
static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
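/*
 * Worked example (illustrative numbers): with device_size == 1000 sectors,
 * r5l_ring_add(log, 990, 16) wraps around to 6, and
 * r5l_ring_distance(log, 990, 6) == 6 + 1000 - 990 == 16, i.e. the distance
 * is measured across the wrap-around point of the ring.
 */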
static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}
static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}
303 r5c_return_dev_pending_writes(struct r5conf
*conf
, struct r5dev
*dev
)
305 struct bio
*wbi
, *wbi2
;
309 while (wbi
&& wbi
->bi_iter
.bi_sector
<
310 dev
->sector
+ STRIPE_SECTORS
) {
311 wbi2
= r5_next_bio(wbi
, dev
->sector
);
312 md_write_end(conf
->mddev
);
318 void r5c_handle_cached_data_endio(struct r5conf
*conf
,
319 struct stripe_head
*sh
, int disks
)
323 for (i
= sh
->disks
; i
--; ) {
324 if (sh
->dev
[i
].written
) {
325 set_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
);
326 r5c_return_dev_pending_writes(conf
, &sh
->dev
[i
]);
327 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
329 !test_bit(STRIPE_DEGRADED
, &sh
->state
),
335 void r5l_wake_reclaim(struct r5l_log
*log
, sector_t space
);
337 /* Check whether we should flush some stripes to free up stripe cache */
338 void r5c_check_stripe_cache_usage(struct r5conf
*conf
)
342 if (!r5c_is_writeback(conf
->log
))
345 total_cached
= atomic_read(&conf
->r5c_cached_partial_stripes
) +
346 atomic_read(&conf
->r5c_cached_full_stripes
);
349 * The following condition is true for either of the following:
350 * - stripe cache pressure high:
351 * total_cached > 3/4 min_nr_stripes ||
352 * empty_inactive_list_nr > 0
353 * - stripe cache pressure moderate:
354 * total_cached > 1/2 min_nr_stripes
356 if (total_cached
> conf
->min_nr_stripes
* 1 / 2 ||
357 atomic_read(&conf
->empty_inactive_list_nr
) > 0)
358 r5l_wake_reclaim(conf
->log
, 0);
/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	if (!r5c_is_writeback(conf->log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> STRIPE_SHIFT))
		r5l_wake_reclaim(conf->log, 0);
}
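/*
 * Illustration (assuming the usual STRIPE_SHIFT == PAGE_SHIFT - 9 with 4kB
 * pages): chunk_sectors >> STRIPE_SHIFT is the number of 4kB stripes per
 * chunk, e.g. a 256kB chunk (512 sectors) gives 64, so reclaim is woken once
 * a full chunk's worth of full stripes (or R5C_FULL_STRIPE_FLUSH_BATCH,
 * whichever is smaller) is cached.
 */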
/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes occupying log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flush higher priorities:
 *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *       stripes ALREADY in journal can be flushed w/o pending writes;
 *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *       can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * has already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that have passed 1. So the total
 * journal space required to flush all cached stripes (in pages) is:
 *
 *     (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *     (group_cnt + 1) * (raid_disks + 1)
 *
 * or, equivalently,
 *
 *     (stripe_in_journal_count) * (max_degraded + 1) +
 *     (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
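/*
 * Worked example (illustrative numbers): for a 6-disk RAID6 array
 * (raid_disks == 6, max_degraded == 2) with group_cnt == 0 and 100 stripes
 * in the journal, the space reserved is
 * BLOCK_SECTORS * ((2 + 1) * 100 + (6 - 2) * (0 + 1)) == 8 * 304 == 2432
 * sectors, i.e. a little under 1.2MiB of journal space.
 */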
/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
427 static inline void r5c_update_log_state(struct r5l_log
*log
)
429 struct r5conf
*conf
= log
->rdev
->mddev
->private;
431 sector_t reclaim_space
;
432 bool wake_reclaim
= false;
434 if (!r5c_is_writeback(log
))
437 free_space
= r5l_ring_distance(log
, log
->log_start
,
438 log
->last_checkpoint
);
439 reclaim_space
= r5c_log_required_to_flush_cache(conf
);
440 if (free_space
< 2 * reclaim_space
)
441 set_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
);
443 if (test_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
))
445 clear_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
);
447 if (free_space
< 3 * reclaim_space
)
448 set_bit(R5C_LOG_TIGHT
, &conf
->cache_state
);
450 clear_bit(R5C_LOG_TIGHT
, &conf
->cache_state
);
453 r5l_wake_reclaim(log
, 0);
457 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
458 * This function should only be called in write-back mode.
460 void r5c_make_stripe_write_out(struct stripe_head
*sh
)
462 struct r5conf
*conf
= sh
->raid_conf
;
463 struct r5l_log
*log
= conf
->log
;
465 BUG_ON(!r5c_is_writeback(log
));
467 WARN_ON(!test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
468 clear_bit(STRIPE_R5C_CACHING
, &sh
->state
);
470 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
471 atomic_inc(&conf
->preread_active_stripes
);
474 static void r5c_handle_data_cached(struct stripe_head
*sh
)
478 for (i
= sh
->disks
; i
--; )
479 if (test_and_clear_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
)) {
480 set_bit(R5_InJournal
, &sh
->dev
[i
].flags
);
481 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
483 clear_bit(STRIPE_LOG_TRAPPED
, &sh
->state
);
487 * this journal write must contain full parity,
488 * it may also contain some data pages
490 static void r5c_handle_parity_cached(struct stripe_head
*sh
)
494 for (i
= sh
->disks
; i
--; )
495 if (test_bit(R5_InJournal
, &sh
->dev
[i
].flags
))
496 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
500 * Setting proper flags after writing (or flushing) data and/or parity to the
501 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
503 static void r5c_finish_cache_stripe(struct stripe_head
*sh
)
505 struct r5l_log
*log
= sh
->raid_conf
->log
;
507 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
) {
508 BUG_ON(test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity are in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
515 set_bit(R5_InJournal
, &sh
->dev
[sh
->pd_idx
].flags
);
516 } else if (test_bit(STRIPE_R5C_CACHING
, &sh
->state
)) {
517 r5c_handle_data_cached(sh
);
519 r5c_handle_parity_cached(sh
);
520 set_bit(R5_InJournal
, &sh
->dev
[sh
->pd_idx
].flags
);
524 static void r5l_io_run_stripes(struct r5l_io_unit
*io
)
526 struct stripe_head
*sh
, *next
;
528 list_for_each_entry_safe(sh
, next
, &io
->stripe_list
, log_list
) {
529 list_del_init(&sh
->log_list
);
531 r5c_finish_cache_stripe(sh
);
533 set_bit(STRIPE_HANDLE
, &sh
->state
);
534 raid5_release_stripe(sh
);
538 static void r5l_log_run_stripes(struct r5l_log
*log
)
540 struct r5l_io_unit
*io
, *next
;
542 assert_spin_locked(&log
->io_list_lock
);
544 list_for_each_entry_safe(io
, next
, &log
->running_ios
, log_sibling
) {
545 /* don't change list order */
546 if (io
->state
< IO_UNIT_IO_END
)
549 list_move_tail(&io
->log_sibling
, &log
->finished_ios
);
550 r5l_io_run_stripes(io
);
554 static void r5l_move_to_end_ios(struct r5l_log
*log
)
556 struct r5l_io_unit
*io
, *next
;
558 assert_spin_locked(&log
->io_list_lock
);
560 list_for_each_entry_safe(io
, next
, &log
->running_ios
, log_sibling
) {
561 /* don't change list order */
562 if (io
->state
< IO_UNIT_IO_END
)
564 list_move_tail(&io
->log_sibling
, &log
->io_end_ios
);
568 static void __r5l_stripe_write_finished(struct r5l_io_unit
*io
);
569 static void r5l_log_endio(struct bio
*bio
)
571 struct r5l_io_unit
*io
= bio
->bi_private
;
572 struct r5l_io_unit
*io_deferred
;
573 struct r5l_log
*log
= io
->log
;
576 bool has_flush_payload
;
579 md_error(log
->rdev
->mddev
, log
->rdev
);
582 mempool_free(io
->meta_page
, log
->meta_pool
);
584 spin_lock_irqsave(&log
->io_list_lock
, flags
);
585 __r5l_set_io_unit_state(io
, IO_UNIT_IO_END
);
	 * if the io doesn't have null_flush or a flush payload,
	 * it is not safe to access it after releasing io_list_lock.
	 * Therefore, it is necessary to check the condition with
	 * the lock held.
593 has_null_flush
= io
->has_null_flush
;
594 has_flush_payload
= io
->has_flush_payload
;
596 if (log
->need_cache_flush
&& !list_empty(&io
->stripe_list
))
597 r5l_move_to_end_ios(log
);
599 r5l_log_run_stripes(log
);
600 if (!list_empty(&log
->running_ios
)) {
602 * FLUSH/FUA io_unit is deferred because of ordering, now we
605 io_deferred
= list_first_entry(&log
->running_ios
,
606 struct r5l_io_unit
, log_sibling
);
607 if (io_deferred
->io_deferred
)
608 schedule_work(&log
->deferred_io_work
);
611 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
613 if (log
->need_cache_flush
)
614 md_wakeup_thread(log
->rdev
->mddev
->thread
);
616 /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
617 if (has_null_flush
) {
620 WARN_ON(bio_list_empty(&io
->flush_barriers
));
621 while ((bi
= bio_list_pop(&io
->flush_barriers
)) != NULL
) {
623 if (atomic_dec_and_test(&io
->pending_stripe
)) {
624 __r5l_stripe_write_finished(io
);
629 /* decrease pending_stripe for flush payload */
630 if (has_flush_payload
)
631 if (atomic_dec_and_test(&io
->pending_stripe
))
632 __r5l_stripe_write_finished(io
);
635 static void r5l_do_submit_io(struct r5l_log
*log
, struct r5l_io_unit
*io
)
639 spin_lock_irqsave(&log
->io_list_lock
, flags
);
640 __r5l_set_io_unit_state(io
, IO_UNIT_IO_START
);
641 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
	 * In case of journal device failures, submit_bio will get an error
	 * and call endio, and then the active stripes will continue the write
	 * process. Therefore, it is not necessary to check the Faulty bit
	 * of the journal device here.
	 *
	 * We can't check split_bio after current_bio is submitted. If
	 * io->split_bio is NULL, after current_bio is submitted, current_bio
	 * might already be completed and the io_unit freed. We submit
	 * split_bio first to avoid the issue.
656 io
->split_bio
->bi_opf
|= REQ_PREFLUSH
;
658 io
->split_bio
->bi_opf
|= REQ_FUA
;
659 submit_bio(io
->split_bio
);
663 io
->current_bio
->bi_opf
|= REQ_PREFLUSH
;
665 io
->current_bio
->bi_opf
|= REQ_FUA
;
666 submit_bio(io
->current_bio
);
669 /* deferred io_unit will be dispatched here */
670 static void r5l_submit_io_async(struct work_struct
*work
)
672 struct r5l_log
*log
= container_of(work
, struct r5l_log
,
674 struct r5l_io_unit
*io
= NULL
;
677 spin_lock_irqsave(&log
->io_list_lock
, flags
);
678 if (!list_empty(&log
->running_ios
)) {
679 io
= list_first_entry(&log
->running_ios
, struct r5l_io_unit
,
681 if (!io
->io_deferred
)
686 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
688 r5l_do_submit_io(log
, io
);
691 static void r5c_disable_writeback_async(struct work_struct
*work
)
693 struct r5l_log
*log
= container_of(work
, struct r5l_log
,
694 disable_writeback_work
);
695 struct mddev
*mddev
= log
->rdev
->mddev
;
696 struct r5conf
*conf
= mddev
->private;
699 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
)
701 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
704 /* wait superblock change before suspend */
705 wait_event(mddev
->sb_wait
,
707 (!test_bit(MD_SB_CHANGE_PENDING
, &mddev
->sb_flags
) &&
708 (locked
= mddev_trylock(mddev
))));
710 mddev_suspend(mddev
);
711 log
->r5c_journal_mode
= R5C_JOURNAL_MODE_WRITE_THROUGH
;
717 static void r5l_submit_current_io(struct r5l_log
*log
)
719 struct r5l_io_unit
*io
= log
->current_io
;
721 struct r5l_meta_block
*block
;
724 bool do_submit
= true;
729 block
= page_address(io
->meta_page
);
730 block
->meta_size
= cpu_to_le32(io
->meta_offset
);
731 crc
= crc32c_le(log
->uuid_checksum
, block
, PAGE_SIZE
);
732 block
->checksum
= cpu_to_le32(crc
);
733 bio
= io
->current_bio
;
735 log
->current_io
= NULL
;
736 spin_lock_irqsave(&log
->io_list_lock
, flags
);
737 if (io
->has_flush
|| io
->has_fua
) {
738 if (io
!= list_first_entry(&log
->running_ios
,
739 struct r5l_io_unit
, log_sibling
)) {
744 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
746 r5l_do_submit_io(log
, io
);
749 static struct bio
*r5l_bio_alloc(struct r5l_log
*log
)
751 struct bio
*bio
= bio_alloc_bioset(GFP_NOIO
, BIO_MAX_PAGES
, log
->bs
);
753 bio_set_op_attrs(bio
, REQ_OP_WRITE
, 0);
754 bio_set_dev(bio
, log
->rdev
->bdev
);
755 bio
->bi_iter
.bi_sector
= log
->rdev
->data_offset
+ log
->log_start
;
760 static void r5_reserve_log_entry(struct r5l_log
*log
, struct r5l_io_unit
*io
)
762 log
->log_start
= r5l_ring_add(log
, log
->log_start
, BLOCK_SECTORS
);
764 r5c_update_log_state(log
);
	 * If we filled up the log device, start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
772 if (log
->log_start
== 0)
773 io
->need_split_bio
= true;
775 io
->log_end
= log
->log_start
;
778 static struct r5l_io_unit
*r5l_new_meta(struct r5l_log
*log
)
780 struct r5l_io_unit
*io
;
781 struct r5l_meta_block
*block
;
783 io
= mempool_alloc(log
->io_pool
, GFP_ATOMIC
);
786 memset(io
, 0, sizeof(*io
));
789 INIT_LIST_HEAD(&io
->log_sibling
);
790 INIT_LIST_HEAD(&io
->stripe_list
);
791 bio_list_init(&io
->flush_barriers
);
792 io
->state
= IO_UNIT_RUNNING
;
794 io
->meta_page
= mempool_alloc(log
->meta_pool
, GFP_NOIO
);
795 block
= page_address(io
->meta_page
);
797 block
->magic
= cpu_to_le32(R5LOG_MAGIC
);
798 block
->version
= R5LOG_VERSION
;
799 block
->seq
= cpu_to_le64(log
->seq
);
800 block
->position
= cpu_to_le64(log
->log_start
);
802 io
->log_start
= log
->log_start
;
803 io
->meta_offset
= sizeof(struct r5l_meta_block
);
804 io
->seq
= log
->seq
++;
806 io
->current_bio
= r5l_bio_alloc(log
);
807 io
->current_bio
->bi_end_io
= r5l_log_endio
;
808 io
->current_bio
->bi_private
= io
;
809 bio_add_page(io
->current_bio
, io
->meta_page
, PAGE_SIZE
, 0);
811 r5_reserve_log_entry(log
, io
);
813 spin_lock_irq(&log
->io_list_lock
);
814 list_add_tail(&io
->log_sibling
, &log
->running_ios
);
815 spin_unlock_irq(&log
->io_list_lock
);
820 static int r5l_get_meta(struct r5l_log
*log
, unsigned int payload_size
)
822 if (log
->current_io
&&
823 log
->current_io
->meta_offset
+ payload_size
> PAGE_SIZE
)
824 r5l_submit_current_io(log
);
826 if (!log
->current_io
) {
827 log
->current_io
= r5l_new_meta(log
);
828 if (!log
->current_io
)
835 static void r5l_append_payload_meta(struct r5l_log
*log
, u16 type
,
837 u32 checksum1
, u32 checksum2
,
838 bool checksum2_valid
)
840 struct r5l_io_unit
*io
= log
->current_io
;
841 struct r5l_payload_data_parity
*payload
;
843 payload
= page_address(io
->meta_page
) + io
->meta_offset
;
844 payload
->header
.type
= cpu_to_le16(type
);
845 payload
->header
.flags
= cpu_to_le16(0);
846 payload
->size
= cpu_to_le32((1 + !!checksum2_valid
) <<
848 payload
->location
= cpu_to_le64(location
);
849 payload
->checksum
[0] = cpu_to_le32(checksum1
);
851 payload
->checksum
[1] = cpu_to_le32(checksum2
);
853 io
->meta_offset
+= sizeof(struct r5l_payload_data_parity
) +
854 sizeof(__le32
) * (1 + !!checksum2_valid
);
857 static void r5l_append_payload_page(struct r5l_log
*log
, struct page
*page
)
859 struct r5l_io_unit
*io
= log
->current_io
;
861 if (io
->need_split_bio
) {
862 BUG_ON(io
->split_bio
);
863 io
->split_bio
= io
->current_bio
;
864 io
->current_bio
= r5l_bio_alloc(log
);
865 bio_chain(io
->current_bio
, io
->split_bio
);
866 io
->need_split_bio
= false;
869 if (!bio_add_page(io
->current_bio
, page
, PAGE_SIZE
, 0))
872 r5_reserve_log_entry(log
, io
);
875 static void r5l_append_flush_payload(struct r5l_log
*log
, sector_t sect
)
877 struct mddev
*mddev
= log
->rdev
->mddev
;
878 struct r5conf
*conf
= mddev
->private;
879 struct r5l_io_unit
*io
;
880 struct r5l_payload_flush
*payload
;
884 * payload_flush requires extra writes to the journal.
885 * To avoid handling the extra IO in quiesce, just skip
891 mutex_lock(&log
->io_mutex
);
892 meta_size
= sizeof(struct r5l_payload_flush
) + sizeof(__le64
);
894 if (r5l_get_meta(log
, meta_size
)) {
895 mutex_unlock(&log
->io_mutex
);
899 /* current implementation is one stripe per flush payload */
900 io
= log
->current_io
;
901 payload
= page_address(io
->meta_page
) + io
->meta_offset
;
902 payload
->header
.type
= cpu_to_le16(R5LOG_PAYLOAD_FLUSH
);
903 payload
->header
.flags
= cpu_to_le16(0);
904 payload
->size
= cpu_to_le32(sizeof(__le64
));
905 payload
->flush_stripes
[0] = cpu_to_le64(sect
);
906 io
->meta_offset
+= meta_size
;
907 /* multiple flush payloads count as one pending_stripe */
908 if (!io
->has_flush_payload
) {
909 io
->has_flush_payload
= 1;
910 atomic_inc(&io
->pending_stripe
);
912 mutex_unlock(&log
->io_mutex
);
915 static int r5l_log_stripe(struct r5l_log
*log
, struct stripe_head
*sh
,
916 int data_pages
, int parity_pages
)
921 struct r5l_io_unit
*io
;
924 ((sizeof(struct r5l_payload_data_parity
) + sizeof(__le32
))
926 sizeof(struct r5l_payload_data_parity
) +
927 sizeof(__le32
) * parity_pages
;
929 ret
= r5l_get_meta(log
, meta_size
);
933 io
= log
->current_io
;
935 if (test_and_clear_bit(STRIPE_R5C_PREFLUSH
, &sh
->state
))
938 for (i
= 0; i
< sh
->disks
; i
++) {
939 if (!test_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
) ||
940 test_bit(R5_InJournal
, &sh
->dev
[i
].flags
))
942 if (i
== sh
->pd_idx
|| i
== sh
->qd_idx
)
944 if (test_bit(R5_WantFUA
, &sh
->dev
[i
].flags
) &&
945 log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_BACK
) {
948 * we need to flush journal to make sure recovery can
949 * reach the data with fua flag
953 r5l_append_payload_meta(log
, R5LOG_PAYLOAD_DATA
,
954 raid5_compute_blocknr(sh
, i
, 0),
955 sh
->dev
[i
].log_checksum
, 0, false);
956 r5l_append_payload_page(log
, sh
->dev
[i
].page
);
959 if (parity_pages
== 2) {
960 r5l_append_payload_meta(log
, R5LOG_PAYLOAD_PARITY
,
961 sh
->sector
, sh
->dev
[sh
->pd_idx
].log_checksum
,
962 sh
->dev
[sh
->qd_idx
].log_checksum
, true);
963 r5l_append_payload_page(log
, sh
->dev
[sh
->pd_idx
].page
);
964 r5l_append_payload_page(log
, sh
->dev
[sh
->qd_idx
].page
);
965 } else if (parity_pages
== 1) {
966 r5l_append_payload_meta(log
, R5LOG_PAYLOAD_PARITY
,
967 sh
->sector
, sh
->dev
[sh
->pd_idx
].log_checksum
,
969 r5l_append_payload_page(log
, sh
->dev
[sh
->pd_idx
].page
);
970 } else /* Just writing data, not parity, in caching phase */
971 BUG_ON(parity_pages
!= 0);
973 list_add_tail(&sh
->log_list
, &io
->stripe_list
);
974 atomic_inc(&io
->pending_stripe
);
977 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
)
980 if (sh
->log_start
== MaxSector
) {
981 BUG_ON(!list_empty(&sh
->r5c
));
982 sh
->log_start
= io
->log_start
;
983 spin_lock_irq(&log
->stripe_in_journal_lock
);
984 list_add_tail(&sh
->r5c
,
985 &log
->stripe_in_journal_list
);
986 spin_unlock_irq(&log
->stripe_in_journal_lock
);
987 atomic_inc(&log
->stripe_in_journal_count
);
992 /* add stripe to no_space_stripes, and then wake up reclaim */
993 static inline void r5l_add_no_space_stripe(struct r5l_log
*log
,
994 struct stripe_head
*sh
)
996 spin_lock(&log
->no_space_stripes_lock
);
997 list_add_tail(&sh
->log_list
, &log
->no_space_stripes
);
998 spin_unlock(&log
->no_space_stripes_lock
);
1002 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
1003 * data from log to raid disks), so we shouldn't wait for reclaim here
1005 int r5l_write_stripe(struct r5l_log
*log
, struct stripe_head
*sh
)
1007 struct r5conf
*conf
= sh
->raid_conf
;
1008 int write_disks
= 0;
1009 int data_pages
, parity_pages
;
1013 bool wake_reclaim
= false;
1017 /* Don't support stripe batch */
1018 if (sh
->log_io
|| !test_bit(R5_Wantwrite
, &sh
->dev
[sh
->pd_idx
].flags
) ||
1019 test_bit(STRIPE_SYNCING
, &sh
->state
)) {
1020 /* the stripe is written to log, we start writing it to raid */
1021 clear_bit(STRIPE_LOG_TRAPPED
, &sh
->state
);
1025 WARN_ON(test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
1027 for (i
= 0; i
< sh
->disks
; i
++) {
1030 if (!test_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
) ||
1031 test_bit(R5_InJournal
, &sh
->dev
[i
].flags
))
1035 /* checksum is already calculated in last run */
1036 if (test_bit(STRIPE_LOG_TRAPPED
, &sh
->state
))
1038 addr
= kmap_atomic(sh
->dev
[i
].page
);
1039 sh
->dev
[i
].log_checksum
= crc32c_le(log
->uuid_checksum
,
1041 kunmap_atomic(addr
);
1043 parity_pages
= 1 + !!(sh
->qd_idx
>= 0);
1044 data_pages
= write_disks
- parity_pages
;
1046 set_bit(STRIPE_LOG_TRAPPED
, &sh
->state
);
1048 * The stripe must enter state machine again to finish the write, so
1051 clear_bit(STRIPE_DELAYED
, &sh
->state
);
1052 atomic_inc(&sh
->count
);
1054 mutex_lock(&log
->io_mutex
);
1056 reserve
= (1 + write_disks
) << (PAGE_SHIFT
- 9);
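	/*
	 * reserve covers the meta block plus one page per written disk,
	 * converted from pages to sectors: e.g. with write_disks == 4 and
	 * 4kB pages, (1 + 4) << (PAGE_SHIFT - 9) == 5 << 3 == 40 sectors.
	 */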
1058 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
) {
1059 if (!r5l_has_free_space(log
, reserve
)) {
1060 r5l_add_no_space_stripe(log
, sh
);
1061 wake_reclaim
= true;
1063 ret
= r5l_log_stripe(log
, sh
, data_pages
, parity_pages
);
1065 spin_lock_irq(&log
->io_list_lock
);
1066 list_add_tail(&sh
->log_list
,
1067 &log
->no_mem_stripes
);
1068 spin_unlock_irq(&log
->io_list_lock
);
1071 } else { /* R5C_JOURNAL_MODE_WRITE_BACK */
1073 * log space critical, do not process stripes that are
1074 * not in cache yet (sh->log_start == MaxSector).
1076 if (test_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
) &&
1077 sh
->log_start
== MaxSector
) {
1078 r5l_add_no_space_stripe(log
, sh
);
1079 wake_reclaim
= true;
1081 } else if (!r5l_has_free_space(log
, reserve
)) {
1082 if (sh
->log_start
== log
->last_checkpoint
)
1085 r5l_add_no_space_stripe(log
, sh
);
1087 ret
= r5l_log_stripe(log
, sh
, data_pages
, parity_pages
);
1089 spin_lock_irq(&log
->io_list_lock
);
1090 list_add_tail(&sh
->log_list
,
1091 &log
->no_mem_stripes
);
1092 spin_unlock_irq(&log
->io_list_lock
);
1097 mutex_unlock(&log
->io_mutex
);
1099 r5l_wake_reclaim(log
, reserve
);
1103 void r5l_write_stripe_run(struct r5l_log
*log
)
1107 mutex_lock(&log
->io_mutex
);
1108 r5l_submit_current_io(log
);
1109 mutex_unlock(&log
->io_mutex
);
1112 int r5l_handle_flush_request(struct r5l_log
*log
, struct bio
*bio
)
1117 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
) {
		 * in write through (journal only)
		 * we flush the log disk cache first, then write stripe data to
		 * the raid disks. So if the bio is finished, the log disk
		 * cache is flushed already. The recovery guarantees we can
		 * recover the bio from the log disk, so we don't need to
		 * flush again.
1125 if (bio
->bi_iter
.bi_size
== 0) {
1129 bio
->bi_opf
&= ~REQ_PREFLUSH
;
1131 /* write back (with cache) */
1132 if (bio
->bi_iter
.bi_size
== 0) {
1133 mutex_lock(&log
->io_mutex
);
1134 r5l_get_meta(log
, 0);
1135 bio_list_add(&log
->current_io
->flush_barriers
, bio
);
1136 log
->current_io
->has_flush
= 1;
1137 log
->current_io
->has_null_flush
= 1;
1138 atomic_inc(&log
->current_io
->pending_stripe
);
1139 r5l_submit_current_io(log
);
1140 mutex_unlock(&log
->io_mutex
);
1147 /* This will run after log space is reclaimed */
1148 static void r5l_run_no_space_stripes(struct r5l_log
*log
)
1150 struct stripe_head
*sh
;
1152 spin_lock(&log
->no_space_stripes_lock
);
1153 while (!list_empty(&log
->no_space_stripes
)) {
1154 sh
= list_first_entry(&log
->no_space_stripes
,
1155 struct stripe_head
, log_list
);
1156 list_del_init(&sh
->log_list
);
1157 set_bit(STRIPE_HANDLE
, &sh
->state
);
1158 raid5_release_stripe(sh
);
1160 spin_unlock(&log
->no_space_stripes_lock
);
1164 * calculate new last_checkpoint
1165 * for write through mode, returns log->next_checkpoint
1166 * for write back, returns log_start of first sh in stripe_in_journal_list
1168 static sector_t
r5c_calculate_new_cp(struct r5conf
*conf
)
1170 struct stripe_head
*sh
;
1171 struct r5l_log
*log
= conf
->log
;
1173 unsigned long flags
;
1175 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
)
1176 return log
->next_checkpoint
;
1178 spin_lock_irqsave(&log
->stripe_in_journal_lock
, flags
);
1179 if (list_empty(&conf
->log
->stripe_in_journal_list
)) {
1180 /* all stripes flushed */
1181 spin_unlock_irqrestore(&log
->stripe_in_journal_lock
, flags
);
1182 return log
->next_checkpoint
;
1184 sh
= list_first_entry(&conf
->log
->stripe_in_journal_list
,
1185 struct stripe_head
, r5c
);
1186 new_cp
= sh
->log_start
;
1187 spin_unlock_irqrestore(&log
->stripe_in_journal_lock
, flags
);
1191 static sector_t
r5l_reclaimable_space(struct r5l_log
*log
)
1193 struct r5conf
*conf
= log
->rdev
->mddev
->private;
1195 return r5l_ring_distance(log
, log
->last_checkpoint
,
1196 r5c_calculate_new_cp(conf
));
1199 static void r5l_run_no_mem_stripe(struct r5l_log
*log
)
1201 struct stripe_head
*sh
;
1203 assert_spin_locked(&log
->io_list_lock
);
1205 if (!list_empty(&log
->no_mem_stripes
)) {
1206 sh
= list_first_entry(&log
->no_mem_stripes
,
1207 struct stripe_head
, log_list
);
1208 list_del_init(&sh
->log_list
);
1209 set_bit(STRIPE_HANDLE
, &sh
->state
);
1210 raid5_release_stripe(sh
);
1214 static bool r5l_complete_finished_ios(struct r5l_log
*log
)
1216 struct r5l_io_unit
*io
, *next
;
1219 assert_spin_locked(&log
->io_list_lock
);
1221 list_for_each_entry_safe(io
, next
, &log
->finished_ios
, log_sibling
) {
1222 /* don't change list order */
1223 if (io
->state
< IO_UNIT_STRIPE_END
)
1226 log
->next_checkpoint
= io
->log_start
;
1228 list_del(&io
->log_sibling
);
1229 mempool_free(io
, log
->io_pool
);
1230 r5l_run_no_mem_stripe(log
);
1238 static void __r5l_stripe_write_finished(struct r5l_io_unit
*io
)
1240 struct r5l_log
*log
= io
->log
;
1241 struct r5conf
*conf
= log
->rdev
->mddev
->private;
1242 unsigned long flags
;
1244 spin_lock_irqsave(&log
->io_list_lock
, flags
);
1245 __r5l_set_io_unit_state(io
, IO_UNIT_STRIPE_END
);
1247 if (!r5l_complete_finished_ios(log
)) {
1248 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
1252 if (r5l_reclaimable_space(log
) > log
->max_free_space
||
1253 test_bit(R5C_LOG_TIGHT
, &conf
->cache_state
))
1254 r5l_wake_reclaim(log
, 0);
1256 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
1257 wake_up(&log
->iounit_wait
);
1260 void r5l_stripe_write_finished(struct stripe_head
*sh
)
1262 struct r5l_io_unit
*io
;
1267 if (io
&& atomic_dec_and_test(&io
->pending_stripe
))
1268 __r5l_stripe_write_finished(io
);
1271 static void r5l_log_flush_endio(struct bio
*bio
)
1273 struct r5l_log
*log
= container_of(bio
, struct r5l_log
,
1275 unsigned long flags
;
1276 struct r5l_io_unit
*io
;
1279 md_error(log
->rdev
->mddev
, log
->rdev
);
1281 spin_lock_irqsave(&log
->io_list_lock
, flags
);
1282 list_for_each_entry(io
, &log
->flushing_ios
, log_sibling
)
1283 r5l_io_run_stripes(io
);
1284 list_splice_tail_init(&log
->flushing_ios
, &log
->finished_ios
);
1285 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
 * Starting dispatch IO to raid.
 * A log consists of io_units, each starting with a meta block. There is one
 * situation we want to avoid: a broken meta in the middle of the log means
 * recovery can't find the meta at the head of the log. If operations require
 * the meta at the head to be persistent in the log, we must make sure the
 * meta before it is persistent in the log too. A case is:
 *
 * stripe data/parity is in the log, we start writing the stripe to the raid
 * disks. The stripe data/parity must be persistent in the log before we do
 * the write to the raid disks.
 *
 * The solution is that we strictly maintain io_unit list order. In this case,
 * we only write stripes of an io_unit to the raid disks until the io_unit is
 * the first one whose data/parity is in the log.
1302 void r5l_flush_stripe_to_raid(struct r5l_log
*log
)
1306 if (!log
|| !log
->need_cache_flush
)
1309 spin_lock_irq(&log
->io_list_lock
);
1310 /* flush bio is running */
1311 if (!list_empty(&log
->flushing_ios
)) {
1312 spin_unlock_irq(&log
->io_list_lock
);
1315 list_splice_tail_init(&log
->io_end_ios
, &log
->flushing_ios
);
1316 do_flush
= !list_empty(&log
->flushing_ios
);
1317 spin_unlock_irq(&log
->io_list_lock
);
1321 bio_reset(&log
->flush_bio
);
1322 bio_set_dev(&log
->flush_bio
, log
->rdev
->bdev
);
1323 log
->flush_bio
.bi_end_io
= r5l_log_flush_endio
;
1324 log
->flush_bio
.bi_opf
= REQ_OP_WRITE
| REQ_PREFLUSH
;
1325 submit_bio(&log
->flush_bio
);
1328 static void r5l_write_super(struct r5l_log
*log
, sector_t cp
);
1329 static void r5l_write_super_and_discard_space(struct r5l_log
*log
,
1332 struct block_device
*bdev
= log
->rdev
->bdev
;
1333 struct mddev
*mddev
;
1335 r5l_write_super(log
, end
);
1337 if (!blk_queue_discard(bdev_get_queue(bdev
)))
1340 mddev
= log
->rdev
->mddev
;
	 * Discard could zero data, so before discard we must make sure
	 * the superblock is updated to the new log tail. Updating the
	 * superblock (either directly calling md_update_sb() or depending on
	 * the md thread) must hold the reconfig mutex. On the other hand,
	 * raid5_quiesce is called with reconfig_mutex held. The first step of
	 * raid5_quiesce() is waiting for all IO to finish, hence waiting for
	 * the reclaim thread, while the reclaim thread is calling this
	 * function and waiting for the reconfig mutex. So there is a deadlock.
	 * We work around this issue with a trylock.
	 * FIXME: we could miss discard if we can't take the reconfig mutex
1352 set_mask_bits(&mddev
->sb_flags
, 0,
1353 BIT(MD_SB_CHANGE_DEVS
) | BIT(MD_SB_CHANGE_PENDING
));
1354 if (!mddev_trylock(mddev
))
1356 md_update_sb(mddev
, 1);
1357 mddev_unlock(mddev
);
1359 /* discard IO error really doesn't matter, ignore it */
1360 if (log
->last_checkpoint
< end
) {
1361 blkdev_issue_discard(bdev
,
1362 log
->last_checkpoint
+ log
->rdev
->data_offset
,
1363 end
- log
->last_checkpoint
, GFP_NOIO
, 0);
1365 blkdev_issue_discard(bdev
,
1366 log
->last_checkpoint
+ log
->rdev
->data_offset
,
1367 log
->device_size
- log
->last_checkpoint
,
1369 blkdev_issue_discard(bdev
, log
->rdev
->data_offset
, end
,
1375 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
1376 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
1378 * must hold conf->device_lock
1380 static void r5c_flush_stripe(struct r5conf
*conf
, struct stripe_head
*sh
)
1382 BUG_ON(list_empty(&sh
->lru
));
1383 BUG_ON(!test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
1384 BUG_ON(test_bit(STRIPE_HANDLE
, &sh
->state
));
1387 * The stripe is not ON_RELEASE_LIST, so it is safe to call
1388 * raid5_release_stripe() while holding conf->device_lock
1390 BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST
, &sh
->state
));
1391 assert_spin_locked(&conf
->device_lock
);
1393 list_del_init(&sh
->lru
);
1394 atomic_inc(&sh
->count
);
1396 set_bit(STRIPE_HANDLE
, &sh
->state
);
1397 atomic_inc(&conf
->active_stripes
);
1398 r5c_make_stripe_write_out(sh
);
1400 if (test_bit(STRIPE_R5C_PARTIAL_STRIPE
, &sh
->state
))
1401 atomic_inc(&conf
->r5c_flushing_partial_stripes
);
1403 atomic_inc(&conf
->r5c_flushing_full_stripes
);
1404 raid5_release_stripe(sh
);
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If less than num full stripes are
 * flushed, flush some partial stripes until a total of num stripes are
 * flushed or there are no more cached stripes.
1413 void r5c_flush_cache(struct r5conf
*conf
, int num
)
1416 struct stripe_head
*sh
, *next
;
1418 assert_spin_locked(&conf
->device_lock
);
1423 list_for_each_entry_safe(sh
, next
, &conf
->r5c_full_stripe_list
, lru
) {
1424 r5c_flush_stripe(conf
, sh
);
1430 list_for_each_entry_safe(sh
, next
,
1431 &conf
->r5c_partial_stripe_list
, lru
) {
1432 r5c_flush_stripe(conf
, sh
);
1438 static void r5c_do_reclaim(struct r5conf
*conf
)
1440 struct r5l_log
*log
= conf
->log
;
1441 struct stripe_head
*sh
;
1443 unsigned long flags
;
1445 int stripes_to_flush
;
1446 int flushing_partial
, flushing_full
;
1448 if (!r5c_is_writeback(log
))
1451 flushing_partial
= atomic_read(&conf
->r5c_flushing_partial_stripes
);
1452 flushing_full
= atomic_read(&conf
->r5c_flushing_full_stripes
);
1453 total_cached
= atomic_read(&conf
->r5c_cached_partial_stripes
) +
1454 atomic_read(&conf
->r5c_cached_full_stripes
) -
1455 flushing_full
- flushing_partial
;
1457 if (total_cached
> conf
->min_nr_stripes
* 3 / 4 ||
1458 atomic_read(&conf
->empty_inactive_list_nr
) > 0)
1460 * if stripe cache pressure high, flush all full stripes and
1461 * some partial stripes
1463 stripes_to_flush
= R5C_RECLAIM_STRIPE_GROUP
;
1464 else if (total_cached
> conf
->min_nr_stripes
* 1 / 2 ||
1465 atomic_read(&conf
->r5c_cached_full_stripes
) - flushing_full
>
1466 R5C_FULL_STRIPE_FLUSH_BATCH(conf
))
		 * if stripe cache pressure is moderate, or if there are many
		 * full stripes, flush all full stripes
1471 stripes_to_flush
= 0;
1473 /* no need to flush */
1474 stripes_to_flush
= -1;
1476 if (stripes_to_flush
>= 0) {
1477 spin_lock_irqsave(&conf
->device_lock
, flags
);
1478 r5c_flush_cache(conf
, stripes_to_flush
);
1479 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1482 /* if log space is tight, flush stripes on stripe_in_journal_list */
1483 if (test_bit(R5C_LOG_TIGHT
, &conf
->cache_state
)) {
1484 spin_lock_irqsave(&log
->stripe_in_journal_lock
, flags
);
1485 spin_lock(&conf
->device_lock
);
1486 list_for_each_entry(sh
, &log
->stripe_in_journal_list
, r5c
) {
1488 * stripes on stripe_in_journal_list could be in any
1489 * state of the stripe_cache state machine. In this
1490 * case, we only want to flush stripe on
1491 * r5c_cached_full/partial_stripes. The following
1492 * condition makes sure the stripe is on one of the
1495 if (!list_empty(&sh
->lru
) &&
1496 !test_bit(STRIPE_HANDLE
, &sh
->state
) &&
1497 atomic_read(&sh
->count
) == 0) {
1498 r5c_flush_stripe(conf
, sh
);
1499 if (count
++ >= R5C_RECLAIM_STRIPE_GROUP
)
1503 spin_unlock(&conf
->device_lock
);
1504 spin_unlock_irqrestore(&log
->stripe_in_journal_lock
, flags
);
1507 if (!test_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
))
1508 r5l_run_no_space_stripes(log
);
1510 md_wakeup_thread(conf
->mddev
->thread
);
1513 static void r5l_do_reclaim(struct r5l_log
*log
)
1515 struct r5conf
*conf
= log
->rdev
->mddev
->private;
1516 sector_t reclaim_target
= xchg(&log
->reclaim_target
, 0);
1517 sector_t reclaimable
;
1518 sector_t next_checkpoint
;
1521 spin_lock_irq(&log
->io_list_lock
);
1522 write_super
= r5l_reclaimable_space(log
) > log
->max_free_space
||
1523 reclaim_target
!= 0 || !list_empty(&log
->no_space_stripes
);
1525 * move proper io_unit to reclaim list. We should not change the order.
1526 * reclaimable/unreclaimable io_unit can be mixed in the list, we
1527 * shouldn't reuse space of an unreclaimable io_unit
1530 reclaimable
= r5l_reclaimable_space(log
);
1531 if (reclaimable
>= reclaim_target
||
1532 (list_empty(&log
->running_ios
) &&
1533 list_empty(&log
->io_end_ios
) &&
1534 list_empty(&log
->flushing_ios
) &&
1535 list_empty(&log
->finished_ios
)))
1538 md_wakeup_thread(log
->rdev
->mddev
->thread
);
1539 wait_event_lock_irq(log
->iounit_wait
,
1540 r5l_reclaimable_space(log
) > reclaimable
,
1544 next_checkpoint
= r5c_calculate_new_cp(conf
);
1545 spin_unlock_irq(&log
->io_list_lock
);
1547 if (reclaimable
== 0 || !write_super
)
1551 * write_super will flush cache of each raid disk. We must write super
1552 * here, because the log area might be reused soon and we don't want to
1555 r5l_write_super_and_discard_space(log
, next_checkpoint
);
1557 mutex_lock(&log
->io_mutex
);
1558 log
->last_checkpoint
= next_checkpoint
;
1559 r5c_update_log_state(log
);
1560 mutex_unlock(&log
->io_mutex
);
1562 r5l_run_no_space_stripes(log
);
1565 static void r5l_reclaim_thread(struct md_thread
*thread
)
1567 struct mddev
*mddev
= thread
->mddev
;
1568 struct r5conf
*conf
= mddev
->private;
1569 struct r5l_log
*log
= conf
->log
;
1573 r5c_do_reclaim(conf
);
1574 r5l_do_reclaim(log
);
1577 void r5l_wake_reclaim(struct r5l_log
*log
, sector_t space
)
1579 unsigned long target
;
1580 unsigned long new = (unsigned long)space
; /* overflow in theory */
1585 target
= log
->reclaim_target
;
1588 } while (cmpxchg(&log
->reclaim_target
, target
, new) != target
);
1589 md_wakeup_thread(log
->reclaim_thread
);
1592 void r5l_quiesce(struct r5l_log
*log
, int quiesce
)
1594 struct mddev
*mddev
;
1599 /* make sure r5l_write_super_and_discard_space exits */
1600 mddev
= log
->rdev
->mddev
;
1601 wake_up(&mddev
->sb_wait
);
1602 kthread_park(log
->reclaim_thread
->tsk
);
1603 r5l_wake_reclaim(log
, MaxSector
);
1604 r5l_do_reclaim(log
);
1606 kthread_unpark(log
->reclaim_thread
->tsk
);
1609 bool r5l_log_disk_error(struct r5conf
*conf
)
1611 struct r5l_log
*log
;
1613 /* don't allow write if journal disk is missing */
1615 log
= rcu_dereference(conf
->log
);
1618 ret
= test_bit(MD_HAS_JOURNAL
, &conf
->mddev
->flags
);
1620 ret
= test_bit(Faulty
, &log
->rdev
->flags
);
1625 #define R5L_RECOVERY_PAGE_POOL_SIZE 256
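/*
 * With 4kB pages, the read ahead pool sized below covers up to
 * 256 * 4kB == 1MB of the journal per fetch (see
 * r5l_recovery_fetch_ra_pool()).
 */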
1627 struct r5l_recovery_ctx
{
1628 struct page
*meta_page
; /* current meta */
1629 sector_t meta_total_blocks
; /* total size of current meta and data */
1630 sector_t pos
; /* recovery position */
1631 u64 seq
; /* recovery position seq */
1632 int data_parity_stripes
; /* number of data_parity stripes */
1633 int data_only_stripes
; /* number of data_only stripes */
1634 struct list_head cached_list
;
	 * read ahead page pool (ra_pool)
	 * In recovery, the log is read sequentially. It is not efficient to
	 * read every page with sync_page_io(). The read ahead page pool
	 * reads multiple pages with one IO, so further log reads can
	 * just copy data from the pool.
1643 struct page
*ra_pool
[R5L_RECOVERY_PAGE_POOL_SIZE
];
1644 sector_t pool_offset
; /* offset of first page in the pool */
1645 int total_pages
; /* total allocated pages */
1646 int valid_pages
; /* pages with valid data */
1647 struct bio
*ra_bio
; /* bio to do the read ahead */
1650 static int r5l_recovery_allocate_ra_pool(struct r5l_log
*log
,
1651 struct r5l_recovery_ctx
*ctx
)
1655 ctx
->ra_bio
= bio_alloc_bioset(GFP_KERNEL
, BIO_MAX_PAGES
, log
->bs
);
1659 ctx
->valid_pages
= 0;
1660 ctx
->total_pages
= 0;
1661 while (ctx
->total_pages
< R5L_RECOVERY_PAGE_POOL_SIZE
) {
1662 page
= alloc_page(GFP_KERNEL
);
1666 ctx
->ra_pool
[ctx
->total_pages
] = page
;
1667 ctx
->total_pages
+= 1;
1670 if (ctx
->total_pages
== 0) {
1671 bio_put(ctx
->ra_bio
);
1675 ctx
->pool_offset
= 0;
1679 static void r5l_recovery_free_ra_pool(struct r5l_log
*log
,
1680 struct r5l_recovery_ctx
*ctx
)
1684 for (i
= 0; i
< ctx
->total_pages
; ++i
)
1685 put_page(ctx
->ra_pool
[i
]);
1686 bio_put(ctx
->ra_bio
);
1690 * fetch ctx->valid_pages pages from offset
1691 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1692 * However, if the offset is close to the end of the journal device,
1693 * ctx->valid_pages could be smaller than ctx->total_pages
1695 static int r5l_recovery_fetch_ra_pool(struct r5l_log
*log
,
1696 struct r5l_recovery_ctx
*ctx
,
1699 bio_reset(ctx
->ra_bio
);
1700 bio_set_dev(ctx
->ra_bio
, log
->rdev
->bdev
);
1701 bio_set_op_attrs(ctx
->ra_bio
, REQ_OP_READ
, 0);
1702 ctx
->ra_bio
->bi_iter
.bi_sector
= log
->rdev
->data_offset
+ offset
;
1704 ctx
->valid_pages
= 0;
1705 ctx
->pool_offset
= offset
;
1707 while (ctx
->valid_pages
< ctx
->total_pages
) {
1708 bio_add_page(ctx
->ra_bio
,
1709 ctx
->ra_pool
[ctx
->valid_pages
], PAGE_SIZE
, 0);
1710 ctx
->valid_pages
+= 1;
1712 offset
= r5l_ring_add(log
, offset
, BLOCK_SECTORS
);
1714 if (offset
== 0) /* reached end of the device */
1718 return submit_bio_wait(ctx
->ra_bio
);
1722 * try read a page from the read ahead page pool, if the page is not in the
1723 * pool, call r5l_recovery_fetch_ra_pool
1725 static int r5l_recovery_read_page(struct r5l_log
*log
,
1726 struct r5l_recovery_ctx
*ctx
,
1732 if (offset
< ctx
->pool_offset
||
1733 offset
>= ctx
->pool_offset
+ ctx
->valid_pages
* BLOCK_SECTORS
) {
1734 ret
= r5l_recovery_fetch_ra_pool(log
, ctx
, offset
);
1739 BUG_ON(offset
< ctx
->pool_offset
||
1740 offset
>= ctx
->pool_offset
+ ctx
->valid_pages
* BLOCK_SECTORS
);
1742 memcpy(page_address(page
),
1743 page_address(ctx
->ra_pool
[(offset
- ctx
->pool_offset
) >>
1744 BLOCK_SECTOR_SHIFT
]),
1749 static int r5l_recovery_read_meta_block(struct r5l_log
*log
,
1750 struct r5l_recovery_ctx
*ctx
)
1752 struct page
*page
= ctx
->meta_page
;
1753 struct r5l_meta_block
*mb
;
1754 u32 crc
, stored_crc
;
1757 ret
= r5l_recovery_read_page(log
, ctx
, page
, ctx
->pos
);
1761 mb
= page_address(page
);
1762 stored_crc
= le32_to_cpu(mb
->checksum
);
1765 if (le32_to_cpu(mb
->magic
) != R5LOG_MAGIC
||
1766 le64_to_cpu(mb
->seq
) != ctx
->seq
||
1767 mb
->version
!= R5LOG_VERSION
||
1768 le64_to_cpu(mb
->position
) != ctx
->pos
)
1771 crc
= crc32c_le(log
->uuid_checksum
, mb
, PAGE_SIZE
);
1772 if (stored_crc
!= crc
)
1775 if (le32_to_cpu(mb
->meta_size
) > PAGE_SIZE
)
1778 ctx
->meta_total_blocks
= BLOCK_SECTORS
;
1784 r5l_recovery_create_empty_meta_block(struct r5l_log
*log
,
1786 sector_t pos
, u64 seq
)
1788 struct r5l_meta_block
*mb
;
1790 mb
= page_address(page
);
1792 mb
->magic
= cpu_to_le32(R5LOG_MAGIC
);
1793 mb
->version
= R5LOG_VERSION
;
1794 mb
->meta_size
= cpu_to_le32(sizeof(struct r5l_meta_block
));
1795 mb
->seq
= cpu_to_le64(seq
);
1796 mb
->position
= cpu_to_le64(pos
);
1799 static int r5l_log_write_empty_meta_block(struct r5l_log
*log
, sector_t pos
,
1803 struct r5l_meta_block
*mb
;
1805 page
= alloc_page(GFP_KERNEL
);
1808 r5l_recovery_create_empty_meta_block(log
, page
, pos
, seq
);
1809 mb
= page_address(page
);
1810 mb
->checksum
= cpu_to_le32(crc32c_le(log
->uuid_checksum
,
1812 if (!sync_page_io(log
->rdev
, pos
, PAGE_SIZE
, page
, REQ_OP_WRITE
,
1813 REQ_SYNC
| REQ_FUA
, false)) {
 * r5l_recovery_load_data and r5l_recovery_load_parity use the flag
 * R5_Wantwrite to mark valid (potentially not flushed) data in the journal.
 *
 * We already verified the checksum in r5l_recovery_verify_data_checksum_for_mb,
 * so there should not be any mismatch here.
1828 static void r5l_recovery_load_data(struct r5l_log
*log
,
1829 struct stripe_head
*sh
,
1830 struct r5l_recovery_ctx
*ctx
,
1831 struct r5l_payload_data_parity
*payload
,
1832 sector_t log_offset
)
1834 struct mddev
*mddev
= log
->rdev
->mddev
;
1835 struct r5conf
*conf
= mddev
->private;
1838 raid5_compute_sector(conf
,
1839 le64_to_cpu(payload
->location
), 0,
1841 r5l_recovery_read_page(log
, ctx
, sh
->dev
[dd_idx
].page
, log_offset
);
1842 sh
->dev
[dd_idx
].log_checksum
=
1843 le32_to_cpu(payload
->checksum
[0]);
1844 ctx
->meta_total_blocks
+= BLOCK_SECTORS
;
1846 set_bit(R5_Wantwrite
, &sh
->dev
[dd_idx
].flags
);
1847 set_bit(STRIPE_R5C_CACHING
, &sh
->state
);
1850 static void r5l_recovery_load_parity(struct r5l_log
*log
,
1851 struct stripe_head
*sh
,
1852 struct r5l_recovery_ctx
*ctx
,
1853 struct r5l_payload_data_parity
*payload
,
1854 sector_t log_offset
)
1856 struct mddev
*mddev
= log
->rdev
->mddev
;
1857 struct r5conf
*conf
= mddev
->private;
1859 ctx
->meta_total_blocks
+= BLOCK_SECTORS
* conf
->max_degraded
;
1860 r5l_recovery_read_page(log
, ctx
, sh
->dev
[sh
->pd_idx
].page
, log_offset
);
1861 sh
->dev
[sh
->pd_idx
].log_checksum
=
1862 le32_to_cpu(payload
->checksum
[0]);
1863 set_bit(R5_Wantwrite
, &sh
->dev
[sh
->pd_idx
].flags
);
1865 if (sh
->qd_idx
>= 0) {
1866 r5l_recovery_read_page(
1867 log
, ctx
, sh
->dev
[sh
->qd_idx
].page
,
1868 r5l_ring_add(log
, log_offset
, BLOCK_SECTORS
));
1869 sh
->dev
[sh
->qd_idx
].log_checksum
=
1870 le32_to_cpu(payload
->checksum
[1]);
1871 set_bit(R5_Wantwrite
, &sh
->dev
[sh
->qd_idx
].flags
);
1873 clear_bit(STRIPE_R5C_CACHING
, &sh
->state
);
1876 static void r5l_recovery_reset_stripe(struct stripe_head
*sh
)
1881 sh
->log_start
= MaxSector
;
1882 for (i
= sh
->disks
; i
--; )
1883 sh
->dev
[i
].flags
= 0;
1887 r5l_recovery_replay_one_stripe(struct r5conf
*conf
,
1888 struct stripe_head
*sh
,
1889 struct r5l_recovery_ctx
*ctx
)
1891 struct md_rdev
*rdev
, *rrdev
;
1895 for (disk_index
= 0; disk_index
< sh
->disks
; disk_index
++) {
1896 if (!test_bit(R5_Wantwrite
, &sh
->dev
[disk_index
].flags
))
1898 if (disk_index
== sh
->qd_idx
|| disk_index
== sh
->pd_idx
)
	 * stripes that only have parity must have been flushed
	 * before the crash that we are now recovering from, so
	 * there is nothing more to recover.
1908 if (data_count
== 0)
1911 for (disk_index
= 0; disk_index
< sh
->disks
; disk_index
++) {
1912 if (!test_bit(R5_Wantwrite
, &sh
->dev
[disk_index
].flags
))
1915 /* in case device is broken */
1917 rdev
= rcu_dereference(conf
->disks
[disk_index
].rdev
);
1919 atomic_inc(&rdev
->nr_pending
);
1921 sync_page_io(rdev
, sh
->sector
, PAGE_SIZE
,
1922 sh
->dev
[disk_index
].page
, REQ_OP_WRITE
, 0,
1924 rdev_dec_pending(rdev
, rdev
->mddev
);
1927 rrdev
= rcu_dereference(conf
->disks
[disk_index
].replacement
);
1929 atomic_inc(&rrdev
->nr_pending
);
1931 sync_page_io(rrdev
, sh
->sector
, PAGE_SIZE
,
1932 sh
->dev
[disk_index
].page
, REQ_OP_WRITE
, 0,
1934 rdev_dec_pending(rrdev
, rrdev
->mddev
);
1939 ctx
->data_parity_stripes
++;
1941 r5l_recovery_reset_stripe(sh
);
1944 static struct stripe_head
*
1945 r5c_recovery_alloc_stripe(
1946 struct r5conf
*conf
,
1947 sector_t stripe_sect
,
1950 struct stripe_head
*sh
;
1952 sh
= raid5_get_active_stripe(conf
, stripe_sect
, 0, noblock
, 0);
1954 return NULL
; /* no more stripe available */
1956 r5l_recovery_reset_stripe(sh
);
1961 static struct stripe_head
*
1962 r5c_recovery_lookup_stripe(struct list_head
*list
, sector_t sect
)
1964 struct stripe_head
*sh
;
1966 list_for_each_entry(sh
, list
, lru
)
1967 if (sh
->sector
== sect
)
1973 r5c_recovery_drop_stripes(struct list_head
*cached_stripe_list
,
1974 struct r5l_recovery_ctx
*ctx
)
1976 struct stripe_head
*sh
, *next
;
1978 list_for_each_entry_safe(sh
, next
, cached_stripe_list
, lru
) {
1979 r5l_recovery_reset_stripe(sh
);
1980 list_del_init(&sh
->lru
);
1981 raid5_release_stripe(sh
);
1986 r5c_recovery_replay_stripes(struct list_head
*cached_stripe_list
,
1987 struct r5l_recovery_ctx
*ctx
)
1989 struct stripe_head
*sh
, *next
;
1991 list_for_each_entry_safe(sh
, next
, cached_stripe_list
, lru
)
1992 if (!test_bit(STRIPE_R5C_CACHING
, &sh
->state
)) {
1993 r5l_recovery_replay_one_stripe(sh
->raid_conf
, sh
, ctx
);
1994 list_del_init(&sh
->lru
);
1995 raid5_release_stripe(sh
);
1999 /* if matches return 0; otherwise return -EINVAL */
2001 r5l_recovery_verify_data_checksum(struct r5l_log
*log
,
2002 struct r5l_recovery_ctx
*ctx
,
2004 sector_t log_offset
, __le32 log_checksum
)
2009 r5l_recovery_read_page(log
, ctx
, page
, log_offset
);
2010 addr
= kmap_atomic(page
);
2011 checksum
= crc32c_le(log
->uuid_checksum
, addr
, PAGE_SIZE
);
2012 kunmap_atomic(addr
);
2013 return (le32_to_cpu(log_checksum
) == checksum
) ? 0 : -EINVAL
;
 * Before loading data into the stripe cache, we need to verify the checksum
 * for all data; if there is a mismatch for any data page, we drop all data
 * in the meta block.
2021 r5l_recovery_verify_data_checksum_for_mb(struct r5l_log
*log
,
2022 struct r5l_recovery_ctx
*ctx
)
2024 struct mddev
*mddev
= log
->rdev
->mddev
;
2025 struct r5conf
*conf
= mddev
->private;
2026 struct r5l_meta_block
*mb
= page_address(ctx
->meta_page
);
2027 sector_t mb_offset
= sizeof(struct r5l_meta_block
);
2028 sector_t log_offset
= r5l_ring_add(log
, ctx
->pos
, BLOCK_SECTORS
);
2030 struct r5l_payload_data_parity
*payload
;
2031 struct r5l_payload_flush
*payload_flush
;
2033 page
= alloc_page(GFP_KERNEL
);
2037 while (mb_offset
< le32_to_cpu(mb
->meta_size
)) {
2038 payload
= (void *)mb
+ mb_offset
;
2039 payload_flush
= (void *)mb
+ mb_offset
;
2041 if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_DATA
) {
2042 if (r5l_recovery_verify_data_checksum(
2043 log
, ctx
, page
, log_offset
,
2044 payload
->checksum
[0]) < 0)
2046 } else if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_PARITY
) {
2047 if (r5l_recovery_verify_data_checksum(
2048 log
, ctx
, page
, log_offset
,
2049 payload
->checksum
[0]) < 0)
2051 if (conf
->max_degraded
== 2 && /* q for RAID 6 */
2052 r5l_recovery_verify_data_checksum(
2054 r5l_ring_add(log
, log_offset
,
2056 payload
->checksum
[1]) < 0)
2058 } else if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_FLUSH
) {
2059 /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
2060 } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2063 if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_FLUSH
) {
2064 mb_offset
+= sizeof(struct r5l_payload_flush
) +
2065 le32_to_cpu(payload_flush
->size
);
2067 /* DATA or PARITY payload */
2068 log_offset
= r5l_ring_add(log
, log_offset
,
2069 le32_to_cpu(payload
->size
));
2070 mb_offset
+= sizeof(struct r5l_payload_data_parity
) +
2072 (le32_to_cpu(payload
->size
) >> (PAGE_SHIFT
- 9));
/*
 * Analyze all data/parity pages in one meta block
 * Returns:
 * 0 for success
 * -EINVAL for unknown payload type
 * -EAGAIN for checksum mismatch of data page
 * -ENOMEM for running out of memory (alloc_page failed or ran out of stripes)
 */
static int
r5c_recovery_analyze_meta_block(struct r5l_log *log,
				struct r5l_recovery_ctx *ctx,
				struct list_head *cached_stripe_list)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_meta_block *mb;
	struct r5l_payload_data_parity *payload;
	struct r5l_payload_flush *payload_flush;
	int mb_offset;
	sector_t log_offset;
	sector_t stripe_sect;
	struct stripe_head *sh;
	int ret;

	/*
	 * for mismatch in data blocks, we will drop all data in this mb, but
	 * we will still read the next mb for other data with the FLUSH flag,
	 * as an io_unit could finish out of order.
	 */
	ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
	if (ret == -EINVAL)
		return -EAGAIN;
	else if (ret)
		return ret;	/* -ENOMEM due to alloc_page() failure */

	mb = page_address(ctx->meta_page);
	mb_offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (mb_offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + mb_offset;
		payload_flush = (void *)mb + mb_offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			int i, count;

			count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
			for (i = 0; i < count; ++i) {
				stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
				sh = r5c_recovery_lookup_stripe(cached_stripe_list,
								stripe_sect);
				if (sh) {
					WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
					r5l_recovery_reset_stripe(sh);
					list_del_init(&sh->lru);
					raid5_release_stripe(sh);
				}
			}

			mb_offset += sizeof(struct r5l_payload_flush) +
				le32_to_cpu(payload_flush->size);
			continue;
		}

		/* DATA or PARITY payload */
		stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
			raid5_compute_sector(
				conf, le64_to_cpu(payload->location), 0, &dd,
				NULL)
			: le64_to_cpu(payload->location);

		sh = r5c_recovery_lookup_stripe(cached_stripe_list,
						stripe_sect);

		if (!sh) {
			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
			/*
			 * cannot get stripe from raid5_get_active_stripe
			 * try replay some stripes
			 */
			if (!sh) {
				r5c_recovery_replay_stripes(
					cached_stripe_list, ctx);
				sh = r5c_recovery_alloc_stripe(
					conf, stripe_sect, 1);
			}
			if (!sh) {
				int new_size = conf->min_nr_stripes * 2;

				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
					 mdname(mddev),
					 new_size);
				ret = raid5_set_cache_size(mddev, new_size);
				if (conf->min_nr_stripes <= new_size / 2) {
					pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
					       mdname(mddev),
					       ret,
					       new_size,
					       conf->min_nr_stripes,
					       conf->max_nr_stripes);
					return -ENOMEM;
				}
				sh = r5c_recovery_alloc_stripe(
					conf, stripe_sect, 0);
			}
			if (!sh) {
				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
				       mdname(mddev));
				return -ENOMEM;
			}
			list_add_tail(&sh->lru, cached_stripe_list);
		}

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
				r5l_recovery_replay_one_stripe(conf, sh, ctx);
				list_move_tail(&sh->lru, cached_stripe_list);
			}
			r5l_recovery_load_data(log, sh, ctx, payload,
					       log_offset);
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			r5l_recovery_load_parity(log, sh, ctx, payload,
						 log_offset);
		else
			return -EINVAL;

		log_offset = r5l_ring_add(log, log_offset,
					  le32_to_cpu(payload->size));

		mb_offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
	}

	return 0;
}

/*
 * Load the stripe into cache. The stripe will be written out later by
 * the stripe cache state machine.
 */
static void r5c_recovery_load_one_stripe(struct r5l_log *log,
					 struct stripe_head *sh)
{
	struct r5dev *dev;
	int i;

	for (i = sh->disks; i--; ) {
		dev = sh->dev + i;
		if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
			set_bit(R5_InJournal, &dev->flags);
			set_bit(R5_UPTODATE, &dev->flags);
		}
	}
}

/*
 * Scan through the log for all to-be-flushed data
 *
 * For stripes with data and parity, namely Data-Parity stripe
 * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
 *
 * For stripes with only data, namely Data-Only stripe
 * (STRIPE_R5C_CACHING == 1), we load them to the stripe cache state machine.
 *
 * For a stripe, if we see data after parity, we should discard all previous
 * data and parity for this stripe, as these data are already flushed to
 * the RAID disks.
 *
 * At the end of the scan, we return the new journal_tail, which points to the
 * first data-only stripe on the journal device, or the next invalid meta block.
 */
static int r5c_recovery_flush_log(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	int ret = 0;

	/* scan through the log */
	while (1) {
		if (r5l_recovery_read_meta_block(log, ctx))
			break;

		ret = r5c_recovery_analyze_meta_block(log, ctx,
						      &ctx->cached_list);
		/*
		 * -EAGAIN means mismatch in data block, in this case, we still
		 * try to scan the next metablock
		 */
		if (ret && ret != -EAGAIN)
			break;	/* ret == -EINVAL or -ENOMEM */
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}

	if (ret == -ENOMEM) {
		r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
		return ret;
	}

	/* replay data-parity stripes */
	r5c_recovery_replay_stripes(&ctx->cached_list, ctx);

	/* load data-only stripes to stripe cache */
	list_for_each_entry(sh, &ctx->cached_list, lru) {
		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5c_recovery_load_one_stripe(log, sh);
		ctx->data_only_stripes++;
	}

	return 0;
}

/*
 * we did a recovery. Now ctx.pos points to an invalid meta block. The new
 * log will start here. But we can't let the superblock point to the last
 * valid meta block. The log might look like:
 * | meta 1| meta 2| meta 3|
 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
 * superblock points to meta 1, we write a new valid meta 2n. If a crash
 * happens again, the new recovery will start from meta 1. Since meta 2n is
 * valid now, recovery will think meta 3 is valid, which is wrong.
 * The solution is we create a new meta in meta2 with its seq == meta
 * 1's seq + 10000 and let the superblock point to meta2. The same recovery
 * will not think meta 3 is a valid meta, because its seq doesn't match.
 *
 * Before recovery, the log looks like the following
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^
 *   |- log->last_checkpoint
 *   |- log->last_cp_seq
 *
 * Now we scan through the log until we see an invalid entry
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos
 *   |- log->last_cp_seq          |- ctx->seq
 *
 * From this point, we need to increase the seq number by 10000 to avoid
 * confusing the next recovery.
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                              ^
 *   |- log->last_checkpoint        |- ctx->pos+1
 *   |- log->last_cp_seq            |- ctx->seq+10001
 *
 * However, it is not safe to start the state machine yet, because data only
 * parities are not yet secured in RAID. To save these data only parities, we
 * rewrite them from seq+10001.
 *
 *   -----------------------------------------------------------------
 *   |           valid log        | data only stripes | invalid log  |
 *   -----------------------------------------------------------------
 *   ^                                                ^
 *   |- log->last_checkpoint                          |- ctx->pos+n
 *   |- log->last_cp_seq                              |- ctx->seq+10000+n
 *
 * If failure happens again during this process, the recovery can safely
 * start again from log->last_checkpoint.
 *
 * Once data only stripes are rewritten to the journal, we move log_tail
 *
 *   -----------------------------------------------------------------
 *   |     old log        |    data only stripes | invalid log  |
 *   -----------------------------------------------------------------
 *                        ^                      ^
 *                        |- log->last_checkpoint|- ctx->pos+n
 *                        |- log->last_cp_seq    |- ctx->seq+10000+n
 *
 * Then we can safely start the state machine. If failure happens from this
 * point on, the recovery will start from the new log->last_checkpoint.
 */
static int
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	struct mddev *mddev = log->rdev->mddev;
	struct page *page;
	sector_t next_checkpoint = MaxSector;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
		       mdname(mddev));
		return -ENOMEM;
	}

	WARN_ON(list_empty(&ctx->cached_list));

	list_for_each_entry(sh, &ctx->cached_list, lru) {
		struct r5l_meta_block *mb;
		int i;
		int offset;
		sector_t write_pos;

		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5l_recovery_create_empty_meta_block(log, page,
						     ctx->pos, ctx->seq);
		mb = page_address(page);
		offset = le32_to_cpu(mb->meta_size);
		write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

		for (i = sh->disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			struct r5l_payload_data_parity *payload;
			void *addr;

			if (test_bit(R5_InJournal, &dev->flags)) {
				payload = (void *)mb + offset;
				payload->header.type = cpu_to_le16(
					R5LOG_PAYLOAD_DATA);
				payload->size = cpu_to_le32(BLOCK_SECTORS);
				payload->location = cpu_to_le64(
					raid5_compute_blocknr(sh, i, 0));
				addr = kmap_atomic(dev->page);
				payload->checksum[0] = cpu_to_le32(
					crc32c_le(log->uuid_checksum, addr,
						  PAGE_SIZE));
				kunmap_atomic(addr);
				sync_page_io(log->rdev, write_pos, PAGE_SIZE,
					     dev->page, REQ_OP_WRITE, 0, false);
				write_pos = r5l_ring_add(log, write_pos,
							 BLOCK_SECTORS);
				offset += sizeof(__le32) +
					sizeof(struct r5l_payload_data_parity);
			}
		}
		mb->meta_size = cpu_to_le32(offset);
		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
						     mb, PAGE_SIZE));
		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
			     REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
		sh->log_start = ctx->pos;
		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
		atomic_inc(&log->stripe_in_journal_count);
		ctx->pos = write_pos;
		ctx->seq += 1;
		next_checkpoint = sh->log_start;
	}
	log->next_checkpoint = next_checkpoint;
	__free_page(page);
	return 0;
}

static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
						 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh, *next;

	if (ctx->data_only_stripes == 0)
		return;

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;

	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
		r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}

	md_wakeup_thread(conf->mddev->thread);
	/* reuse conf->wait_for_quiescent in recovery */
	wait_event(conf->wait_for_quiescent,
		   atomic_read(&conf->active_stripes) == 0);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5l_recovery_ctx *ctx;
	int ret;
	sector_t pos;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->pos = log->last_checkpoint;
	ctx->seq = log->last_cp_seq;
	INIT_LIST_HEAD(&ctx->cached_list);
	ctx->meta_page = alloc_page(GFP_KERNEL);

	if (!ctx->meta_page) {
		ret = -ENOMEM;
		goto meta_page;
	}

	if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
		ret = -ENOMEM;
		goto ra_pool;
	}

	ret = r5c_recovery_flush_log(log, ctx);

	if (ret)
		goto error;

	pos = ctx->pos;
	ctx->seq += 10000;

	if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
		pr_debug("md/raid:%s: starting from clean shutdown\n",
			 mdname(mddev));
	else
		pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
			 mdname(mddev), ctx->data_only_stripes,
			 ctx->data_parity_stripes);

	if (ctx->data_only_stripes == 0) {
		log->next_checkpoint = ctx->pos;
		r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
		ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	} else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
		pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
		       mdname(mddev));
		ret = -EIO;
		goto error;
	}

	log->log_start = ctx->pos;
	log->seq = ctx->seq;
	log->last_checkpoint = pos;
	r5l_write_super(log, pos);

	r5c_recovery_flush_data_only_stripes(log, ctx);
	ret = 0;
error:
	r5l_recovery_free_ra_pool(log, ctx);
ra_pool:
	__free_page(ctx->meta_page);
meta_page:
	kfree(ctx);
	return ret;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}

static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
{
	struct r5conf *conf;
	int ret;

	ret = mddev_lock(mddev);
	if (ret)
		return ret;

	conf = mddev->private;
	if (!conf || !conf->log) {
		mddev_unlock(mddev);
		return 0;
	}

	switch (conf->log->r5c_journal_mode) {
	case R5C_JOURNAL_MODE_WRITE_THROUGH:
		ret = snprintf(
			page, PAGE_SIZE, "[%s] %s\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	case R5C_JOURNAL_MODE_WRITE_BACK:
		ret = snprintf(
			page, PAGE_SIZE, "%s [%s]\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	default:
		ret = 0;
	}
	mddev_unlock(mddev);
	return ret;
}

/*
 * Set journal cache mode on @mddev (external API initially needed by dm-raid).
 *
 * @mode as defined in 'enum r5c_journal_mode'.
 */
int r5c_journal_mode_set(struct mddev *mddev, int mode)
{
	struct r5conf *conf;

	if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
	    mode > R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	conf = mddev->private;
	if (!conf || !conf->log)
		return -ENODEV;

	if (raid5_calc_degraded(conf) > 0 &&
	    mode == R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	mddev_suspend(mddev);
	conf->log->r5c_journal_mode = mode;
	mddev_resume(mddev);

	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
		 mdname(mddev), mode, r5c_journal_mode_str[mode]);
	return 0;
}
EXPORT_SYMBOL(r5c_journal_mode_set);

static ssize_t r5c_journal_mode_store(struct mddev *mddev,
				      const char *page, size_t length)
{
	int mode = ARRAY_SIZE(r5c_journal_mode_str);
	size_t len = length;
	int ret;

	if (len < 2)
		return -EINVAL;

	if (page[len - 1] == '\n')
		len--;

	while (mode--)
		if (strlen(r5c_journal_mode_str[mode]) == len &&
		    !strncmp(page, r5c_journal_mode_str[mode], len))
			break;

	ret = mddev_lock(mddev);
	if (ret)
		return ret;
	ret = r5c_journal_mode_set(mddev, mode);
	mddev_unlock(mddev);
	return ret ?: length;
}

struct md_sysfs_entry
r5c_journal_mode = __ATTR(journal_mode, 0644,
			  r5c_journal_mode_show, r5c_journal_mode_store);

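/*
 * Usage sketch (illustrative, not part of the driver; the array name "md0"
 * is an assumption): the attribute defined above is exposed as
 * /sys/block/md0/md/journal_mode and can be read or switched from userspace:
 *
 *	$ cat /sys/block/md0/md/journal_mode
 *	[write-through] write-back
 *	$ echo write-back > /sys/block/md0/md/journal_mode
 *
 * The store path strips a trailing newline, matches the token against
 * r5c_journal_mode_str[] and funnels into r5c_journal_mode_set(), so only
 * "write-through" and "write-back" are accepted.
 */
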
/*
 * Try to handle a write operation in the caching phase. This function should
 * only be called in write-back mode.
 *
 * If all outstanding writes can be handled in the caching phase, returns 0.
 * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
 * and returns -EAGAIN.
 */
int r5c_try_caching_write(struct r5conf *conf,
			  struct stripe_head *sh,
			  struct stripe_head_state *s,
			  int disks)
{
	struct r5l_log *log = conf->log;
	int i;
	struct r5dev *dev;
	int to_cache = 0;
	void **pslot;
	sector_t tree_index;
	int ret;
	uintptr_t refcount;

	BUG_ON(!r5c_is_writeback(log));

	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		/*
		 * There are two different scenarios here:
		 *  1. The stripe has some data cached, and it is sent to
		 *     the write-out phase for reclaim
		 *  2. The stripe is clean, and this is the first write
		 *
		 * For 1, return -EAGAIN, so we continue with
		 * handle_stripe_dirtying().
		 *
		 * For 2, set STRIPE_R5C_CACHING and continue with the caching
		 * write.
		 */

		/* case 1: anything injournal or anything in written */
		if (s->injournal > 0 || s->written > 0)
			return -EAGAIN;
		/* case 2 */
		set_bit(STRIPE_R5C_CACHING, &sh->state);
	}

	/*
	 * When run in degraded mode, the array is set to write-through mode.
	 * This check helps drain pending writes safely in the transition to
	 * write-through mode.
	 *
	 * When a stripe is syncing, the write is also handled in write
	 * through mode.
	 */
	if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
		r5c_make_stripe_write_out(sh);
		return -EAGAIN;
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		/* if non-overwrite, use the writing-out phase */
		if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
		    !test_bit(R5_InJournal, &dev->flags)) {
			r5c_make_stripe_write_out(sh);
			return -EAGAIN;
		}
	}

	/* if the stripe is not counted in big_stripe_tree, add it now */
	if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
	    !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		if (pslot) {
			refcount = (uintptr_t)radix_tree_deref_slot_protected(
				pslot, &log->tree_lock) >>
				R5C_RADIX_COUNT_SHIFT;
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
		} else {
			/*
			 * this radix_tree_insert can fail safely, so no
			 * need to call radix_tree_preload()
			 */
			ret = radix_tree_insert(
				&log->big_stripe_tree, tree_index,
				(void *)(1 << R5C_RADIX_COUNT_SHIFT));
			if (ret) {
				spin_unlock(&log->tree_lock);
				r5c_make_stripe_write_out(sh);
				return -EAGAIN;
			}
		}
		spin_unlock(&log->tree_lock);

		/*
		 * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
		 * counted in the radix tree
		 */
		set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
		atomic_inc(&conf->r5c_cached_partial_stripes);
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->towrite) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_Wantdrain, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			to_cache++;
		}
	}

	if (to_cache) {
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		/*
		 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
		 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
		 * r5c_handle_data_cached()
		 */
		set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	}

	return 0;
}

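/*
 * Caller note (an assumption drawn from the return contract above): the
 * expected caller is handle_stripe_dirtying() in raid5.c, which treats 0 as
 * "the writes were absorbed by the caching phase" and -EAGAIN as "fall back
 * to the normal write-out path (read-modify-write or reconstruct-write)".
 */
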
/*
 * free extra pages (orig_page) we allocated for prexor
 */
void r5c_release_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	bool using_disk_info_extra_page;

	using_disk_info_extra_page =
		sh->dev[0].orig_page == conf->disks[0].extra_page;

	for (i = sh->disks; i--; )
		if (sh->dev[i].page != sh->dev[i].orig_page) {
			struct page *p = sh->dev[i].orig_page;

			sh->dev[i].orig_page = sh->dev[i].page;
			clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);

			if (!using_disk_info_extra_page)
				put_page(p);
		}

	if (using_disk_info_extra_page) {
		clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
		md_wakeup_thread(conf->mddev->thread);
	}
}

void r5c_use_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	struct r5dev *dev;

	for (i = sh->disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->orig_page != dev->page)
			put_page(dev->orig_page);
		dev->orig_page = conf->disks[i].extra_page;
	}
}

/*
 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
 * stripe is committed to RAID disks.
 */
void r5c_finish_stripe_write_out(struct r5conf *conf,
				 struct stripe_head *sh,
				 struct stripe_head_state *s)
{
	struct r5l_log *log = conf->log;
	int i;
	int do_wakeup = 0;
	sector_t tree_index;
	void **pslot;
	uintptr_t refcount;

	if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
		return;

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;

	for (i = sh->disks; i--; ) {
		clear_bit(R5_InJournal, &sh->dev[i].flags);
		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			do_wakeup = 1;
	}

	/*
	 * analyse_stripe() runs before r5c_finish_stripe_write_out().
	 * We updated R5_InJournal, so we also update s->injournal.
	 */
	s->injournal = 0;

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);

	if (do_wakeup)
		wake_up(&conf->wait_for_overlap);

	spin_lock_irq(&log->stripe_in_journal_lock);
	list_del_init(&sh->r5c);
	spin_unlock_irq(&log->stripe_in_journal_lock);
	sh->log_start = MaxSector;

	atomic_dec(&log->stripe_in_journal_count);
	r5c_update_log_state(log);

	/* stop counting this stripe in big_stripe_tree */
	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
	    test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		BUG_ON(pslot == NULL);
		refcount = (uintptr_t)radix_tree_deref_slot_protected(
			pslot, &log->tree_lock) >>
			R5C_RADIX_COUNT_SHIFT;
		if (refcount == 1)
			radix_tree_delete(&log->big_stripe_tree, tree_index);
		else
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
		spin_unlock(&log->tree_lock);
	}

	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_partial_stripes);
		atomic_dec(&conf->r5c_cached_partial_stripes);
	}

	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_full_stripes);
		atomic_dec(&conf->r5c_cached_full_stripes);
	}

	r5l_append_flush_payload(log, sh->sector);

	/* stripe is flushed to raid disks, we can do resync now */
	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
		set_bit(STRIPE_HANDLE, &sh->state);
}

int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int pages = 0;
	int reserve;
	int i;
	int ret = 0;

	BUG_ON(!log);

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
		pages++;
	}
	WARN_ON(pages == 0);

	/*
	 * The stripe must enter the state machine again to call endio, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + pages) << (PAGE_SHIFT - 9);

	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
	    sh->log_start == MaxSector)
		r5l_add_no_space_stripe(log, sh);
	else if (!r5l_has_free_space(log, reserve)) {
		if (sh->log_start == log->last_checkpoint)
			BUG();
		else
			r5l_add_no_space_stripe(log, sh);
	} else {
		ret = r5l_log_stripe(log, sh, pages, 0);
		if (ret) {
			spin_lock_irq(&log->io_list_lock);
			list_add_tail(&sh->log_list, &log->no_mem_stripes);
			spin_unlock_irq(&log->io_list_lock);
		}
	}

	mutex_unlock(&log->io_mutex);
	return 0;
}

/* check whether this big stripe is in write back cache. */
bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
{
	struct r5l_log *log = conf->log;
	sector_t tree_index;
	void *slot;

	if (!log)
		return false;

	WARN_ON_ONCE(!rcu_read_lock_held());
	tree_index = r5c_tree_index(conf, sect);
	slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
	return slot != NULL;
}

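/*
 * Usage sketch (illustrative; the surrounding read path is an assumption):
 * the WARN_ON_ONCE above documents that callers are expected to hold the RCU
 * read lock around the lookup, e.g.
 *
 *	rcu_read_lock();
 *	if (r5c_big_stripe_cached(conf, sect))
 *		use_stripe_cache = true;
 *	rcu_read_unlock();
 */
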
static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret = 0;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
		/*
		 * Make sure super points to the correct address. The log might
		 * have data very soon. If super doesn't have the correct log
		 * tail address, recovery can't find the log
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	if (create_super) {
		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
		log->seq = log->last_cp_seq + 1;
		log->next_checkpoint = cp;
	} else
		ret = r5l_recovery_log(log);

	r5c_update_log_state(log);
	return ret;
ioerr:
	__free_page(page);
	return ret;
}

void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;

	if ((raid5_calc_degraded(conf) > 0 ||
	     test_bit(Journal, &rdev->flags)) &&
	    conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
		schedule_work(&log->disable_writeback_work);
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct request_queue *q = bdev_get_queue(rdev->bdev);
	struct r5l_log *log;
	char b[BDEVNAME_SIZE];

	pr_debug("md/raid:%s: using device %s as journal\n",
		 mdname(conf->mddev), bdevname(rdev->bdev, b));

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	/*
	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
	 * raid_disks r5l_payload_data_parity.
	 *
	 * Write journal and cache does not work for very big array
	 * (raid_disks > 203)
	 */
	if (sizeof(struct r5l_meta_block) +
	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
	     conf->raid_disks) > PAGE_SIZE) {
		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
		       mdname(conf->mddev), conf->raid_disks);
		return -EINVAL;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio, NULL, 0);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
	if (!log->io_pool)
		goto io_pool;

	log->bs = bioset_create(R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (!log->bs)
		goto io_bs;

	log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
	if (!log->meta_pool)
		goto out_mempool;

	spin_lock_init(&log->tree_lock);
	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;

	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_mem_stripes);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	INIT_LIST_HEAD(&log->stripe_in_journal_list);
	spin_lock_init(&log->stripe_in_journal_lock);
	atomic_set(&log->stripe_in_journal_count, 0);

	rcu_assign_pointer(conf->log, log);

	if (r5l_load_log(log))
		goto error;

	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

error:
	rcu_assign_pointer(conf->log, NULL);
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	mempool_destroy(log->meta_pool);
out_mempool:
	bioset_free(log->bs);
io_bs:
	mempool_destroy(log->io_pool);
io_pool:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

*conf
)
3176 struct r5l_log
*log
= conf
->log
;
3181 /* Ensure disable_writeback_work wakes up and exits */
3182 wake_up(&conf
->mddev
->sb_wait
);
3183 flush_work(&log
->disable_writeback_work
);
3184 md_unregister_thread(&log
->reclaim_thread
);
3185 mempool_destroy(log
->meta_pool
);
3186 bioset_free(log
->bs
);
3187 mempool_destroy(log
->io_pool
);
3188 kmem_cache_destroy(log
->io_kc
);