// SPDX-License-Identifier: GPL-2.0-only
/*
 * Partial Parity Log for closing the RAID5 write hole
 * Copyright (c) 2017, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "raid5-log.h"
/*
 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
 * partial parity data. The header contains an array of entries
 * (struct ppl_header_entry) which describe the logged write requests.
 * Partial parity for the entries comes after the header, written in the same
 * sequence as the entries:
 *
 * Header
 *   entry0
 *   ...
 *   entryN
 * PP data
 *   PP for entry0
 *   ...
 *   PP for entryN
 *
 * An entry describes one or more consecutive stripe_heads, up to a full
 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
 * number of stripe_heads in the entry and n is the number of modified data
 * disks. Every stripe_head in the entry must write to the same data disks.
 * An example of a valid case described by a single entry (writes to the first
 * stripe of a 4 disk array, 16k chunk size):
 *
 * sh->sector   dd0   dd1   dd2    ppl
 *            +-----+-----+-----+
 *           0 | --- | --- | --- | +----+
 *           8 | -W- | -W- | --- | | pp |   data_sector = 8
 *          16 | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
 *          24 | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
 *            +-----+-----+-----+ +----+
 *
 * data_sector is the first raid sector of the modified data, data_size is the
 * total size of modified data and pp_size is the size of partial parity for
 * this entry. Entries for full stripe writes contain no partial parity
 * (pp_size = 0), they only mark the stripes for which parity should be
 * recalculated after an unclean shutdown. Every entry holds a checksum of its
 * partial parity, and the header also has a checksum of the header itself.
 *
 * A write request is always logged to the PPL instance stored on the parity
 * disk of the corresponding stripe. For each member disk there is one ppl_log
 * used to handle logging for this disk, independently from others. They are
 * grouped in the child_logs array in struct ppl_conf, which is assigned to
 * r5conf->log_private.
 *
 * ppl_io_unit represents a full PPL write, header_page contains the ppl_header.
 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
 * can be appended to the last entry if it meets the conditions for a valid
 * entry described above, otherwise a new entry is added. Checksums of entries
 * are calculated incrementally as stripes containing partial parity are being
 * added. ppl_submit_iounit() calculates the checksum of the header and submits
 * a bio containing the header page and partial parity pages (sh->ppl_page) for
 * all stripes of the io_unit. When the PPL write completes, the stripes
 * associated with the io_unit are released and raid5d starts writing their data
 * and parity. When all stripes are written, the io_unit is freed and the next
 * one can be processed.
 *
 * An io_unit is used to gather stripes until it is submitted or becomes full
 * (if the maximum number of entries or size of PPL is reached). Another io_unit
 * can't be submitted until the previous has completed (PPL and stripe
 * data+parity is written). The log->io_list tracks all io_units of a log
 * (for a single member disk). New io_units are added to the end of the list
 * and the first io_unit is submitted, if it is not submitted already.
 * The current io_unit accepting new stripes is always at the end of the list.
 *
 * If write-back cache is enabled for any of the disks in the array, its data
 * must be flushed before the next io_unit is submitted.
 */
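/*
 * For illustration, the sizes in the example entry above work out as follows
 * (a sketch assuming 4k pages, as in the diagram; the names are only those
 * used in the description, not new on-disk fields):
 *
 *	data_sector = 8;		first modified raid sector
 *	data_size = 3 * 2 * 4096;	3 stripe_heads times 2 modified data disks
 *	pp_size = 3 * 4096;		one partial parity page per stripe_head
 */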
#define PPL_SPACE_SIZE (128 * 1024)

struct ppl_conf {
	struct mddev *mddev;

	/* array of child logs, one for each raid disk */
	struct ppl_log *child_logs;
	int count;

	int block_size;		/* the logical block size used for data_sector
				 * in ppl_header_entry */
	u32 signature;		/* raid array identifier */
	atomic64_t seq;		/* current log write sequence number */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	struct bio_set flush_bs;

	/* used only for recovery */
	int recovered_entries;
	int mismatch_count;

	/* stripes to retry if failed to allocate io_unit */
	struct list_head no_mem_stripes;
	spinlock_t no_mem_stripes_lock;

	unsigned short write_hint;
};

struct ppl_log {
	struct ppl_conf *ppl_conf;	/* shared between all log instances */

	struct md_rdev *rdev;		/* array member disk associated with
					 * this log instance */
	struct mutex io_mutex;
	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
					 * always at the end of io_list */
	spinlock_t io_list_lock;
	struct list_head io_list;	/* all io_units of this log */

	sector_t next_io_sector;
	unsigned int entry_space;
	bool use_multippl;
	bool wb_cache_on;
	unsigned long disk_flush_bitmap;
};
#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
	struct ppl_log *log;

	struct page *header_page;	/* for ppl_header */

	unsigned int entries_count;	/* number of entries in ppl_header */
	unsigned int pp_size;		/* current total size of partial parity */

	u64 seq;			/* sequence number of this log write */
	struct list_head log_sibling;	/* log->io_list */

	struct list_head stripe_list;	/* stripes added to the io_unit */
	atomic_t pending_stripes;	/* how many stripes not written to raid */
	atomic_t pending_flushes;	/* how many disk flushes are in progress */

	bool submitted;			/* true if write to log started */

	/* inline bio and its biovec for submitting the iounit */
	struct bio bio;
	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};
struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
		       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	/*
	 * Partial parity is the XOR of stripe data chunks that are not changed
	 * during the write request. Depending on available data
	 * (read-modify-write vs. reconstruct-write case) we calculate it
	 * differently.
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		/*
		 * rmw: xor old data and parity from updated disks
		 * This is calculated earlier by ops_run_prexor5() so just copy
		 * the parity dev page.
		 */
		srcs[count++] = sh->dev[pd_idx].page;
	} else if (sh->reconstruct_state == reconstruct_state_drain_run) {
		/* rcw: xor data from all not updated disks */
		for (i = disks; i--;) {
			struct r5dev *dev = &sh->dev[i];
			if (test_bit(R5_UPTODATE, &dev->flags))
				srcs[count++] = dev->page;
		}
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
			  NULL, sh, (void *) (srcs + sh->disks + 2));

	if (count == 1)
		tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
				  &submit);
	else
		tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
			       &submit);

	return tx;
}
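/*
 * For illustration only: the partial parity built above is a plain byte-wise
 * XOR of the source pages into sh->ppl_page. A synchronous equivalent could
 * look like this hypothetical helper (a sketch, not used by the driver, which
 * goes through the async_tx API instead):
 *
 *	static void pp_xor_pages_sync(struct page *dst, struct page **srcs,
 *				      int count)
 *	{
 *		u8 *d = page_address(dst);
 *		int i, j;
 *
 *		memset(d, 0, PAGE_SIZE);
 *		for (i = 0; i < count; i++) {
 *			u8 *s = page_address(srcs[i]);
 *
 *			for (j = 0; j < PAGE_SIZE; j++)
 *				d[j] ^= s[j];
 *		}
 *	}
 */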
static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io;

	io = kmem_cache_alloc(kc, gfp_mask);
	if (!io)
		return NULL;

	io->header_page = alloc_page(gfp_mask);
	if (!io->header_page) {
		kmem_cache_free(kc, io);
		return NULL;
	}

	return io;
}
static void ppl_io_pool_free(void *element, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io = element;

	__free_page(io->header_page);
	kmem_cache_free(kc, io);
}
static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
					  struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_io_unit *io;
	struct ppl_header *pplhdr;
	struct page *header_page;

	io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
	if (!io)
		return NULL;

	header_page = io->header_page;
	memset(io, 0, sizeof(*io));
	io->header_page = header_page;

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	atomic_set(&io->pending_stripes, 0);
	atomic_set(&io->pending_flushes, 0);
	bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
		 REQ_OP_WRITE | REQ_FUA);

	pplhdr = page_address(io->header_page);
	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
	pplhdr->signature = cpu_to_le32(ppl_conf->signature);

	io->seq = atomic64_inc_return(&ppl_conf->seq);
	pplhdr->generation = cpu_to_le64(io->seq);

	return io;
}
static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
{
	struct ppl_io_unit *io = log->current_io;
	struct ppl_header_entry *e = NULL;
	struct ppl_header *pplhdr;
	int i;
	sector_t data_sector = 0;
	int data_disks = 0;
	struct r5conf *conf = sh->raid_conf;

	pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);

	/* check if current io_unit is full */
	if (io && (io->pp_size == log->entry_space ||
		   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
		pr_debug("%s: add io_unit blocked by seq: %llu\n",
			 __func__, io->seq);
		io = NULL;
	}

	/* add a new unit if there is none or the current is full */
	if (!io) {
		io = ppl_new_iounit(log, sh);
		if (!io)
			return -ENOMEM;
		spin_lock_irq(&log->io_list_lock);
		list_add_tail(&io->log_sibling, &log->io_list);
		spin_unlock_irq(&log->io_list_lock);

		log->current_io = io;
	}

	for (i = 0; i < sh->disks; i++) {
		struct r5dev *dev = &sh->dev[i];

		if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
			if (!data_disks || dev->sector < data_sector)
				data_sector = dev->sector;
			data_disks++;
		}
	}

	pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
		 io->seq, (unsigned long long)data_sector, data_disks);

	pplhdr = page_address(io->header_page);

	if (io->entries_count > 0) {
		struct ppl_header_entry *last =
				&pplhdr->entries[io->entries_count - 1];
		struct stripe_head *sh_last = list_last_entry(
				&io->stripe_list, struct stripe_head, log_list);
		u64 data_sector_last = le64_to_cpu(last->data_sector);
		u32 data_size_last = le32_to_cpu(last->data_size);

		/*
		 * Check if we can append the stripe to the last entry. It must
		 * be just after the last logged stripe and write to the same
		 * disks. Use bit shift and logarithm to avoid 64-bit division.
		 */
		if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&
		    (data_sector >> ilog2(conf->chunk_sectors) ==
		     data_sector_last >> ilog2(conf->chunk_sectors)) &&
		    ((data_sector - data_sector_last) * data_disks ==
		     data_size_last >> 9))
			e = last;
	}

	if (!e) {
		e = &pplhdr->entries[io->entries_count++];
		e->data_sector = cpu_to_le64(data_sector);
		e->parity_disk = cpu_to_le32(sh->pd_idx);
		e->checksum = cpu_to_le32(~0);
	}

	le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);

	/* don't write any PP if full stripe write */
	if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
		le32_add_cpu(&e->pp_size, PAGE_SIZE);
		io->pp_size += PAGE_SIZE;
		e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
						    page_address(sh->ppl_page),
						    PAGE_SIZE));
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripes);
	sh->ppl_io = io;

	return 0;
}
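/*
 * For illustration: chaining crc32c_le() as done above is equivalent to one
 * pass over the concatenated partial parity pages of an entry (a sketch, not
 * driver code):
 *
 *	crc = crc32c_le(crc32c_le(~0, page_a, PAGE_SIZE), page_b, PAGE_SIZE);
 *
 * equals the crc32c of page_a followed by page_b, so each appended stripe only
 * folds in its own sh->ppl_page; ppl_submit_iounit() later stores the inverted
 * value in the on-disk entry.
 */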
int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_io_unit *io = sh->ppl_io;
	struct ppl_log *log;

	if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
	    !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	log = &ppl_conf->child_logs[sh->pd_idx];

	mutex_lock(&log->io_mutex);

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		mutex_unlock(&log->io_mutex);
		return -EAGAIN;
	}

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	if (ppl_log_stripe(log, sh)) {
		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
	}

	mutex_unlock(&log->io_mutex);

	return 0;
}
static void ppl_log_endio(struct bio *bio)
{
	struct ppl_io_unit *io = bio->bi_private;
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct stripe_head *sh, *next;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	if (bio->bi_status)
		md_error(ppl_conf->mddev, log->rdev);

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}
static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
	pr_debug("%s: seq: %llu size: %u sector: %llu dev: %pg\n",
		 __func__, io->seq, bio->bi_iter.bi_size,
		 (unsigned long long)bio->bi_iter.bi_sector,
		 bio->bi_bdev);

	submit_bio(bio);
}
static void ppl_submit_iounit(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_header *pplhdr = page_address(io->header_page);
	struct bio *bio = &io->bio;
	struct stripe_head *sh;
	int i;

	bio->bi_private = io;

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		ppl_log_endio(bio);
		return;
	}

	for (i = 0; i < io->entries_count; i++) {
		struct ppl_header_entry *e = &pplhdr->entries[i];

		pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
			 __func__, io->seq, i, le64_to_cpu(e->data_sector),
			 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));

		e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
					     ilog2(ppl_conf->block_size >> 9));
		e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
	}

	pplhdr->entries_count = cpu_to_le32(io->entries_count);
	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));

	/* Rewind the buffer if current PPL is larger than remaining space */
	if (log->use_multippl &&
	    log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
	    (PPL_HEADER_SIZE + io->pp_size) >> 9)
		log->next_io_sector = log->rdev->ppl.sector;

	bio->bi_end_io = ppl_log_endio;
	bio->bi_iter.bi_sector = log->next_io_sector;
	__bio_add_page(bio, io->header_page, PAGE_SIZE, 0);

	pr_debug("%s: log->current_io_sector: %llu\n", __func__,
		 (unsigned long long)log->next_io_sector);

	if (log->use_multippl)
		log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

	WARN_ON(log->disk_flush_bitmap != 0);

	list_for_each_entry(sh, &io->stripe_list, log_list) {
		for (i = 0; i < sh->disks; i++) {
			struct r5dev *dev = &sh->dev[i];

			if ((ppl_conf->child_logs[i].wb_cache_on) &&
			    (test_bit(R5_Wantwrite, &dev->flags))) {
				set_bit(i, &log->disk_flush_bitmap);
			}
		}

		/* entries for full stripe writes have no partial parity */
		if (test_bit(STRIPE_FULL_WRITE, &sh->state))
			continue;

		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
			struct bio *prev = bio;

			bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
					       prev->bi_opf, GFP_NOIO,
					       &ppl_conf->bs);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			__bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);

			bio_chain(bio, prev);
			ppl_submit_iounit_bio(io, prev);
		}
	}

	ppl_submit_iounit_bio(io, bio);
}
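/*
 * A worked example of the rewind above, with made-up numbers: ppl.sector =
 * 1000, ppl.size = 2048 and next_io_sector = 3040 leave 8 sectors of space;
 * an io_unit with a 4k header and 8k of partial parity needs
 * (4096 + 8192) >> 9 = 24 sectors, so next_io_sector is reset to ppl.sector
 * and the new PPL overwrites the oldest one.
 */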
static void ppl_submit_current_io(struct ppl_log *log)
{
	struct ppl_io_unit *io;

	spin_lock_irq(&log->io_list_lock);

	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
				      log_sibling);
	if (io && io->submitted)
		io = NULL;

	spin_unlock_irq(&log->io_list_lock);

	if (io) {
		io->submitted = true;

		if (io == log->current_io)
			log->current_io = NULL;

		ppl_submit_iounit(io);
	}
}
void ppl_write_stripe_run(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_log *log;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		log = &ppl_conf->child_logs[i];

		mutex_lock(&log->io_mutex);
		ppl_submit_current_io(log);
		mutex_unlock(&log->io_mutex);
	}
}
static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	unsigned long flags;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	local_irq_save(flags);

	spin_lock(&log->io_list_lock);
	list_del(&io->log_sibling);
	spin_unlock(&log->io_list_lock);

	mempool_free(io, &ppl_conf->io_pool);

	spin_lock(&ppl_conf->no_mem_stripes_lock);
	if (!list_empty(&ppl_conf->no_mem_stripes)) {
		struct stripe_head *sh;

		sh = list_first_entry(&ppl_conf->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&ppl_conf->no_mem_stripes_lock);

	local_irq_restore(flags);

	wake_up(&conf->wait_for_quiescent);
}
static void ppl_flush_endio(struct bio *bio)
{
	struct ppl_io_unit *io = bio->bi_private;
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;

	pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev);

	if (bio->bi_status) {
		struct md_rdev *rdev;

		rcu_read_lock();
		rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
		if (rdev)
			md_error(rdev->mddev, rdev);
		rcu_read_unlock();
	}

	if (atomic_dec_and_test(&io->pending_flushes)) {
		ppl_io_unit_finished(io);
		md_wakeup_thread(conf->mddev->thread);
	}
}
static void ppl_do_flush(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	int raid_disks = conf->raid_disks;
	int flushed_disks = 0;
	int i;

	atomic_set(&io->pending_flushes, raid_disks);

	for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
		struct md_rdev *rdev;
		struct block_device *bdev = NULL;

		rdev = conf->disks[i].rdev;
		if (rdev && !test_bit(Faulty, &rdev->flags))
			bdev = rdev->bdev;

		if (bdev) {
			struct bio *bio;

			bio = bio_alloc_bioset(bdev, 0,
					       REQ_OP_WRITE | REQ_PREFLUSH,
					       GFP_NOIO, &ppl_conf->flush_bs);
			bio->bi_private = io;
			bio->bi_end_io = ppl_flush_endio;

			pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev);

			submit_bio(bio);
			flushed_disks++;
		}
	}

	log->disk_flush_bitmap = 0;

	for (i = flushed_disks; i < raid_disks; i++) {
		if (atomic_dec_and_test(&io->pending_flushes))
			ppl_io_unit_finished(io);
	}
}
static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
					    struct ppl_log *log)
{
	struct ppl_io_unit *io;

	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
				      log_sibling);

	return !io || !io->submitted;
}
void ppl_quiesce(struct r5conf *conf, int quiesce)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	int i;

	if (quiesce) {
		for (i = 0; i < ppl_conf->count; i++) {
			struct ppl_log *log = &ppl_conf->child_logs[i];

			spin_lock_irq(&log->io_list_lock);
			wait_event_lock_irq(conf->wait_for_quiescent,
					    ppl_no_io_unit_submitted(conf, log),
					    log->io_list_lock);
			spin_unlock_irq(&log->io_list_lock);
		}
	}
}
int ppl_handle_flush_request(struct bio *bio)
{
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
	bio->bi_opf &= ~REQ_PREFLUSH;
	return -EAGAIN;
}
void ppl_stripe_write_finished(struct stripe_head *sh)
{
	struct ppl_io_unit *io;

	io = sh->ppl_io;
	sh->ppl_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripes)) {
		if (io->log->disk_flush_bitmap)
			ppl_do_flush(io);
		else
			ppl_io_unit_finished(io);
	}
}
static void ppl_xor(int size, struct page *page1, struct page *page2)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	struct page *xor_srcs[] = { page1, page2 };

	init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
			  NULL, NULL, NULL, NULL);
	tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);

	async_tx_quiesce(&tx);
}
/*
 * PPL recovery strategy: xor partial parity and data from all modified data
 * disks within a stripe and write the result as the new stripe parity. If all
 * stripe data disks are modified (full stripe write), no partial parity is
 * available, so just xor the data disks.
 *
 * Recovery of a PPL entry shall occur only if all modified data disks are
 * available and read from all of them succeeds.
 *
 * A PPL entry applies to a stripe, partial parity size for an entry is at most
 * the size of the chunk. Examples of possible cases for a single entry:
 *
 * case 0: single data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size
 *
 * case 1: more than one data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size / modified_data_disks
 *
 * case 2: write to all data disks (also full stripe write):
 *   data0    data1    data2                parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ |           | (no change)        |
 * | -data- | -data- | -data- | --------> | xor all data       |
 * | ------ | ------ | ------ | --------> | (no change)        |
 * | ------ | ------ | ------ |           | (no change)        |
 * +--------+--------+--------+           +--------------------+
 *
 * The following cases are possible only in other implementations. The recovery
 * code can handle them, but they are not generated at runtime because they can
 * be reduced to cases 0, 1 and 2:
 *
 * case 3:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
 * | -data- | -data- | -data- | | -- | -> | xor all data       |
 * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 *
 * case 4:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 */
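/*
 * For illustration, recovering one block in case 1 above reduces to (a sketch
 * in pseudo-C, not driver code):
 *
 *	new_parity = pp;	// partial parity read from the parity disk
 *	new_parity ^= data0;	// block read from the first modified disk
 *	new_parity ^= data1;	// block read from the second modified disk
 *	// unchanged disks (data2) are already accounted for in pp
 */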
static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
			     sector_t ppl_sector)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct mddev *mddev = ppl_conf->mddev;
	struct r5conf *conf = mddev->private;
	int block_size = ppl_conf->block_size;
	struct page *page1;
	struct page *page2;
	sector_t r_sector_first;
	sector_t r_sector_last;
	int strip_sectors;
	int data_disks;
	int i;
	int ret = 0;
	unsigned int pp_size = le32_to_cpu(e->pp_size);
	unsigned int data_size = le32_to_cpu(e->data_size);

	page1 = alloc_page(GFP_KERNEL);
	page2 = alloc_page(GFP_KERNEL);

	if (!page1 || !page2) {
		ret = -ENOMEM;
		goto out;
	}

	r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);

	if ((pp_size >> 9) < conf->chunk_sectors) {
		if (pp_size > 0) {
			data_disks = data_size / pp_size;
			strip_sectors = pp_size >> 9;
		} else {
			data_disks = conf->raid_disks - conf->max_degraded;
			strip_sectors = (data_size >> 9) / data_disks;
		}
		r_sector_last = r_sector_first +
				(data_disks - 1) * conf->chunk_sectors +
				strip_sectors;
	} else {
		data_disks = conf->raid_disks - conf->max_degraded;
		strip_sectors = conf->chunk_sectors;
		r_sector_last = r_sector_first + (data_size >> 9);
	}

	pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
		 (unsigned long long)r_sector_first,
		 (unsigned long long)r_sector_last);

	/* if start and end is 4k aligned, use a 4k block */
	if (block_size == 512 &&
	    (r_sector_first & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0 &&
	    (r_sector_last & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0)
		block_size = RAID5_STRIPE_SIZE(conf);

	/* iterate through blocks in strip */
	for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
		bool update_parity = false;
		sector_t parity_sector;
		struct md_rdev *parity_rdev;
		struct stripe_head sh;
		int disk;
		int indent = 0;

		pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
		indent += 2;

		memset(page_address(page1), 0, PAGE_SIZE);

		/* iterate through data member disks */
		for (disk = 0; disk < data_disks; disk++) {
			int dd_idx;
			struct md_rdev *rdev;
			sector_t sector;
			sector_t r_sector = r_sector_first + i +
					    (disk * conf->chunk_sectors);

			pr_debug("%s:%*s data member disk %d start\n",
				 __func__, indent, "", disk);
			indent += 2;

			if (r_sector >= r_sector_last) {
				pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
					 __func__, indent, "",
					 (unsigned long long)r_sector);
				indent -= 2;
				continue;
			}

			update_parity = true;

			/* map raid sector to member disk */
			sector = raid5_compute_sector(conf, r_sector, 0,
						      &dd_idx, NULL);
			pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
				 __func__, indent, "",
				 (unsigned long long)r_sector, dd_idx,
				 (unsigned long long)sector);

			rdev = conf->disks[dd_idx].rdev;
			if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
				      sector >= rdev->recovery_offset)) {
				pr_debug("%s:%*s data member disk %d missing\n",
					 __func__, indent, "", dd_idx);
				update_parity = false;
				break;
			}

			pr_debug("%s:%*s reading data member disk %pg sector %llu\n",
				 __func__, indent, "", rdev->bdev,
				 (unsigned long long)sector);
			if (!sync_page_io(rdev, sector, block_size, page2,
					  REQ_OP_READ, false)) {
				md_error(mddev, rdev);
				pr_debug("%s:%*s read failed!\n", __func__,
					 indent, "");
				ret = -EIO;
				goto out;
			}

			ppl_xor(block_size, page1, page2);

			indent -= 2;
		}

		if (!update_parity)
			continue;

		if (pp_size > 0) {
			pr_debug("%s:%*s reading pp disk sector %llu\n",
				 __func__, indent, "",
				 (unsigned long long)(ppl_sector + i));
			if (!sync_page_io(log->rdev,
					  ppl_sector - log->rdev->data_offset + i,
					  block_size, page2, REQ_OP_READ,
					  false)) {
				pr_debug("%s:%*s read failed!\n", __func__,
					 indent, "");
				md_error(mddev, log->rdev);
				ret = -EIO;
				goto out;
			}

			ppl_xor(block_size, page1, page2);
		}

		/* map raid sector to parity disk */
		parity_sector = raid5_compute_sector(conf, r_sector_first + i,
						     0, &disk, &sh);
		BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
		parity_rdev = conf->disks[sh.pd_idx].rdev;

		BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
		pr_debug("%s:%*s write parity at sector %llu, disk %pg\n",
			 __func__, indent, "",
			 (unsigned long long)parity_sector,
			 parity_rdev->bdev);
		if (!sync_page_io(parity_rdev, parity_sector, block_size,
				  page1, REQ_OP_WRITE, false)) {
			pr_debug("%s:%*s parity write error!\n", __func__,
				 indent, "");
			md_error(mddev, parity_rdev);
			ret = -EIO;
			goto out;
		}
	}
out:
	if (page1)
		__free_page(page1);
	if (page2)
		__free_page(page2);
	return ret;
}
static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
		       sector_t offset)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct md_rdev *rdev = log->rdev;
	struct mddev *mddev = rdev->mddev;
	sector_t ppl_sector = rdev->ppl.sector + offset +
			      (PPL_HEADER_SIZE >> 9);
	struct page *page;
	int i;
	int ret = 0;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* iterate through all PPL entries saved */
	for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
		struct ppl_header_entry *e = &pplhdr->entries[i];
		u32 pp_size = le32_to_cpu(e->pp_size);
		sector_t sector = ppl_sector;
		int ppl_entry_sectors = pp_size >> 9;
		u32 crc, crc_stored;

		pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
			 __func__, rdev->raid_disk, i,
			 (unsigned long long)ppl_sector, pp_size);

		crc = ~0;
		crc_stored = le32_to_cpu(e->checksum);

		/* read partial parity for this entry and calculate its checksum */
		while (pp_size) {
			int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;

			if (!sync_page_io(rdev, sector - rdev->data_offset,
					  s, page, REQ_OP_READ, false)) {
				md_error(mddev, rdev);
				ret = -EIO;
				goto out;
			}

			crc = crc32c_le(crc, page_address(page), s);

			pp_size -= s;
			sector += s >> 9;
		}

		crc = ~crc;

		if (crc != crc_stored) {
			/*
			 * Don't recover this entry if the checksum does not
			 * match, but keep going and try to recover other
			 * entries.
			 */
			pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
				 __func__, crc_stored, crc);
			ppl_conf->mismatch_count++;
		} else {
			ret = ppl_recover_entry(log, e, ppl_sector);
			if (ret)
				goto out;

			ppl_conf->recovered_entries++;
		}

		ppl_sector += ppl_entry_sectors;
	}

	/* flush the disk cache after recovery if necessary */
	ret = blkdev_issue_flush(rdev->bdev);
out:
	__free_page(page);
	return ret;
}
static int ppl_write_empty_header(struct ppl_log *log)
{
	struct page *page;
	struct ppl_header *pplhdr;
	struct md_rdev *rdev = log->rdev;
	int ret = 0;

	pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
		 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);

	page = alloc_page(GFP_NOIO | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	pplhdr = page_address(page);
	/* zero out PPL space to avoid collision with old PPLs */
	blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
			     log->rdev->ppl.size, GFP_NOIO, 0);
	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
	pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));

	if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
			  REQ_FUA, false)) {
		md_error(rdev->mddev, rdev);
		ret = -EIO;
	}

	__free_page(page);
	return ret;
}
static int ppl_load_distributed(struct ppl_log *log)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct md_rdev *rdev = log->rdev;
	struct mddev *mddev = rdev->mddev;
	struct page *page, *page2;
	struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
	u32 crc, crc_stored;
	u32 signature;
	int ret = 0, i;
	sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;

	pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
	/* read PPL headers, find the recent one */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	page2 = alloc_page(GFP_KERNEL);
	if (!page2) {
		__free_page(page);
		return -ENOMEM;
	}

	/* searching ppl area for latest ppl */
	while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
		if (!sync_page_io(rdev,
				  rdev->ppl.sector - rdev->data_offset +
				  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
				  false)) {
			md_error(mddev, rdev);
			ret = -EIO;
			/* if not able to read - don't recover any PPL */
			pplhdr = NULL;
			break;
		}
		pplhdr = page_address(page);

		/* check header validity */
		crc_stored = le32_to_cpu(pplhdr->checksum);
		pplhdr->checksum = 0;
		crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);

		if (crc_stored != crc) {
			pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
				 __func__, crc_stored, crc,
				 (unsigned long long)pplhdr_offset);
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		signature = le32_to_cpu(pplhdr->signature);

		if (mddev->external) {
			/*
			 * For external metadata the header signature is set and
			 * validated in userspace.
			 */
			ppl_conf->signature = signature;
		} else if (ppl_conf->signature != signature) {
			pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
				 __func__, signature, ppl_conf->signature,
				 (unsigned long long)pplhdr_offset);
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
		    le64_to_cpu(pplhdr->generation)) {
			/* previous was newest */
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		prev_pplhdr_offset = pplhdr_offset;
		prev_pplhdr = pplhdr;

		swap(page, page2);

		/* calculate next potential ppl offset */
		for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
			pplhdr_offset +=
			    le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
		pplhdr_offset += PPL_HEADER_SIZE >> 9;
	}

	/* no valid ppl found */
	if (!pplhdr)
		ppl_conf->mismatch_count++;
	else
		pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
			 __func__, (unsigned long long)pplhdr_offset,
			 le64_to_cpu(pplhdr->generation));

	/* attempt to recover from log if we are starting a dirty array */
	if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
		ret = ppl_recover(log, pplhdr, pplhdr_offset);

	/* write empty header if we are starting the array */
	if (!ret && !mddev->pers)
		ret = ppl_write_empty_header(log);

	__free_page(page);
	__free_page(page2);

	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
		 __func__, ret, ppl_conf->mismatch_count,
		 ppl_conf->recovered_entries);

	return ret;
}
static int ppl_load(struct ppl_conf *ppl_conf)
{
	int ret = 0;
	u32 signature = 0;
	bool signature_set = false;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];

		/* skip missing drive */
		if (!log->rdev)
			continue;

		ret = ppl_load_distributed(log);
		if (ret)
			break;

		/*
		 * For external metadata we can't check if the signature is
		 * correct on a single drive, but we can check if it is the same
		 * for all drives.
		 */
		if (ppl_conf->mddev->external) {
			if (!signature_set) {
				signature = ppl_conf->signature;
				signature_set = true;
			} else if (signature != ppl_conf->signature) {
				pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
					mdname(ppl_conf->mddev));
				ret = -EINVAL;
				break;
			}
		}
	}

	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
		 __func__, ret, ppl_conf->mismatch_count,
		 ppl_conf->recovered_entries);

	return ret;
}
static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
	clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
	clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

	kfree(ppl_conf->child_logs);

	bioset_exit(&ppl_conf->bs);
	bioset_exit(&ppl_conf->flush_bs);
	mempool_exit(&ppl_conf->io_pool);
	kmem_cache_destroy(ppl_conf->io_kc);

	kfree(ppl_conf);
}
void ppl_exit_log(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf = conf->log_private;

	if (ppl_conf) {
		__ppl_exit_log(ppl_conf);
		conf->log_private = NULL;
	}
}
static int ppl_validate_rdev(struct md_rdev *rdev)
{
	int ppl_data_sectors;
	int ppl_size_new;

	/*
	 * The configured PPL size must be enough to store
	 * the header and (at the very least) partial parity
	 * for one stripe. Round it down to ensure the data
	 * space is cleanly divisible by stripe size.
	 */
	ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);

	if (ppl_data_sectors > 0)
		ppl_data_sectors = rounddown(ppl_data_sectors,
				RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));

	if (ppl_data_sectors <= 0) {
		pr_warn("md/raid:%s: PPL space too small on %pg\n",
			mdname(rdev->mddev), rdev->bdev);
		return -ENOSPC;
	}

	ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);

	if ((rdev->ppl.sector < rdev->data_offset &&
	     rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
	    (rdev->ppl.sector >= rdev->data_offset &&
	     rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
		pr_warn("md/raid:%s: PPL space overlaps with data on %pg\n",
			mdname(rdev->mddev), rdev->bdev);
		return -EINVAL;
	}

	if (!rdev->mddev->external &&
	    ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
	     (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
		pr_warn("md/raid:%s: PPL space overlaps with superblock on %pg\n",
			mdname(rdev->mddev), rdev->bdev);
		return -EINVAL;
	}

	rdev->ppl.size = ppl_size_new;

	return 0;
}
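/*
 * A worked example with assumed numbers: rdev->ppl.size = 2048 sectors (1 MiB)
 * and a 4k header give ppl_data_sectors = 2048 - 8 = 2040 sectors, which is
 * already a multiple of 8-sector stripes, so ppl_size_new = 2040 + 8 = 2048.
 */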
static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
	if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
				      PPL_HEADER_SIZE) * 2) {
		log->use_multippl = true;
		set_bit(MD_HAS_MULTIPLE_PPLS,
			&log->ppl_conf->mddev->flags);
		log->entry_space = PPL_SPACE_SIZE;
	} else {
		log->use_multippl = false;
		log->entry_space = (log->rdev->ppl.size << 9) -
				   PPL_HEADER_SIZE;
	}
	log->next_io_sector = rdev->ppl.sector;

	if (bdev_write_cache(rdev->bdev))
		log->wb_cache_on = true;
}
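/*
 * For illustration with the constants above: PPL_SPACE_SIZE + PPL_HEADER_SIZE
 * is 128k + 4k = 132k, so a PPL area of at least 264k enables multiple PPLs
 * with entry_space = 128k; a smaller area, e.g. 256k, gets a single PPL with
 * entry_space = 256k - 4k = 252k.
 */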
int ppl_init_log(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf;
	struct mddev *mddev = conf->mddev;
	int ret = 0;
	int max_disks;
	int i;

	pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
		 mdname(conf->mddev));

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	if (mddev->level != 5) {
		pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
			mdname(mddev), mddev->level);
		return -EINVAL;
	}

	if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
		pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
			mdname(mddev));
		return -EINVAL;
	}

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		pr_warn("md/raid:%s PPL is not compatible with journal\n",
			mdname(mddev));
		return -EINVAL;
	}

	max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
		BITS_PER_BYTE;
	if (conf->raid_disks > max_disks) {
		pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
			mdname(mddev), max_disks);
		return -EINVAL;
	}

	ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
	if (!ppl_conf)
		return -ENOMEM;

	ppl_conf->mddev = mddev;

	ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
	if (!ppl_conf->io_kc) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
			   ppl_io_pool_free, ppl_conf->io_kc);
	if (ret)
		goto err;

	ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto err;

	ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
	if (ret)
		goto err;

	ppl_conf->count = conf->raid_disks;
	ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
				       GFP_KERNEL);
	if (!ppl_conf->child_logs) {
		ret = -ENOMEM;
		goto err;
	}

	atomic64_set(&ppl_conf->seq, 0);
	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
	spin_lock_init(&ppl_conf->no_mem_stripes_lock);

	if (!mddev->external) {
		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
		ppl_conf->block_size = 512;
	} else {
		ppl_conf->block_size =
			queue_logical_block_size(mddev->gendisk->queue);
	}

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];
		struct md_rdev *rdev = conf->disks[i].rdev;

		mutex_init(&log->io_mutex);
		spin_lock_init(&log->io_list_lock);
		INIT_LIST_HEAD(&log->io_list);

		log->ppl_conf = ppl_conf;
		log->rdev = rdev;

		if (rdev) {
			ret = ppl_validate_rdev(rdev);
			if (ret)
				goto err;

			ppl_init_child_log(log, rdev);
		}
	}

	/* load and possibly recover the logs from the member disks */
	ret = ppl_load(ppl_conf);

	if (ret) {
		goto err;
	} else if (!mddev->pers && mddev->recovery_cp == 0 &&
		   ppl_conf->recovered_entries > 0 &&
		   ppl_conf->mismatch_count == 0) {
		/*
		 * If we are starting a dirty array and the recovery succeeds
		 * without any issues, set the array as clean.
		 */
		mddev->recovery_cp = MaxSector;
		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
	} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
		/* no mismatch allowed when enabling PPL for a running array */
		ret = -EINVAL;
		goto err;
	}

	conf->log_private = ppl_conf;
	set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);

	return 0;
err:
	__ppl_exit_log(ppl_conf);
	return ret;
}
int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_log *log;
	int ret = 0;

	pr_debug("%s: disk: %d operation: %s dev: %pg\n",
		 __func__, rdev->raid_disk, add ? "add" : "remove",
		 rdev->bdev);

	if (rdev->raid_disk < 0)
		return 0;

	if (rdev->raid_disk >= ppl_conf->count)
		return -ENODEV;

	log = &ppl_conf->child_logs[rdev->raid_disk];

	mutex_lock(&log->io_mutex);
	if (add) {
		ret = ppl_validate_rdev(rdev);
		if (!ret) {
			log->rdev = rdev;
			ret = ppl_write_empty_header(log);
			ppl_init_child_log(log, rdev);
		}
	} else {
		log->rdev = NULL;
	}
	mutex_unlock(&log->io_mutex);

	return ret;
}
static ssize_t
ppl_write_hint_show(struct mddev *mddev, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t
ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf;
	int err = 0;
	unsigned short new;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (kstrtou16(page, 10, &new))
		return -EINVAL;

	err = mddev_lock(mddev);
	if (err)
		return err;

	conf = mddev->private;
	if (!conf)
		err = -ENODEV;
	else if (!raid5_has_ppl(conf) || !conf->log_private)
		err = -EINVAL;

	mddev_unlock(mddev);

	return err ?: len;
}

struct md_sysfs_entry
ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
			ppl_write_hint_show,
			ppl_write_hint_store);