// SPDX-License-Identifier: GPL-2.0-only
/*
 * Partial Parity Log for closing the RAID5 write hole
 * Copyright (c) 2017, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"
#include "raid5-log.h"
/*
 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
 * partial parity data. The header contains an array of entries
 * (struct ppl_header_entry) which describe the logged write requests.
 * Partial parity for the entries comes after the header, written in the same
 * sequence as the entries:
 *
 * Header
 *   entry0
 *   ...
 *   entryN
 * PP data
 *   PP for entry0
 *   ...
 *   PP for entryN
 *
 * An entry describes one or more consecutive stripe_heads, up to a full
 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
 * number of stripe_heads in the entry and n is the number of modified data
 * disks. Every stripe_head in the entry must write to the same data disks.
 * An example of a valid case described by a single entry (writes to the first
 * stripe of a 4 disk array, 16k chunk size):
 *
 * sh->sector   dd0   dd1   dd2    ppl
 *            +-----+-----+-----+
 *        0   | --- | --- | --- | +----+
 *        8   | -W- | -W- | --- | | pp |   data_sector = 8
 *       16   | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
 *       24   | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
 *            +-----+-----+-----+ +----+
 *
 * data_sector is the first raid sector of the modified data, data_size is the
 * total size of modified data and pp_size is the size of partial parity for
 * this entry. Entries for full stripe writes contain no partial parity
 * (pp_size = 0), they only mark the stripes for which parity should be
 * recalculated after an unclean shutdown. Every entry holds a checksum of its
 * partial parity, the header also has a checksum of the header itself.
 *
 * A write request is always logged to the PPL instance stored on the parity
 * disk of the corresponding stripe. For each member disk there is one ppl_log
 * used to handle logging for this disk, independently from others. They are
 * grouped in the child_logs array in struct ppl_conf, which is assigned to
 * r5conf->log_private.
 *
 * ppl_io_unit represents a full PPL write, header_page contains the ppl_header.
 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
 * can be appended to the last entry if it meets the conditions for a valid
 * entry described above, otherwise a new entry is added. Checksums of entries
 * are calculated incrementally as stripes containing partial parity are being
 * added. ppl_submit_iounit() calculates the checksum of the header and submits
 * a bio containing the header page and partial parity pages (sh->ppl_page) for
 * all stripes of the io_unit. When the PPL write completes, the stripes
 * associated with the io_unit are released and raid5d starts writing their data
 * and parity. When all stripes are written, the io_unit is freed and the next
 * can be submitted.
 *
 * An io_unit is used to gather stripes until it is submitted or becomes full
 * (if the maximum number of entries or size of PPL is reached). Another io_unit
 * can't be submitted until the previous has completed (PPL and stripe
 * data+parity is written). The log->io_list tracks all io_units of a log
 * (for a single member disk). New io_units are added to the end of the list
 * and the first io_unit is submitted, if it is not submitted already.
 * The current io_unit accepting new stripes is always at the end of the list.
 *
 * If write-back cache is enabled for any of the disks in the array, its data
 * must be flushed before the next io_unit is submitted.
 */
#define PPL_SPACE_SIZE (128 * 1024)
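/*
 * A worked example of the sizing described above, assuming the constants in
 * this file: in multi-PPL mode an io_unit covers the 4 KiB header plus up to
 * PPL_SPACE_SIZE (128 KiB) of partial parity, i.e. at most 32 pages of 4 KiB
 * partial parity before the io_unit counts as full (it can also fill up
 * earlier, once it reaches PPL_HDR_MAX_ENTRIES header entries).
 */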
struct ppl_conf {
        struct mddev *mddev;

        /* array of child logs, one for each raid disk */
        struct ppl_log *child_logs;
        int count;

        int block_size;         /* the logical block size used for data_sector
                                 * in ppl_header_entry */
        u32 signature;          /* raid array identifier */
        atomic64_t seq;         /* current log write sequence number */

        struct kmem_cache *io_kc;
        mempool_t io_pool;
        struct bio_set bs;
        struct bio_set flush_bs;

        /* used only for recovery */
        int recovered_entries;
        int mismatch_count;

        /* stripes to retry if failed to allocate io_unit */
        struct list_head no_mem_stripes;
        spinlock_t no_mem_stripes_lock;

        unsigned short write_hint;
};
struct ppl_log {
        struct ppl_conf *ppl_conf;      /* shared between all log instances */

        struct md_rdev *rdev;           /* array member disk associated with
                                         * this log instance */
        struct mutex io_mutex;
        struct ppl_io_unit *current_io; /* current io_unit accepting new data
                                         * always at the end of io_list */
        spinlock_t io_list_lock;
        struct list_head io_list;       /* all io_units of this log */

        sector_t next_io_sector;
        unsigned int entry_space;
        bool use_multippl;
        bool wb_cache_on;
        unsigned long disk_flush_bitmap;
};
#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
        struct ppl_log *log;

        struct page *header_page;       /* for ppl_header */

        unsigned int entries_count;     /* number of entries in ppl_header */
        unsigned int pp_size;           /* current total size of partial parity */

        u64 seq;                        /* sequence number of this log write */
        struct list_head log_sibling;   /* log->io_list */

        struct list_head stripe_list;   /* stripes added to the io_unit */
        atomic_t pending_stripes;       /* how many stripes not written to raid */
        atomic_t pending_flushes;       /* how many disk flushes are in progress */

        bool submitted;                 /* true if write to log started */

        /* inline bio and its biovec for submitting the iounit */
        struct bio bio;
        struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};
static struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
                       struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        struct page **srcs = percpu->scribble;
        int count = 0, pd_idx = sh->pd_idx, i;
        struct async_submit_ctl submit;

        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

        /*
         * Partial parity is the XOR of stripe data chunks that are not changed
         * during the write request. Depending on available data
         * (read-modify-write vs. reconstruct-write case) we calculate it
         * differently.
         */
        if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
                /*
                 * rmw: xor old data and parity from updated disks
                 * This is calculated earlier by ops_run_prexor5() so just copy
                 * the parity dev page.
                 */
                srcs[count++] = sh->dev[pd_idx].page;
        } else if (sh->reconstruct_state == reconstruct_state_drain_run) {
                /* rcw: xor data from all not updated disks */
                for (i = disks; i--;) {
                        struct r5dev *dev = &sh->dev[i];
                        if (test_bit(R5_UPTODATE, &dev->flags))
                                srcs[count++] = dev->page;
                }
        } else {
                return tx;
        }

        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
                          NULL, sh, (void *) (srcs + sh->disks + 2));

        if (count == 1)
                tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
                                  &submit);
        else
                tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
                               &submit);

        return tx;
}
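/*
 * A short sketch of why the rmw shortcut above is correct: parity is the XOR
 * of all data chunks, so with, for example, P = D0 ^ D1 ^ D2 and a write to
 * D0 only, the prexor result P ^ D0(old) equals D1 ^ D2 - exactly the XOR of
 * the chunks that are not being modified, i.e. the partial parity.
 */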
static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *kc = pool_data;
        struct ppl_io_unit *io;

        io = kmem_cache_alloc(kc, gfp_mask);
        if (!io)
                return NULL;

        io->header_page = alloc_page(gfp_mask);
        if (!io->header_page) {
                kmem_cache_free(kc, io);
                return NULL;
        }

        return io;
}
static void ppl_io_pool_free(void *element, void *pool_data)
{
        struct kmem_cache *kc = pool_data;
        struct ppl_io_unit *io = element;

        __free_page(io->header_page);
        kmem_cache_free(kc, io);
}
static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
                                          struct stripe_head *sh)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct ppl_io_unit *io;
        struct ppl_header *pplhdr;
        struct page *header_page;

        io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
        if (!io)
                return NULL;

        header_page = io->header_page;
        memset(io, 0, sizeof(*io));
        io->header_page = header_page;

        io->log = log;
        INIT_LIST_HEAD(&io->log_sibling);
        INIT_LIST_HEAD(&io->stripe_list);
        atomic_set(&io->pending_stripes, 0);
        atomic_set(&io->pending_flushes, 0);
        bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);

        pplhdr = page_address(io->header_page);
        clear_page(pplhdr);
        memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
        pplhdr->signature = cpu_to_le32(ppl_conf->signature);

        io->seq = atomic64_add_return(1, &ppl_conf->seq);
        pplhdr->generation = cpu_to_le64(io->seq);

        return io;
}
static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
{
        struct ppl_io_unit *io = log->current_io;
        struct ppl_header_entry *e = NULL;
        struct ppl_header *pplhdr;
        int i;
        sector_t data_sector = 0;
        int data_disks = 0;
        struct r5conf *conf = sh->raid_conf;

        pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);

        /* check if current io_unit is full */
        if (io && (io->pp_size == log->entry_space ||
                   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
                pr_debug("%s: add io_unit blocked by seq: %llu\n",
                         __func__, io->seq);
                io = NULL;
        }

        /* add a new unit if there is none or the current is full */
        if (!io) {
                io = ppl_new_iounit(log, sh);
                if (!io)
                        return -ENOMEM;
                spin_lock_irq(&log->io_list_lock);
                list_add_tail(&io->log_sibling, &log->io_list);
                spin_unlock_irq(&log->io_list_lock);

                log->current_io = io;
        }

        for (i = 0; i < sh->disks; i++) {
                struct r5dev *dev = &sh->dev[i];

                if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
                        if (!data_disks || dev->sector < data_sector)
                                data_sector = dev->sector;
                        data_disks++;
                }
        }
        BUG_ON(!data_disks);

        pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
                 io->seq, (unsigned long long)data_sector, data_disks);

        pplhdr = page_address(io->header_page);

        if (io->entries_count > 0) {
                struct ppl_header_entry *last =
                                &pplhdr->entries[io->entries_count - 1];
                struct stripe_head *sh_last = list_last_entry(
                                &io->stripe_list, struct stripe_head, log_list);
                u64 data_sector_last = le64_to_cpu(last->data_sector);
                u32 data_size_last = le32_to_cpu(last->data_size);

                /*
                 * Check if we can append the stripe to the last entry. It must
                 * be just after the last logged stripe and write to the same
                 * disks. Use bit shift and logarithm to avoid 64-bit division.
                 */
                if ((sh->sector == sh_last->sector + STRIPE_SECTORS) &&
                    (data_sector >> ilog2(conf->chunk_sectors) ==
                     data_sector_last >> ilog2(conf->chunk_sectors)) &&
                    ((data_sector - data_sector_last) * data_disks ==
                     data_size_last >> 9))
                        e = last;
        }
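        /*
         * A worked instance of the append check above (values assumed for
         * illustration): with 4 KiB pages (STRIPE_SECTORS = 8) and two
         * modified data disks, an entry covering one stripe_head has
         * data_size_last = 2 * 4 KiB, so data_size_last >> 9 = 16 sectors.
         * A stripe_head starting exactly STRIPE_SECTORS later in the same
         * chunk gives (data_sector - data_sector_last) * data_disks =
         * 8 * 2 = 16, so all three conditions hold and the entry is extended.
         */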
        if (!e) {
                e = &pplhdr->entries[io->entries_count++];
                e->data_sector = cpu_to_le64(data_sector);
                e->parity_disk = cpu_to_le32(sh->pd_idx);
                e->checksum = cpu_to_le32(~0);
        }

        le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);

        /* don't write any PP if full stripe write */
        if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
                le32_add_cpu(&e->pp_size, PAGE_SIZE);
                io->pp_size += PAGE_SIZE;
                e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
                                                    page_address(sh->ppl_page),
                                                    PAGE_SIZE));
        }

        list_add_tail(&sh->log_list, &io->stripe_list);
        atomic_inc(&io->pending_stripes);
        sh->ppl_io = io;

        return 0;
}
int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_io_unit *io = sh->ppl_io;
        struct ppl_log *log;
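        /*
         * Skip logging if the stripe is already logged, is being synced, has
         * no partial parity page, or its parity disk is not in-sync or not
         * being written; returning -EAGAIN leaves the stripe to the regular
         * raid5 write path.
         */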
        if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
            !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
            !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
                clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
                return -EAGAIN;
        }

        log = &ppl_conf->child_logs[sh->pd_idx];

        mutex_lock(&log->io_mutex);

        if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
                mutex_unlock(&log->io_mutex);
                return -EAGAIN;
        }

        set_bit(STRIPE_LOG_TRAPPED, &sh->state);
        clear_bit(STRIPE_DELAYED, &sh->state);
        atomic_inc(&sh->count);

        if (ppl_log_stripe(log, sh)) {
                spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
                list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
                spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
        }

        mutex_unlock(&log->io_mutex);

        return 0;
}
static void ppl_log_endio(struct bio *bio)
{
        struct ppl_io_unit *io = bio->bi_private;
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct stripe_head *sh, *next;

        pr_debug("%s: seq: %llu\n", __func__, io->seq);

        if (bio->bi_status)
                md_error(ppl_conf->mddev, log->rdev);

        list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
                list_del_init(&sh->log_list);

                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
}
static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
        char b[BDEVNAME_SIZE];

        pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
                 __func__, io->seq, bio->bi_iter.bi_size,
                 (unsigned long long)bio->bi_iter.bi_sector,
                 bio_devname(bio, b));

        submit_bio(bio);
}
static void ppl_submit_iounit(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct ppl_header *pplhdr = page_address(io->header_page);
        struct bio *bio = &io->bio;
        struct stripe_head *sh;
        int i;

        bio->bi_private = io;

        if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
                ppl_log_endio(bio);
                return;
        }

        for (i = 0; i < io->entries_count; i++) {
                struct ppl_header_entry *e = &pplhdr->entries[i];

                pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
                         __func__, io->seq, i, le64_to_cpu(e->data_sector),
                         le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));

                e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
                                             ilog2(ppl_conf->block_size >> 9));
                e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
        }

        pplhdr->entries_count = cpu_to_le32(io->entries_count);
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));

        /* Rewind the buffer if current PPL is larger than remaining space */
        if (log->use_multippl &&
            log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
            (PPL_HEADER_SIZE + io->pp_size) >> 9)
                log->next_io_sector = log->rdev->ppl.sector;
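        /*
         * Worked example with assumed numbers: if only 56 sectors remain
         * before the end of the PPL area, an io_unit carrying 28 KiB of
         * partial parity needs (PPL_HEADER_SIZE + io->pp_size) >> 9 =
         * (4096 + 28672) >> 9 = 64 sectors, so the check above wraps
         * next_io_sector back to the start of the area.
         */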
        bio->bi_end_io = ppl_log_endio;
        bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
        bio_set_dev(bio, log->rdev->bdev);
        bio->bi_iter.bi_sector = log->next_io_sector;
        bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
        bio->bi_write_hint = ppl_conf->write_hint;

        pr_debug("%s: log->current_io_sector: %llu\n", __func__,
                 (unsigned long long)log->next_io_sector);

        if (log->use_multippl)
                log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

        WARN_ON(log->disk_flush_bitmap != 0);

        list_for_each_entry(sh, &io->stripe_list, log_list) {
                for (i = 0; i < sh->disks; i++) {
                        struct r5dev *dev = &sh->dev[i];

                        if ((ppl_conf->child_logs[i].wb_cache_on) &&
                            (test_bit(R5_Wantwrite, &dev->flags))) {
                                set_bit(i, &log->disk_flush_bitmap);
                        }
                }

                /* entries for full stripe writes have no partial parity */
                if (test_bit(STRIPE_FULL_WRITE, &sh->state))
                        continue;

                if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
                        struct bio *prev = bio;

                        bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
                                               &ppl_conf->bs);
                        bio->bi_opf = prev->bi_opf;
                        bio->bi_write_hint = prev->bi_write_hint;
                        bio_copy_dev(bio, prev);
                        bio->bi_iter.bi_sector = bio_end_sector(prev);
                        bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);

                        bio_chain(bio, prev);
                        ppl_submit_iounit_bio(io, prev);
                }
        }

        ppl_submit_iounit_bio(io, bio);
}
static void ppl_submit_current_io(struct ppl_log *log)
{
        struct ppl_io_unit *io;

        spin_lock_irq(&log->io_list_lock);

        io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
                                      log_sibling);
        if (io && io->submitted)
                io = NULL;

        spin_unlock_irq(&log->io_list_lock);

        if (io) {
                io->submitted = true;

                if (io == log->current_io)
                        log->current_io = NULL;

                ppl_submit_iounit(io);
        }
}
void ppl_write_stripe_run(struct r5conf *conf)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_log *log;
        int i;

        for (i = 0; i < ppl_conf->count; i++) {
                log = &ppl_conf->child_logs[i];

                mutex_lock(&log->io_mutex);
                ppl_submit_current_io(log);
                mutex_unlock(&log->io_mutex);
        }
}
static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private;
        unsigned long flags;

        pr_debug("%s: seq: %llu\n", __func__, io->seq);

        local_irq_save(flags);

        spin_lock(&log->io_list_lock);
        list_del(&io->log_sibling);
        spin_unlock(&log->io_list_lock);

        mempool_free(io, &ppl_conf->io_pool);

        spin_lock(&ppl_conf->no_mem_stripes_lock);
        if (!list_empty(&ppl_conf->no_mem_stripes)) {
                struct stripe_head *sh;

                sh = list_first_entry(&ppl_conf->no_mem_stripes,
                                      struct stripe_head, log_list);
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
        spin_unlock(&ppl_conf->no_mem_stripes_lock);

        local_irq_restore(flags);

        wake_up(&conf->wait_for_quiescent);
}
static void ppl_flush_endio(struct bio *bio)
{
        struct ppl_io_unit *io = bio->bi_private;
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private;
        char b[BDEVNAME_SIZE];

        pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));

        if (bio->bi_status) {
                struct md_rdev *rdev;

                rcu_read_lock();
                rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
                if (rdev)
                        md_error(rdev->mddev, rdev);
                rcu_read_unlock();
        }

        bio_put(bio);

        if (atomic_dec_and_test(&io->pending_flushes)) {
                ppl_io_unit_finished(io);
                md_wakeup_thread(conf->mddev->thread);
        }
}
static void ppl_do_flush(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private;
        int raid_disks = conf->raid_disks;
        int flushed_disks = 0;
        int i;

        atomic_set(&io->pending_flushes, raid_disks);

        for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
                struct md_rdev *rdev;
                struct block_device *bdev = NULL;

                rcu_read_lock();
                rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags))
                        bdev = rdev->bdev;
                rcu_read_unlock();

                if (bdev) {
                        struct bio *bio;
                        char b[BDEVNAME_SIZE];

                        bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
                        bio_set_dev(bio, bdev);
                        bio->bi_private = io;
                        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                        bio->bi_end_io = ppl_flush_endio;

                        pr_debug("%s: dev: %s\n", __func__,
                                 bio_devname(bio, b));

                        submit_bio(bio);
                        flushed_disks++;
                }
        }

        log->disk_flush_bitmap = 0;
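        /*
         * pending_flushes was primed with raid_disks above; drop the
         * references corresponding to disks no flush bio was sent to, so
         * that the last completing flush (or this loop, if none were
         * submitted) finishes the io_unit.
         */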
        for (i = flushed_disks; i < raid_disks; i++) {
                if (atomic_dec_and_test(&io->pending_flushes))
                        ppl_io_unit_finished(io);
        }
}
static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
                                            struct ppl_log *log)
{
        struct ppl_io_unit *io;

        io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
                                      log_sibling);

        return !io || !io->submitted;
}
void ppl_quiesce(struct r5conf *conf, int quiesce)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        int i;

        if (!quiesce)
                return;

        for (i = 0; i < ppl_conf->count; i++) {
                struct ppl_log *log = &ppl_conf->child_logs[i];

                spin_lock_irq(&log->io_list_lock);
                wait_event_lock_irq(conf->wait_for_quiescent,
                                    ppl_no_io_unit_submitted(conf, log),
                                    log->io_list_lock);
                spin_unlock_irq(&log->io_list_lock);
        }
}
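/*
 * A zero-length flush can be completed right away: the PPL itself is written
 * with REQ_FUA and member disk caches are flushed explicitly via
 * ppl_do_flush(), so there is nothing extra to push out here. For flushes
 * that carry data, PREFLUSH is dropped and -EAGAIN tells the caller to
 * process the bio as a regular write.
 */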
int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
        if (bio->bi_iter.bi_size == 0) {
                bio_endio(bio);
                return 0;
        }
        bio->bi_opf &= ~REQ_PREFLUSH;
        return -EAGAIN;
}
void ppl_stripe_write_finished(struct stripe_head *sh)
{
        struct ppl_io_unit *io;

        io = sh->ppl_io;
        sh->ppl_io = NULL;

        if (io && atomic_dec_and_test(&io->pending_stripes)) {
                if (io->log->disk_flush_bitmap)
                        ppl_do_flush(io);
                else
                        ppl_io_unit_finished(io);
        }
}
static void ppl_xor(int size, struct page *page1, struct page *page2)
{
        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;
        struct page *xor_srcs[] = { page1, page2 };

        init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
                          NULL, NULL, NULL, NULL);
        tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);

        async_tx_quiesce(&tx);
}
/*
 * PPL recovery strategy: xor partial parity and data from all modified data
 * disks within a stripe and write the result as the new stripe parity. If all
 * stripe data disks are modified (full stripe write), no partial parity is
 * available, so just xor the data disks.
 *
 * Recovery of a PPL entry shall occur only if all modified data disks are
 * available and read from all of them succeeds.
 *
 * A PPL entry applies to a stripe, partial parity size for an entry is at most
 * the size of the chunk. Examples of possible cases for a single entry:
 *
 * case 0: single data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size
 *
 * case 1: more than one data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size / modified_data_disks
 *
 * case 2: write to all data disks (also full stripe write):
 *   data0    data1    data2                parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ |           | (no change)        |
 * | -data- | -data- | -data- | --------> | xor all data       |
 * | ------ | ------ | ------ | --------> | (no change)        |
 * | ------ | ------ | ------ |           | (no change)        |
 * +--------+--------+--------+           +--------------------+
 *
 * The following cases are possible only in other implementations. The recovery
 * code can handle them, but they are not generated at runtime because they can
 * be reduced to cases 0, 1 and 2:
 *
 * case 3:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
 * | -data- | -data- | -data- | | -- | -> | xor all data       |
 * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 *
 * case 4:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 */
static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
                             sector_t ppl_sector)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct mddev *mddev = ppl_conf->mddev;
        struct r5conf *conf = mddev->private;
        int block_size = ppl_conf->block_size;
        struct page *page1;
        struct page *page2;
        sector_t r_sector_first;
        sector_t r_sector_last;
        int strip_sectors;
        int data_disks;
        int i;
        int ret = 0;
        char b[BDEVNAME_SIZE];
        unsigned int pp_size = le32_to_cpu(e->pp_size);
        unsigned int data_size = le32_to_cpu(e->data_size);

        page1 = alloc_page(GFP_KERNEL);
        page2 = alloc_page(GFP_KERNEL);

        if (!page1 || !page2) {
                ret = -ENOMEM;
                goto out;
        }

        r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);

        if ((pp_size >> 9) < conf->chunk_sectors) {
                if (pp_size > 0) {
                        data_disks = data_size / pp_size;
                        strip_sectors = pp_size >> 9;
                } else {
                        data_disks = conf->raid_disks - conf->max_degraded;
                        strip_sectors = (data_size >> 9) / data_disks;
                }
                r_sector_last = r_sector_first +
                                (data_disks - 1) * conf->chunk_sectors +
                                strip_sectors;
        } else {
                data_disks = conf->raid_disks - conf->max_degraded;
                strip_sectors = conf->chunk_sectors;
                r_sector_last = r_sector_first + (data_size >> 9);
        }
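        /*
         * For instance (entry values assumed): pp_size = 8 KiB with
         * data_size = 16 KiB matches "case 1" above: data_disks =
         * 16k / 8k = 2 and strip_sectors = 8192 >> 9 = 16, so two 16-sector
         * strips on consecutive data disks share one strip of partial parity.
         */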
        pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
                 (unsigned long long)r_sector_first,
                 (unsigned long long)r_sector_last);

        /* if start and end is 4k aligned, use a 4k block */
        if (block_size == 512 &&
            (r_sector_first & (STRIPE_SECTORS - 1)) == 0 &&
            (r_sector_last & (STRIPE_SECTORS - 1)) == 0)
                block_size = STRIPE_SIZE;
        /* iterate through blocks in strip */
        for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
                bool update_parity = false;
                sector_t parity_sector;
                struct md_rdev *parity_rdev;
                struct stripe_head sh;
                int disk;
                int indent = 0;

                pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
                indent += 2;

                memset(page_address(page1), 0, PAGE_SIZE);

                /* iterate through data member disks */
                for (disk = 0; disk < data_disks; disk++) {
                        int dd_idx;
                        struct md_rdev *rdev;
                        sector_t sector;
                        sector_t r_sector = r_sector_first + i +
                                            (disk * conf->chunk_sectors);

                        pr_debug("%s:%*s data member disk %d start\n",
                                 __func__, indent, "", disk);
                        indent += 2;

                        if (r_sector >= r_sector_last) {
                                pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
                                         __func__, indent, "",
                                         (unsigned long long)r_sector);
                                indent -= 2;
                                continue;
                        }

                        update_parity = true;

                        /* map raid sector to member disk */
                        sector = raid5_compute_sector(conf, r_sector, 0,
                                                      &dd_idx, NULL);
                        pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
                                 __func__, indent, "",
                                 (unsigned long long)r_sector, dd_idx,
                                 (unsigned long long)sector);

                        rdev = conf->disks[dd_idx].rdev;
                        if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
                                      sector >= rdev->recovery_offset)) {
                                pr_debug("%s:%*s data member disk %d missing\n",
                                         __func__, indent, "", dd_idx);
                                update_parity = false;
                                break;
                        }

                        pr_debug("%s:%*s reading data member disk %s sector %llu\n",
                                 __func__, indent, "", bdevname(rdev->bdev, b),
                                 (unsigned long long)sector);
                        if (!sync_page_io(rdev, sector, block_size, page2,
                                          REQ_OP_READ, 0, false)) {
                                md_error(mddev, rdev);
                                pr_debug("%s:%*s read failed!\n", __func__,
                                         indent, "");
                                ret = -EIO;
                                goto out;
                        }

                        ppl_xor(block_size, page1, page2);

                        indent -= 2;
                }

                if (!update_parity)
                        continue;

                if (pp_size > 0) {
                        pr_debug("%s:%*s reading pp disk sector %llu\n",
                                 __func__, indent, "",
                                 (unsigned long long)(ppl_sector + i));
                        if (!sync_page_io(log->rdev,
                                          ppl_sector - log->rdev->data_offset + i,
                                          block_size, page2, REQ_OP_READ, 0,
                                          false)) {
                                pr_debug("%s:%*s read failed!\n", __func__,
                                         indent, "");
                                md_error(mddev, log->rdev);
                                ret = -EIO;
                                goto out;
                        }

                        ppl_xor(block_size, page1, page2);
                }

                /* map raid sector to parity disk */
                parity_sector = raid5_compute_sector(conf, r_sector_first + i,
                                                     0, &disk, &sh);
                BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
                parity_rdev = conf->disks[sh.pd_idx].rdev;

                BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
                pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
                         __func__, indent, "",
                         (unsigned long long)parity_sector,
                         bdevname(parity_rdev->bdev, b));
                if (!sync_page_io(parity_rdev, parity_sector, block_size,
                                  page1, REQ_OP_WRITE, 0, false)) {
                        pr_debug("%s:%*s parity write error!\n", __func__,
                                 indent, "");
                        md_error(mddev, parity_rdev);
                        ret = -EIO;
                        goto out;
                }
        }
out:
        if (page1)
                __free_page(page1);
        if (page2)
                __free_page(page2);
        return ret;
}
*log
, struct ppl_header
*pplhdr
,
973 struct ppl_conf
*ppl_conf
= log
->ppl_conf
;
974 struct md_rdev
*rdev
= log
->rdev
;
975 struct mddev
*mddev
= rdev
->mddev
;
976 sector_t ppl_sector
= rdev
->ppl
.sector
+ offset
+
977 (PPL_HEADER_SIZE
>> 9);
982 page
= alloc_page(GFP_KERNEL
);
986 /* iterate through all PPL entries saved */
987 for (i
= 0; i
< le32_to_cpu(pplhdr
->entries_count
); i
++) {
988 struct ppl_header_entry
*e
= &pplhdr
->entries
[i
];
989 u32 pp_size
= le32_to_cpu(e
->pp_size
);
990 sector_t sector
= ppl_sector
;
991 int ppl_entry_sectors
= pp_size
>> 9;
994 pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
995 __func__
, rdev
->raid_disk
, i
,
996 (unsigned long long)ppl_sector
, pp_size
);
999 crc_stored
= le32_to_cpu(e
->checksum
);
1001 /* read parial parity for this entry and calculate its checksum */
1003 int s
= pp_size
> PAGE_SIZE
? PAGE_SIZE
: pp_size
;
1005 if (!sync_page_io(rdev
, sector
- rdev
->data_offset
,
1006 s
, page
, REQ_OP_READ
, 0, false)) {
1007 md_error(mddev
, rdev
);
1012 crc
= crc32c_le(crc
, page_address(page
), s
);
1020 if (crc
!= crc_stored
) {
1022 * Don't recover this entry if the checksum does not
1023 * match, but keep going and try to recover other
1026 pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
1027 __func__
, crc_stored
, crc
);
1028 ppl_conf
->mismatch_count
++;
1030 ret
= ppl_recover_entry(log
, e
, ppl_sector
);
1033 ppl_conf
->recovered_entries
++;
1036 ppl_sector
+= ppl_entry_sectors
;
1039 /* flush the disk cache after recovery if necessary */
1040 ret
= blkdev_issue_flush(rdev
->bdev
, GFP_KERNEL
, NULL
);
static int ppl_write_empty_header(struct ppl_log *log)
{
        struct page *page;
        struct ppl_header *pplhdr;
        struct md_rdev *rdev = log->rdev;
        int ret = 0;

        pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
                 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);

        page = alloc_page(GFP_NOIO | __GFP_ZERO);
        if (!page)
                return -ENOMEM;

        pplhdr = page_address(page);
        /* zero out PPL space to avoid collision with old PPLs */
        blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
                             log->rdev->ppl.size, GFP_NOIO, 0);
        memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
        pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));

        if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
                          PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
                          REQ_FUA, 0, false)) {
                md_error(rdev->mddev, rdev);
                ret = -EIO;
        }

        __free_page(page);
        return ret;
}
*log
)
1081 struct ppl_conf
*ppl_conf
= log
->ppl_conf
;
1082 struct md_rdev
*rdev
= log
->rdev
;
1083 struct mddev
*mddev
= rdev
->mddev
;
1084 struct page
*page
, *page2
, *tmp
;
1085 struct ppl_header
*pplhdr
= NULL
, *prev_pplhdr
= NULL
;
1086 u32 crc
, crc_stored
;
1089 sector_t pplhdr_offset
= 0, prev_pplhdr_offset
= 0;
1091 pr_debug("%s: disk: %d\n", __func__
, rdev
->raid_disk
);
1092 /* read PPL headers, find the recent one */
1093 page
= alloc_page(GFP_KERNEL
);
1097 page2
= alloc_page(GFP_KERNEL
);
1103 /* searching ppl area for latest ppl */
1104 while (pplhdr_offset
< rdev
->ppl
.size
- (PPL_HEADER_SIZE
>> 9)) {
1105 if (!sync_page_io(rdev
,
1106 rdev
->ppl
.sector
- rdev
->data_offset
+
1107 pplhdr_offset
, PAGE_SIZE
, page
, REQ_OP_READ
,
1109 md_error(mddev
, rdev
);
1111 /* if not able to read - don't recover any PPL */
1115 pplhdr
= page_address(page
);
1117 /* check header validity */
1118 crc_stored
= le32_to_cpu(pplhdr
->checksum
);
1119 pplhdr
->checksum
= 0;
1120 crc
= ~crc32c_le(~0, pplhdr
, PAGE_SIZE
);
1122 if (crc_stored
!= crc
) {
1123 pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
1124 __func__
, crc_stored
, crc
,
1125 (unsigned long long)pplhdr_offset
);
1126 pplhdr
= prev_pplhdr
;
1127 pplhdr_offset
= prev_pplhdr_offset
;
1131 signature
= le32_to_cpu(pplhdr
->signature
);
1133 if (mddev
->external
) {
1135 * For external metadata the header signature is set and
1136 * validated in userspace.
1138 ppl_conf
->signature
= signature
;
1139 } else if (ppl_conf
->signature
!= signature
) {
1140 pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
1141 __func__
, signature
, ppl_conf
->signature
,
1142 (unsigned long long)pplhdr_offset
);
1143 pplhdr
= prev_pplhdr
;
1144 pplhdr_offset
= prev_pplhdr_offset
;
1148 if (prev_pplhdr
&& le64_to_cpu(prev_pplhdr
->generation
) >
1149 le64_to_cpu(pplhdr
->generation
)) {
1150 /* previous was newest */
1151 pplhdr
= prev_pplhdr
;
1152 pplhdr_offset
= prev_pplhdr_offset
;
1156 prev_pplhdr_offset
= pplhdr_offset
;
1157 prev_pplhdr
= pplhdr
;
1163 /* calculate next potential ppl offset */
1164 for (i
= 0; i
< le32_to_cpu(pplhdr
->entries_count
); i
++)
1166 le32_to_cpu(pplhdr
->entries
[i
].pp_size
) >> 9;
1167 pplhdr_offset
+= PPL_HEADER_SIZE
>> 9;
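        /*
         * Example of the offset arithmetic above (sizes assumed): a header
         * whose entries carry 24 KiB of partial parity in total advances
         * pplhdr_offset by (24 KiB >> 9) + (PPL_HEADER_SIZE >> 9) =
         * 48 + 8 = 56 sectors to the next potential header.
         */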
        /* no valid ppl found */
        if (!pplhdr)
                ppl_conf->mismatch_count++;
        else
                pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
                         __func__, (unsigned long long)pplhdr_offset,
                         le64_to_cpu(pplhdr->generation));

        /* attempt to recover from log if we are starting a dirty array */
        if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
                ret = ppl_recover(log, pplhdr, pplhdr_offset);

        /* write empty header if we are starting the array */
        if (!ret && !mddev->pers)
                ret = ppl_write_empty_header(log);

        __free_page(page);
        __free_page(page2);

        pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
                 __func__, ret, ppl_conf->mismatch_count,
                 ppl_conf->recovered_entries);
        return ret;
}
static int ppl_load(struct ppl_conf *ppl_conf)
{
        int ret = 0;
        u32 signature = 0;
        bool signature_set = false;
        int i;

        for (i = 0; i < ppl_conf->count; i++) {
                struct ppl_log *log = &ppl_conf->child_logs[i];

                /* skip missing drive */
                if (!log->rdev)
                        continue;

                ret = ppl_load_distributed(log);
                if (ret)
                        break;

                /*
                 * For external metadata we can't check if the signature is
                 * correct on a single drive, but we can check if it is the same
                 * for all.
                 */
                if (ppl_conf->mddev->external) {
                        if (!signature_set) {
                                signature = ppl_conf->signature;
                                signature_set = true;
                        } else if (signature != ppl_conf->signature) {
                                pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
                                        mdname(ppl_conf->mddev));
                                ret = -EINVAL;
                                break;
                        }
                }
        }

        pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
                 __func__, ret, ppl_conf->mismatch_count,
                 ppl_conf->recovered_entries);
        return ret;
}
static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
        clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
        clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

        kfree(ppl_conf->child_logs);

        bioset_exit(&ppl_conf->bs);
        bioset_exit(&ppl_conf->flush_bs);
        mempool_exit(&ppl_conf->io_pool);
        kmem_cache_destroy(ppl_conf->io_kc);

        kfree(ppl_conf);
}
void ppl_exit_log(struct r5conf *conf)
{
        struct ppl_conf *ppl_conf = conf->log_private;

        if (ppl_conf) {
                __ppl_exit_log(ppl_conf);
                conf->log_private = NULL;
        }
}
static int ppl_validate_rdev(struct md_rdev *rdev)
{
        char b[BDEVNAME_SIZE];
        int ppl_data_sectors;
        int ppl_size_new;

        /*
         * The configured PPL size must be enough to store
         * the header and (at the very least) partial parity
         * for one stripe. Round it down to ensure the data
         * space is cleanly divisible by stripe size.
         */
        ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);

        if (ppl_data_sectors > 0)
                ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS);

        if (ppl_data_sectors <= 0) {
                pr_warn("md/raid:%s: PPL space too small on %s\n",
                        mdname(rdev->mddev), bdevname(rdev->bdev, b));
                return -ENOSPC;
        }

        ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);
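        /*
         * Example (sizes assumed): rdev->ppl.size = 250 sectors leaves
         * 250 - 8 = 242 data sectors, rounded down to 240 (a multiple of
         * STRIPE_SECTORS = 8 with 4 KiB pages), so the PPL is trimmed to
         * 240 + 8 = 248 sectors.
         */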
        if ((rdev->ppl.sector < rdev->data_offset &&
             rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
            (rdev->ppl.sector >= rdev->data_offset &&
             rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
                pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
                        mdname(rdev->mddev), bdevname(rdev->bdev, b));
                return -EINVAL;
        }

        if (!rdev->mddev->external &&
            ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
             (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
                pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
                        mdname(rdev->mddev), bdevname(rdev->bdev, b));
                return -EINVAL;
        }

        rdev->ppl.size = ppl_size_new;

        return 0;
}
static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
        struct request_queue *q;

        if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
                                      PPL_HEADER_SIZE) * 2) {
                log->use_multippl = true;
                set_bit(MD_HAS_MULTIPLE_PPLS,
                        &log->ppl_conf->mddev->flags);
                log->entry_space = PPL_SPACE_SIZE;
        } else {
                log->use_multippl = false;
                log->entry_space = (log->rdev->ppl.size << 9) -
                                   PPL_HEADER_SIZE;
        }
        log->next_io_sector = rdev->ppl.sector;
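        /*
         * With the constants above, multi-PPL mode requires at least
         * 2 * (128 KiB + 4 KiB) = 264 KiB (528 sectors) of PPL space;
         * anything smaller falls back to a single PPL at a fixed offset.
         */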
        q = bdev_get_queue(rdev->bdev);
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                log->wb_cache_on = true;
}
int ppl_init_log(struct r5conf *conf)
{
        struct ppl_conf *ppl_conf;
        struct mddev *mddev = conf->mddev;
        int ret = 0;
        int max_disks;
        int i;

        pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
                 mdname(conf->mddev));

        if (PAGE_SIZE != 4096)
                return -EINVAL;

        if (mddev->level != 5) {
                pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
                        mdname(mddev), mddev->level);
                return -EINVAL;
        }

        if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
                pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
                        mdname(mddev));
                return -EINVAL;
        }

        if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
                pr_warn("md/raid:%s PPL is not compatible with journal\n",
                        mdname(mddev));
                return -EINVAL;
        }

        max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
                    BITS_PER_BYTE;
        if (conf->raid_disks > max_disks) {
                pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
                        mdname(mddev), max_disks);
                return -EINVAL;
        }

        ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
        if (!ppl_conf)
                return -ENOMEM;

        ppl_conf->mddev = mddev;

        ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
        if (!ppl_conf->io_kc) {
                ret = -ENOMEM;
                goto err;
        }

        ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
                           ppl_io_pool_free, ppl_conf->io_kc);
        if (ret)
                goto err;

        ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
        if (ret)
                goto err;

        ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
        if (ret)
                goto err;

        ppl_conf->count = conf->raid_disks;
        ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
                                       GFP_KERNEL);
        if (!ppl_conf->child_logs) {
                ret = -ENOMEM;
                goto err;
        }

        atomic64_set(&ppl_conf->seq, 0);
        INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
        spin_lock_init(&ppl_conf->no_mem_stripes_lock);
        ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;

        if (!mddev->external) {
                ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
                ppl_conf->block_size = 512;
        } else {
                ppl_conf->block_size = queue_logical_block_size(mddev->queue);
        }

        for (i = 0; i < ppl_conf->count; i++) {
                struct ppl_log *log = &ppl_conf->child_logs[i];
                struct md_rdev *rdev = conf->disks[i].rdev;

                mutex_init(&log->io_mutex);
                spin_lock_init(&log->io_list_lock);
                INIT_LIST_HEAD(&log->io_list);

                log->ppl_conf = ppl_conf;
                log->rdev = rdev;

                if (rdev) {
                        ret = ppl_validate_rdev(rdev);
                        if (ret)
                                goto err;

                        ppl_init_child_log(log, rdev);
                }
        }

        /* load and possibly recover the logs from the member disks */
        ret = ppl_load(ppl_conf);

        if (ret) {
                goto err;
        } else if (!mddev->pers && mddev->recovery_cp == 0 &&
                   ppl_conf->recovered_entries > 0 &&
                   ppl_conf->mismatch_count == 0) {
                /*
                 * If we are starting a dirty array and the recovery succeeds
                 * without any issues, set the array as clean.
                 */
                mddev->recovery_cp = MaxSector;
                set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
        } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
                /* no mismatch allowed when enabling PPL for a running array */
                ret = -EINVAL;
                goto err;
        }

        conf->log_private = ppl_conf;
        set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);

        return 0;
err:
        __ppl_exit_log(ppl_conf);
        return ret;
}
*conf
, struct md_rdev
*rdev
, bool add
)
1467 struct ppl_conf
*ppl_conf
= conf
->log_private
;
1468 struct ppl_log
*log
;
1470 char b
[BDEVNAME_SIZE
];
1475 pr_debug("%s: disk: %d operation: %s dev: %s\n",
1476 __func__
, rdev
->raid_disk
, add
? "add" : "remove",
1477 bdevname(rdev
->bdev
, b
));
1479 if (rdev
->raid_disk
< 0)
1482 if (rdev
->raid_disk
>= ppl_conf
->count
)
1485 log
= &ppl_conf
->child_logs
[rdev
->raid_disk
];
1487 mutex_lock(&log
->io_mutex
);
1489 ret
= ppl_validate_rdev(rdev
);
1492 ret
= ppl_write_empty_header(log
);
1493 ppl_init_child_log(log
, rdev
);
1498 mutex_unlock(&log
->io_mutex
);
static ssize_t
ppl_write_hint_show(struct mddev *mddev, char *buf)
{
        size_t ret = 0;
        struct r5conf *conf;
        struct ppl_conf *ppl_conf = NULL;

        spin_lock(&mddev->lock);
        conf = mddev->private;
        if (conf && raid5_has_ppl(conf))
                ppl_conf = conf->log_private;
        ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
        spin_unlock(&mddev->lock);

        return ret;
}
static ssize_t
ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
{
        struct r5conf *conf;
        struct ppl_conf *ppl_conf;
        int err = 0;
        unsigned short new;

        if (len >= PAGE_SIZE)
                return -EINVAL;
        if (kstrtou16(page, 10, &new))
                return -EINVAL;

        err = mddev_lock(mddev);
        if (err)
                return err;

        conf = mddev->private;
        if (!conf) {
                err = -ENODEV;
        } else if (raid5_has_ppl(conf)) {
                ppl_conf = conf->log_private;
                if (!ppl_conf)
                        err = -EINVAL;
                else
                        ppl_conf->write_hint = new;
        } else {
                err = -EINVAL;
        }

        mddev_unlock(mddev);

        return err ?: len;
}

struct md_sysfs_entry
ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
                        ppl_write_hint_show,
                        ppl_write_hint_store);