/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2016 Gvozden Nešković. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev.h>	/* For vdev_xlate() in vdev_raidz_io_verify() */
/*
 * Virtual device vector for RAID-Z.
 *
 * This vdev supports single, double, and triple parity. For single parity,
 * we use a simple XOR of all the data columns. For double or triple parity,
 * we use a special case of Reed-Solomon coding. This extends the
 * technique described in "The mathematics of RAID-6" by H. Peter Anvin by
 * drawing on the system described in "A Tutorial on Reed-Solomon Coding for
 * Fault-Tolerance in RAID-like Systems" by James S. Plank on which the
 * former is also based. The latter is designed to provide higher performance
 * for writes.
 *
 * Note that the Plank paper claimed to support arbitrary N+M, but was then
 * amended six years later identifying a critical flaw that invalidates its
 * claims. Nevertheless, the technique can be adapted to work for up to
 * triple parity. For additional parity, the amendment "Note: Correction to
 * the 1997 Tutorial on Reed-Solomon Coding" by James S. Plank and Ying Ding
 * is viable, but the additional complexity means that write performance will
 * suffer.
 *
 * All of the methods above operate on a Galois field, defined over the
 * integers mod 2^N. In our case we choose N=8 for GF(2^8) so that all
 * elements can be expressed with a single byte. Briefly, the operations on
 * the field are defined as follows:
 *
 *   o addition (+) is represented by a bitwise XOR
 *   o subtraction (-) is therefore identical to addition: A + B = A - B
 *   o multiplication of A by 2 is defined by the following bitwise expression:
 *
 *	(A * 2)_7 = A_6
 *	(A * 2)_6 = A_5
 *	(A * 2)_5 = A_4
 *	(A * 2)_4 = A_3 + A_7
 *	(A * 2)_3 = A_2 + A_7
 *	(A * 2)_2 = A_1 + A_7
 *	(A * 2)_1 = A_0
 *	(A * 2)_0 = A_7
 *
 * In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)).
 * As an aside, this multiplication is derived from the error correcting
 * primitive polynomial x^8 + x^4 + x^3 + x^2 + 1.
 *
 * Observe that any number in the field (except for 0) can be expressed as a
 * power of 2 -- a generator for the field. We store a table of the powers of
 * 2 and logs base 2 for quick look ups, and exploit the fact that A * B can
 * be rewritten as 2^(log_2(A) + log_2(B)) (where '+' is normal addition rather
 * than field addition). The inverse of a field element A (A^-1) is therefore
 * A ^ (255 - 1) = A^254.
 *
 * The up-to-three parity columns, P, Q, R over several data columns,
 * D_0, ... D_n-1, can be expressed by field operations:
 *
 *	P = D_0 + D_1 + ... + D_n-2 + D_n-1
 *	Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1
 *	  = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1
 *	R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
 *	  = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
 *
 * We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
 * XOR operation, and 2 and 4 can be computed quickly and generate linearly-
 * independent coefficients. (There are no additional coefficients that have
 * this property which is why the uncorrected Plank method breaks down.)
 *
 * See the reconstruction code below for how P, Q and R can be used
 * individually or in concert to recover missing data columns.
 */
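/*
 * Illustrative sketch (not part of the original source): computing P and Q
 * for a toy stripe of three one-byte data columns, using the schoolbook
 * definitions above.  gf_mul2_example() mirrors the multiply-by-2 rule for
 * the primitive polynomial x^8 + x^4 + x^3 + x^2 + 1, and the Horner-style
 * loop matches the nested form of the Q equation.  The function names are
 * hypothetical and the block is compiled out.
 */
#if 0	/* example only */
static uint8_t
gf_mul2_example(uint8_t a)
{
	return ((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
}

static void
raidz_pq_example(void)
{
	uint8_t d[3] = { 0xde, 0xad, 0xbe };	/* D_0, D_1, D_2 */
	uint8_t p = 0, q = 0;

	for (int i = 0; i < 3; i++) {
		p ^= d[i];			/* P = D_0 + D_1 + D_2 */
		q = gf_mul2_example(q) ^ d[i];	/* Q = (D_0*2 + D_1)*2 + D_2 */
	}
	/* Any single missing D_i can now be recovered from P alone. */
}
#endif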
#define	VDEV_RAIDZ_P		0
#define	VDEV_RAIDZ_Q		1
#define	VDEV_RAIDZ_R		2

#define	VDEV_RAIDZ_MUL_2(x)	(((x) << 1) ^ (((x) & 0x80) ? 0x1d : 0))
#define	VDEV_RAIDZ_MUL_4(x)	(VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x)))
/*
 * We provide a mechanism to perform the field multiplication operation on a
 * 64-bit value all at once rather than a byte at a time. This works by
 * creating a mask from the top bit in each byte and using that to
 * conditionally apply the XOR of 0x1d.
 */
#define	VDEV_RAIDZ_64MUL_2(x, mask) \
{ \
	(mask) = (x) & 0x8080808080808080ULL; \
	(mask) = ((mask) << 1) - ((mask) >> 7); \
	(x) = (((x) << 1) & 0xfefefefefefefefeULL) ^ \
	    ((mask) & 0x1d1d1d1d1d1d1d1dULL); \
}

#define	VDEV_RAIDZ_64MUL_4(x, mask) \
{ \
	VDEV_RAIDZ_64MUL_2((x), mask); \
	VDEV_RAIDZ_64MUL_2((x), mask); \
}
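/*
 * Illustrative sketch (not part of the original source): the 64-bit
 * multiply-by-2 above is just eight byte-wise multiplications performed in
 * parallel.  This compiled-out example checks that equivalence for one
 * packed word; only VDEV_RAIDZ_MUL_2/VDEV_RAIDZ_64MUL_2 come from the code
 * above, the rest is hypothetical.
 */
#if 0	/* example only */
static void
raidz_64mul_example(void)
{
	uint64_t x = 0x0123456789abcdefULL;
	uint64_t packed = x, mask;

	VDEV_RAIDZ_64MUL_2(packed, mask);

	for (int i = 0; i < 8; i++) {
		uint8_t b = (x >> (i * 8)) & 0xff;
		ASSERT3U((packed >> (i * 8)) & 0xff, ==,
		    (uint8_t)VDEV_RAIDZ_MUL_2(b));
	}
}
#endif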
static void
vdev_raidz_row_free(raidz_row_t *rr)
{
	for (int c = 0; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size != 0)
			abd_free(rc->rc_abd);
		if (rc->rc_orig_data != NULL)
			abd_free(rc->rc_orig_data);
	}

	if (rr->rr_abd_empty != NULL)
		abd_free(rr->rr_abd_empty);

	kmem_free(rr, offsetof(raidz_row_t, rr_col[rr->rr_scols]));
}
void
vdev_raidz_map_free(raidz_map_t *rm)
{
	for (int i = 0; i < rm->rm_nrows; i++)
		vdev_raidz_row_free(rm->rm_row[i]);

	kmem_free(rm, offsetof(raidz_map_t, rm_row[rm->rm_nrows]));
}
static void
vdev_raidz_map_free_vsd(zio_t *zio)
{
	raidz_map_t *rm = zio->io_vsd;

	vdev_raidz_map_free(rm);
}

const zio_vsd_ops_t vdev_raidz_vsd_ops = {
	.vsd_free = vdev_raidz_map_free_vsd,
};
/*
 * Divides the IO evenly across all child vdevs; usually, dcols is
 * the number of children in the target vdev.
 *
 * Avoid inlining the function to keep vdev_raidz_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline raidz_map_t *
vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
    uint64_t nparity)
{
	raidz_row_t *rr;
	/* The starting RAIDZ (parent) vdev sector of the block. */
	uint64_t b = zio->io_offset >> ashift;
	/* The zio's size in units of the vdev's minimum sector size. */
	uint64_t s = zio->io_size >> ashift;
	/* The first column for this stripe. */
	uint64_t f = b % dcols;
	/* The starting byte offset on each child vdev. */
	uint64_t o = (b / dcols) << ashift;
	uint64_t q, r, c, bc, col, acols, scols, coff, devidx, asize, tot;

	raidz_map_t *rm =
	    kmem_zalloc(offsetof(raidz_map_t, rm_row[1]), KM_SLEEP);
	rm->rm_nrows = 1;

	/*
	 * "Quotient": The number of data sectors for this stripe on all but
	 * the "big column" child vdevs that also contain "remainder" data.
	 */
	q = s / (dcols - nparity);

	/*
	 * "Remainder": The number of partial stripe data sectors in this I/O.
	 * This will add a sector to some, but not all, child vdevs.
	 */
	r = s - q * (dcols - nparity);

	/* The number of "big columns" - those which contain remainder data. */
	bc = (r == 0 ? 0 : r + nparity);

	/*
	 * The total number of data and parity sectors associated with
	 * this I/O.
	 */
	tot = s + nparity * (q + (r == 0 ? 0 : 1));

	/*
	 * acols: The columns that will be accessed.
	 * scols: The columns that will be accessed or skipped.
	 */
	if (q == 0) {
		/* Our I/O request doesn't span all child vdevs. */
		acols = bc;
		scols = MIN(dcols, roundup(bc, nparity + 1));
	} else {
		acols = dcols;
		scols = dcols;
	}

	ASSERT3U(acols, <=, scols);

	rr = kmem_alloc(offsetof(raidz_row_t, rr_col[scols]), KM_SLEEP);
	rm->rm_row[0] = rr;

	rr->rr_cols = acols;
	rr->rr_scols = scols;
	rr->rr_missingdata = 0;
	rr->rr_missingparity = 0;
	rr->rr_firstdatacol = nparity;
	rr->rr_abd_empty = NULL;
	rr->rr_offset = zio->io_offset;
	rr->rr_size = zio->io_size;

	asize = 0;

	for (c = 0; c < scols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		col = f + c;
		coff = o;
		if (col >= dcols) {
			col -= dcols;
			coff += 1ULL << ashift;
		}
		rc->rc_devidx = col;
		rc->rc_offset = coff;
		rc->rc_abd = NULL;
		rc->rc_orig_data = NULL;
		rc->rc_force_repair = 0;
		rc->rc_allow_repair = 1;
		rc->rc_need_orig_restore = B_FALSE;

		if (c >= acols)
			rc->rc_size = 0;
		else if (c < bc)
			rc->rc_size = (q + 1) << ashift;
		else
			rc->rc_size = q << ashift;

		asize += rc->rc_size;
	}

	ASSERT3U(asize, ==, tot << ashift);
	rm->rm_nskip = roundup(tot, nparity + 1) - tot;
	rm->rm_skipstart = bc;

	for (c = 0; c < rr->rr_firstdatacol; c++)
		rr->rr_col[c].rc_abd =
		    abd_alloc_linear(rr->rr_col[c].rc_size, B_FALSE);

	for (uint64_t off = 0; c < acols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];
		rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
		    zio->io_abd, off, rc->rc_size);
		off += rc->rc_size;
	}

	/*
	 * If all data stored spans all columns, there's a danger that parity
	 * will always be on the same device and, since parity isn't read
	 * during normal operation, that device's I/O bandwidth won't be
	 * used effectively. We therefore switch the parity every 1MB.
	 *
	 * ... at least that was, ostensibly, the theory. As a practical
	 * matter unless we juggle the parity between all devices evenly, we
	 * won't see any benefit. Further, occasional writes that aren't a
	 * multiple of the LCM of the number of children and the minimum
	 * stripe width are sufficient to avoid pessimal behavior.
	 * Unfortunately, this decision created an implicit on-disk format
	 * requirement that we need to support for all eternity, but only
	 * for single-parity RAID-Z.
	 *
	 * If we intend to skip a sector in the zeroth column for padding
	 * we must make sure to note this swap. We will never intend to
	 * skip the first column since at least one data and one parity
	 * column must appear in each row.
	 */
	ASSERT(rr->rr_cols >= 2);
	ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);

	if (rr->rr_firstdatacol == 1 && (zio->io_offset & (1ULL << 20))) {
		devidx = rr->rr_col[0].rc_devidx;
		o = rr->rr_col[0].rc_offset;
		rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
		rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
		rr->rr_col[1].rc_devidx = devidx;
		rr->rr_col[1].rc_offset = o;

		if (rm->rm_skipstart == 0)
			rm->rm_skipstart = 1;
	}

	/* init RAIDZ parity ops */
	rm->rm_ops = vdev_raidz_math_get_ops();

	return (rm);
}
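/*
 * Illustrative sketch (not part of the original source): the stripe geometry
 * computed by vdev_raidz_map_alloc() above for a hypothetical 3 KB write to
 * a 6-wide RAIDZ2 with 512-byte sectors (ashift = 9).  Values and names are
 * for illustration only; the block is compiled out.
 */
#if 0	/* example only */
static void
raidz_geometry_example(void)
{
	uint64_t ashift = 9, dcols = 6, nparity = 2;
	uint64_t s = 3072 >> ashift;			/* 6 data sectors */
	uint64_t q = s / (dcols - nparity);		/* 1 full-width row */
	uint64_t r = s - q * (dcols - nparity);		/* 2 remainder sectors */
	uint64_t bc = (r == 0 ? 0 : r + nparity);	/* 4 "big columns" */
	uint64_t tot = s + nparity * (q + (r == 0 ? 0 : 1));	/* 10 sectors */
	uint64_t nskip = roundup(tot, nparity + 1) - tot;	/* 2 skip sectors */

	ASSERT3U(tot, ==, 10);
	ASSERT3U(nskip, ==, 2);
	(void) q; (void) bc;
}
#endif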
struct pqr_struct {
	uint64_t *p;
	uint64_t *q;
	uint64_t *r;
};

static int
vdev_raidz_p_func(void *buf, size_t size, void *private)
{
	struct pqr_struct *pqr = private;
	const uint64_t *src = buf;
	int i, cnt = size / sizeof (src[0]);

	ASSERT(pqr->p && !pqr->q && !pqr->r);

	for (i = 0; i < cnt; i++, src++, pqr->p++)
		*pqr->p ^= *src;

	return (0);
}

static int
vdev_raidz_pq_func(void *buf, size_t size, void *private)
{
	struct pqr_struct *pqr = private;
	const uint64_t *src = buf;
	uint64_t mask;
	int i, cnt = size / sizeof (src[0]);

	ASSERT(pqr->p && pqr->q && !pqr->r);

	for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++) {
		*pqr->p ^= *src;
		VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
		*pqr->q ^= *src;
	}

	return (0);
}

static int
vdev_raidz_pqr_func(void *buf, size_t size, void *private)
{
	struct pqr_struct *pqr = private;
	const uint64_t *src = buf;
	uint64_t mask;
	int i, cnt = size / sizeof (src[0]);

	ASSERT(pqr->p && pqr->q && pqr->r);

	for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++, pqr->r++) {
		*pqr->p ^= *src;
		VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
		*pqr->q ^= *src;
		VDEV_RAIDZ_64MUL_4(*pqr->r, mask);
		*pqr->r ^= *src;
	}

	return (0);
}
static void
vdev_raidz_generate_parity_p(raidz_row_t *rr)
{
	uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);

	for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		abd_t *src = rr->rr_col[c].rc_abd;

		if (c == rr->rr_firstdatacol) {
			abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
		} else {
			struct pqr_struct pqr = { p, NULL, NULL };
			(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
			    vdev_raidz_p_func, &pqr);
		}
	}
}
static void
vdev_raidz_generate_parity_pq(raidz_row_t *rr)
{
	uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
	uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
	uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
	ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
	    rr->rr_col[VDEV_RAIDZ_Q].rc_size);

	for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		abd_t *src = rr->rr_col[c].rc_abd;

		uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);

		if (c == rr->rr_firstdatacol) {
			ASSERT(ccnt == pcnt || ccnt == 0);
			abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
			(void) memcpy(q, p, rr->rr_col[c].rc_size);

			for (uint64_t i = ccnt; i < pcnt; i++) {
				p[i] = 0;
				q[i] = 0;
			}
		} else {
			struct pqr_struct pqr = { p, q, NULL };
			uint64_t mask;

			ASSERT(ccnt <= pcnt);
			(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
			    vdev_raidz_pq_func, &pqr);

			/*
			 * Treat short columns as though they are full of 0s.
			 * Note that there's therefore nothing needed for P.
			 */
			for (uint64_t i = ccnt; i < pcnt; i++) {
				VDEV_RAIDZ_64MUL_2(q[i], mask);
			}
		}
	}
}
static void
vdev_raidz_generate_parity_pqr(raidz_row_t *rr)
{
	uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
	uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
	uint64_t *r = abd_to_buf(rr->rr_col[VDEV_RAIDZ_R].rc_abd);
	uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
	ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
	    rr->rr_col[VDEV_RAIDZ_Q].rc_size);
	ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
	    rr->rr_col[VDEV_RAIDZ_R].rc_size);

	for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		abd_t *src = rr->rr_col[c].rc_abd;

		uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);

		if (c == rr->rr_firstdatacol) {
			ASSERT(ccnt == pcnt || ccnt == 0);
			abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
			(void) memcpy(q, p, rr->rr_col[c].rc_size);
			(void) memcpy(r, p, rr->rr_col[c].rc_size);

			for (uint64_t i = ccnt; i < pcnt; i++) {
				p[i] = 0;
				q[i] = 0;
				r[i] = 0;
			}
		} else {
			struct pqr_struct pqr = { p, q, r };
			uint64_t mask;

			ASSERT(ccnt <= pcnt);
			(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
			    vdev_raidz_pqr_func, &pqr);

			/*
			 * Treat short columns as though they are full of 0s.
			 * Note that there's therefore nothing needed for P.
			 */
			for (uint64_t i = ccnt; i < pcnt; i++) {
				VDEV_RAIDZ_64MUL_2(q[i], mask);
				VDEV_RAIDZ_64MUL_4(r[i], mask);
			}
		}
	}
}
/*
 * Generate RAID parity in the first virtual columns according to the number of
 * parity columns available.
 */
void
vdev_raidz_generate_parity_row(raidz_map_t *rm, raidz_row_t *rr)
{
	ASSERT3U(rr->rr_cols, !=, 0);

	/* Generate using the new math implementation */
	if (vdev_raidz_math_generate(rm, rr) != RAIDZ_ORIGINAL_IMPL)
		return;

	switch (rr->rr_firstdatacol) {
	case 1:
		vdev_raidz_generate_parity_p(rr);
		break;
	case 2:
		vdev_raidz_generate_parity_pq(rr);
		break;
	case 3:
		vdev_raidz_generate_parity_pqr(rr);
		break;
	default:
		cmn_err(CE_PANIC, "invalid RAID-Z configuration");
	}
}

void
vdev_raidz_generate_parity(raidz_map_t *rm)
{
	for (int i = 0; i < rm->rm_nrows; i++) {
		raidz_row_t *rr = rm->rm_row[i];
		vdev_raidz_generate_parity_row(rm, rr);
	}
}
static int
vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
{
	uint64_t *dst = dbuf;
	uint64_t *src = sbuf;
	int cnt = size / sizeof (src[0]);

	for (int i = 0; i < cnt; i++) {
		dst[i] ^= src[i];
	}

	return (0);
}
static int
vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
    void *private)
{
	uint64_t *dst = dbuf;
	uint64_t *src = sbuf;
	uint64_t mask;
	int cnt = size / sizeof (dst[0]);

	for (int i = 0; i < cnt; i++, dst++, src++) {
		VDEV_RAIDZ_64MUL_2(*dst, mask);
		*dst ^= *src;
	}

	return (0);
}

static int
vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
{
	uint64_t *dst = buf;
	uint64_t mask;
	int cnt = size / sizeof (dst[0]);

	for (int i = 0; i < cnt; i++, dst++) {
		/* same operation as vdev_raidz_reconst_q_pre_func() on dst */
		VDEV_RAIDZ_64MUL_2(*dst, mask);
	}

	return (0);
}

struct reconst_q_struct {
	uint64_t *q;
	int exp;
};

static int
vdev_raidz_reconst_q_post_func(void *buf, size_t size, void *private)
{
	struct reconst_q_struct *rq = private;
	uint64_t *dst = buf;
	int cnt = size / sizeof (dst[0]);

	for (int i = 0; i < cnt; i++, dst++, rq->q++) {
		int j;
		uint8_t *b;

		*dst ^= *rq->q;
		for (j = 0, b = (uint8_t *)dst; j < 8; j++, b++) {
			*b = vdev_raidz_exp2(*b, rq->exp);
		}
	}

	return (0);
}
{
632 vdev_raidz_reconst_pq_func(void *xbuf
, void *ybuf
, size_t size
, void *private)
634 struct reconst_pq_struct
*rpq
= private;
638 for (int i
= 0; i
< size
;
639 i
++, rpq
->p
++, rpq
->q
++, rpq
->pxy
++, rpq
->qxy
++, xd
++, yd
++) {
640 *xd
= vdev_raidz_exp2(*rpq
->p
^ *rpq
->pxy
, rpq
->aexp
) ^
641 vdev_raidz_exp2(*rpq
->q
^ *rpq
->qxy
, rpq
->bexp
);
642 *yd
= *rpq
->p
^ *rpq
->pxy
^ *xd
;
649 vdev_raidz_reconst_pq_tail_func(void *xbuf
, size_t size
, void *private)
651 struct reconst_pq_struct
*rpq
= private;
654 for (int i
= 0; i
< size
;
655 i
++, rpq
->p
++, rpq
->q
++, rpq
->pxy
++, rpq
->qxy
++, xd
++) {
656 /* same operation as vdev_raidz_reconst_pq_func() on xd */
657 *xd
= vdev_raidz_exp2(*rpq
->p
^ *rpq
->pxy
, rpq
->aexp
) ^
658 vdev_raidz_exp2(*rpq
->q
^ *rpq
->qxy
, rpq
->bexp
);
static void
vdev_raidz_reconstruct_p(raidz_row_t *rr, int *tgts, int ntgts)
{
	int x = tgts[0];
	abd_t *dst, *src;

	ASSERT3U(ntgts, ==, 1);
	ASSERT3U(x, >=, rr->rr_firstdatacol);
	ASSERT3U(x, <, rr->rr_cols);

	ASSERT3U(rr->rr_col[x].rc_size, <=, rr->rr_col[VDEV_RAIDZ_P].rc_size);

	src = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
	dst = rr->rr_col[x].rc_abd;

	abd_copy_from_buf(dst, abd_to_buf(src), rr->rr_col[x].rc_size);

	for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		uint64_t size = MIN(rr->rr_col[x].rc_size,
		    rr->rr_col[c].rc_size);

		if (c == x)
			continue;

		src = rr->rr_col[c].rc_abd;
		dst = rr->rr_col[x].rc_abd;

		(void) abd_iterate_func2(dst, src, 0, 0, size,
		    vdev_raidz_reconst_p_func, NULL);
	}
}
static void
vdev_raidz_reconstruct_q(raidz_row_t *rr, int *tgts, int ntgts)
{
	int x = tgts[0];
	int c, exp;
	abd_t *dst, *src;

	ASSERT3U(ntgts, ==, 1);

	ASSERT(rr->rr_col[x].rc_size <= rr->rr_col[VDEV_RAIDZ_Q].rc_size);

	for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		uint64_t size = (c == x) ? 0 : MIN(rr->rr_col[x].rc_size,
		    rr->rr_col[c].rc_size);

		src = rr->rr_col[c].rc_abd;
		dst = rr->rr_col[x].rc_abd;

		if (c == rr->rr_firstdatacol) {
			abd_copy(dst, src, size);
			if (rr->rr_col[x].rc_size > size) {
				abd_zero_off(dst, size,
				    rr->rr_col[x].rc_size - size);
			}
		} else {
			ASSERT3U(size, <=, rr->rr_col[x].rc_size);
			(void) abd_iterate_func2(dst, src, 0, 0, size,
			    vdev_raidz_reconst_q_pre_func, NULL);
			(void) abd_iterate_func(dst,
			    size, rr->rr_col[x].rc_size - size,
			    vdev_raidz_reconst_q_pre_tail_func, NULL);
		}
	}

	src = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
	dst = rr->rr_col[x].rc_abd;
	exp = 255 - (rr->rr_cols - 1 - x);

	struct reconst_q_struct rq = { abd_to_buf(src), exp };
	(void) abd_iterate_func(dst, 0, rr->rr_col[x].rc_size,
	    vdev_raidz_reconst_q_post_func, &rq);
}
static void
vdev_raidz_reconstruct_pq(raidz_row_t *rr, int *tgts, int ntgts)
{
	uint8_t *p, *q, *pxy, *qxy, tmp, a, b, aexp, bexp;
	abd_t *pdata, *qdata;
	uint64_t xsize, ysize;
	int x = tgts[0];
	int y = tgts[1];
	abd_t *xd, *yd;

	ASSERT3U(ntgts, ==, 2);
	ASSERT(x < y);
	ASSERT(x >= rr->rr_firstdatacol);
	ASSERT(y < rr->rr_cols);

	ASSERT(rr->rr_col[x].rc_size >= rr->rr_col[y].rc_size);

	/*
	 * Move the parity data aside -- we're going to compute parity as
	 * though columns x and y were full of zeros -- Pxy and Qxy. We want to
	 * reuse the parity generation mechanism without trashing the actual
	 * parity so we make those columns appear to be full of zeros by
	 * setting their lengths to zero.
	 */
	pdata = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
	qdata = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
	xsize = rr->rr_col[x].rc_size;
	ysize = rr->rr_col[y].rc_size;

	rr->rr_col[VDEV_RAIDZ_P].rc_abd =
	    abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_P].rc_size, B_TRUE);
	rr->rr_col[VDEV_RAIDZ_Q].rc_abd =
	    abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_Q].rc_size, B_TRUE);
	rr->rr_col[x].rc_size = 0;
	rr->rr_col[y].rc_size = 0;

	vdev_raidz_generate_parity_pq(rr);

	rr->rr_col[x].rc_size = xsize;
	rr->rr_col[y].rc_size = ysize;

	p = abd_to_buf(pdata);
	q = abd_to_buf(qdata);
	pxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
	qxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
	xd = rr->rr_col[x].rc_abd;
	yd = rr->rr_col[y].rc_abd;

	/*
	 * We now have:
	 *	Pxy = P + D_x + D_y
	 *	Qxy = Q + 2^(ndevs - 1 - x) * D_x + 2^(ndevs - 1 - y) * D_y
	 *
	 * We can then solve for D_x:
	 *	D_x = A * (P + Pxy) + B * (Q + Qxy)
	 * where
	 *	A = 2^(x - y) * (2^(x - y) + 1)^-1
	 *	B = 2^(ndevs - 1 - x) * (2^(x - y) + 1)^-1
	 *
	 * With D_x in hand, we can easily solve for D_y:
	 *	D_y = P + Pxy + D_x
	 */

	a = vdev_raidz_pow2[255 + x - y];
	b = vdev_raidz_pow2[255 - (rr->rr_cols - 1 - x)];
	tmp = 255 - vdev_raidz_log2[a ^ 1];

	aexp = vdev_raidz_log2[vdev_raidz_exp2(a, tmp)];
	bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)];

	ASSERT3U(xsize, >=, ysize);
	struct reconst_pq_struct rpq = { p, q, pxy, qxy, aexp, bexp };

	(void) abd_iterate_func2(xd, yd, 0, 0, ysize,
	    vdev_raidz_reconst_pq_func, &rpq);
	(void) abd_iterate_func(xd, ysize, xsize - ysize,
	    vdev_raidz_reconst_pq_tail_func, &rpq);

	abd_free(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
	abd_free(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);

	/*
	 * Restore the saved parity data.
	 */
	rr->rr_col[VDEV_RAIDZ_P].rc_abd = pdata;
	rr->rr_col[VDEV_RAIDZ_Q].rc_abd = qdata;
}
/*
 * In the general case of reconstruction, we must solve the system of linear
 * equations defined by the coefficients used to generate parity as well as
 * the contents of the data and parity disks. This can be expressed with
 * vectors for the original data (D) and the actual data (d) and parity (p)
 * and a matrix composed of the identity matrix (I) and a dispersal matrix (V):
 *
 *            __   __                  __     __
 *           |     |    __      __    |  p_0   |
 *           |  V  |   |  D_0    |    |   :    |
 *           |     | x |   :     |  = | p_m-1  |
 *           |  I  |   |   :     |    |  d_0   |
 *           |     |   |  D_n-1  |    |   :    |
 *           ~~   ~~    ~~      ~~    | d_n-1  |
 *                                    ~~      ~~
 *
 * I is simply a square identity matrix of size n, and V is a vandermonde
 * matrix defined by the coefficients we chose for the various parity columns
 * (1, 2, 4). Note that these values were chosen both for simplicity, speedy
 * computation as well as linear separability.
 *
 *      __                    __                  __     __
 *     |  1    ..  1   1   1   |                 |  p_0   |
 *     | 2^n-1 ..  4   2   1   |    __      __   |   :    |
 *     | 4^n-1 ..  16  4   1   |   |  D_0    |   | p_m-1  |
 *     |  1    ..  0   0   0   |   |  D_1    |   |  d_0   |
 *     |  0    ..  0   0   0   | x |  D_2    | = |  d_1   |
 *     |  :        :   :   :   |   |   :     |   |  d_2   |
 *     |  0    ..  1   0   0   |   |  D_n-1  |   |   :    |
 *     |  0    ..  0   1   0   |    ~~      ~~   |   :    |
 *     |  0    ..  0   0   1   |                 | d_n-1  |
 *      ~~                    ~~                  ~~     ~~
 *
 * Note that I, V, d, and p are known. To compute D, we must invert the
 * matrix and use the known data and parity values to reconstruct the unknown
 * data values. We begin by removing the rows in V|I and d|p that correspond
 * to failed or missing columns; we then make V|I square (n x n) and d|p
 * sized n by removing rows corresponding to unused parity from the bottom up
 * to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)'
 * using Gauss-Jordan elimination. In the example below we use m=3 parity
 * columns, n=8 data columns, with errors in d_1, d_2, and p_1:
 *
 *           __                               __
 *          |  1   1   1   1   1   1   1   1  |
 *          | 128  64  32  16  8   4   2   1  | <-----+-+-- missing disks
 *          |  19 205 116  29  64  16  4   1  |      / /
 *          |  1   0   0   0   0   0   0   0  |     / /
 *          |  0   1   0   0   0   0   0   0  | <--' /
 * (V|I)  = |  0   0   1   0   0   0   0   0  | <---'
 *          |  0   0   0   1   0   0   0   0  |
 *          |  0   0   0   0   1   0   0   0  |
 *          |  0   0   0   0   0   1   0   0  |
 *          |  0   0   0   0   0   0   1   0  |
 *          |  0   0   0   0   0   0   0   1  |
 *          ~~                               ~~
 *
 *           __                               __
 *          |  1   1   1   1   1   1   1   1  |
 *          | 128  64  32  16  8   4   2   1  |
 *          |  19 205 116  29  64  16  4   1  |
 *          |  1   0   0   0   0   0   0   0  |
 *          |  0   1   0   0   0   0   0   0  |
 * (V|I)' = |  0   0   1   0   0   0   0   0  |
 *          |  0   0   0   1   0   0   0   0  |
 *          |  0   0   0   0   1   0   0   0  |
 *          |  0   0   0   0   0   1   0   0  |
 *          |  0   0   0   0   0   0   1   0  |
 *          |  0   0   0   0   0   0   0   1  |
 *          ~~                               ~~
 *
 * Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We
 * have carefully chosen the seed values 1, 2, and 4 to ensure that this
 * matrix is not singular.
 *
 *  |  1   1   1   1   1   1   1   1     1   0   0   0   0   0   0   0  |
 *  |  19 205 116  29  64  16   4   1     0   1   0   0   0   0   0   0  |
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  1   1   1   1   1   1   1   1     1   0   0   0   0   0   0   0  |
 *  |  19 205 116  29  64  16   4   1     0   1   0   0   0   0   0   0  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1  |
 *  |  0  205 116   0   0   0   0   0     0   1  19  29  64  16   4   1  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1  |
 *  |  0   0  185   0   0   0   0   0   205   1 222 208 141 221 201 204  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1  |
 *  |  0   0   1   0   0   0   0   0   166 100   4  40 158 168 216 209  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   1   0   0   0   0   0   0   167 100   5  41 159 169 217 208  |
 *  |  0   0   1   0   0   0   0   0   166 100   4  40 158 168 216 209  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *
 *                |  0   0   1   0   0   0   0   0  |
 *                | 167 100   5  41 159 169 217 208  |
 *                | 166 100   4  40 158 168 216 209  |
 *  (V|I)'^-1  =  |  0   0   0   1   0   0   0   0  |
 *                |  0   0   0   0   1   0   0   0  |
 *                |  0   0   0   0   0   1   0   0  |
 *                |  0   0   0   0   0   0   1   0  |
 *                |  0   0   0   0   0   0   0   1  |
 *
 * We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values
 * of the missing data.
 *
 * As is apparent from the example above, the only non-trivial rows in the
 * inverse matrix correspond to the data disks that we're trying to
 * reconstruct. Indeed, those are the only rows we need as the others would
 * only be useful for reconstructing data known or assumed to be valid. For
 * that reason, we only build the coefficients in the rows that correspond to
 * targeted columns.
 */
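/*
 * Illustrative sketch (not part of the original source): Gauss-Jordan
 * elimination over GF(2^8) on a tiny 2x2 system, the same procedure the
 * matrix-inversion code below applies to the rows of interest.  It relies
 * only on the vdev_raidz_pow2/vdev_raidz_log2 tables used elsewhere in this
 * file; gf_mul_example() and the 2x2 matrix are hypothetical.  The block is
 * compiled out.
 */
#if 0	/* example only */
static uint8_t
gf_mul_example(uint8_t a, uint8_t b)
{
	int l;

	if (a == 0 || b == 0)
		return (0);
	if ((l = vdev_raidz_log2[a] + vdev_raidz_log2[b]) >= 255)
		l -= 255;
	return (vdev_raidz_pow2[l]);
}

static void
gf_invert_2x2_example(void)
{
	/* Coefficients of P and Q for two missing data columns. */
	uint8_t m[2][2] = { { 1, 1 }, { 2, 1 } };
	uint8_t inv[2][2] = { { 1, 0 }, { 0, 1 } };

	for (int i = 0; i < 2; i++) {
		/* Scale row i so that the pivot becomes 1. */
		uint8_t piv = m[i][i];
		uint8_t pivinv = (piv == 1) ? 1 :
		    vdev_raidz_pow2[255 - vdev_raidz_log2[piv]];
		for (int j = 0; j < 2; j++) {
			m[i][j] = gf_mul_example(m[i][j], pivinv);
			inv[i][j] = gf_mul_example(inv[i][j], pivinv);
		}
		/* Eliminate the pivot column from the other row. */
		for (int ii = 0; ii < 2; ii++) {
			if (ii == i || m[ii][i] == 0)
				continue;
			uint8_t f = m[ii][i];
			for (int j = 0; j < 2; j++) {
				m[ii][j] ^= gf_mul_example(f, m[i][j]);
				inv[ii][j] ^= gf_mul_example(f, inv[i][j]);
			}
		}
	}
	/* m is now the identity and inv holds the inverse. */
	ASSERT3U(m[0][0], ==, 1);
	ASSERT3U(m[1][1], ==, 1);
	(void) inv;
}
#endif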
static void
vdev_raidz_matrix_init(raidz_row_t *rr, int n, int nmap, int *map,
    uint8_t **rows)
{
	int i, j;
	int pow;

	ASSERT(n == rr->rr_cols - rr->rr_firstdatacol);

	/*
	 * Fill in the missing rows of interest.
	 */
	for (i = 0; i < nmap; i++) {
		ASSERT3S(0, <=, map[i]);
		ASSERT3S(map[i], <=, 2);

		pow = map[i] * n;
		if (pow > 255)
			pow -= 255;
		ASSERT(pow <= 255);

		for (j = 0; j < n; j++) {
			pow -= map[i];
			if (pow < 0)
				pow += 255;

			rows[i][j] = vdev_raidz_pow2[pow];
		}
	}
}
static void
vdev_raidz_matrix_invert(raidz_row_t *rr, int n, int nmissing, int *missing,
    uint8_t **rows, uint8_t **invrows, const uint8_t *used)
{
	int i, j, ii, jj;
	uint8_t log;

	/*
	 * Assert that the first nmissing entries from the array of used
	 * columns correspond to parity columns and that subsequent entries
	 * correspond to data columns.
	 */
	for (i = 0; i < nmissing; i++) {
		ASSERT3S(used[i], <, rr->rr_firstdatacol);
	}
	for (; i < n; i++) {
		ASSERT3S(used[i], >=, rr->rr_firstdatacol);
	}

	/*
	 * First initialize the storage where we'll compute the inverse rows.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < n; j++) {
			invrows[i][j] = (i == j) ? 1 : 0;
		}
	}

	/*
	 * Subtract all trivial rows from the rows of consequence.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = nmissing; j < n; j++) {
			ASSERT3U(used[j], >=, rr->rr_firstdatacol);
			jj = used[j] - rr->rr_firstdatacol;
			invrows[i][j] = rows[i][jj];
			rows[i][jj] = 0;
		}
	}

	/*
	 * For each of the rows of interest, we must normalize it and subtract
	 * a multiple of it from the other rows.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < missing[i]; j++) {
			ASSERT0(rows[i][j]);
		}
		ASSERT3U(rows[i][missing[i]], !=, 0);

		/*
		 * Compute the inverse of the first element and multiply each
		 * element in the row by that value.
		 */
		log = 255 - vdev_raidz_log2[rows[i][missing[i]]];

		for (j = 0; j < n; j++) {
			rows[i][j] = vdev_raidz_exp2(rows[i][j], log);
			invrows[i][j] = vdev_raidz_exp2(invrows[i][j], log);
		}

		for (ii = 0; ii < nmissing; ii++) {
			if (i == ii)
				continue;

			ASSERT3U(rows[ii][missing[i]], !=, 0);

			log = vdev_raidz_log2[rows[ii][missing[i]]];

			for (j = 0; j < n; j++) {
				rows[ii][j] ^=
				    vdev_raidz_exp2(rows[i][j], log);
				invrows[ii][j] ^=
				    vdev_raidz_exp2(invrows[i][j], log);
			}
		}
	}

	/*
	 * Verify that the data that is left in the rows are properly part of
	 * an identity matrix.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < n; j++) {
			if (j == missing[i]) {
				ASSERT3U(rows[i][j], ==, 1);
			} else {
				ASSERT0(rows[i][j]);
			}
		}
	}
}
static void
vdev_raidz_matrix_reconstruct(raidz_row_t *rr, int n, int nmissing,
    int *missing, uint8_t **invrows, const uint8_t *used)
{
	int i, j, x, cc, c;
	uint8_t *src;
	uint64_t ccount;
	uint8_t *dst[VDEV_RAIDZ_MAXPARITY] = { NULL };
	uint64_t dcount[VDEV_RAIDZ_MAXPARITY] = { 0 };
	uint8_t log = 0;
	uint8_t val;
	int ll;
	uint8_t *invlog[VDEV_RAIDZ_MAXPARITY];
	uint8_t *p, *pp;
	size_t psize;

	psize = sizeof (invlog[0][0]) * n * nmissing;
	p = kmem_alloc(psize, KM_SLEEP);

	for (pp = p, i = 0; i < nmissing; i++) {
		invlog[i] = pp;
		pp += n;
	}

	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < n; j++) {
			ASSERT3U(invrows[i][j], !=, 0);
			invlog[i][j] = vdev_raidz_log2[invrows[i][j]];
		}
	}

	for (i = 0; i < n; i++) {
		c = used[i];
		ASSERT3U(c, <, rr->rr_cols);

		ccount = rr->rr_col[c].rc_size;
		ASSERT(ccount >= rr->rr_col[missing[0]].rc_size || i > 0);
		if (ccount == 0)
			continue;
		src = abd_to_buf(rr->rr_col[c].rc_abd);
		for (j = 0; j < nmissing; j++) {
			cc = missing[j] + rr->rr_firstdatacol;
			ASSERT3U(cc, >=, rr->rr_firstdatacol);
			ASSERT3U(cc, <, rr->rr_cols);
			ASSERT3U(cc, !=, c);

			dcount[j] = rr->rr_col[cc].rc_size;
			if (dcount[j] != 0)
				dst[j] = abd_to_buf(rr->rr_col[cc].rc_abd);
		}

		for (x = 0; x < ccount; x++, src++) {
			if (*src != 0)
				log = vdev_raidz_log2[*src];

			for (cc = 0; cc < nmissing; cc++) {
				if (x >= dcount[cc])
					continue;

				if (*src == 0) {
					val = 0;
				} else {
					if ((ll = log + invlog[cc][i]) >= 255)
						ll -= 255;
					val = vdev_raidz_pow2[ll];
				}

				if (i == 0)
					dst[cc][x] = val;
				else
					dst[cc][x] ^= val;
			}
		}
	}

	kmem_free(p, psize);
}
static void
vdev_raidz_reconstruct_general(raidz_row_t *rr, int *tgts, int ntgts)
{
	int n, i, c, t, tt;
	int nmissing_rows;
	int missing_rows[VDEV_RAIDZ_MAXPARITY];
	int parity_map[VDEV_RAIDZ_MAXPARITY];
	uint8_t *p, *pp;
	size_t psize;
	uint8_t *rows[VDEV_RAIDZ_MAXPARITY];
	uint8_t *invrows[VDEV_RAIDZ_MAXPARITY];
	uint8_t *used;

	abd_t **bufs = NULL;

	/*
	 * Matrix reconstruction can't use scatter ABDs yet, so we allocate
	 * temporary linear ABDs if any non-linear ABDs are found.
	 */
	for (i = rr->rr_firstdatacol; i < rr->rr_cols; i++) {
		if (!abd_is_linear(rr->rr_col[i].rc_abd)) {
			bufs = kmem_alloc(rr->rr_cols * sizeof (abd_t *),
			    KM_PUSHPAGE);

			for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
				raidz_col_t *col = &rr->rr_col[c];

				bufs[c] = col->rc_abd;
				if (bufs[c] != NULL) {
					col->rc_abd = abd_alloc_linear(
					    col->rc_size, B_TRUE);
					abd_copy(col->rc_abd, bufs[c],
					    col->rc_size);
				}
			}

			break;
		}
	}

	n = rr->rr_cols - rr->rr_firstdatacol;

	/*
	 * Figure out which data columns are missing.
	 */
	nmissing_rows = 0;
	for (t = 0; t < ntgts; t++) {
		if (tgts[t] >= rr->rr_firstdatacol) {
			missing_rows[nmissing_rows++] =
			    tgts[t] - rr->rr_firstdatacol;
		}
	}

	/*
	 * Figure out which parity columns to use to help generate the missing
	 * data columns.
	 */
	for (tt = 0, c = 0, i = 0; i < nmissing_rows; c++) {
		ASSERT(tt < ntgts);
		ASSERT(c < rr->rr_firstdatacol);

		/*
		 * Skip any targeted parity columns.
		 */
		if (c == tgts[tt]) {
			tt++;
			continue;
		}

		parity_map[i] = c;
		i++;
	}

	psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
	    nmissing_rows * n + sizeof (used[0]) * n;
	p = kmem_alloc(psize, KM_SLEEP);

	for (pp = p, i = 0; i < nmissing_rows; i++) {
		rows[i] = pp;
		pp += n;
		invrows[i] = pp;
		pp += n;
	}
	used = pp;

	for (i = 0; i < nmissing_rows; i++) {
		used[i] = parity_map[i];
	}

	for (tt = 0, c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		if (tt < nmissing_rows &&
		    c == missing_rows[tt] + rr->rr_firstdatacol) {
			tt++;
			continue;
		}

		used[i] = c;
		i++;
	}

	/*
	 * Initialize the interesting rows of the matrix.
	 */
	vdev_raidz_matrix_init(rr, n, nmissing_rows, parity_map, rows);

	/*
	 * Invert the matrix.
	 */
	vdev_raidz_matrix_invert(rr, n, nmissing_rows, missing_rows, rows,
	    invrows, used);

	/*
	 * Reconstruct the missing data using the generated matrix.
	 */
	vdev_raidz_matrix_reconstruct(rr, n, nmissing_rows, missing_rows,
	    invrows, used);

	kmem_free(p, psize);

	/*
	 * copy back from temporary linear abds and free them
	 */
	if (bufs) {
		for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
			raidz_col_t *col = &rr->rr_col[c];

			if (bufs[c] != NULL) {
				abd_copy(bufs[c], col->rc_abd, col->rc_size);
				abd_free(col->rc_abd);
			}
			col->rc_abd = bufs[c];
		}
		kmem_free(bufs, rr->rr_cols * sizeof (abd_t *));
	}
}
static void
vdev_raidz_reconstruct_row(raidz_map_t *rm, raidz_row_t *rr,
    const int *t, int nt)
{
	int tgts[VDEV_RAIDZ_MAXPARITY], *dt;
	int ntgts;
	int i, c, ret;
	int nbadparity, nbaddata;
	int parity_valid[VDEV_RAIDZ_MAXPARITY];

	nbadparity = rr->rr_firstdatacol;
	nbaddata = rr->rr_cols - nbadparity;
	ntgts = 0;
	for (i = 0, c = 0; c < rr->rr_cols; c++) {
		if (c < rr->rr_firstdatacol)
			parity_valid[c] = B_FALSE;

		if (i < nt && c == t[i]) {
			tgts[ntgts++] = c;
			i++;
		} else if (rr->rr_col[c].rc_error != 0) {
			tgts[ntgts++] = c;
		} else if (c >= rr->rr_firstdatacol) {
			nbaddata--;
		} else {
			parity_valid[c] = B_TRUE;
			nbadparity--;
		}
	}

	ASSERT(ntgts >= nt);
	ASSERT(nbaddata >= 0);
	ASSERT(nbaddata + nbadparity == ntgts);

	dt = &tgts[nbadparity];

	/* Reconstruct using the new math implementation */
	ret = vdev_raidz_math_reconstruct(rm, rr, parity_valid, dt, nbaddata);
	if (ret != RAIDZ_ORIGINAL_IMPL)
		return;

	/*
	 * See if we can use any of our optimized reconstruction routines.
	 */
	switch (nbaddata) {
	case 1:
		if (parity_valid[VDEV_RAIDZ_P]) {
			vdev_raidz_reconstruct_p(rr, dt, 1);
			return;
		}

		ASSERT(rr->rr_firstdatacol > 1);

		if (parity_valid[VDEV_RAIDZ_Q]) {
			vdev_raidz_reconstruct_q(rr, dt, 1);
			return;
		}

		ASSERT(rr->rr_firstdatacol > 2);
		break;

	case 2:
		ASSERT(rr->rr_firstdatacol > 1);

		if (parity_valid[VDEV_RAIDZ_P] &&
		    parity_valid[VDEV_RAIDZ_Q]) {
			vdev_raidz_reconstruct_pq(rr, dt, 2);
			return;
		}

		ASSERT(rr->rr_firstdatacol > 2);
		break;
	}

	vdev_raidz_reconstruct_general(rr, tgts, ntgts);
}
static int
vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	vdev_raidz_t *vdrz = vd->vdev_tsd;
	uint64_t nparity = vdrz->vd_nparity;
	int c;
	int lasterror = 0;
	int numerrors = 0;

	ASSERT(nparity > 0);

	if (nparity > VDEV_RAIDZ_MAXPARITY ||
	    vd->vdev_children < nparity + 1) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error != 0) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
		*physical_ashift = MAX(*physical_ashift,
		    cvd->vdev_physical_ashift);
	}

	*asize *= vd->vdev_children;
	*max_asize *= vd->vdev_children;

	if (numerrors > nparity) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}
static void
vdev_raidz_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c] != NULL)
			vdev_close(vd->vdev_child[c]);
	}
}
static uint64_t
vdev_raidz_asize(vdev_t *vd, uint64_t psize)
{
	vdev_raidz_t *vdrz = vd->vdev_tsd;
	uint64_t asize;
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t cols = vdrz->vd_logical_width;
	uint64_t nparity = vdrz->vd_nparity;

	asize = ((psize - 1) >> ashift) + 1;
	asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
	asize = roundup(asize, nparity + 1) << ashift;

	return (asize);
}
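/*
 * Illustrative sketch (not part of the original source): the asize
 * computation above for a hypothetical 16 KB block on a 6-wide RAIDZ2 with
 * 512-byte sectors.  32 data sectors need ceil(32/4) = 8 rows, so 16 parity
 * sectors are added and the result is padded to a multiple of nparity + 1
 * sectors: 48 sectors, or 24 KB of allocated space.  Compiled out.
 */
#if 0	/* example only */
static void
raidz_asize_example(void)
{
	uint64_t psize = 16384, ashift = 9, cols = 6, nparity = 2;
	uint64_t asize = ((psize - 1) >> ashift) + 1;		/* 32 sectors */

	asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
	asize = roundup(asize, nparity + 1) << ashift;
	ASSERT3U(asize, ==, 24576);
}
#endif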
/*
 * The allocatable space for a raidz vdev is N * sizeof(smallest child)
 * so each child must provide at least 1/Nth of its asize.
 */
static uint64_t
vdev_raidz_min_asize(vdev_t *vd)
{
	return ((vd->vdev_min_asize + vd->vdev_children - 1) /
	    vd->vdev_children);
}
void
vdev_raidz_child_done(zio_t *zio)
{
	raidz_col_t *rc = zio->io_private;

	rc->rc_error = zio->io_error;
	rc->rc_tried = 1;
	rc->rc_skipped = 0;
}
static void
vdev_raidz_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
	vdev_t *tvd = vd->vdev_top;

	range_seg64_t logical_rs, physical_rs, remain_rs;
	logical_rs.rs_start = rr->rr_offset;
	logical_rs.rs_end = logical_rs.rs_start +
	    vdev_raidz_asize(vd, rr->rr_size);

	raidz_col_t *rc = &rr->rr_col[col];
	vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

	vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
	ASSERT(vdev_xlate_is_empty(&remain_rs));
	ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
	ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
	/*
	 * It would be nice to assert that rs_end is equal
	 * to rc_offset + rc_size but there might be an
	 * optional I/O at the end that is not accounted in
	 * rc_size.
	 */
	if (physical_rs.rs_end > rc->rc_offset + rc->rc_size) {
		ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset +
		    rc->rc_size + (1 << tvd->vdev_ashift));
	} else {
		ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset + rc->rc_size);
	}
#endif
}
static void
vdev_raidz_io_start_write(zio_t *zio, raidz_row_t *rr, uint64_t ashift)
{
	vdev_t *vd = zio->io_vd;
	raidz_map_t *rm = zio->io_vsd;
	int c, i;

	vdev_raidz_generate_parity_row(rm, rr);

	for (int c = 0; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];
		if (rc->rc_size == 0)
			continue;

		/* Verify physical to logical translation */
		vdev_raidz_io_verify(vd, rr, c);

		zio_nowait(zio_vdev_child_io(zio, NULL,
		    vd->vdev_child[rc->rc_devidx], rc->rc_offset,
		    rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
		    0, vdev_raidz_child_done, rc));
	}

	/*
	 * Generate optional I/Os for skip sectors to improve aggregation
	 * contiguity.
	 */
	for (c = rm->rm_skipstart, i = 0; i < rm->rm_nskip; c++, i++) {
		ASSERT(c <= rr->rr_scols);
		if (c == rr->rr_scols)
			c = 0;

		raidz_col_t *rc = &rr->rr_col[c];
		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

		zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
		    rc->rc_offset + rc->rc_size, NULL, 1ULL << ashift,
		    zio->io_type, zio->io_priority,
		    ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL, NULL));
	}
}
static void
vdev_raidz_io_start_read(zio_t *zio, raidz_row_t *rr)
{
	vdev_t *vd = zio->io_vd;

	/*
	 * Iterate over the columns in reverse order so that we hit the parity
	 * last -- any errors along the way will force us to read the parity.
	 */
	for (int c = rr->rr_cols - 1; c >= 0; c--) {
		raidz_col_t *rc = &rr->rr_col[c];
		if (rc->rc_size == 0)
			continue;
		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
		if (!vdev_readable(cvd)) {
			if (c >= rr->rr_firstdatacol)
				rr->rr_missingdata++;
			else
				rr->rr_missingparity++;
			rc->rc_error = SET_ERROR(ENXIO);
			rc->rc_tried = 1;	/* don't even try */
			rc->rc_skipped = 1;
			continue;
		}
		if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
			if (c >= rr->rr_firstdatacol)
				rr->rr_missingdata++;
			else
				rr->rr_missingparity++;
			rc->rc_error = SET_ERROR(ESTALE);
			rc->rc_skipped = 1;
			continue;
		}
		if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
		    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_raidz_child_done, rc));
		}
	}
}
/*
 * Start an IO operation on a RAIDZ VDev
 *
 * Outline:
 * - For write operations:
 *   1. Generate the parity data
 *   2. Create child zio write operations to each column's vdev, for both
 *      data and parity.
 *   3. If the column skips any sectors for padding, create optional dummy
 *      write zio children for those areas to improve aggregation continuity.
 * - For read operations:
 *   1. Create child zio read operations to each data column's vdev to read
 *      the range of data required for zio.
 *   2. If this is a scrub or resilver operation, or if any of the data
 *      vdevs have had errors, then create zio read operations to the parity
 *      columns' VDevs as well.
 */
static void
vdev_raidz_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd->vdev_top;
	vdev_raidz_t *vdrz = vd->vdev_tsd;

	raidz_map_t *rm = vdev_raidz_map_alloc(zio, tvd->vdev_ashift,
	    vdrz->vd_logical_width, vdrz->vd_nparity);
	zio->io_vsd = rm;
	zio->io_vsd_ops = &vdev_raidz_vsd_ops;

	/*
	 * Until raidz expansion is implemented all maps for a raidz vdev
	 * contain a single row.
	 */
	ASSERT3U(rm->rm_nrows, ==, 1);
	raidz_row_t *rr = rm->rm_row[0];

	if (zio->io_type == ZIO_TYPE_WRITE) {
		vdev_raidz_io_start_write(zio, rr, tvd->vdev_ashift);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_READ);
		vdev_raidz_io_start_read(zio, rr);
	}

	zio_execute(zio);
}
/*
 * Report a checksum error for a child of a RAID-Z device.
 */
static void
raidz_checksum_error(zio_t *zio, raidz_col_t *rc, abd_t *bad_data)
{
	vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];

	if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE) &&
	    zio->io_priority != ZIO_PRIORITY_REBUILD) {
		zio_bad_cksum_t zbc;
		raidz_map_t *rm = zio->io_vsd;

		zbc.zbc_has_cksum = 0;
		zbc.zbc_injected = rm->rm_ecksuminjected;

		(void) zfs_ereport_post_checksum(zio->io_spa, vd,
		    &zio->io_bookmark, zio, rc->rc_offset, rc->rc_size,
		    rc->rc_abd, bad_data, &zbc);
		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_checksum_errors++;
		mutex_exit(&vd->vdev_stat_lock);
	}
}
/*
 * We keep track of whether or not there were any injected errors, so that
 * any ereports we generate can note it.
 */
static int
raidz_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t zbc;
	raidz_map_t *rm = zio->io_vsd;

	bzero(&zbc, sizeof (zio_bad_cksum_t));

	int ret = zio_checksum_error(zio, &zbc);
	if (ret != 0 && zbc.zbc_injected != 0)
		rm->rm_ecksuminjected = 1;

	return (ret);
}
/*
 * Generate the parity from the data columns. If we tried and were able to
 * read the parity without error, verify that the generated parity matches the
 * data we read. If it doesn't, we fire off a checksum error. Return the
 * number of such failures.
 */
static int
raidz_parity_verify(zio_t *zio, raidz_row_t *rr)
{
	abd_t *orig[VDEV_RAIDZ_MAXPARITY];
	int c, ret = 0;
	raidz_map_t *rm = zio->io_vsd;
	raidz_col_t *rc;

	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum = (bp == NULL ? zio->io_prop.zp_checksum :
	    (BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));

	if (checksum == ZIO_CHECKSUM_NOPARITY)
		return (ret);

	for (c = 0; c < rr->rr_firstdatacol; c++) {
		rc = &rr->rr_col[c];
		if (!rc->rc_tried || rc->rc_error != 0)
			continue;

		orig[c] = abd_alloc_sametype(rc->rc_abd, rc->rc_size);
		abd_copy(orig[c], rc->rc_abd, rc->rc_size);
	}

	/*
	 * Regenerates parity even for !tried||rc_error!=0 columns. This
	 * isn't harmful but it does have the side effect of fixing stuff
	 * we didn't realize was necessary (i.e. even if we return 0).
	 */
	vdev_raidz_generate_parity_row(rm, rr);

	for (c = 0; c < rr->rr_firstdatacol; c++) {
		rc = &rr->rr_col[c];

		if (!rc->rc_tried || rc->rc_error != 0)
			continue;

		if (abd_cmp(orig[c], rc->rc_abd) != 0) {
			raidz_checksum_error(zio, rc, orig[c]);
			rc->rc_error = SET_ERROR(ECKSUM);
			ret++;
		}
		abd_free(orig[c]);
	}

	return (ret);
}
static int
vdev_raidz_worst_error(raidz_row_t *rr)
{
	int error = 0;

	for (int c = 0; c < rr->rr_cols; c++)
		error = zio_worst_error(error, rr->rr_col[c].rc_error);

	return (error);
}
static void
vdev_raidz_io_done_verified(zio_t *zio, raidz_row_t *rr)
{
	int unexpected_errors = 0;
	int parity_errors = 0;
	int parity_untried = 0;
	int data_errors = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	for (int c = 0; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_error != 0) {
			if (c < rr->rr_firstdatacol)
				parity_errors++;
			else
				data_errors++;

			if (!rc->rc_skipped)
				unexpected_errors++;
		} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
			parity_untried++;
		}
	}

	/*
	 * If we read more parity disks than were used for
	 * reconstruction, confirm that the other parity disks produced
	 * correct data.
	 *
	 * Note that we also regenerate parity when resilvering so we
	 * can write it out to failed devices later.
	 */
	if (parity_errors + parity_untried <
	    rr->rr_firstdatacol - data_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER)) {
		int n = raidz_parity_verify(zio, rr);
		unexpected_errors += n;
		ASSERT3U(parity_errors + n, <=, rr->rr_firstdatacol);
	}

	if (zio->io_error == 0 && spa_writeable(zio->io_spa) &&
	    (unexpected_errors > 0 || (zio->io_flags & ZIO_FLAG_RESILVER))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (int c = 0; c < rr->rr_cols; c++) {
			raidz_col_t *rc = &rr->rr_col[c];
			vdev_t *vd = zio->io_vd;
			vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

			if (!rc->rc_allow_repair) {
				continue;
			} else if (!rc->rc_force_repair &&
			    (rc->rc_error == 0 || rc->rc_size == 0)) {
				continue;
			}

			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    ZIO_TYPE_WRITE,
			    zio->io_priority == ZIO_PRIORITY_REBUILD ?
			    ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}
static void
raidz_restore_orig_data(raidz_map_t *rm)
{
	for (int i = 0; i < rm->rm_nrows; i++) {
		raidz_row_t *rr = rm->rm_row[i];
		for (int c = 0; c < rr->rr_cols; c++) {
			raidz_col_t *rc = &rr->rr_col[c];
			if (rc->rc_need_orig_restore) {
				abd_copy(rc->rc_abd,
				    rc->rc_orig_data, rc->rc_size);
				rc->rc_need_orig_restore = B_FALSE;
			}
		}
	}
}
/*
 * returns EINVAL if reconstruction of the block will not be possible
 * returns ECKSUM if this specific reconstruction failed
 * returns 0 on successful reconstruction
 */
static int
raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity)
{
	raidz_map_t *rm = zio->io_vsd;

	/* Reconstruct each row */
	for (int r = 0; r < rm->rm_nrows; r++) {
		raidz_row_t *rr = rm->rm_row[r];
		int my_tgts[VDEV_RAIDZ_MAXPARITY]; /* value is child id */
		int t = 0;
		int dead = 0;
		int dead_data = 0;

		for (int c = 0; c < rr->rr_cols; c++) {
			raidz_col_t *rc = &rr->rr_col[c];
			ASSERT0(rc->rc_need_orig_restore);
			if (rc->rc_error != 0) {
				dead++;
				if (c >= nparity)
					dead_data++;
				continue;
			}
			if (rc->rc_size == 0)
				continue;
			for (int lt = 0; lt < ntgts; lt++) {
				if (rc->rc_devidx == ltgts[lt]) {
					if (rc->rc_orig_data == NULL) {
						rc->rc_orig_data =
						    abd_alloc_linear(
						    rc->rc_size, B_TRUE);
						abd_copy(rc->rc_orig_data,
						    rc->rc_abd, rc->rc_size);
					}
					rc->rc_need_orig_restore = B_TRUE;

					dead++;
					if (c >= nparity)
						dead_data++;
					my_tgts[t++] = c;
					break;
				}
			}
		}
		if (dead > nparity) {
			/* reconstruction not possible */
			raidz_restore_orig_data(rm);
			return (EINVAL);
		}
		if (dead_data > 0)
			vdev_raidz_reconstruct_row(rm, rr, my_tgts, t);
	}

	/* Check for success */
	if (raidz_checksum_verify(zio) == 0) {

		/* Reconstruction succeeded - report errors */
		for (int i = 0; i < rm->rm_nrows; i++) {
			raidz_row_t *rr = rm->rm_row[i];

			for (int c = 0; c < rr->rr_cols; c++) {
				raidz_col_t *rc = &rr->rr_col[c];
				if (rc->rc_need_orig_restore) {
					/*
					 * Note: if this is a parity column,
					 * we don't really know if it's wrong.
					 * We need to let
					 * vdev_raidz_io_done_verified() check
					 * it, and if we set rc_error, it will
					 * think that it is a "known" error
					 * that doesn't need to be checked
					 * or corrected.
					 */
					if (rc->rc_error == 0 &&
					    c >= rr->rr_firstdatacol) {
						raidz_checksum_error(zio,
						    rc, rc->rc_orig_data);
						rc->rc_error =
						    SET_ERROR(ECKSUM);
					}
					rc->rc_need_orig_restore = B_FALSE;
				}
			}

			vdev_raidz_io_done_verified(zio, rr);
		}

		zio_checksum_verified(zio);

		return (0);
	}

	/* Reconstruction failed - restore original data */
	raidz_restore_orig_data(rm);
	return (ECKSUM);
}
/*
 * Iterate over all combinations of N bad vdevs and attempt a reconstruction.
 * Note that the algorithm below is non-optimal because it doesn't take into
 * account how reconstruction is actually performed. For example, with
 * triple-parity RAID-Z the reconstruction procedure is the same if column 4
 * is targeted as invalid as if columns 1 and 4 are targeted since in both
 * cases we'd only use parity information in column 0.
 *
 * The order that we find the various possible combinations of failed
 * disks is dictated by these rules:
 * - Examine each "slot" (the "i" in tgts[i])
 *   - Try to increment this slot (tgts[i] = tgts[i] + 1)
 *   - if we can't increment because it runs into the next slot,
 *     reset our slot to the minimum, and examine the next slot
 *
 * For example, with a 6-wide RAIDZ3, and no known errors (so we have to choose
 * 3 columns to reconstruct), we will generate the following sequence:
 *
 * 0 1 2	special case: skip since these are all parity
 * 0 1 3	first slot: reset to 0; middle slot: increment to 2
 * 0 2 3	first slot: increment to 1
 * 1 2 3	first: reset to 0; middle: reset to 1; last: increment to 4
 * 0 1 4	first: reset to 0; middle: increment to 2
 * 0 2 4	first: increment to 1
 * 1 2 4	first: reset to 0; middle: increment to 3
 * 0 3 4	first: increment to 1
 * 1 3 4	first: increment to 2
 * 2 3 4	first: reset to 0; middle: reset to 1; last: increment to 5
 * 0 1 5	first: reset to 0; middle: increment to 2
 * 0 2 5	first: increment to 1
 * 1 2 5	first: reset to 0; middle: increment to 3
 * 0 3 5	first: increment to 1
 * 1 3 5	first: increment to 2
 * 2 3 5	first: reset to 0; middle: increment to 4
 * 0 4 5	first: increment to 1
 * 1 4 5	first: increment to 2
 * 2 4 5	first: increment to 3
 * 3 4 5	done
 *
 * This strategy works for dRAID but is less efficient when there are a large
 * number of child vdevs and therefore permutations to check. Furthermore,
 * since the raidz_map_t rows likely do not overlap, reconstruction would be
 * possible as long as there are no more than nparity data errors per row.
 * These additional permutations are not currently checked but could be as
 * a future improvement.  A sketch of the enumeration order appears below.
 */
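/*
 * Illustrative sketch (not part of the original source): enumerating the
 * failed-child combinations in the order described above, for n children and
 * num_failures simultaneous failures.  It mirrors the slot-increment rules
 * used by vdev_raidz_combrec() below but is simplified: no sentinel
 * bookkeeping, no all-parity special case, and no reconstruction attempt.
 * Compiled out.
 */
#if 0	/* example only */
static void
raidz_combrec_order_example(int n, int num_failures)
{
	int tgts[VDEV_RAIDZ_MAXPARITY];

	ASSERT3U(num_failures, <=, VDEV_RAIDZ_MAXPARITY);

	/* Start from the lowest combination: 0, 1, ..., num_failures - 1. */
	for (int i = 0; i < num_failures; i++)
		tgts[i] = i;

	for (;;) {
		/* <attempt reconstruction with tgts[0..num_failures-1]> */

		/* Advance: bump the first slot that has room, reset below. */
		int t;
		for (t = 0; t < num_failures; t++) {
			int limit = (t == num_failures - 1) ? n : tgts[t + 1];
			if (tgts[t] + 1 < limit) {
				tgts[t]++;
				break;
			}
			tgts[t] = t;	/* reset to minimum, carry upward */
		}
		if (t == num_failures)
			break;		/* all combinations examined */
	}
}
#endif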
static int
vdev_raidz_combrec(zio_t *zio)
{
	int nparity = vdev_get_nparity(zio->io_vd);
	raidz_map_t *rm = zio->io_vsd;

	/* Check if there's enough data to attempt reconstruction. */
	for (int i = 0; i < rm->rm_nrows; i++) {
		raidz_row_t *rr = rm->rm_row[i];
		int total_errors = 0;

		for (int c = 0; c < rr->rr_cols; c++) {
			if (rr->rr_col[c].rc_error)
				total_errors++;
		}

		if (total_errors > nparity)
			return (vdev_raidz_worst_error(rr));
	}

	for (int num_failures = 1; num_failures <= nparity; num_failures++) {
		int tstore[VDEV_RAIDZ_MAXPARITY + 2];
		int *ltgts = &tstore[1]; /* value is logical child ID */

		/* Determine number of logical children, n */
		int n = zio->io_vd->vdev_children;

		ASSERT3U(num_failures, <=, nparity);
		ASSERT3U(num_failures, <=, VDEV_RAIDZ_MAXPARITY);

		/* Handle corner cases in combrec logic */
		ltgts[-1] = -1;
		for (int i = 0; i < num_failures; i++) {
			ltgts[i] = i;
		}
		ltgts[num_failures] = n;

		for (;;) {
			int err = raidz_reconstruct(zio, ltgts, num_failures,
			    nparity);
			if (err == EINVAL) {
				/*
				 * Reconstruction not possible with this #
				 * failures; try more failures.
				 */
				break;
			} else if (err == 0)
				return (0);

			/* Compute next targets to try */
			for (int t = 0; ; t++) {
				ASSERT3U(t, <, num_failures);

				ltgts[t]++;
				if (ltgts[t] == n) {
					/* try more failures */
					ASSERT3U(t, ==, num_failures - 1);
					break;
				}

				ASSERT3U(ltgts[t], <, n);
				ASSERT3U(ltgts[t], <=, ltgts[t + 1]);

				/*
				 * If that spot is available, we're done here.
				 * Try the next combination.
				 */
				if (ltgts[t] != ltgts[t + 1])
					break;

				/*
				 * Otherwise, reset this tgt to the minimum,
				 * and move on to the next tgt.
				 */
				ltgts[t] = ltgts[t - 1] + 1;
				ASSERT3U(ltgts[t], ==, t);
			}

			/* Increase the number of failures and keep trying. */
			if (ltgts[num_failures - 1] == n)
				break;
		}
	}

	return (ECKSUM);
}
void
vdev_raidz_reconstruct(raidz_map_t *rm, const int *t, int nt)
{
	for (uint64_t row = 0; row < rm->rm_nrows; row++) {
		raidz_row_t *rr = rm->rm_row[row];
		vdev_raidz_reconstruct_row(rm, rr, t, nt);
	}
}
/*
 * Complete a write IO operation on a RAIDZ VDev
 *
 * Outline:
 *   1. Check for errors on the child IOs.
 *   2. Return, setting an error code if too few child VDevs were written
 *      to reconstruct the data later.  Note that partial writes are
 *      considered successful if they can be reconstructed at all.
 */
static void
vdev_raidz_io_done_write_impl(zio_t *zio, raidz_row_t *rr)
{
	int total_errors = 0;

	ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
	ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);

	for (int c = 0; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_error) {
			ASSERT(rc->rc_error != ECKSUM);	/* child has no bp */

			total_errors++;
		}
	}

	/*
	 * Treat partial writes as a success. If we couldn't write enough
	 * columns to reconstruct the data, the I/O failed.  Otherwise, good
	 * enough.
	 *
	 * Now that we support write reallocation, it would be better
	 * to treat partial failure as real failure unless there are
	 * no non-degraded top-level vdevs left, and not update DTLs
	 * if we intend to reallocate.
	 */
	if (total_errors > rr->rr_firstdatacol) {
		zio->io_error = zio_worst_error(zio->io_error,
		    vdev_raidz_worst_error(rr));
	}
}
static void
vdev_raidz_io_done_reconstruct_known_missing(zio_t *zio, raidz_map_t *rm,
    raidz_row_t *rr)
{
	int parity_errors = 0;
	int parity_untried = 0;
	int data_errors = 0;
	int total_errors = 0;

	ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
	ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	for (int c = 0; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_error) {
			ASSERT(rc->rc_error != ECKSUM);	/* child has no bp */

			if (c < rr->rr_firstdatacol)
				parity_errors++;
			else
				data_errors++;

			total_errors++;
		} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
			parity_untried++;
		}
	}

	/*
	 * If there were data errors and the number of errors we saw was
	 * correctable -- less than or equal to the number of parity disks read
	 * -- reconstruct based on the missing data.
	 */
	if (data_errors != 0 &&
	    total_errors <= rr->rr_firstdatacol - parity_untried) {
		/*
		 * We either attempt to read all the parity columns or
		 * none of them. If we didn't try to read parity, we
		 * wouldn't be here in the correctable case. There must
		 * also have been fewer parity errors than parity
		 * columns or, again, we wouldn't be in this code path.
		 */
		ASSERT(parity_untried == 0);
		ASSERT(parity_errors < rr->rr_firstdatacol);

		/*
		 * Identify the data columns that reported an error.
		 */
		int n = 0;
		int tgts[VDEV_RAIDZ_MAXPARITY];
		for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
			raidz_col_t *rc = &rr->rr_col[c];
			if (rc->rc_error != 0) {
				ASSERT(n < VDEV_RAIDZ_MAXPARITY);
				tgts[n++] = c;
			}
		}

		ASSERT(rr->rr_firstdatacol >= n);

		vdev_raidz_reconstruct_row(rm, rr, tgts, n);
	}
}
/*
 * Return the number of reads issued.
 */
static int
vdev_raidz_read_all(zio_t *zio, raidz_row_t *rr)
{
	vdev_t *vd = zio->io_vd;
	int nread = 0;

	rr->rr_missingdata = 0;
	rr->rr_missingparity = 0;

	/*
	 * If this row contains empty sectors which are not required
	 * for a normal read then allocate an ABD for them now so they
	 * may be read, verified, and any needed repairs performed.
	 */
	if (rr->rr_nempty && rr->rr_abd_empty == NULL)
		vdev_draid_map_alloc_empty(zio, rr);

	for (int c = 0; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];
		if (rc->rc_tried || rc->rc_size == 0)
			continue;

		zio_nowait(zio_vdev_child_io(zio, NULL,
		    vd->vdev_child[rc->rc_devidx],
		    rc->rc_offset, rc->rc_abd, rc->rc_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_raidz_child_done, rc));
		nread++;
	}

	return (nread);
}
/*
 * We're here because either there were too many errors to even attempt
 * reconstruction (total_errors == rr_firstdatacol), or vdev_*_combrec()
 * failed. In either case, there is enough bad data to prevent reconstruction.
 * Start checksum ereports for all children which haven't failed.
 */
vdev_raidz_io_done_unrecoverable(zio_t *zio)
{
	raidz_map_t *rm = zio->io_vsd;
	for (int i = 0; i < rm->rm_nrows; i++) {
		raidz_row_t *rr = rm->rm_row[i];

		for (int c = 0; c < rr->rr_cols; c++) {
			raidz_col_t *rc = &rr->rr_col[c];
			vdev_t *cvd = zio->io_vd->vdev_child[rc->rc_devidx];
			if (rc->rc_error != 0)
				continue;
			zio_bad_cksum_t zbc;
			zbc.zbc_has_cksum = 0;
			zbc.zbc_injected = rm->rm_ecksuminjected;
			(void) zfs_ereport_start_checksum(zio->io_spa,
			    cvd, &zio->io_bookmark, zio, rc->rc_offset,
			mutex_enter(&cvd->vdev_stat_lock);
			cvd->vdev_stat.vs_checksum_errors++;
			mutex_exit(&cvd->vdev_stat_lock);
vdev_raidz_io_done(zio_t *zio)
{
	raidz_map_t *rm = zio->io_vsd;
	if (zio->io_type == ZIO_TYPE_WRITE) {
		for (int i = 0; i < rm->rm_nrows; i++) {
			vdev_raidz_io_done_write_impl(zio, rm->rm_row[i]);
		}
	} else {
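		/*
		 * Read path: account for columns already known to be missing
		 * in each row, then verify the block checksum.  If
		 * verification fails, re-read any columns that were skipped
		 * and, as a last resort, fall back to combinatorial
		 * reconstruction below.
		 */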
		for (int i = 0; i < rm->rm_nrows; i++) {
			raidz_row_t *rr = rm->rm_row[i];
			vdev_raidz_io_done_reconstruct_known_missing(zio,
			    rm, rr);
		}
		if (raidz_checksum_verify(zio) == 0) {
			for (int i = 0; i < rm->rm_nrows; i++) {
				raidz_row_t *rr = rm->rm_row[i];
				vdev_raidz_io_done_verified(zio, rr);
			}
			zio_checksum_verified(zio);
		} else {
			/*
			 * A sequential resilver has no checksum which makes
			 * combinatorial reconstruction impossible. This code
			 * path is unreachable since raidz_checksum_verify()
			 * has no checksum to verify and must succeed.
			 */
			ASSERT3U(zio->io_priority, !=, ZIO_PRIORITY_REBUILD);
			/*
			 * This isn't a typical situation -- either we got a
			 * read error or a child silently returned bad data.
			 * Read every block so we can try again with as much
			 * data and parity as we can track down. If we've
			 * already been through once before, all children will
			 * be marked as tried so we'll proceed to combinatorial
			 * reconstruction.
			 */
			int nread = 0;
			for (int i = 0; i < rm->rm_nrows; i++) {
				nread += vdev_raidz_read_all(zio,
				    rm->rm_row[i]);
			}
			if (nread != 0) {
				/*
				 * Normally our stage is VDEV_IO_DONE, but if
				 * we've already called redone(), it will have
				 * changed to VDEV_IO_START, in which case we
				 * don't want to call redone() again.
				 */
				if (zio->io_stage != ZIO_STAGE_VDEV_IO_START)
					zio_vdev_io_redone(zio);
				return;
			}
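			/*
			 * No further reads could be issued above (every column
			 * was either already tried or has no data), so attempt
			 * combinatorial reconstruction as the last resort.
			 */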
			zio->io_error = vdev_raidz_combrec(zio);
			if (zio->io_error == ECKSUM &&
			    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
				vdev_raidz_io_done_unrecoverable(zio);
			}
		}
	}
}
vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
{
	vdev_raidz_t *vdrz = vd->vdev_tsd;
	if (faulted > vdrz->vd_nparity)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
/*
 * Determine if any portion of the provided block resides on a child vdev
 * with a dirty DTL and therefore needs to be resilvered.  The function
 * assumes that at least one DTL is dirty which implies that full stripe
 * width blocks must be resilvered.
 */
vdev_raidz_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
	vdev_raidz_t *vdrz = vd->vdev_tsd;
	uint64_t dcols = vd->vdev_children;
	uint64_t nparity = vdrz->vd_nparity;
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	/* The starting RAIDZ (parent) vdev sector of the block. */
	uint64_t b = DVA_GET_OFFSET(dva) >> ashift;
	/* The zio's size in units of the vdev's minimum sector size. */
	uint64_t s = ((psize - 1) >> ashift) + 1;
	/* The first column for this stripe. */
	uint64_t f = b % dcols;
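	/*
	 * Worked example (illustrative values): for dcols = 6, nparity = 2
	 * and ashift = 12, a DVA offset of 0x23000 gives b = 35 and an 8K
	 * psize gives s = 2, so f = 35 % 6 = 5 and the loop below checks
	 * children (5 + c) % 6 for c = 0..3, i.e. children 5, 0, 1 and 2.
	 */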
	/* Unreachable by sequential resilver. */
	ASSERT3U(phys_birth, !=, TXG_UNKNOWN);
	if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
		return (B_FALSE);

	if (s + nparity >= dcols)
		return (B_TRUE);
	for (uint64_t c = 0; c < s + nparity; c++) {
		uint64_t devidx = (f + c) % dcols;
		vdev_t *cvd = vd->vdev_child[devidx];

		/*
		 * dsl_scan_need_resilver() already checked vd with
		 * vdev_dtl_contains(). So here just check cvd with
		 * vdev_dtl_empty(), cheaper and a good approximation.
		 */
		if (!vdev_dtl_empty(cvd, DTL_PARTIAL))
			return (B_TRUE);
	}

	return (B_FALSE);
}
vdev_raidz_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
    range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
	vdev_t *raidvd = cvd->vdev_parent;
	ASSERT(raidvd->vdev_ops == &vdev_raidz_ops);
	uint64_t width = raidvd->vdev_children;
	uint64_t tgt_col = cvd->vdev_id;
	uint64_t ashift = raidvd->vdev_top->vdev_ashift;
	/* make sure the offsets are block-aligned */
	ASSERT0(logical_rs->rs_start % (1 << ashift));
	ASSERT0(logical_rs->rs_end % (1 << ashift));
	uint64_t b_start = logical_rs->rs_start >> ashift;
	uint64_t b_end = logical_rs->rs_end >> ashift;
	uint64_t start_row = 0;
	if (b_start > tgt_col) /* avoid underflow */
		start_row = ((b_start - tgt_col - 1) / width) + 1;

	uint64_t end_row = 0;
	if (b_end > tgt_col)
		end_row = ((b_end - tgt_col - 1) / width) + 1;
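	/*
	 * Worked example (illustrative values): for width = 4, tgt_col = 1
	 * and ashift = 12, a logical range of [0x5000, 0xA000) gives
	 * b_start = 5 and b_end = 10, so start_row = ((5 - 1 - 1) / 4) + 1 = 1
	 * and end_row = ((10 - 1 - 1) / 4) + 1 = 3; this child covers
	 * physical rows 1 and 2, i.e. [0x1000, 0x3000) below.
	 */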
	physical_rs->rs_start = start_row << ashift;
	physical_rs->rs_end = end_row << ashift;
	ASSERT3U(physical_rs->rs_start, <=, logical_rs->rs_start);
	ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
	    logical_rs->rs_end - logical_rs->rs_start);
}
/*
 * Initialize private RAIDZ specific fields from the nvlist.
 */
vdev_raidz_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
	uint64_t nparity;
	vdev_raidz_t *vdrz;
	uint_t children;
	nvlist_t **child;

	int error = nvlist_lookup_nvlist_array(nv,
	    ZPOOL_CONFIG_CHILDREN, &child, &children);
	if (error != 0)
		return (SET_ERROR(EINVAL));
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) == 0) {
		if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
			return (SET_ERROR(EINVAL));

		/*
		 * Previous versions could only support 1 or 2 parity
		 * devices.
		 */
		if (nparity > 1 && spa_version(spa) < SPA_VERSION_RAIDZ2)
			return (SET_ERROR(EINVAL));
		else if (nparity > 2 && spa_version(spa) < SPA_VERSION_RAIDZ3)
			return (SET_ERROR(EINVAL));
	} else {
		/*
		 * We require the parity to be specified for SPAs that
		 * support multiple parity levels.
		 */
		if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
			return (SET_ERROR(EINVAL));

		/*
		 * Otherwise, we default to 1 parity device for RAID-Z.
		 */
		nparity = 1;
	}
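	/*
	 * Illustrative example: a 6-child raidz2 label carries
	 * ZPOOL_CONFIG_NPARITY=2, so the allocation below records
	 * vd_logical_width = 6 and vd_nparity = 2.
	 */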
	vdrz = kmem_zalloc(sizeof (*vdrz), KM_SLEEP);
	vdrz->vd_logical_width = children;
	vdrz->vd_nparity = nparity;

	*tsd = vdrz;

	return (0);
}
vdev_raidz_fini(vdev_t *vd)
{
	kmem_free(vd->vdev_tsd, sizeof (vdev_raidz_t));
}
/*
 * Add RAIDZ specific fields to the config nvlist.
 */
vdev_raidz_config_generate(vdev_t *vd, nvlist_t *nv)
{
	ASSERT3P(vd->vdev_ops, ==, &vdev_raidz_ops);
	vdev_raidz_t *vdrz = vd->vdev_tsd;
	/*
	 * Make sure someone hasn't managed to sneak a fancy new vdev
	 * into a crufty old storage pool.
	 */
	ASSERT(vdrz->vd_nparity == 1 ||
	    (vdrz->vd_nparity <= 2 &&
	    spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ2) ||
	    (vdrz->vd_nparity <= 3 &&
	    spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ3));
	/*
	 * Note that we'll add these even on storage pools where they
	 * aren't strictly required -- older software will just ignore
	 * them.
	 */
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdrz->vd_nparity);
}
vdev_raidz_nparity(vdev_t *vd)
{
	vdev_raidz_t *vdrz = vd->vdev_tsd;
	return (vdrz->vd_nparity);
}
vdev_raidz_ndisks(vdev_t *vd)
{
	return (vd->vdev_children);
}
vdev_ops_t vdev_raidz_ops = {
	.vdev_op_init = vdev_raidz_init,
	.vdev_op_fini = vdev_raidz_fini,
	.vdev_op_open = vdev_raidz_open,
	.vdev_op_close = vdev_raidz_close,
	.vdev_op_asize = vdev_raidz_asize,
	.vdev_op_min_asize = vdev_raidz_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_raidz_io_start,
	.vdev_op_io_done = vdev_raidz_io_done,
	.vdev_op_state_change = vdev_raidz_state_change,
	.vdev_op_need_resilver = vdev_raidz_need_resilver,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_raidz_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = vdev_raidz_config_generate,
	.vdev_op_nparity = vdev_raidz_nparity,
	.vdev_op_ndisks = vdev_raidz_ndisks,
	.vdev_op_type = VDEV_TYPE_RAIDZ,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
};