/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>

/*
 * Virtual device vector for mirroring.
 */

typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
	int		mm_children;
	int		mm_replacing;
	int		mm_preferred;
	int		mm_root;
	mirror_child_t	mm_child[1];
} mirror_map_t;

int vdev_mirror_shift = 21;
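
/*
 * With the default shift of 21, reads rotate their preferred child
 * across 1 << 21 = 2 MB windows of offset: on a two-way mirror an I/O
 * at offset 5 MB prefers child (5 MB >> 21) % 2 == 0, while one at
 * offset 7 MB prefers child (7 MB >> 21) % 2 == 1 (see the mm_preferred
 * computation in vdev_mirror_map_alloc() below).
 */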

static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, offsetof(mirror_map_t, mm_child[mm->mm_children]));
}
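
/*
 * Note that mirror_map_t ends in a one-element array, mm_child[1]; it is
 * allocated with offsetof(mirror_map_t, mm_child[n]) bytes so that the
 * tail holds n children, and the same expression sizes the kmem_free()
 * above.
 */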

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	vdev_mirror_map_free,
	zio_vsd_default_cksum_report
};

static mirror_map_t *
vdev_mirror_map_alloc(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c, d;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		c = BP_GET_NDVAS(zio->io_bp);

		mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]),
		    KM_SLEEP);
		mm->mm_children = c;
		mm->mm_replacing = B_FALSE;
		mm->mm_preferred = spa_get_random(c);
		mm->mm_root = B_TRUE;

		/*
		 * Check the other, lower-index DVAs to see if they're on
		 * the same vdev as the child we picked.  If they are, use
		 * them since they are likely to have been allocated from
		 * the primary metaslab in use at the time, and hence are
		 * more likely to have locality with single-copy data.
		 */
		for (c = mm->mm_preferred, d = c - 1; d >= 0; d--) {
			if (DVA_GET_VDEV(&dva[d]) == DVA_GET_VDEV(&dva[c]))
				mm->mm_preferred = d;
		}
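
		/*
		 * For example, if spa_get_random() picked DVA 2 and
		 * DVA 0 lives on the same top-level vdev as DVA 2, the
		 * loop above walks d = 1, 0 and leaves mm_preferred = 0,
		 * the lowest-numbered copy on that vdev.
		 */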

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa,
			    DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		c = vd->vdev_children;

		mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]),
		    KM_SLEEP);
		mm->mm_children = c;
		mm->mm_replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		mm->mm_preferred = mm->mm_replacing ? 0 :
		    (zio->io_offset >> vdev_mirror_shift) % c;
		mm->mm_root = B_FALSE;

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}

static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}
		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*ashift = MAX(*ashift, cvd->vdev_ashift);
	}

	if (numerrors == vd->vdev_children) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;
		zio_link_t *zl = NULL;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	abd_free(zio->io_abd);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

/*
 * Try to find a child whose DTL doesn't contain the block we want to read.
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	uint64_t txg = zio->io_txg;
	int i, c;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	/*
	 * Try to find a child whose DTL doesn't contain the block to read.
	 * If a child is known to be completely inaccessible (indicated by
	 * vdev_readable() returning B_FALSE), don't even try.
	 */
	for (i = 0, c = mm->mm_preferred; i < mm->mm_children; i++, c++) {
		if (c >= mm->mm_children)
			c = 0;
		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;
		if (!vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}
		if (!vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1))
			return (c);
		mc->mc_error = SET_ERROR(ESTALE);
		mc->mc_skipped = 1;
		mc->mc_speculative = 1;
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++)
		if (!mm->mm_child[c].mc_tried)
			return (c);

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}

static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_alloc(zio);

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
			/*
			 * For scrubbing reads we need to allocate a read
			 * buffer for each child and issue reads to all
			 * children.  If any child succeeds, it will copy its
			 * data into zio->io_abd in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    abd_alloc_sametype(zio->io_abd,
				    zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		/*
		 * Writes go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	zio_execute(zio);
}
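
/*
 * vdev_mirror_worst_error() keeps two running "worst" errors: error[0]
 * for children we expected to succeed, and error[1] for speculative
 * attempts -- children whose DTL indicated the data might be stale
 * (mc_speculative). A non-speculative error, if there is one, always
 * takes precedence.
 */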
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}

static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}
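
	/*
	 * Self-healing writes are issued only when we hold at least one
	 * good copy, the pool is writeable, and either a child failed
	 * unexpectedly, this read is part of a resilver, or it is a
	 * scrub of a replacing/spare vdev.
	 */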
	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
		/*
		 * Use the good data we have in hand to repair damaged
		 * children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_abd, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,			/* vdev_op_hold */
	NULL,			/* vdev_op_rele */
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,			/* vdev_op_hold */
	NULL,			/* vdev_op_rele */
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,			/* vdev_op_hold */
	NULL,			/* vdev_op_rele */
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};