/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016, 2024 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_initialize.h>

/*
 * Value that is written to disk during initialization.
 */
static uint64_t zfs_initialize_value = 0xdeadbeefdeadbeeeULL;

/* maximum number of I/Os outstanding per leaf vdev */
static const int zfs_initialize_limit = 1;

/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
static uint64_t zfs_initialize_chunk_size = 1024 * 1024;
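
/*
 * Returns B_TRUE when the initializing thread for this leaf should bail out:
 * an explicit stop was requested, the device is no longer writeable or has
 * been detached, or its top-level vdev is being removed or is undergoing
 * RAID-Z expansion.
 */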

static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
	return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
	    vd->vdev_detached || vd->vdev_top->vdev_removing ||
	    vd->vdev_top->vdev_rz_expanding);
}

static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	/*
	 * We pass in the guid instead of the vdev_t since the vdev may
	 * have been freed prior to the sync task being processed. This
	 * happens when a vdev is detached as we call spa_config_vdev_exit(),
	 * stop the initializing thread, schedule the sync task, and free
	 * the vdev. Later when the scheduled sync task is invoked, it would
	 * find that the vdev has been freed.
	 */
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing ||
	    !vdev_is_concrete(vd) || vd->vdev_top->vdev_rz_expanding)
		return;

	uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
	vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

	VERIFY(vd->vdev_leaf_zap != 0);

	objset_t *mos = vd->vdev_spa->spa_meta_objset;

	if (last_offset > 0) {
		vd->vdev_initialize_last_offset = last_offset;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (last_offset), 1, &last_offset, tx));
	}

	if (vd->vdev_initialize_action_time > 0) {
		uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
		    1, &val, tx));
	}

	uint64_t initialize_state = vd->vdev_initialize_state;
	VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
	    &initialize_state, tx));
}

static void
vdev_initialize_zap_remove_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t guid = *(uint64_t *)arg;

	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
		return;

	ASSERT3S(vd->vdev_initialize_state, ==, VDEV_INITIALIZE_NONE);
	ASSERT3U(vd->vdev_leaf_zap, !=, 0);

	vd->vdev_initialize_last_offset = 0;
	vd->vdev_initialize_action_time = 0;

	objset_t *mos = vd->vdev_spa->spa_meta_objset;
	int error;

	error = zap_remove(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET, tx);
	VERIFY(error == 0 || error == ENOENT);

	error = zap_remove(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, tx);
	VERIFY(error == 0 || error == ENOENT);

	error = zap_remove(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, tx);
	VERIFY(error == 0 || error == ENOENT);
}
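
/*
 * Transition a leaf vdev to a new initializing state. The new state is
 * persisted in (or, for VDEV_INITIALIZE_NONE, removed from) the vdev's leaf
 * ZAP via a sync task, and the transition is logged to the pool history.
 * The caller must hold vdev_initialize_lock.
 */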

static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid, this will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * If we're suspending, then preserve the original start time.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}

	vdev_initializing_state_t old_state = vd->vdev_initialize_state;
	vd->vdev_initialize_state = new_state;

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	if (new_state != VDEV_INITIALIZE_NONE) {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, tx);
	} else {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_remove_sync, guid, tx);
	}

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		if (old_state == VDEV_INITIALIZE_ACTIVE ||
		    old_state == VDEV_INITIALIZE_SUSPENDED)
			spa_history_log_internal(spa, "initialize", tx,
			    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_NONE:
		spa_history_log_internal(spa, "uninitialize", tx,
		    "vdev=%s", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);

	if (new_state != VDEV_INITIALIZE_ACTIVE)
		spa_notify_waiters(spa);
}
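
/*
 * Completion callback for each initializing write zio: accounts for the
 * bytes written (or rolls the recorded offset back if the vdev became
 * unavailable) and wakes any writer waiting on the inflight I/O limit.
 */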

static void
vdev_initialize_cb(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	mutex_enter(&vd->vdev_initialize_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the vdev was unavailable; roll the
		 * last offset back. (This works because spa_sync waits on
		 * spa_txg_zio before it runs sync tasks.)
		 */
		uint64_t *off =
		    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else {
		/*
		 * Since initializing is best-effort, we ignore I/O errors and
		 * rely on vdev_probe to determine if the errors are more
		 * critical.
		 */
		if (zio->io_error != 0)
			vd->vdev_stat.vs_initialize_errors++;

		vd->vdev_initialize_bytes_done += zio->io_orig_size;
	}
	ASSERT3U(vd->vdev_initialize_inflight, >, 0);
	vd->vdev_initialize_inflight--;
	cv_broadcast(&vd->vdev_initialize_io_cv);
	mutex_exit(&vd->vdev_initialize_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
	spa_t *spa = vd->vdev_spa;

	/* Limit inflight initializing I/Os */
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	vd->vdev_initialize_inflight++;
	mutex_exit(&vd->vdev_initialize_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_initialize_lock);

	if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
		uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		*guid = vd->vdev_guid;

		/* This is the first write of this txg. */
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, tx);
	}

	/*
	 * We know the vdev struct will still be around since all
	 * consumers of vdev_free must stop the initialization first.
	 */
	if (vdev_initialize_should_stop(vd)) {
		mutex_enter(&vd->vdev_initialize_io_lock);
		ASSERT3U(vd->vdev_initialize_inflight, >, 0);
		vd->vdev_initialize_inflight--;
		mutex_exit(&vd->vdev_initialize_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_initialize_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_initialize_lock);

	vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
	zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
	    size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
	    ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
	/* vdev_initialize_cb releases SCL_STATE_ALL */

	dmu_tx_commit(tx);

	return (0);
}

/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
	(void) unused;

	ASSERT0(len % sizeof (uint64_t));
	for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
		*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
	return (0);
}

static abd_t *
vdev_initialize_block_alloc(void)
{
	/* Allocate ABD for filler data */
	abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

	ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
	(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
	    vdev_initialize_block_fill, NULL);

	return (data);
}

static void
vdev_initialize_block_free(abd_t *data)
{
	abd_free(data);
}
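
/*
 * Walk the range tree of segments to initialize and issue a chunked write
 * for each one; returns the first error encountered (e.g. EINTR when a stop
 * was requested) or 0 on success.
 */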

static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	range_tree_t *rt = vd->vdev_initialize_tree;
	zfs_btree_t *bt = &rt->rt_root;
	zfs_btree_index_t where;

	for (range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
	    rs = zfs_btree_next(bt, &where, &where)) {
		uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);

		/* Split range into legally-sized physical chunks */
		uint64_t writes_required =
		    ((size - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t w = 0; w < writes_required; w++) {
			int error;

			error = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + rs_get_start(rs, rt) +
			    (w * zfs_initialize_chunk_size),
			    MIN(size - (w * zfs_initialize_chunk_size),
			    zfs_initialize_chunk_size), data);

			if (error != 0)
				return (error);
		}
	}
	return (0);
}

static void
vdev_initialize_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
{
	uint64_t *last_rs_end = (uint64_t *)arg;

	if (physical_rs->rs_end > *last_rs_end)
		*last_rs_end = physical_rs->rs_end;
}

static void
vdev_initialize_xlate_progress(void *arg, range_seg64_t *physical_rs)
{
	vdev_t *vd = (vdev_t *)arg;

	uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
	vd->vdev_initialize_bytes_est += size;

	if (vd->vdev_initialize_last_offset > physical_rs->rs_end) {
		vd->vdev_initialize_bytes_done += size;
	} else if (vd->vdev_initialize_last_offset > physical_rs->rs_start &&
	    vd->vdev_initialize_last_offset < physical_rs->rs_end) {
		vd->vdev_initialize_bytes_done +=
		    vd->vdev_initialize_last_offset - physical_rs->rs_start;
	}
}
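
/*
 * Recompute vdev_initialize_bytes_est and vdev_initialize_bytes_done by
 * walking the top-level vdev's metaslabs and comparing their physical
 * ranges against vdev_initialize_last_offset.
 */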

static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	vd->vdev_initialize_bytes_est = 0;
	vd->vdev_initialize_bytes_done = 0;

	for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		mutex_enter(&msp->ms_lock);

		uint64_t ms_free = (msp->ms_size -
		    metaslab_allocated_space(msp)) /
		    vdev_get_ndisks(vd->vdev_top);

		/*
		 * Convert the metaslab range to a physical range
		 * on our vdev. We use this to determine if we are
		 * in the middle of this metaslab range.
		 */
		range_seg64_t logical_rs, physical_rs, remain_rs;
		logical_rs.rs_start = msp->ms_start;
		logical_rs.rs_end = msp->ms_start + msp->ms_size;

		/* Metaslab space after this offset has not been initialized */
		vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
		if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/* Metaslab space before this offset has been initialized */
		uint64_t last_rs_end = physical_rs.rs_end;
		if (!vdev_xlate_is_empty(&remain_rs)) {
			vdev_xlate_walk(vd, &remain_rs,
			    vdev_initialize_xlate_last_rs_end, &last_rs_end);
		}

		if (vd->vdev_initialize_last_offset > last_rs_end) {
			vd->vdev_initialize_bytes_done += ms_free;
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If we get here, we're in the middle of initializing this
		 * metaslab. Load it and walk the free tree for more accurate
		 * progress estimation.
		 */
		VERIFY0(metaslab_load(msp));

		zfs_btree_index_t where;
		range_tree_t *rt = msp->ms_allocatable;
		for (range_seg_t *rs =
		    zfs_btree_first(&rt->rt_root, &where); rs;
		    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
			logical_rs.rs_start = rs_get_start(rs, rt);
			logical_rs.rs_end = rs_get_end(rs, rt);

			vdev_xlate_walk(vd, &logical_rs,
			    vdev_initialize_xlate_progress, vd);
		}
		mutex_exit(&msp->ms_lock);
	}
}
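
/*
 * Load the persisted last-initialized offset from the leaf ZAP (when an
 * initialization is active or suspended) and refresh the progress counters.
 */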

static int
vdev_initialize_load(vdev_t *vd)
{
	int err = 0;
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
	    vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (vd->vdev_initialize_last_offset), 1,
		    &vd->vdev_initialize_last_offset);
		if (err == ENOENT) {
			vd->vdev_initialize_last_offset = 0;
			err = 0;
		}
	}

	vdev_initialize_calculate_progress(vd);
	return (err);
}
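
/*
 * Add a translated physical segment to vdev_initialize_tree, clipping it
 * against vdev_initialize_last_offset so that already-initialized space is
 * not written again.
 */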

static void
vdev_initialize_xlate_range_add(void *arg, range_seg64_t *physical_rs)
{
	vdev_t *vd = arg;

	/* Only add segments that we have not visited yet */
	if (physical_rs->rs_end <= vd->vdev_initialize_last_offset)
		return;

	/* Pick up where we left off mid-range. */
	if (vd->vdev_initialize_last_offset > physical_rs->rs_start) {
		zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
		    "(%llu, %llu)", vd->vdev_path,
		    (u_longlong_t)physical_rs->rs_start,
		    (u_longlong_t)physical_rs->rs_end,
		    (u_longlong_t)vd->vdev_initialize_last_offset,
		    (u_longlong_t)physical_rs->rs_end);
		ASSERT3U(physical_rs->rs_end, >,
		    vd->vdev_initialize_last_offset);
		physical_rs->rs_start = vd->vdev_initialize_last_offset;
	}

	ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);

	range_tree_add(vd->vdev_initialize_tree, physical_rs->rs_start,
	    physical_rs->rs_end - physical_rs->rs_start);
}

/*
 * Convert the logical range into a physical range and add it to our
 * range tree.
 */
static void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
	vdev_t *vd = arg;

	range_seg64_t logical_rs;
	logical_rs.rs_start = start;
	logical_rs.rs_end = start + size;

	ASSERT(vd->vdev_ops->vdev_op_leaf);
	vdev_xlate_walk(vd, &logical_rs, vdev_initialize_xlate_range_add, arg);
}
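
/*
 * Body of the initializing thread: for each metaslab of the top-level vdev,
 * build a range tree of its allocatable (free) space and overwrite that
 * space with the zfs_initialize_value pattern, then record the final state
 * before exiting.
 */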

static __attribute__((noreturn)) void
vdev_initialize_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;
	uint64_t ms_count = 0;

	ASSERT(vdev_is_concrete(vd));
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	vd->vdev_initialize_last_offset = 0;
	VERIFY0(vdev_initialize_load(vd));

	abd_t *deadbeef = vdev_initialize_block_alloc();

	vd->vdev_initialize_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
	    0, 0);

	for (uint64_t i = 0; !vd->vdev_detached &&
	    i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		boolean_t unload_when_done = B_FALSE;

		/*
		 * If we've expanded the top-level vdev or it's our
		 * first pass, calculate our progress.
		 */
		if (vd->vdev_top->vdev_ms_count != ms_count) {
			vdev_initialize_calculate_progress(vd);
			ms_count = vd->vdev_top->vdev_ms_count;
		}

		spa_config_exit(spa, SCL_CONFIG, FTAG);
		metaslab_disable(msp);
		mutex_enter(&msp->ms_lock);
		if (!msp->ms_loaded && !msp->ms_loading)
			unload_when_done = B_TRUE;
		VERIFY0(metaslab_load(msp));

		range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
		    vd);
		mutex_exit(&msp->ms_lock);

		error = vdev_initialize_ranges(vd, deadbeef);
		metaslab_enable(msp, B_TRUE, unload_when_done);
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight > 0) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	mutex_exit(&vd->vdev_initialize_io_lock);

	range_tree_destroy(vd->vdev_initialize_tree);
	vdev_initialize_block_free(deadbeef);
	vd->vdev_initialize_tree = NULL;

	mutex_enter(&vd->vdev_initialize_lock);
	if (!vd->vdev_initialize_exit_wanted) {
		if (vdev_writeable(vd)) {
			vdev_initialize_change_state(vd,
			    VDEV_INITIALIZE_COMPLETE);
		} else if (vd->vdev_faulted) {
			vdev_initialize_change_state(vd,
			    VDEV_INITIALIZE_CANCELED);
		}
	}
	ASSERT(vd->vdev_initialize_thread != NULL ||
	    vd->vdev_initialize_inflight == 0);

	/*
	 * Drop the vdev_initialize_lock while we sync out the
	 * txg since it's possible that a device might be trying to
	 * come online and must check to see if it needs to restart an
	 * initialization. That thread will be holding the spa_config_lock
	 * which would prevent the txg_wait_synced from completing.
	 */
	mutex_exit(&vd->vdev_initialize_lock);
	txg_wait_synced(spa_get_dsl(spa), 0);
	mutex_enter(&vd->vdev_initialize_lock);

	vd->vdev_initialize_thread = NULL;
	cv_broadcast(&vd->vdev_initialize_cv);
	mutex_exit(&vd->vdev_initialize_lock);

	thread_exit();
}

/*
 * Initiates initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);
	ASSERT(!vd->vdev_top->vdev_rz_expanding);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
	vd->vdev_initialize_thread = thread_create(NULL, 0,
	    vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}

/*
 * Uninitializes a device. Caller must hold vdev_initialize_lock.
 * Device must be a leaf and not already be initializing.
 */
void
vdev_uninitialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_NONE);
}

/*
 * Wait for the initialize thread to be terminated (canceled or stopped).
 */
static void
vdev_initialize_stop_wait_impl(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));

	while (vd->vdev_initialize_thread != NULL)
		cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	vd->vdev_initialize_exit_wanted = B_FALSE;
}

/*
 * Wait for vdev initialize threads which were listed to cleanly exit.
 */
void
vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
{
	(void) spa;
	vdev_t *vd;

	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_export_thread == curthread);

	while ((vd = list_remove_head(vd_list)) != NULL) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop_wait_impl(vd);
		mutex_exit(&vd->vdev_initialize_lock);
	}
}

/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state. For blocking behavior pass NULL for vd_list. Otherwise, when
 * a list_t is provided the stopping vdev is inserted into the list. Callers
 * are then required to call vdev_initialize_stop_wait() to block for all the
 * initialization threads to exit. The caller must hold vdev_initialize_lock
 * and must not be writing to the spa config, as the initializing thread may
 * try to enter the config as a reader before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));

	/*
	 * Allow cancel requests to proceed even if the initialize thread
	 * has stopped.
	 */
	if (vd->vdev_initialize_thread == NULL &&
	    tgt_state != VDEV_INITIALIZE_CANCELED) {
		return;
	}

	vdev_initialize_change_state(vd, tgt_state);
	vd->vdev_initialize_exit_wanted = B_TRUE;

	if (vd_list == NULL) {
		vdev_initialize_stop_wait_impl(vd);
	} else {
		ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
		    vd->vdev_spa->spa_export_thread == curthread);
		list_insert_tail(vd_list, vd);
	}
}
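
/*
 * Recursively descend the vdev tree, requesting a stop on every concrete
 * leaf; stopped vdevs are appended to vd_list for the caller to wait on.
 */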

static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop(vd, tgt_state, vd_list);
		mutex_exit(&vd->vdev_initialize_lock);
		return;
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state,
		    vd_list);
	}
}

/*
 * Convenience function to stop initializing of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	spa_t *spa = vd->vdev_spa;
	list_t vd_list;

	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_export_thread == curthread);

	list_create(&vd_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_initialize_node));

	vdev_initialize_stop_all_impl(vd, tgt_state, &vd_list);
	vdev_initialize_stop_wait(spa, &vd_list);

	if (vd->vdev_spa->spa_sync_on) {
		/* Make sure that our state has been synced to disk */
		txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
	}

	list_destroy(&vd_list);
}
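
/*
 * Restart initialization for a vdev tree, typically at pool import or when
 * a device comes online: re-read the initializing state from the leaf ZAP
 * and either resume an active initialization or just load its progress for
 * reporting.
 */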

void
vdev_initialize_restart(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    vd->vdev_spa->spa_load_thread == curthread);
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_leaf_zap != 0) {
		mutex_enter(&vd->vdev_initialize_lock);
		uint64_t initialize_state = VDEV_INITIALIZE_NONE;
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
		    sizeof (initialize_state), 1, &initialize_state);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_state = initialize_state;

		uint64_t timestamp = 0;
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
		    sizeof (timestamp), 1, &timestamp);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_action_time = timestamp;

		if ((vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vd->vdev_offline) && !vd->vdev_top->vdev_rz_expanding) {
			/* load progress for reporting, but don't resume */
			VERIFY0(vdev_initialize_load(vd));
		} else if (vd->vdev_initialize_state ==
		    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd) &&
		    !vd->vdev_top->vdev_removing &&
		    !vd->vdev_top->vdev_rz_expanding &&
		    vd->vdev_initialize_thread == NULL) {
			vdev_initialize(vd);
		}

		mutex_exit(&vd->vdev_initialize_lock);
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_restart(vd->vdev_child[i]);
	}
}

EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_uninitialize);
EXPORT_SYMBOL(vdev_initialize_stop);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize_stop_wait);
EXPORT_SYMBOL(vdev_initialize_restart);

ZFS_MODULE_PARAM(zfs, zfs_, initialize_value, U64, ZMOD_RW,
	"Value written during zpool initialize");

ZFS_MODULE_PARAM(zfs, zfs_, initialize_chunk_size, U64, ZMOD_RW,
	"Size in bytes of writes by zpool initialize");