/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;

int zfs_scrub_min_time = 1;		/* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3;		/* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE;	/* set to disable scrub i/o */

extern int zfs_txg_timeout;

/* indexed by enum scrub_func; entry 0 is SCRUB_FUNC_NONE */
static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
        NULL,
        dsl_pool_scrub_clean_cb
};
#define	SET_BOOKMARK(zb, objset, object, level, blkid) \
{ \
	(zb)->zb_objset = objset; \
	(zb)->zb_object = object; \
	(zb)->zb_level = level; \
	(zb)->zb_blkid = blkid; \
}
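
/*
 * Note on bookmarks: a zbookmark_t names a block by the 4-tuple
 * (objset, object, level, blkid).  The scrub keeps the bookmark of the
 * place it stopped in dp_scrub_bookmark and persists it in the MOS as
 * four uint64_t's (DMU_POOL_SCRUB_BOOKMARK below), which is what lets a
 * paused scrub resume from the same spot in a later txg.
 */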
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        enum scrub_func *funcp = arg2;
        dmu_object_type_t ot = 0;
        boolean_t complete = B_FALSE;

        dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

        ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
        ASSERT(*funcp > SCRUB_FUNC_NONE);
        ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

        dp->dp_scrub_min_txg = 0;
        dp->dp_scrub_max_txg = tx->tx_txg;

        if (*funcp == SCRUB_FUNC_CLEAN) {
                vdev_t *rvd = dp->dp_spa->spa_root_vdev;

                /* rewrite all disk labels */
                vdev_config_dirty(rvd);

                if (vdev_resilver_needed(rvd,
                    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
                        spa_event_notify(dp->dp_spa, NULL,
                            ESC_ZFS_RESILVER_START);
                        dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
                            tx->tx_txg);
                }

                /* zero out the scrub stats in all vdev_stat_t's */
                vdev_scrub_stat_update(rvd,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);

                dp->dp_spa->spa_scrub_started = B_TRUE;
        }

        /* back to the generic stuff */

        if (dp->dp_blkstats == NULL) {
                dp->dp_blkstats =
                    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
        }
        bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

        if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
                ot = DMU_OT_ZAP_OTHER;

        dp->dp_scrub_func = *funcp;
        dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
            ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
        dp->dp_scrub_restart = B_FALSE;
        dp->dp_spa->spa_scrub_errors = 0;

        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
            &dp->dp_scrub_func, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
            &dp->dp_scrub_queue_obj, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_min_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_max_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &dp->dp_spa->spa_scrub_errors, tx));

        spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
            "func=%u mintxg=%llu maxtxg=%llu",
            *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}
int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_setup_sync, dp, &func, 0));
}
/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        boolean_t *completep = arg2;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        mutex_enter(&dp->dp_scrub_cancel_lock);

        if (dp->dp_scrub_restart) {
                dp->dp_scrub_restart = B_FALSE;
                *completep = B_FALSE;
        }

        /* XXX this is scrub-clean specific */
        mutex_enter(&dp->dp_spa->spa_scrub_lock);
        while (dp->dp_spa->spa_scrub_inflight > 0) {
                cv_wait(&dp->dp_spa->spa_scrub_io_cv,
                    &dp->dp_spa->spa_scrub_lock);
        }
        mutex_exit(&dp->dp_spa->spa_scrub_lock);
        dp->dp_spa->spa_scrub_started = B_FALSE;
        dp->dp_spa->spa_scrub_active = B_FALSE;

        dp->dp_scrub_func = SCRUB_FUNC_NONE;
        VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
            dp->dp_scrub_queue_obj, tx));
        dp->dp_scrub_queue_obj = 0;
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, tx));

        spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
            "complete=%u", *completep);

        /* below is scrub-clean specific */
        vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
            *completep);
        /*
         * If the scrub/resilver completed, update all DTLs to reflect this.
         * Whether it succeeded or not, vacate all temporary scrub DTLs.
         */
        vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
            *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
        if (dp->dp_scrub_min_txg && *completep)
                spa_event_notify(dp->dp_spa, NULL, ESC_ZFS_RESILVER_FINISH);
        spa_errlog_rotate(dp->dp_spa);

        /*
         * We may have finished replacing a device.
         * Let the async thread assess this and handle the detach.
         */
        spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

        dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}
int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
        boolean_t complete = B_FALSE;

        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}
int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
        /*
         * This function will be used by the bp-rewrite wad to intercept
         * frees.
         */
        return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
            done, private, arc_flags));
}
static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
        return (zb->zb_objset == 0 && zb->zb_object == 0 &&
            zb->zb_level == 0 && zb->zb_blkid == 0);
}
/* dnp is the dnode for zb1->zb_object */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
        uint64_t zb1nextL0, zb2thisobj;

        ASSERT(zb1->zb_objset == zb2->zb_objset);
        ASSERT(zb1->zb_object != -1ULL);
        ASSERT(zb2->zb_level == 0);

        /*
         * A bookmark in the deadlist is considered to be after
         * everything else.
         */
        if (zb2->zb_object == -1ULL)
                return (B_TRUE);

        /* The objset_phys_t isn't before anything. */
        if (dnp == NULL)
                return (B_FALSE);

        zb1nextL0 = (zb1->zb_blkid + 1) <<
            ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

        zb2thisobj = zb2->zb_object ? zb2->zb_object :
            zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

        if (zb1->zb_object == 0) {
                uint64_t nextobj = zb1nextL0 *
                    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
                return (nextobj <= zb2thisobj);
        }

        if (zb1->zb_object < zb2thisobj)
                return (B_TRUE);
        if (zb1->zb_object > zb2thisobj)
                return (B_FALSE);
        if (zb2->zb_object == 0)
                return (B_FALSE);
        return (zb1nextL0 <= zb2->zb_blkid);
}
static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
        int mintime;
        int elapsed_ticks;

        if (dp->dp_scrub_pausing)
                return (B_TRUE); /* we're already pausing */

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
                return (B_FALSE); /* we're resuming */

        /* We only know how to resume from level-0 blocks. */
        if (zb->zb_level != 0)
                return (B_FALSE);

        mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
            zfs_scrub_min_time;
        elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
        if (elapsed_ticks > hz * zfs_txg_timeout ||
            (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
                dprintf("pausing at %llx/%llx/%llx/%llx\n",
                    (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
                    (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
                dp->dp_scrub_pausing = B_TRUE;
                dp->dp_scrub_bookmark = *zb;
                return (B_TRUE);
        }
        return (B_FALSE);
}
typedef struct zil_traverse_arg {
        dsl_pool_t	*zta_dp;
        zil_header_t	*zta_zh;
} zil_traverse_arg_t;
/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
        zil_traverse_arg_t *zta = arg;
        dsl_pool_t *dp = zta->zta_dp;
        zil_header_t *zh = zta->zta_zh;
        zbookmark_t zb;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

        if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
                return;

        zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
        zb.zb_object = 0;
        zb.zb_level = -1;
        zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
        VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}
/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
        if (lrc->lrc_txtype == TX_WRITE) {
                zil_traverse_arg_t *zta = arg;
                dsl_pool_t *dp = zta->zta_dp;
                zil_header_t *zh = zta->zta_zh;
                lr_write_t *lr = (lr_write_t *)lrc;
                blkptr_t *bp = &lr->lr_blkptr;
                zbookmark_t zb;

                if (bp->blk_birth <= dp->dp_scrub_min_txg)
                        return;

                if (claim_txg == 0 || bp->blk_birth < claim_txg)
                        return;

                zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
                zb.zb_object = lr->lr_foid;
                zb.zb_level = BP_GET_LEVEL(bp);
                zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
                VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
        }
}
static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
        uint64_t claim_txg = zh->zh_claim_txg;
        zil_traverse_arg_t zta = { dp, zh };
        zilog_t *zilog;

        /*
         * We only want to visit blocks that have been claimed but not yet
         * replayed (or, in read-only mode, blocks that *would* be claimed).
         */
        if (claim_txg == 0 && (spa_mode & FWRITE))
                return;

        zilog = zil_alloc(dp->dp_meta_objset, zh);

        (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
            claim_txg);

        zil_free(zilog);
}
static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
        int err;
        arc_buf_t *buf = NULL;

        if (bp->blk_birth == 0)
                return;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

        if (scrub_pause(dp, zb))
                return;

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
                /*
                 * If we already visited this bp & everything below (in
                 * a prior txg), don't bother doing it again.
                 */
                if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
                        return;

                /*
                 * If we found the block we're trying to resume from, or
                 * we went past it to a different object, zero it out to
                 * indicate that it's OK to start checking for pausing
                 * again.
                 */
                if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
                    zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
                        dprintf("resuming at %llx/%llx/%llx/%llx\n",
                            (longlong_t)zb->zb_objset,
                            (longlong_t)zb->zb_object,
                            (longlong_t)zb->zb_level,
                            (longlong_t)zb->zb_blkid);
                        bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
                }
        }

        if (BP_GET_LEVEL(bp) > 0) {
                uint32_t flags = ARC_WAIT;
                int i;
                blkptr_t *cbp;
                int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                cbp = buf->b_data;

                for (i = 0; i < epb; i++, cbp++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
                            zb->zb_level - 1,
                            zb->zb_blkid * epb + i);
                        scrub_visitbp(dp, dnp, buf, cbp, &czb);
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
                uint32_t flags = ARC_WAIT;
                dnode_phys_t *child_dnp;
                int i, j;
                int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                child_dnp = buf->b_data;

                for (i = 0; i < epb; i++, child_dnp++) {
                        for (j = 0; j < child_dnp->dn_nblkptr; j++) {
                                zbookmark_t czb;

                                SET_BOOKMARK(&czb, zb->zb_objset,
                                    zb->zb_blkid * epb + i,
                                    child_dnp->dn_nlevels - 1, j);
                                scrub_visitbp(dp, child_dnp, buf,
                                    &child_dnp->dn_blkptr[j], &czb);
                        }
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
                uint32_t flags = ARC_WAIT;
                objset_phys_t *osp;
                int j;

                err = arc_read_nolock(NULL, dp->dp_spa, bp,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                osp = buf->b_data;

                traverse_zil(dp, &osp->os_zil_header);

                for (j = 0; j < osp->os_meta_dnode.dn_nblkptr; j++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, 0,
                            osp->os_meta_dnode.dn_nlevels - 1, j);
                        scrub_visitbp(dp, &osp->os_meta_dnode, buf,
                            &osp->os_meta_dnode.dn_blkptr[j], &czb);
                }
        }

        (void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
        if (buf)
                (void) arc_buf_remove_ref(buf, &buf);
}
static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
        zbookmark_t zb;

        SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
        scrub_visitbp(dp, NULL, NULL, bp, &zb);
}
void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) != 0) {
                return;
        }

        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
}
void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                dp->dp_scrub_bookmark.zb_objset =
                    ds->ds_phys->ds_prev_snap_obj;
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_prev_snap_obj, tx) == 0);
        }
}
void
dsl_pool_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds1->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        if (dp->dp_scrub_bookmark.zb_objset == ds1->ds_object) {
                dp->dp_scrub_bookmark.zb_objset = ds2->ds_object;
        } else if (dp->dp_scrub_bookmark.zb_objset == ds2->ds_object) {
                dp->dp_scrub_bookmark.zb_objset = ds1->ds_object;
        }

        if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds1->ds_object, tx) == 0) {
                int err = zap_add_int(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, ds2->ds_object, tx);
                VERIFY(err == 0 || err == EEXIST);
                if (err == EEXIST) {
                        /* Both were there to begin with */
                        VERIFY(0 == zap_add_int(dp->dp_meta_objset,
                            dp->dp_scrub_queue_obj, ds1->ds_object, tx));
                }
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds2->ds_object, tx) == 0) {
                VERIFY(0 == zap_add_int(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
        }
}
struct enqueue_clones_arg {
        dmu_tx_t *tx;
        uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        struct enqueue_clones_arg *eca = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);
        dp = ds->ds_dir->dd_pool;

        if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
                while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
                        dsl_dataset_t *prev;
                        err = dsl_dataset_hold_obj(dp,
                            ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

                        dsl_dataset_rele(ds, FTAG);
                        if (err)
                                return (err);
                        ds = prev;
                }
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_object, eca->tx) == 0);
        }
        dsl_dataset_rele(ds, FTAG);
        return (0);
}
static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
        dsl_dataset_t *ds;
        uint64_t min_txg_save;

        VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

        /*
         * Iterate over the bps in this ds.
         */
        min_txg_save = dp->dp_scrub_min_txg;
        dp->dp_scrub_min_txg =
            MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
        scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
        dp->dp_scrub_min_txg = min_txg_save;

        if (dp->dp_scrub_pausing)
                goto out;

        /*
         * Add descendent datasets to work queue.
         */
        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        if (ds->ds_phys->ds_num_children > 1) {
                if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
                        struct enqueue_clones_arg eca;
                        eca.tx = tx;
                        eca.originobj = ds->ds_object;

                        (void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
                            NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
                } else {
                        VERIFY(zap_join(dp->dp_meta_objset,
                            ds->ds_phys->ds_next_clones_obj,
                            dp->dp_scrub_queue_obj, tx) == 0);
                }
        }

out:
        dsl_dataset_rele(ds, FTAG);
}
/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        dmu_tx_t *tx = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);

        dp = ds->ds_dir->dd_pool;

        while (ds->ds_phys->ds_prev_snap_obj != 0) {
                dsl_dataset_t *prev;
                err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
                    FTAG, &prev);
                if (err) {
                        dsl_dataset_rele(ds, FTAG);
                        return (err);
                }

                /*
                 * If this is a clone, we don't need to worry about it for now.
                 */
                if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_dataset_rele(prev, FTAG);
                        return (0);
                }
                dsl_dataset_rele(ds, FTAG);
                ds = prev;
        }

        VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0);
        dsl_dataset_rele(ds, FTAG);
        return (0);
}
void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
        zap_cursor_t zc;
        zap_attribute_t za;
        boolean_t complete = B_TRUE;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        /* If the spa is not fully loaded, don't bother. */
        if (dp->dp_spa->spa_load_state != SPA_LOAD_NONE)
                return;

        if (dp->dp_scrub_restart) {
                enum scrub_func func = dp->dp_scrub_func;
                dp->dp_scrub_restart = B_FALSE;
                dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
        }

        if (dp->dp_spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) {
                /*
                 * We must have resumed after rebooting; reset the vdev
                 * stats to know that we're doing a scrub (although it
                 * will think we're just starting now).
                 */
                vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);
        }

        dp->dp_scrub_pausing = B_FALSE;
        dp->dp_scrub_start_time = lbolt64;
        dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
        dp->dp_spa->spa_scrub_active = B_TRUE;

        if (dp->dp_scrub_bookmark.zb_objset == 0) {
                /* First do the MOS & ORIGIN */
                scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
                if (dp->dp_scrub_pausing)
                        goto out;

                if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
                        VERIFY(0 == dmu_objset_find_spa(dp->dp_spa,
                            NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
                } else {
                        scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
                }
                ASSERT(!dp->dp_scrub_pausing);
        } else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
                /*
                 * If we were paused, continue from here. Note if the
                 * ds we were paused on was deleted, the zb_objset will
                 * be -1, so we will skip this and find a new objset
                 * below.
                 */
                scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
                if (dp->dp_scrub_pausing)
                        goto out;
        }

        /*
         * In case we were paused right at the end of the ds, zero the
         * bookmark so we don't think that we're still trying to resume.
         */
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        /* keep pulling things out of the zap-object-as-queue */
        while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj),
            zap_cursor_retrieve(&zc, &za) == 0) {
                VERIFY(0 == zap_remove(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, za.za_name, tx));
                scrub_visitds(dp, za.za_first_integer, tx);
                if (dp->dp_scrub_pausing)
                        break;
                zap_cursor_fini(&zc);
        }
        zap_cursor_fini(&zc);
        if (dp->dp_scrub_pausing)
                goto out;

        /* done. */
        dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
        return;
out:
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &dp->dp_spa->spa_scrub_errors, tx));

        /* XXX this is scrub-clean specific */
        mutex_enter(&dp->dp_spa->spa_scrub_lock);
        while (dp->dp_spa->spa_scrub_inflight > 0) {
                cv_wait(&dp->dp_spa->spa_scrub_io_cv,
                    &dp->dp_spa->spa_scrub_lock);
        }
        mutex_exit(&dp->dp_spa->spa_scrub_lock);
}
void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
        mutex_enter(&dp->dp_scrub_cancel_lock);
        dp->dp_scrub_restart = B_TRUE;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}
/*
 * scrub consumers
 */

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
        int i;

        /*
         * If we resume after a reboot, zab will be NULL; don't record
         * incomplete stats in that case.
         */
        if (zab == NULL)
                return;

        for (i = 0; i < 4; i++) {
                int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
                int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
                zfs_blkstat_t *zb = &zab->zab_type[l][t];
                int equal;

                zb->zb_count++;
                zb->zb_asize += BP_GET_ASIZE(bp);
                zb->zb_lsize += BP_GET_LSIZE(bp);
                zb->zb_psize += BP_GET_PSIZE(bp);
                zb->zb_gangs += BP_COUNT_GANG(bp);

                switch (BP_GET_NDVAS(bp)) {
                case 2:
                        if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[1]))
                                zb->zb_ditto_2_of_2_samevdev++;
                        break;
                case 3:
                        /*
                         * equal counts the pairwise vdev matches among
                         * the three DVAs; it can only be 0, 1, or 3.
                         */
                        equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[1])) +
                            (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[2])) +
                            (DVA_GET_VDEV(&bp->blk_dva[1]) ==
                            DVA_GET_VDEV(&bp->blk_dva[2]));
                        if (equal == 1)
                                zb->zb_ditto_2_of_3_samevdev++;
                        else if (equal == 3)
                                zb->zb_ditto_3_of_3_samevdev++;
                        break;
                }
        }
}
static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
        spa_t *spa = zio->io_spa;

        zio_data_buf_free(zio->io_data, zio->io_size);

        mutex_enter(&spa->spa_scrub_lock);
        spa->spa_scrub_inflight--;
        cv_broadcast(&spa->spa_scrub_io_cv);

        if (zio->io_error && (zio->io_error != ECKSUM ||
            !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
                spa->spa_scrub_errors++;
        mutex_exit(&spa->spa_scrub_lock);
}
static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
        size_t size = BP_GET_LSIZE(bp);
        int d;
        spa_t *spa = dp->dp_spa;
        boolean_t needs_io;
        int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
        int zio_priority;

        count_block(dp->dp_blkstats, bp);

        if (dp->dp_scrub_isresilver == 0) {
                /* It's a scrub */
                zio_flags |= ZIO_FLAG_SCRUB;
                zio_priority = ZIO_PRIORITY_SCRUB;
                needs_io = B_TRUE;
        } else {
                /* It's a resilver */
                zio_flags |= ZIO_FLAG_RESILVER;
                zio_priority = ZIO_PRIORITY_RESILVER;
                needs_io = B_FALSE;
        }

        /* If it's an intent log block, failure is expected. */
        if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
                zio_flags |= ZIO_FLAG_SPECULATIVE;

        for (d = 0; d < BP_GET_NDVAS(bp); d++) {
                vdev_t *vd = vdev_lookup_top(spa,
                    DVA_GET_VDEV(&bp->blk_dva[d]));

                /*
                 * Keep track of how much data we've examined so that
                 * zpool(1M) status can make useful progress reports.
                 */
                mutex_enter(&vd->vdev_stat_lock);
                vd->vdev_stat.vs_scrub_examined +=
                    DVA_GET_ASIZE(&bp->blk_dva[d]);
                mutex_exit(&vd->vdev_stat_lock);

                /* if it's a resilver, this may not be in the target range */
                if (!needs_io) {
                        if (DVA_GET_GANG(&bp->blk_dva[d])) {
                                /*
                                 * Gang members may be spread across multiple
                                 * vdevs, so the best we can do is look at the
                                 * pool-wide DTL.
                                 * XXX -- it would be better to change our
                                 * allocation policy to ensure that this can't
                                 * happen.
                                 */
                                vd = spa->spa_root_vdev;
                        }
                        needs_io = vdev_dtl_contains(&vd->vdev_dtl_map,
                            bp->blk_birth, 1);
                }
        }

        if (needs_io && !zfs_no_scrub_io) {
                void *data = zio_data_buf_alloc(size);

                mutex_enter(&spa->spa_scrub_lock);
                while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
                        cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
                spa->spa_scrub_inflight++;
                mutex_exit(&spa->spa_scrub_lock);

                zio_nowait(zio_read(NULL, spa, bp, data, size,
                    dsl_pool_scrub_clean_done, NULL, zio_priority,
                    zio_flags, zb));
        }

        /* do not relocate this block */
        return (0);
}
int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
        /*
         * Purge all vdev caches. We do this here rather than in sync
         * context because this requires a writer lock on the spa_config
         * lock, which we can't do from sync context. The
         * spa_scrub_reopen flag indicates that vdev_open() should not
         * attempt to start another scrub.
         */
        spa_config_enter(dp->dp_spa, SCL_ALL, FTAG, RW_WRITER);
        dp->dp_spa->spa_scrub_reopen = B_TRUE;
        vdev_reopen(dp->dp_spa->spa_root_vdev);
        dp->dp_spa->spa_scrub_reopen = B_FALSE;
        spa_config_exit(dp->dp_spa, SCL_ALL, FTAG);

        return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}