module/zfs/dsl_destroy.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_bookmark.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zvol.h>
#include <sys/zcp.h>
#include <sys/dsl_deadlist.h>
#include <sys/zthr.h>
#include <sys/spa_impl.h>

int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}

int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_snapshot_arg_t *ddsa = arg;
	const char *dsname = ddsa->ddsa_name;
	boolean_t defer = ddsa->ddsa_defer;

	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error = 0;
	dsl_dataset_t *ds;

	error = dsl_dataset_hold(dp, dsname, FTAG, &ds);

	/*
	 * If the snapshot does not exist, silently ignore it, and
	 * dsl_destroy_snapshot_sync() will be a no-op
	 * (it's "already destroyed").
	 */
	if (error == ENOENT)
		return (0);

	if (error == 0) {
		error = dsl_destroy_snapshot_check_impl(ds, defer);
		dsl_dataset_rele(ds, FTAG);
	}

	return (error);
}
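
/*
 * Note: like other DSL sync tasks, snapshot destruction is split into a
 * "check" function (above), which validates the request and may run in
 * open context as well as in syncing context, and a "sync" function,
 * which applies the change in syncing context.
 * dsl_destroy_snapshot_sync() below is the counterpart of this check.
 */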

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, bp_freed, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}

	return (0);
}

static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}

typedef struct remaining_clones_key {
	dsl_dataset_t *rck_clone;
	list_node_t rck_node;
} remaining_clones_key_t;

static remaining_clones_key_t *
rck_alloc(dsl_dataset_t *clone)
{
	remaining_clones_key_t *rck = kmem_alloc(sizeof (*rck), KM_SLEEP);
	rck->rck_clone = clone;
	return (rck);
}

static void
dsl_dir_remove_clones_key_impl(dsl_dir_t *dd, uint64_t mintxg, dmu_tx_t *tx,
    list_t *stack, void *tag)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(dd)->dd_clones == 0)
		return;

	zap_cursor_t *zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	zap_attribute_t *za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	for (zap_cursor_init(zc, mos, dsl_dir_phys(dd)->dd_clones);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
		    za->za_first_integer, tag, &clone));

		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);

			if (dsl_dataset_remap_deadlist_exists(clone)) {
				dsl_deadlist_remove_key(
				    &clone->ds_remap_deadlist, mintxg, tx);
			}

			list_insert_head(stack, rck_alloc(clone));
		} else {
			dsl_dataset_rele(clone, tag);
		}
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}

void
dsl_dir_remove_clones_key(dsl_dir_t *top_dd, uint64_t mintxg, dmu_tx_t *tx)
{
	list_t stack;

	list_create(&stack, sizeof (remaining_clones_key_t),
	    offsetof(remaining_clones_key_t, rck_node));

	dsl_dir_remove_clones_key_impl(top_dd, mintxg, tx, &stack, FTAG);
	for (remaining_clones_key_t *rck = list_remove_head(&stack);
	    rck != NULL; rck = list_remove_head(&stack)) {
		dsl_dataset_t *clone = rck->rck_clone;
		dsl_dir_t *clone_dir = clone->ds_dir;

		kmem_free(rck, sizeof (*rck));

		dsl_dir_remove_clones_key_impl(clone_dir, mintxg, tx,
		    &stack, FTAG);
		dsl_dataset_rele(clone, FTAG);
	}

	list_destroy(&stack);
}
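
/*
 * Note: dsl_dir_remove_clones_key() walks the tree of clones iteratively,
 * using an explicit stack of remaining_clones_key_t entries instead of
 * recursion, presumably so that long chains of clones cannot overflow the
 * limited kernel stack.
 */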

static void
dsl_destroy_snapshot_handle_remaps(dsl_dataset_t *ds, dsl_dataset_t *ds_next,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Move blocks to be obsoleted to pool's obsolete list. */
	if (dsl_dataset_remap_deadlist_exists(ds_next)) {
		if (!bpobj_is_open(&dp->dp_obsolete_bpobj))
			dsl_pool_create_obsolete_bpobj(dp, tx);

		dsl_deadlist_move_bpobj(&ds_next->ds_remap_deadlist,
		    &dp->dp_obsolete_bpobj,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
	}

	/* Merge our deadlist into next's and free it. */
	if (dsl_dataset_remap_deadlist_exists(ds)) {
		uint64_t remap_deadlist_object =
		    dsl_dataset_get_remap_deadlist_object(ds);
		ASSERT(remap_deadlist_object != 0);

		mutex_enter(&ds_next->ds_remap_deadlist_lock);
		if (!dsl_dataset_remap_deadlist_exists(ds_next))
			dsl_dataset_create_remap_deadlist(ds_next, tx);
		mutex_exit(&ds_next->ds_remap_deadlist_lock);

		dsl_deadlist_merge(&ds_next->ds_remap_deadlist,
		    remap_deadlist_object, tx);
		dsl_dataset_destroy_remap_deadlist(ds, tx);
	}
}

void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(zfs_refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, " ");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, " ");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	boolean_t book_exists = dsl_bookmark_ds_destroyed(ds, tx);

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (dsl_dataset_feature_is_active(ds, f))
			dsl_dataset_deactivate_feature(ds, f, tx);
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	dsl_dataset_t *ds_next;
	uint64_t old_unique;
	uint64_t used = 0, comp = 0, uncomp = 0;

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);

		/*
		 * We are done with the deadlist tree (generated/used
		 * by dsl_deadlist_move_bpobj() and dsl_deadlist_merge()).
		 * Discard it to save memory.
		 */
		dsl_deadlist_discard_tree(&ds_next->ds_deadlist);
	}

	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	dsl_destroy_snapshot_handle_remaps(ds, ds_next, tx);

	if (!book_exists) {
		/* Collapse range in clone heads */
		dsl_dir_remove_clones_key(ds->ds_dir,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
	}

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it. Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		dsl_dataset_t *hds;
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
		    FTAG, &hds));
		if (!book_exists) {
			/* Collapse range in this head. */
			dsl_deadlist_remove_key(&hds->ds_deadlist,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		}
		if (dsl_dataset_remap_deadlist_exists(hds)) {
			dsl_deadlist_remove_key(&hds->ds_remap_deadlist,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		}
		dsl_dataset_rele(hds, FTAG);

	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	dsl_dataset_t *ds_head;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;
		int err;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		uint64_t count __maybe_unused;
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}
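
/*
 * Note: when "defer" is set and the snapshot still has user holds or more
 * than one child (i.e. clones), dsl_destroy_snapshot_sync_impl() above only
 * sets DS_FLAG_DEFER_DESTROY and returns; the snapshot is destroyed later,
 * when the last hold or clone goes away (see the rmorigin handling in
 * dsl_destroy_head_sync_impl() and the user-hold release path in
 * dsl_userhold.c).
 */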

void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_snapshot_arg_t *ddsa = arg;
	const char *dsname = ddsa->ddsa_name;
	boolean_t defer = ddsa->ddsa_defer;

	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
	if (error == ENOENT)
		return;
	ASSERT0(error);
	dsl_destroy_snapshot_sync_impl(ds, defer, tx);
	zvol_remove_minors(dp->dp_spa, dsname, B_TRUE);
	dsl_dataset_rele(ds, FTAG);
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps(). To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	if (nvlist_next_nvpair(snaps, NULL) == NULL)
		return (0);

	/*
	 * lzc_destroy_snaps() is documented to take an nvlist whose
	 * values "don't matter". We need to convert that nvlist to
	 * one that we know can be converted to LUA.
	 */
	nvlist_t *snaps_normalized = fnvlist_alloc();
	for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
		fnvlist_add_boolean_value(snaps_normalized,
		    nvpair_name(pair), B_TRUE);
	}

	nvlist_t *arg = fnvlist_alloc();
	fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
	fnvlist_free(snaps_normalized);
	fnvlist_add_boolean_value(arg, "defer", defer);

	nvlist_t *wrapper = fnvlist_alloc();
	fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
	fnvlist_free(arg);

	const char *program =
	    "arg = ...\n"
	    "snaps = arg['snaps']\n"
	    "defer = arg['defer']\n"
	    "errors = { }\n"
	    "has_errors = false\n"
	    "for snap, v in pairs(snaps) do\n"
	    "    errno = zfs.check.destroy{snap, defer=defer}\n"
	    "    zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
	    "    if errno == ENOENT then\n"
	    "        snaps[snap] = nil\n"
	    "    elseif errno ~= 0 then\n"
	    "        errors[snap] = errno\n"
	    "        has_errors = true\n"
	    "    end\n"
	    "end\n"
	    "if has_errors then\n"
	    "    return errors\n"
	    "end\n"
	    "for snap, v in pairs(snaps) do\n"
	    "    errno = zfs.sync.destroy{snap, defer=defer}\n"
	    "    assert(errno == 0)\n"
	    "end\n"
	    "return { }\n";

	nvlist_t *result = fnvlist_alloc();
	int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
	    program,
	    B_TRUE,
	    0,
	    zfs_lua_max_memlimit,
	    fnvlist_lookup_nvpair(wrapper, ZCP_ARG_ARGLIST), result);
	if (error != 0) {
		char *errorstr = NULL;
		(void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
		if (errorstr != NULL) {
			zfs_dbgmsg("%s", errorstr);
		}
		fnvlist_free(wrapper);
		fnvlist_free(result);
		return (error);
	}
	fnvlist_free(wrapper);

	/*
	 * lzc_destroy_snaps() is documented to fill the errlist with
	 * int32 values, so we need to convert the int64 values that are
	 * returned from LUA.
	 */
	int rv = 0;
	nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
	for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
		int32_t val = (int32_t)fnvpair_value_int64(pair);
		if (rv == 0)
			rv = val;
		fnvlist_add_int32(errlist, nvpair_name(pair), val);
	}
	fnvlist_free(result);
	return (rv);
}
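
/*
 * Usage sketch (illustrative; the caller shown is not part of this file):
 * user space normally reaches dsl_destroy_snapshots_nvl() through
 * libzfs_core, e.g.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_boolean(snaps, "pool/fs@snap1");
 *	fnvlist_add_boolean(snaps, "pool/fs@snap2");
 *	int err = lzc_destroy_snaps(snaps, B_FALSE, &errlist);
 *
 * which passes the same nvlist and defer flag down to this function via
 * the ZFS_IOC_DESTROY_SNAPS ioctl.
 */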

int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}

struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	(void) spa, (void) dnp;
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
	    BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log. It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	spa_history_log_internal_ds(ds, "destroy", tx,
	    "(synchronous, mintxg=%llu)",
	    (long long)dsl_dataset_phys(ds)->ds_prev_snap_txg);

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST |
	    TRAVERSE_NO_DECRYPT, kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}
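
/*
 * Note: old_synchronous_dataset_destroy() is only used when the
 * async_destroy feature is not enabled on the pool; it frees every block
 * pointer in a single txg via traverse_dataset().  This is why
 * dsl_destroy_head() below pre-frees the dataset's objects in open context
 * on such pools before running the final sync task.
 */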

int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	ASSERT0(ds->ds_dir->dd_activity_waiters);

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!zfs_refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}

int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/* Decrement the filesystem count for all parent filesystems. */
	if (dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	if (dd->dd_crypto_obj != 0) {
		dsl_crypto_key_destroy_sync(dd->dd_crypto_obj, tx);
		(void) spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
	}

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	if (dsl_dir_phys(dd)->dd_clones != 0)
		VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_clones, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}

static void
dsl_clone_destroy_assert(dsl_dir_t *dd)
{
	uint64_t used, comp, uncomp;

	ASSERT(dsl_dir_is_clone(dd));
	dsl_deadlist_space(&dd->dd_livelist, &used, &comp, &uncomp);

	ASSERT3U(dsl_dir_phys(dd)->dd_used_bytes, ==, used);
	ASSERT3U(dsl_dir_phys(dd)->dd_compressed_bytes, ==, comp);
	/*
	 * Greater than because we do not track embedded block pointers in
	 * the livelist
	 */
	ASSERT3U(dsl_dir_phys(dd)->dd_uncompressed_bytes, >=, uncomp);

	ASSERT(list_is_empty(&dd->dd_pending_allocs.bpl_list));
	ASSERT(list_is_empty(&dd->dd_pending_frees.bpl_list));
}

/*
 * Start the delete process for a clone. Free its zil, verify the space usage
 * and queue the blkptrs for deletion by adding the livelist to the pool-wide
 * delete queue.
 */
static void
dsl_async_clone_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t zap_obj, to_delete, used, comp, uncomp;
	objset_t *os;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	uint64_t mintxg = 0;
	dsl_deadlist_entry_t *dle = dsl_deadlist_first(&dd->dd_livelist);
	if (dle != NULL)
		mintxg = dle->dle_mintxg;

	spa_history_log_internal_ds(ds, "destroy", tx,
	    "(livelist, mintxg=%llu)", (long long)mintxg);

	/* Check that the clone is in a correct state to be deleted */
	dsl_clone_destroy_assert(dd);

	/* Destroy the zil */
	zil_destroy_sync(dmu_objset_zil(os), tx);

	VERIFY0(zap_lookup(mos, dd->dd_object,
	    DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &to_delete));
	/* Initialize deleted_clones entry to track livelists to cleanup */
	int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
	if (error == ENOENT) {
		zap_obj = zap_create(mos, DMU_OTN_ZAP_METADATA,
		    DMU_OT_NONE, 0, tx);
		VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1,
		    &(zap_obj), tx));
		spa->spa_livelists_to_delete = zap_obj;
	} else if (error != 0) {
		zfs_panic_recover("zfs: error %d was returned while looking "
		    "up DMU_POOL_DELETED_CLONES in the zap", error);
		return;
	}
	VERIFY0(zap_add_int(mos, zap_obj, to_delete, tx));

	/* Clone is no longer using space, now tracked by dp_free_dir */
	dsl_deadlist_space(&dd->dd_livelist, &used, &comp, &uncomp);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    -used, -comp, -dsl_dir_phys(dd)->dd_uncompressed_bytes,
	    tx);
	dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
	    used, comp, uncomp, tx);
	dsl_dir_remove_livelist(dd, tx, B_FALSE);
	zthr_wakeup(spa->spa_livelist_delete_zthr);
}
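
/*
 * Note: the livelist registered in the DMU_POOL_DELETED_CLONES zap above is
 * consumed asynchronously by the spa_livelist_delete_zthr thread (woken at
 * the end of this function), which frees the clone's block pointers in the
 * background after this sync task completes.
 */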

/*
 * Move the bptree into the pool's list of trees to clean up, update space
 * accounting information and destroy the zil.
 */
static void
dsl_async_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t used, comp, uncomp;
	objset_t *os;

	VERIFY0(dmu_objset_from_ds(ds, &os));
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;

	spa_history_log_internal_ds(ds, "destroy", tx,
	    "(bptree, mintxg=%llu)",
	    (long long)dsl_dataset_phys(ds)->ds_prev_snap_txg);

	zil_destroy_sync(dmu_objset_zil(os), tx);

	if (!spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY)) {
		dsl_scan_t *scn = dp->dp_scan;
		spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
		    tx);
		dp->dp_bptree_obj = bptree_alloc(mos, tx);
		VERIFY0(zap_add(mos,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj, tx));
		ASSERT(!scn->scn_async_destroying);
		scn->scn_async_destroying = B_TRUE;
	}

	used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
	comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
	uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == used);

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	bptree_add(mos, dp->dp_bptree_obj,
	    &dsl_dataset_phys(ds)->ds_bp,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg,
	    used, comp, uncomp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
	    -used, -comp, -uncomp, tx);
	dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
	    used, comp, uncomp, tx);
}
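
/*
 * Note: the block pointers added to dp_bptree_obj above are not freed in
 * this txg; they are processed later by the async-destroy traversal in
 * dsl_scan_sync().  The space is charged to dp_free_dir immediately so that
 * pool accounting stays consistent while the frees are pending.
 */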

void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	dsl_dir_cancel_waiters(ds->ds_dir);

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (dsl_dataset_feature_is_active(ds, f))
			dsl_dataset_deactivate_feature(ds, f, tx);
	}

	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist. Unless it's a clone, the
	 * deadlist should be empty since the dataset has no snapshots.
	 * (If it's a clone, it's safe to ignore the deadlist contents
	 * since they are still referenced by the origin snapshot.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	if (dsl_dataset_remap_deadlist_exists(ds))
		dsl_dataset_destroy_remap_deadlist(ds, tx);

	/*
	 * Each destroy is responsible for both destroying (enqueuing
	 * to be destroyed) the blkptrs comprising the dataset as well as
	 * those belonging to the zil.
	 */
	if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist)) {
		dsl_async_clone_destroy(ds, tx);
	} else if (spa_feature_is_enabled(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY)) {
		dsl_async_dataset_destroy(ds, tx);
	} else {
		old_synchronous_dataset_destroy(ds, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks_obj != 0) {
		void *cookie = NULL;
		dsl_bookmark_node_t *dbn;

		while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) !=
		    NULL) {
			if (dbn->dbn_phys.zbm_redaction_obj != 0) {
				VERIFY0(dmu_object_free(mos,
				    dbn->dbn_phys.zbm_redaction_obj, tx));
				spa_feature_decr(dmu_objset_spa(mos),
				    SPA_FEATURE_REDACTION_BOOKMARKS, tx);
			}
			if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
				spa_feature_decr(dmu_objset_spa(mos),
				    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
			}
			spa_strfree(dbn->dbn_name);
			mutex_destroy(&dbn->dbn_lock);
			kmem_free(dbn, sizeof (*dbn));
		}
		avl_destroy(&ds->ds_bookmarks);
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
	dsl_dataset_rele(ds, FTAG);
}

static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, " ");
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_DESTROY);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long. This optimization can only work for
		 * encrypted datasets if the wrapping key is loaded.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_TRUE,
		    FTAG, &os);
		if (error == 0) {
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (uint64_t obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, B_TRUE, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_DESTROY));
}
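
/*
 * Usage sketch (illustrative; not a caller that exists in this file): a
 * head dataset (filesystem, volume, or clone) is destroyed by name, e.g.
 *
 *	error = dsl_destroy_head("pool/fs");
 *
 * Validation is performed by dsl_destroy_head_check(); on pools without the
 * async_destroy feature the dataset's objects are pre-freed in open context
 * above to keep the final sync task short.
 */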

/*
 * Note, this function is used as the callback for dmu_objset_find(). We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	(void) arg;
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}
	return (0);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif