/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 RackTop Systems.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright 2016, OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2020 The FreeBSD Foundation [1]
 *
 * [1] Portions of this software were developed by Allan Jude
 *     under sponsorship from the FreeBSD Foundation.
 */
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_bookmark.h>
#include <sys/policy.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
#include <sys/zio_checksum.h>
/*
 * The SPA supports block sizes up to 16MB.  However, very large blocks
 * can have an impact on i/o latency (e.g. tying up a spinning disk for
 * ~300ms), and also potentially on the memory allocator.  Therefore,
 * we did not allow the recordsize to be set larger than zfs_max_recordsize
 * (former default: 1MB).  Larger blocks can be created by changing this
 * tunable, and pools with larger blocks can always be imported and used,
 * regardless of this setting.
 *
 * We do, however, still limit it by default to 1M on x86_32, because Linux's
 * 3/1 memory split doesn't leave much room for 16M chunks.
 */
#ifdef _ILP32
uint_t zfs_max_recordsize = 1 * 1024 * 1024;
#else
uint_t zfs_max_recordsize = 16 * 1024 * 1024;
#endif

static int zfs_allow_redacted_dataset_mount = 0;

int zfs_snapshot_history_enabled = 1;
#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)
static void dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds,
    uint64_t obj, dmu_tx_t *tx);
static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds,
    dmu_tx_t *tx);

static void unload_zfeature(dsl_dataset_t *ds, spa_feature_t f);

extern uint_t spa_asize_inflation;

static zil_header_t zero_zil;
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	dsl_dataset_phys_t *ds_phys;
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	ds_phys = dsl_dataset_phys(ds);
	old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	int used = bp_get_dsize_sync(spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;
	spa_feature_t f;

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
	if (ds == NULL) {
		dsl_pool_mos_diduse_space(tx->tx_pool,
		    used, compressed, uncompressed);
		return;
	}

	ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), >,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	dsl_dataset_phys(ds)->ds_referenced_bytes += used;
	dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
	dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
	dsl_dataset_phys(ds)->ds_unique_bytes += used;

	if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
		ds->ds_feature_activation[SPA_FEATURE_LARGE_BLOCKS] =
		    (void *)B_TRUE;
	}

	f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
	if (f != SPA_FEATURE_NONE) {
		ASSERT3S(spa_feature_table[f].fi_type, ==,
		    ZFEATURE_TYPE_BOOLEAN);
		ds->ds_feature_activation[f] = (void *)B_TRUE;
	}

	f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
	if (f != SPA_FEATURE_NONE) {
		ASSERT3S(spa_feature_table[f].fi_type, ==,
		    ZFEATURE_TYPE_BOOLEAN);
		ds->ds_feature_activation[f] = (void *)B_TRUE;
	}

	/*
	 * Track block for livelist, but ignore embedded blocks because
	 * they do not need to be freed.
	 */
	if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
	    BP_GET_LOGICAL_BIRTH(bp) > ds->ds_dir->dd_origin_txg &&
	    !(BP_IS_EMBEDDED(bp))) {
		ASSERT(dsl_dir_is_clone(ds->ds_dir));
		ASSERT(spa_feature_is_enabled(spa,
		    SPA_FEATURE_LIVELIST));
		bplist_append(&ds->ds_dir->dd_pending_allocs, bp);
	}

	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_transfer_space(ds->ds_dir, delta,
	    compressed, uncompressed, used,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
}
/*
 * Called when the specified segment has been remapped, and is thus no
 * longer referenced in the head dataset.  The vdev must be indirect.
 *
 * If the segment is referenced by a snapshot, put it on the remap deadlist.
 * Otherwise, add this segment to the obsolete spacemap.
 */
void
dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev, uint64_t offset,
    uint64_t size, uint64_t birth, dmu_tx_t *tx)
{
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(birth <= tx->tx_txg);
	ASSERT(!ds->ds_is_snapshot);

	if (birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
	} else {
		blkptr_t fakebp;
		dva_t *dva = &fakebp.blk_dva[0];

		ASSERT(ds != NULL);

		mutex_enter(&ds->ds_remap_deadlist_lock);
		if (!dsl_dataset_remap_deadlist_exists(ds)) {
			dsl_dataset_create_remap_deadlist(ds, tx);
		}
		mutex_exit(&ds->ds_remap_deadlist_lock);

		BP_ZERO(&fakebp);
		BP_SET_LOGICAL_BIRTH(&fakebp, birth);
		DVA_SET_VDEV(dva, vdev);
		DVA_SET_OFFSET(dva, offset);
		DVA_SET_ASIZE(dva, size);
		dsl_deadlist_insert(&ds->ds_remap_deadlist, &fakebp, B_FALSE,
		    tx);
	}
}
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	int used = bp_get_dsize_sync(spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);

	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(BP_GET_LOGICAL_BIRTH(bp) <= tx->tx_txg);

	if (ds == NULL) {
		dsl_free(tx->tx_pool, tx->tx_txg, bp);
		dsl_pool_mos_diduse_space(tx->tx_pool,
		    -used, -compressed, -uncompressed);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!ds->ds_is_snapshot);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	/*
	 * Track block for livelist, but ignore embedded blocks because
	 * they do not need to be freed.
	 */
	if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
	    BP_GET_LOGICAL_BIRTH(bp) > ds->ds_dir->dd_origin_txg &&
	    !(BP_IS_EMBEDDED(bp))) {
		ASSERT(dsl_dir_is_clone(ds->ds_dir));
		ASSERT(spa_feature_is_enabled(spa,
		    SPA_FEATURE_LIVELIST));
		bplist_append(&ds->ds_dir->dd_pending_frees, bp);
	}

	if (BP_GET_LOGICAL_BIRTH(bp) > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
		int64_t delta;

		/*
		 * Put blocks that would create IO on the pool's deadlist for
		 * dsl_process_async_destroys() to find.  This is to prevent
		 * zio_free() from creating a ZIO_TYPE_FREE IO for them, which
		 * are very heavy and can lead to out-of-memory conditions if
		 * something tries to free millions of blocks on the same txg.
		 */
		boolean_t defer = spa_version(spa) >= SPA_VERSION_DEADLISTS &&
		    (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
		    brt_maybe_exists(spa, bp));

		if (defer) {
			dprintf_bp(bp, "putting on free list: %s", "");
			bpobj_enqueue(&ds->ds_dir->dd_pool->dp_free_bpobj,
			    bp, B_FALSE, tx);
		} else {
			dprintf_bp(bp, "freeing ds=%llu",
			    (u_longlong_t)ds->ds_object);
			dsl_free(tx->tx_pool, tx->tx_txg, bp);
		}

		mutex_enter(&ds->ds_lock);
		ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		dsl_dataset_phys(ds)->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);

		dsl_dir_diduse_transfer_space(ds->ds_dir,
		    delta, -compressed, -uncompressed, -used,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);

		if (defer)
			dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
			    DD_USED_HEAD, used, compressed, uncompressed, tx);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, B_FALSE, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj);
		ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0);
		/* if (logical birth > prev prev snap txg) prev unique += bs */
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
		    ds->ds_object && BP_GET_LOGICAL_BIRTH(bp) >
		    dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (BP_GET_LOGICAL_BIRTH(bp) > ds->ds_dir->dd_origin_txg) {
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}

	dsl_bookmark_block_killed(ds, bp, tx);

	mutex_enter(&ds->ds_lock);
	ASSERT3U(dsl_dataset_phys(ds)->ds_referenced_bytes, >=, used);
	dsl_dataset_phys(ds)->ds_referenced_bytes -= used;
	ASSERT3U(dsl_dataset_phys(ds)->ds_compressed_bytes, >=, compressed);
	dsl_dataset_phys(ds)->ds_compressed_bytes -= compressed;
	ASSERT3U(dsl_dataset_phys(ds)->ds_uncompressed_bytes, >=, uncompressed);
	dsl_dataset_phys(ds)->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
struct feature_type_uint64_array_arg {
	uint64_t length;
	uint64_t *array;
};

static void
unload_zfeature(dsl_dataset_t *ds, spa_feature_t f)
{
	switch (spa_feature_table[f].fi_type) {
	case ZFEATURE_TYPE_BOOLEAN:
		break;
	case ZFEATURE_TYPE_UINT64_ARRAY:
	{
		struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
		kmem_free(ftuaa->array, ftuaa->length * sizeof (uint64_t));
		kmem_free(ftuaa, sizeof (*ftuaa));
		break;
	}
	default:
		panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
	}
}
static int
load_zfeature(objset_t *mos, dsl_dataset_t *ds, spa_feature_t f)
{
	int err = 0;
	switch (spa_feature_table[f].fi_type) {
	case ZFEATURE_TYPE_BOOLEAN:
		err = zap_contains(mos, ds->ds_object,
		    spa_feature_table[f].fi_guid);
		if (err == 0) {
			ds->ds_feature[f] = (void *)B_TRUE;
		} else {
			ASSERT3U(err, ==, ENOENT);
			err = 0;
		}
		break;
	case ZFEATURE_TYPE_UINT64_ARRAY:
	{
		uint64_t int_size, num_int;
		uint64_t *data;
		err = zap_length(mos, ds->ds_object,
		    spa_feature_table[f].fi_guid, &int_size, &num_int);
		if (err != 0) {
			ASSERT3U(err, ==, ENOENT);
			err = 0;
			break;
		}
		ASSERT3U(int_size, ==, sizeof (uint64_t));
		data = kmem_alloc(int_size * num_int, KM_SLEEP);
		VERIFY0(zap_lookup(mos, ds->ds_object,
		    spa_feature_table[f].fi_guid, int_size, num_int, data));
		struct feature_type_uint64_array_arg *ftuaa =
		    kmem_alloc(sizeof (*ftuaa), KM_SLEEP);
		ftuaa->length = num_int;
		ftuaa->array = data;
		ds->ds_feature[f] = ftuaa;
		break;
	}
	default:
		panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
	}
	return (err);
}
/*
 * We have to release the fsid synchronously or we risk that a subsequent
 * mount of the same dataset will fail to unique_insert the fsid.  This
 * failure would manifest itself as the fsid of this dataset changing
 * between mounts which makes NFS clients quite unhappy.
 */
static void
dsl_dataset_evict_sync(void *dbu)
{
	dsl_dataset_t *ds = dbu;

	ASSERT(ds->ds_owner == NULL);

	unique_remove(ds->ds_fsid_guid);
}
static void
dsl_dataset_evict_async(void *dbu)
{
	dsl_dataset_t *ds = dbu;

	ASSERT(ds->ds_owner == NULL);

	ds->ds_dbuf = NULL;

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	dsl_bookmark_fini_ds(ds);

	bplist_destroy(&ds->ds_pending_deadlist);
	if (dsl_deadlist_is_open(&ds->ds_deadlist))
		dsl_deadlist_close(&ds->ds_deadlist);
	if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
		dsl_deadlist_close(&ds->ds_remap_deadlist);

	dsl_dir_async_rele(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (dsl_dataset_feature_is_active(ds, f))
			unload_zfeature(ds, f);
	}

	list_destroy(&ds->ds_sendstreams);
	list_destroy(&ds->ds_prop_cbs);
	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_opening_lock);
	mutex_destroy(&ds->ds_sendstream_lock);
	mutex_destroy(&ds->ds_remap_deadlist_lock);
	zfs_refcount_destroy(&ds->ds_longholds);
	rrw_destroy(&ds->ds_bp_rwlock);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	int err;
	dmu_buf_t *headdbuf;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err != 0)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname,
	    sizeof (ds->ds_snapname));
	if (err != 0 && zfs_recover == B_TRUE) {
		err = 0;
		(void) snprintf(ds->ds_snapname, sizeof (ds->ds_snapname),
		    "SNAPOBJ=%llu-ERR=%d",
		    (unsigned long long)ds->ds_object, err);
	}
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}
int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
	matchtype_t mt = 0;
	int err;

	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_NORMALIZE;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && (mt & MT_NORMALIZE))
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}
int
dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
    boolean_t adj_cnt)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
	matchtype_t mt = 0;
	int err;

	dsl_dir_snap_cmtime_update(ds->ds_dir, tx);

	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_NORMALIZE;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && (mt & MT_NORMALIZE))
		err = zap_remove(mos, snapobj, name, tx);

	if (err == 0 && adj_cnt)
		dsl_fs_ss_count_adjust(ds->ds_dir, -1,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

	return (err);
}
boolean_t
dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, const void *tag)
{
	dmu_buf_t *dbuf = ds->ds_dbuf;
	boolean_t result = B_FALSE;

	if (dbuf != NULL && dmu_buf_try_add_ref(dbuf, dp->dp_meta_objset,
	    ds->ds_object, DMU_BONUS_BLKID, tag)) {

		if (ds == dmu_buf_get_user(dbuf))
			result = B_TRUE;
		else
			dmu_buf_rele(dbuf, tag);
	}

	return (result);
}
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, const void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;
	dmu_object_info_t doi;

	ASSERT(dsl_pool_config_held(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err != 0)
		return (err);

	/* Make sure dsobj has the correct object type. */
	dmu_object_info_from_db(dbuf, &doi);
	if (doi.doi_bonus_type != DMU_OT_DSL_DATASET) {
		dmu_buf_rele(dbuf, tag);
		return (SET_ERROR(EINVAL));
	}

	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner = NULL;

		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_is_snapshot = dsl_dataset_phys(ds)->ds_num_children != 0;
		list_link_init(&ds->ds_synced_link);

		err = dsl_dir_hold_obj(dp, dsl_dataset_phys(ds)->ds_dir_obj,
		    NULL, ds, &ds->ds_dir);
		if (err != 0) {
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_remap_deadlist_lock,
		    NULL, MUTEX_DEFAULT, NULL);
		rrw_init(&ds->ds_bp_rwlock, B_FALSE);
		zfs_refcount_create(&ds->ds_longholds);

		bplist_create(&ds->ds_pending_deadlist);

		list_create(&ds->ds_sendstreams, sizeof (dmu_sendstatus_t),
		    offsetof(dmu_sendstatus_t, dss_link));

		list_create(&ds->ds_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_ds_node));

		if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
			spa_feature_t f;

			for (f = 0; f < SPA_FEATURES; f++) {
				if (!(spa_feature_table[f].fi_flags &
				    ZFEATURE_FLAG_PER_DATASET))
					continue;
				err = load_zfeature(mos, ds, f);
			}
		}

		if (!ds->ds_is_snapshot) {
			ds->ds_snapname[0] = '\0';
			if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
				err = dsl_dataset_hold_obj(dp,
				    dsl_dataset_phys(ds)->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}
			if (err != 0)
				goto after_dsl_bookmark_fini;
			err = dsl_bookmark_init_ds(ds);
		} else {
			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
				err = dsl_dataset_get_snapname(ds);
			if (err == 0 &&
			    dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
				err = zap_count(
				    ds->ds_dir->dd_pool->dp_meta_objset,
				    dsl_dataset_phys(ds)->ds_userrefs_obj,
				    &ds->ds_userrefs);
			}
		}

		if (err == 0 && !ds->ds_is_snapshot) {
			err = dsl_prop_get_int_ds(ds,
			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
			    &ds->ds_reserved);
			if (err == 0) {
				err = dsl_prop_get_int_ds(ds,
				    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
				    &ds->ds_quota);
			}
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		if (err == 0 && ds->ds_dir->dd_crypto_obj != 0 &&
		    ds->ds_is_snapshot &&
		    zap_contains(mos, dsobj, DS_FIELD_IVSET_GUID) != 0) {
			dp->dp_spa->spa_errata =
			    ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
		}

		if (err == 0) {
			err = dsl_deadlist_open(&ds->ds_deadlist,
			    mos, dsl_dataset_phys(ds)->ds_deadlist_obj);
		}
		if (err == 0) {
			uint64_t remap_deadlist_obj =
			    dsl_dataset_get_remap_deadlist_object(ds);
			if (remap_deadlist_obj != 0) {
				err = dsl_deadlist_open(&ds->ds_remap_deadlist,
				    mos, remap_deadlist_obj);
			}
		}

		dmu_buf_init_user(&ds->ds_dbu, dsl_dataset_evict_sync,
		    dsl_dataset_evict_async, &ds->ds_dbuf);
		if (err == 0)
			winner = dmu_buf_set_user_ie(dbuf, &ds->ds_dbu);

		if (err != 0 || winner != NULL) {
			if (dsl_deadlist_is_open(&ds->ds_deadlist))
				dsl_deadlist_close(&ds->ds_deadlist);
			if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
				dsl_deadlist_close(&ds->ds_remap_deadlist);
			dsl_bookmark_fini_ds(ds);
after_dsl_bookmark_fini:
			if (ds->ds_prev)
				dsl_dataset_rele(ds->ds_prev, ds);
			dsl_dir_rele(ds->ds_dir, ds);
			for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
				if (dsl_dataset_feature_is_active(ds, f))
					unload_zfeature(ds, f);
			}

			list_destroy(&ds->ds_prop_cbs);
			list_destroy(&ds->ds_sendstreams);
			bplist_destroy(&ds->ds_pending_deadlist);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_opening_lock);
			mutex_destroy(&ds->ds_sendstream_lock);
			mutex_destroy(&ds->ds_remap_deadlist_lock);
			zfs_refcount_destroy(&ds->ds_longholds);
			rrw_destroy(&ds->ds_bp_rwlock);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err != 0) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(dsl_dataset_phys(ds)->ds_fsid_guid);
			if (ds->ds_fsid_guid !=
			    dsl_dataset_phys(ds)->ds_fsid_guid) {
				zfs_dbgmsg("ds_fsid_guid changed from "
				    "%llx to %llx for pool %s dataset id %llu",
				    (long long)
				    dsl_dataset_phys(ds)->ds_fsid_guid,
				    (long long)ds->ds_fsid_guid,
				    spa_name(dp->dp_spa),
				    (u_longlong_t)dsobj);
			}
		}
	}

	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(dsl_dataset_phys(ds), ==, dbuf->db_data);
	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	*dsp = ds;

	return (0);
}
int
dsl_dataset_create_key_mapping(dsl_dataset_t *ds)
{
	dsl_dir_t *dd = ds->ds_dir;

	if (dd->dd_crypto_obj == 0)
		return (0);

	return (spa_keystore_create_mapping(dd->dd_pool->dp_spa,
	    ds, ds, &ds->ds_key_mapping));
}
int
dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
    ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp)
{
	int err;

	err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
	if (err != 0)
		return (err);

	ASSERT3P(*dsp, !=, NULL);

	if (flags & DS_HOLD_FLAG_DECRYPT) {
		err = dsl_dataset_create_key_mapping(*dsp);
		if (err != 0)
			dsl_dataset_rele(*dsp, tag);
	}

	return (err);
}
int
dsl_dataset_hold_flags(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
    const void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	const char *snapname;
	uint64_t obj;
	int err = 0;
	dsl_dataset_t *ds;

	err = dsl_dir_hold(dp, name, FTAG, &dd, &snapname);
	if (err != 0)
		return (err);

	ASSERT(dsl_pool_config_held(dp));
	obj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	if (obj != 0)
		err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag, &ds);
	else
		err = SET_ERROR(ENOENT);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *snap_ds;

		if (*snapname++ != '@') {
			dsl_dataset_rele_flags(ds, flags, tag);
			dsl_dir_rele(dd, FTAG);
			return (SET_ERROR(ENOENT));
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(ds, snapname, &obj);
		if (err == 0) {
			err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag,
			    &snap_ds);
		}
		dsl_dataset_rele_flags(ds, flags, tag);

		if (err == 0) {
			mutex_enter(&snap_ds->ds_lock);
			if (snap_ds->ds_snapname[0] == 0)
				(void) strlcpy(snap_ds->ds_snapname, snapname,
				    sizeof (snap_ds->ds_snapname));
			mutex_exit(&snap_ds->ds_lock);
			ds = snap_ds;
		}
	}
	if (err == 0)
		*dsp = ds;
	dsl_dir_rele(dd, FTAG);
	return (err);
}
int
dsl_dataset_hold(dsl_pool_t *dp, const char *name, const void *tag,
    dsl_dataset_t **dsp)
{
	return (dsl_dataset_hold_flags(dp, name, 0, tag, dsp));
}
static int
dsl_dataset_own_obj_impl(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
    const void *tag, boolean_t override, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj_flags(dp, dsobj, flags, tag, dsp);
	if (err != 0)
		return (err);
	if (!dsl_dataset_tryown(*dsp, tag, override)) {
		dsl_dataset_rele_flags(*dsp, flags, tag);
		*dsp = NULL;
		return (SET_ERROR(EBUSY));
	}
	return (0);
}
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
    const void *tag, dsl_dataset_t **dsp)
{
	return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_FALSE, dsp));
}

int
dsl_dataset_own_obj_force(dsl_pool_t *dp, uint64_t dsobj,
    ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp)
{
	return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_TRUE, dsp));
}
static int
dsl_dataset_own_impl(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
    const void *tag, boolean_t override, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_flags(dp, name, flags, tag, dsp);
	if (err != 0)
		return (err);
	if (!dsl_dataset_tryown(*dsp, tag, override)) {
		dsl_dataset_rele_flags(*dsp, flags, tag);
		return (SET_ERROR(EBUSY));
	}
	return (0);
}

int
dsl_dataset_own_force(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
    const void *tag, dsl_dataset_t **dsp)
{
	return (dsl_dataset_own_impl(dp, name, flags, tag, B_TRUE, dsp));
}

int
dsl_dataset_own(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
    const void *tag, dsl_dataset_t **dsp)
{
	return (dsl_dataset_own_impl(dp, name, flags, tag, B_FALSE, dsp));
}
/*
 * See the comment above dsl_pool_hold() for details.  In summary, a long
 * hold is used to prevent destruction of a dataset while the pool hold
 * is dropped, allowing other concurrent operations (e.g. spa_sync()).
 *
 * The dataset and pool must be held when this function is called.  After it
 * is called, the pool hold may be released while the dataset is still held
 * and accessed.
 */
void
dsl_dataset_long_hold(dsl_dataset_t *ds, const void *tag)
{
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
	(void) zfs_refcount_add(&ds->ds_longholds, tag);
}
void
dsl_dataset_long_rele(dsl_dataset_t *ds, const void *tag)
{
	(void) zfs_refcount_remove(&ds->ds_longholds, tag);
}

/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_dataset_long_held(dsl_dataset_t *ds)
{
	return (!zfs_refcount_is_zero(&ds->ds_longholds));
}
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strlcpy(name, "mos", ZFS_MAX_DATASET_NAME_LEN);
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY0(dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			VERIFY3U(strlcat(name, "@", ZFS_MAX_DATASET_NAME_LEN),
			    <, ZFS_MAX_DATASET_NAME_LEN);
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				VERIFY3U(strlcat(name, ds->ds_snapname,
				    ZFS_MAX_DATASET_NAME_LEN), <,
				    ZFS_MAX_DATASET_NAME_LEN);
				mutex_exit(&ds->ds_lock);
			} else {
				VERIFY3U(strlcat(name, ds->ds_snapname,
				    ZFS_MAX_DATASET_NAME_LEN), <,
				    ZFS_MAX_DATASET_NAME_LEN);
			}
		}
	}
}
int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
	VERIFY0(dsl_dataset_get_snapname(ds));
	mutex_enter(&ds->ds_lock);
	int len = strlen(ds->ds_snapname);
	mutex_exit(&ds->ds_lock);
	/* add '@' if ds is a snap */
	if (len > 0)
		len++;
	len += dsl_dir_namelen(ds->ds_dir);
	return (len);
}
void
dsl_dataset_rele(dsl_dataset_t *ds, const void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}

void
dsl_dataset_remove_key_mapping(dsl_dataset_t *ds)
{
	dsl_dir_t *dd = ds->ds_dir;

	if (dd == NULL || dd->dd_crypto_obj == 0)
		return;

	(void) spa_keystore_remove_mapping(dd->dd_pool->dp_spa,
	    ds->ds_object, ds);
}

void
dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags,
    const void *tag)
{
	if (flags & DS_HOLD_FLAG_DECRYPT)
		dsl_dataset_remove_key_mapping(ds);

	dsl_dataset_rele(ds, tag);
}
void
dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, const void *tag)
{
	ASSERT3P(ds->ds_owner, ==, tag);
	ASSERT(ds->ds_dbuf != NULL);

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	mutex_exit(&ds->ds_lock);
	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele_flags(ds, flags, tag);
}
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, const void *tag, boolean_t override)
{
	boolean_t gotit = FALSE;

	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
	mutex_enter(&ds->ds_lock);
	if (ds->ds_owner == NULL && (override || !(DS_IS_INCONSISTENT(ds) ||
	    (dsl_dataset_feature_is_active(ds,
	    SPA_FEATURE_REDACTED_DATASETS) &&
	    !zfs_allow_redacted_dataset_mount)))) {
		ds->ds_owner = tag;
		dsl_dataset_long_hold(ds, tag);
		gotit = TRUE;
	}
	mutex_exit(&ds->ds_lock);
	return (gotit);
}
boolean_t
dsl_dataset_has_owner(dsl_dataset_t *ds)
{
	boolean_t rv;
	mutex_enter(&ds->ds_lock);
	rv = (ds->ds_owner != NULL);
	mutex_exit(&ds->ds_lock);
	return (rv);
}
static boolean_t
zfeature_active(spa_feature_t f, void *arg)
{
	switch (spa_feature_table[f].fi_type) {
	case ZFEATURE_TYPE_BOOLEAN: {
		boolean_t val = (boolean_t)(uintptr_t)arg;
		ASSERT(val == B_FALSE || val == B_TRUE);
		return (val);
	}
	case ZFEATURE_TYPE_UINT64_ARRAY:
		/*
		 * In this case, arg is a uint64_t array.  The feature is active
		 * if the array is non-null.
		 */
		return (arg != NULL);
	default:
		panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
		return (B_FALSE);
	}
}

boolean_t
dsl_dataset_feature_is_active(dsl_dataset_t *ds, spa_feature_t f)
{
	return (zfeature_active(f, ds->ds_feature[f]));
}
/*
 * The buffers passed out by this function are references to internal buffers;
 * they should not be freed by callers of this function, and they should not be
 * used after the dataset has been released.
 */
boolean_t
dsl_dataset_get_uint64_array_feature(dsl_dataset_t *ds, spa_feature_t f,
    uint64_t *outlength, uint64_t **outp)
{
	VERIFY(spa_feature_table[f].fi_type & ZFEATURE_TYPE_UINT64_ARRAY);
	if (!dsl_dataset_feature_is_active(ds, f)) {
		return (B_FALSE);
	}
	struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
	*outp = ftuaa->array;
	*outlength = ftuaa->length;
	return (B_TRUE);
}
void
dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, void *arg,
    dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
	uint64_t zero = 0;

	VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);

	spa_feature_incr(spa, f, tx);
	dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);

	switch (spa_feature_table[f].fi_type) {
	case ZFEATURE_TYPE_BOOLEAN:
		ASSERT3S((boolean_t)(uintptr_t)arg, ==, B_TRUE);
		VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
		    sizeof (zero), 1, &zero, tx));
		break;
	case ZFEATURE_TYPE_UINT64_ARRAY:
	{
		struct feature_type_uint64_array_arg *ftuaa = arg;
		VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
		    sizeof (uint64_t), ftuaa->length, ftuaa->array, tx));
		break;
	}
	default:
		panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
	}
}
static void
dsl_dataset_deactivate_feature_impl(dsl_dataset_t *ds, spa_feature_t f,
    dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
	uint64_t dsobj = ds->ds_object;

	VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);

	VERIFY0(zap_remove(mos, dsobj, spa_feature_table[f].fi_guid, tx));
	spa_feature_decr(spa, f, tx);
	ds->ds_feature[f] = NULL;
}

void
dsl_dataset_deactivate_feature(dsl_dataset_t *ds, spa_feature_t f, dmu_tx_t *tx)
{
	unload_zfeature(ds, f);
	dsl_dataset_deactivate_feature_impl(ds, f, tx);
}
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    dsl_crypto_params_t *dcp, uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds; /* head of the origin snapshot */

		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    dsl_dataset_phys(origin)->ds_creation_txg;
		dsphys->ds_referenced_bytes =
		    dsl_dataset_phys(origin)->ds_referenced_bytes;
		dsphys->ds_compressed_bytes =
		    dsl_dataset_phys(origin)->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    dsl_dataset_phys(origin)->ds_uncompressed_bytes;
		rrw_enter(&origin->ds_bp_rwlock, RW_READER, FTAG);
		dsphys->ds_bp = dsl_dataset_phys(origin)->ds_bp;
		rrw_exit(&origin->ds_bp_rwlock, FTAG);

		/*
		 * Inherit flags that describe the dataset's contents
		 * (INCONSISTENT) or properties (Case Insensitive).
		 */
		dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags &
		    (DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET);

		for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
			if (zfeature_active(f, origin->ds_feature[f])) {
				dsl_dataset_activate_feature(dsobj, f,
				    origin->ds_feature[f], tx);
			}
		}

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_dataset_phys(origin)->ds_num_children++;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(origin->ds_dir)->dd_head_dataset_obj,
		    FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (dsl_dataset_phys(origin)->ds_next_clones_obj == 0) {
				dsl_dataset_phys(origin)->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY0(zap_add_int(mos,
			    dsl_dataset_phys(origin)->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dsl_dir_phys(dd)->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				dsl_dir_phys(origin->ds_dir)->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY0(zap_add_int(mos,
			    dsl_dir_phys(origin->ds_dir)->dd_clones,
			    dd->dd_object, tx));
		}
	}

	/* handle encryption */
	dsl_dataset_create_crypt_sync(dsobj, dd, origin, dcp, tx);

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dsl_dir_phys(dd)->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
void
dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	objset_t *os;

	VERIFY0(dmu_objset_from_ds(ds, &os));
	if (memcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
		dsl_pool_t *dp = ds->ds_dir->dd_pool;
		zio_t *zio;

		memset(&os->os_zil_header, 0, sizeof (os->os_zil_header));
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;

		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dsl_dataset_sync(ds, zio, tx);
		VERIFY0(zio_wait(zio));
		dsl_dataset_sync_done(ds, tx);
	}
}
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr,
    dsl_crypto_params_t *dcp, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(lastname[0] != '@');
	/*
	 * Filesystems will eventually have their origin set to dp_origin_snap,
	 * but that's taken care of in dsl_dataset_create_sync_dd.  When
	 * creating a filesystem, this function is called with origin equal to
	 * NULL.
	 */
	if (origin != NULL)
		ASSERT3P(origin, !=, dp->dp_origin_snap);

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY0(dsl_dir_hold_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, dcp,
	    flags & ~DS_CREATE_FLAG_NODIRTY, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	/*
	 * If we are creating a clone and the livelist feature is enabled,
	 * add the entry DD_FIELD_LIVELIST to ZAP.
	 */
	if (origin != NULL &&
	    spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LIVELIST)) {
		objset_t *mos = dd->dd_pool->dp_meta_objset;
		dsl_dir_zapify(dd, tx);
		uint64_t obj = dsl_deadlist_alloc(mos, tx);
		VERIFY0(zap_add(mos, dd->dd_object, DD_FIELD_LIVELIST,
		    sizeof (uint64_t), 1, &obj, tx));
		spa_feature_incr(dp->dp_spa, SPA_FEATURE_LIVELIST, tx);
	}

	/*
	 * Since we're creating a new node we know it's a leaf, so we can
	 * initialize the counts if the limit feature is active.
	 */
	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
		uint64_t cnt = 0;
		objset_t *os = dd->dd_pool->dp_meta_objset;

		dsl_dir_zapify(dd, tx);
		VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
		    sizeof (cnt), 1, &cnt, tx));
		VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
		    sizeof (cnt), 1, &cnt, tx));
	}

	dsl_dir_rele(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshots zil header.
	 */
	if (origin != NULL && !(flags & DS_CREATE_FLAG_NODIRTY)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		dsl_dataset_zero_zil(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}
/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(!ds->ds_is_snapshot);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0)
		mrs_used = dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes;
	else
		mrs_used = 0;

	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);

	ASSERT3U(dlused, <=, mrs_used);
	dsl_dataset_phys(ds)->ds_unique_bytes =
	    dsl_dataset_phys(ds)->ds_referenced_bytes - (mrs_used - dlused);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
void
dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
    dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count __maybe_unused;
	int err;

	ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2);
	err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
	    obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT)
		VERIFY0(err);
	ASSERT0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, dsl_dataset_phys(ds)->ds_num_children - 2);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&dsl_dataset_phys(ds)->ds_bp);
}

spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_objset != NULL);

	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	/* Must not dirty a dataset in the same txg where it got snapshotted. */
	ASSERT3U(tx->tx_txg, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);

	dp = ds->ds_dir->dd_pool;
	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
		objset_t *os = ds->ds_objset;

		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);

		/* if this dataset is encrypted, grab a reference to the DCK */
		if (ds->ds_dir->dd_crypto_obj != 0 &&
		    !os->os_raw_receive &&
		    !os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_add_ref(ds->ds_key_mapping, ds);
		}
	}
}
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
	asize = MIN(dsl_dataset_phys(ds)->ds_unique_bytes, ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (SET_ERROR(ENOSPC));

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}
int
dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
    dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr, proc_t *proc)
{
	int error;
	uint64_t value;

	ds->ds_trysnap_txg = tx->tx_txg;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * We don't allow multiple snapshots of the same txg.  If there
	 * is already one, try again.
	 */
	if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg)
		return (SET_ERROR(EAGAIN));

	/*
	 * Check for conflicting snapshot name.
	 */
	error = dsl_dataset_snap_lookup(ds, snapname, &value);
	if (error == 0)
		return (SET_ERROR(EEXIST));
	if (error != ENOENT)
		return (error);

	/*
	 * We don't allow taking snapshots of inconsistent datasets, such as
	 * those into which we are currently receiving.  However, if we are
	 * creating this snapshot as part of a receive, this check will be
	 * executed atomically with respect to the completion of the receive
	 * itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
	 * case we ignore this, knowing it will be fixed up for us shortly in
	 * dmu_recv_end_sync().
	 */
	if (!recv && DS_IS_INCONSISTENT(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Skip the check for temporary snapshots or if we have already checked
	 * the counts in dsl_dataset_snapshot_check.  This means we really only
	 * check the count here when we're receiving a stream.
	 */
	if (cnt != 0 && cr != NULL) {
		error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr, proc);
		if (error != 0)
			return (error);
	}

	error = dsl_dataset_snapshot_reserve_space(ds, tx);
	if (error != 0)
		return (error);

	return (0);
}
int
dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_snapshot_arg_t *ddsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int rv = 0;

	/*
	 * Pre-compute how many total new snapshots will be created for each
	 * level in the tree and below.  This is needed for validating the
	 * snapshot limit when either taking a recursive snapshot or when
	 * taking multiple snapshots.
	 *
	 * The problem is that the counts are not actually adjusted when
	 * we are checking, only when we finally sync.  For a single snapshot,
	 * this is easy, the count will increase by 1 at each node up the tree,
	 * but it's more complicated for the recursive/multiple snapshot case.
	 *
	 * The dsl_fs_ss_limit_check function does recursively check the count
	 * at each level up the tree but since it is validating each snapshot
	 * independently we need to be sure that we are validating the complete
	 * count for the entire set of snapshots.  We do this by rolling up the
	 * counts for each component of the name into an nvlist and then
	 * checking each of those cases with the aggregated count.
	 *
	 * This approach properly handles not only the recursive snapshot
	 * case (where we get all of those on the ddsa_snaps list) but also
	 * the sibling case (e.g. snapshot a/b and a/c so that we will also
	 * validate the limit on 'a' using a count of 2).
	 *
	 * We validate the snapshot names in the third loop and only report
	 * name errors once.
	 */
	if (dmu_tx_is_syncing(tx)) {
		char *nm;
		nvlist_t *cnt_track = NULL;
		cnt_track = fnvlist_alloc();

		nm = kmem_alloc(MAXPATHLEN, KM_SLEEP);

		/* Rollup aggregated counts into the cnt_track list */
		for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
		    pair != NULL;
		    pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
			char *pdelim;
			uint64_t val;

			(void) strlcpy(nm, nvpair_name(pair), MAXPATHLEN);
			pdelim = strchr(nm, '@');
			if (pdelim == NULL)
				continue;
			*pdelim = '\0';

			do {
				if (nvlist_lookup_uint64(cnt_track, nm,
				    &val) == 0) {
					/* update existing entry */
					fnvlist_add_uint64(cnt_track, nm,
					    val + 1);
				} else {
					/* add to list */
					fnvlist_add_uint64(cnt_track, nm, 1);
				}

				pdelim = strrchr(nm, '/');
				if (pdelim != NULL)
					*pdelim = '\0';
			} while (pdelim != NULL);
		}

		kmem_free(nm, MAXPATHLEN);

		/* Check aggregated counts at each level */
		for (pair = nvlist_next_nvpair(cnt_track, NULL);
		    pair != NULL; pair = nvlist_next_nvpair(cnt_track, pair)) {
			int error = 0;
			const char *name;
			uint64_t cnt = 0;
			dsl_dataset_t *ds;

			name = nvpair_name(pair);
			cnt = fnvpair_value_uint64(pair);
			ASSERT(cnt > 0);

			error = dsl_dataset_hold(dp, name, FTAG, &ds);
			if (error == 0) {
				error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
				    ZFS_PROP_SNAPSHOT_LIMIT, NULL,
				    ddsa->ddsa_cr, ddsa->ddsa_proc);
				dsl_dataset_rele(ds, FTAG);
			}

			if (error != 0) {
				if (ddsa->ddsa_errors != NULL)
					fnvlist_add_int32(ddsa->ddsa_errors,
					    name, error);
				rv = error;
				/* only report one error for this check */
				break;
			}
		}
		nvlist_free(cnt_track);
	}

	for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
		int error = 0;
		dsl_dataset_t *ds;
		const char *name, *atp = NULL;
		char dsname[ZFS_MAX_DATASET_NAME_LEN];

		name = nvpair_name(pair);
		if (strlen(name) >= ZFS_MAX_DATASET_NAME_LEN)
			error = SET_ERROR(ENAMETOOLONG);
		if (error == 0) {
			atp = strchr(name, '@');
			if (atp == NULL)
				error = SET_ERROR(EINVAL);
			if (error == 0)
				(void) strlcpy(dsname, name, atp - name + 1);
		}
		if (error == 0)
			error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
		if (error == 0) {
			/* passing 0/NULL skips dsl_fs_ss_limit_check */
			error = dsl_dataset_snapshot_check_impl(ds,
			    atp + 1, tx, B_FALSE, 0, NULL, NULL);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error != 0) {
			if (ddsa->ddsa_errors != NULL) {
				fnvlist_add_int32(ddsa->ddsa_errors,
				    name, error);
			}
			rv = error;
		}
	}

	return (rv);
}
void
dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	objset_t *os __maybe_unused;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * If we are on an old pool, the zil must not be active, in which
	 * case it will be zeroed.  Usually zil_suspend() accomplishes this.
	 */
	ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
	    dmu_objset_from_ds(ds, &os) != 0 ||
	    memcmp(&os->os_phys->os_zil_header, &zero_zil,
	    sizeof (zero_zil)) == 0);

	/* Should not snapshot a dirty dataset. */
	ASSERT(!txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
	    ds, tx->tx_txg));

	dsl_fs_ss_count_adjust(ds->ds_dir, 1, DD_FIELD_SNAPSHOT_COUNT, tx);

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_prev_snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	dsphys->ds_deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsphys->ds_referenced_bytes = dsl_dataset_phys(ds)->ds_referenced_bytes;
	dsphys->ds_compressed_bytes = dsl_dataset_phys(ds)->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes =
	    dsl_dataset_phys(ds)->ds_uncompressed_bytes;
	dsphys->ds_flags = dsl_dataset_phys(ds)->ds_flags;
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsphys->ds_bp = dsl_dataset_phys(ds)->ds_bp;
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	dmu_buf_rele(dbuf, FTAG);

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (zfeature_active(f, ds->ds_feature[f])) {
			dsl_dataset_activate_feature(dsobj, f,
			    ds->ds_feature[f], tx);
		}
	}

	ASSERT3U(ds->ds_prev != 0, ==,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj;
		ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
		    ds->ds_object ||
		    dsl_dataset_phys(ds->ds_prev)->ds_num_children > 1);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
		    ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
			    dsl_dataset_phys(ds->ds_prev)->ds_creation_txg);
			dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    dsphys->ds_next_snap_obj, tx);
			VERIFY0(zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t delta;
		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
		delta = MIN(dsl_dataset_phys(ds)->ds_unique_bytes,
		    ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    delta, 0, 0, tx);
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_deadlist_clone(&ds->ds_deadlist, UINT64_MAX,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
	dsl_deadlist_close(&ds->ds_deadlist);
	VERIFY0(dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj));
	dsl_deadlist_add_key(&ds->ds_deadlist,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
	dsl_bookmark_snapshotted(ds, tx);

	if (dsl_dataset_remap_deadlist_exists(ds)) {
		uint64_t remap_deadlist_obj =
		    dsl_dataset_get_remap_deadlist_object(ds);
		/*
		 * Move the remap_deadlist to the snapshot.  The head
		 * will create a new remap deadlist on demand, from
		 * dsl_dataset_block_remapped().
		 */
		dsl_dataset_unset_remap_deadlist_object(ds, tx);
		dsl_deadlist_close(&ds->ds_remap_deadlist);

		dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_REMAP_DEADLIST,
		    sizeof (remap_deadlist_obj), 1, &remap_deadlist_obj, tx));
	}

	/*
	 * Create a ivset guid for this snapshot if the dataset is
	 * encrypted.  This may be overridden by a raw receive.  A
	 * previous implementation of this code did not have this
	 * field as part of the on-disk format for ZFS encryption
	 * (see errata #4).  As part of the remediation for this
	 * issue, we ask the user to enable the bookmark_v2 feature
	 * which is now a dependency of the encryption feature.  We
	 * use this as a heuristic to determine when the user has
	 * elected to correct any datasets created with the old code.
	 * As a result, we only do this step if the bookmark_v2
	 * feature is enabled, which limits the number of states a
	 * given pool / dataset can be in with regards to correcting
	 * the issue.
	 */
	if (ds->ds_dir->dd_crypto_obj != 0 &&
	    spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2)) {
		uint64_t ivset_guid = unique_create();

		dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_IVSET_GUID,
		    sizeof (ivset_guid), 1, &ivset_guid, tx));
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, <, tx->tx_txg);
	dsl_dataset_phys(ds)->ds_prev_snap_obj = dsobj;
	dsl_dataset_phys(ds)->ds_prev_snap_txg = crtxg;
	dsl_dataset_phys(ds)->ds_unique_bytes = 0;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	VERIFY0(zap_add(mos, dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx));

	if (ds->ds_prev)
		dsl_dataset_rele(ds->ds_prev, ds);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_scan_ds_snapshotted(ds, tx);

	dsl_dir_snap_cmtime_update(ds->ds_dir, tx);

	if (zfs_snapshot_history_enabled)
		spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, " ");
}
void
dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_snapshot_arg_t *ddsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
		dsl_dataset_t *ds;
		const char *name, *atp;
		char dsname[ZFS_MAX_DATASET_NAME_LEN];

		name = nvpair_name(pair);
		atp = strchr(name, '@');
		(void) strlcpy(dsname, name, atp - name + 1);
		VERIFY0(dsl_dataset_hold(dp, dsname, FTAG, &ds));

		dsl_dataset_snapshot_sync_impl(ds, atp + 1, tx);
		if (ddsa->ddsa_props != NULL) {
			dsl_props_set_sync_impl(ds->ds_prev,
			    ZPROP_SRC_LOCAL, ddsa->ddsa_props, tx);
		}
		dsl_dataset_rele(ds, FTAG);
	}
}
/*
 * The snapshots must all be in the same pool.
 * All-or-nothing: if there are any failures, nothing will be modified.
 */
int
dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
{
	dsl_dataset_snapshot_arg_t ddsa;
	nvpair_t *pair;
	boolean_t needsuspend;
	int error;
	spa_t *spa;
	const char *firstname;
	nvlist_t *suspended = NULL;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);
	firstname = nvpair_name(pair);

	error = spa_open(firstname, &spa, FTAG);
	if (error != 0)
		return (error);
	needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
	spa_close(spa, FTAG);

	if (needsuspend) {
		suspended = fnvlist_alloc();
		for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(snaps, pair)) {
			char fsname[ZFS_MAX_DATASET_NAME_LEN];
			const char *snapname = nvpair_name(pair);
			const char *atp;
			void *cookie;

			atp = strchr(snapname, '@');
			if (atp == NULL) {
				error = SET_ERROR(EINVAL);
				break;
			}
			(void) strlcpy(fsname, snapname, atp - snapname + 1);

			error = zil_suspend(fsname, &cookie);
			if (error != 0)
				break;
			fnvlist_add_uint64(suspended, fsname,
			    (uintptr_t)cookie);
		}
	}

	ddsa.ddsa_snaps = snaps;
	ddsa.ddsa_props = props;
	ddsa.ddsa_errors = errors;
	ddsa.ddsa_cr = CRED();
	ddsa.ddsa_proc = curproc;

	if (error == 0) {
		error = dsl_sync_task(firstname, dsl_dataset_snapshot_check,
		    dsl_dataset_snapshot_sync, &ddsa,
		    fnvlist_num_pairs(snaps) * 3, ZFS_SPACE_CHECK_NORMAL);
	}

	if (suspended != NULL) {
		for (pair = nvlist_next_nvpair(suspended, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(suspended, pair)) {
			zil_resume((void *)(uintptr_t)
			    fnvpair_value_uint64(pair));
		}
		fnvlist_free(suspended);
	}

	if (error == 0) {
		for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(snaps, pair)) {
			zvol_create_minor(nvpair_name(pair));
		}
	}

	return (error);
}
typedef struct dsl_dataset_snapshot_tmp_arg {
	const char *ddsta_fsname;
	const char *ddsta_snapname;
	minor_t ddsta_cleanup_minor;
	const char *ddsta_htag;
} dsl_dataset_snapshot_tmp_arg_t;
static int
dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds);
	if (error != 0)
		return (error);

	/* NULL cred means no limit check for tmp snapshot */
	error = dsl_dataset_snapshot_check_impl(ds, ddsta->ddsta_snapname,
	    tx, B_FALSE, 0, NULL, NULL);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOTSUP));
	}
	error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
	    B_TRUE, tx);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
dsl_dataset_snapshot_tmp_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds = NULL;

	VERIFY0(dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds));

	dsl_dataset_snapshot_sync_impl(ds, ddsta->ddsta_snapname, tx);
	dsl_dataset_user_hold_sync_one(ds->ds_prev, ddsta->ddsta_htag,
	    ddsta->ddsta_cleanup_minor, gethrestime_sec(), tx);
	dsl_destroy_snapshot_sync_impl(ds->ds_prev, B_TRUE, tx);

	dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
    minor_t cleanup_minor, const char *htag)
{
	dsl_dataset_snapshot_tmp_arg_t ddsta;
	int error;
	spa_t *spa;
	boolean_t needsuspend;
	void *cookie;

	ddsta.ddsta_fsname = fsname;
	ddsta.ddsta_snapname = snapname;
	ddsta.ddsta_cleanup_minor = cleanup_minor;
	ddsta.ddsta_htag = htag;

	error = spa_open(fsname, &spa, FTAG);
	if (error != 0)
		return (error);
	needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
	spa_close(spa, FTAG);

	if (needsuspend) {
		error = zil_suspend(fsname, &cookie);
		if (error != 0)
			return (error);
	}

	error = dsl_sync_task(fsname, dsl_dataset_snapshot_tmp_check,
	    dsl_dataset_snapshot_tmp_sync, &ddsta, 3, ZFS_SPACE_CHECK_RESERVED);

	if (needsuspend)
		zil_resume(cookie);
	return (error);
}
/* Nonblocking dataset sync. Assumes dataset:objset is always 1:1 */
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *rio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_objset != NULL);
	ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_fsid_guid = ds->ds_fsid_guid;

	if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {
		VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
		    ds->ds_object, DS_FIELD_RESUME_OBJECT, 8, 1,
		    &ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));
		VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
		    ds->ds_object, DS_FIELD_RESUME_OFFSET, 8, 1,
		    &ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));
		VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
		    ds->ds_object, DS_FIELD_RESUME_BYTES, 8, 1,
		    &ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));
		ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;
		ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;
		ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;
	}

	dmu_objset_sync(ds->ds_objset, rio, tx);
}
/*
 * Check if the percentage of blocks shared between the clone and the
 * snapshot (as opposed to those that are clone only) is below a certain
 * threshold
 */
static boolean_t
dsl_livelist_should_disable(dsl_dataset_t *ds)
{
	uint64_t used, referenced;
	int percent_shared;

	used = dsl_dir_get_usedds(ds->ds_dir);
	referenced = dsl_get_referenced(ds);
	if (referenced == 0)
		return (B_FALSE);
	percent_shared = (100 * (referenced - used)) / referenced;
	if (percent_shared <= zfs_livelist_min_percent_shared)
		return (B_TRUE);
	return (B_FALSE);
}
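
/*
 * Illustrative arithmetic (added sketch, not from the original source;
 * figures are hypothetical and assume the usual default of 75 for
 * zfs_livelist_min_percent_shared): a clone referencing 100G of which
 * 20G is clone-only (used == 20G) has percent_shared == 80 and keeps its
 * livelist; once the clone diverges to used == 30G, percent_shared drops
 * to 70 and the livelist is disabled.
 */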
/*
 * Check if it is possible to combine two livelist entries into one.
 * This is the case if the combined number of 'live' blkptrs (ALLOCs that
 * don't have a matching FREE) is under the maximum sublist size.
 * We check this by subtracting twice the total number of frees from the total
 * number of blkptrs. FREEs are counted twice because each FREE blkptr
 * will cancel out an ALLOC blkptr when the livelist is processed.
 */
static boolean_t
dsl_livelist_should_condense(dsl_deadlist_entry_t *first,
    dsl_deadlist_entry_t *next)
{
	uint64_t total_free = first->dle_bpobj.bpo_phys->bpo_num_freed +
	    next->dle_bpobj.bpo_phys->bpo_num_freed;
	uint64_t total_entries = first->dle_bpobj.bpo_phys->bpo_num_blkptrs +
	    next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
	if ((total_entries - (2 * total_free)) < zfs_livelist_max_entries)
		return (B_TRUE);
	return (B_FALSE);
}
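
/*
 * Worked example (added sketch; figures are hypothetical): if the first
 * sublist holds 600 blkptrs of which 200 are FREEs, and the next holds
 * 400 blkptrs of which 100 are FREEs, then total_entries = 1000 and
 * total_free = 300. The live count is 1000 - 2 * 300 = 400, so the pair
 * condenses whenever zfs_livelist_max_entries exceeds 400.
 */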
typedef struct try_condense_arg {
	spa_t *spa;
	dsl_dataset_t *ds;
} try_condense_arg_t;
/*
 * Iterate over the livelist entries, searching for a pair to condense.
 * A nonzero return value means stop, 0 means keep looking.
 */
static int
dsl_livelist_try_condense(void *arg, dsl_deadlist_entry_t *first)
{
	try_condense_arg_t *tca = arg;
	spa_t *spa = tca->spa;
	dsl_dataset_t *ds = tca->ds;
	dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
	dsl_deadlist_entry_t *next;

	/* The condense thread has not yet been created at import */
	if (spa->spa_livelist_condense_zthr == NULL)
		return (1);

	/* A condense is already in progress */
	if (spa->spa_to_condense.ds != NULL)
		return (1);

	next = AVL_NEXT(&ll->dl_tree, &first->dle_node);
	/* The livelist has only one entry - don't condense it */
	if (next == NULL)
		return (1);

	/* Next is the newest entry - don't condense it */
	if (AVL_NEXT(&ll->dl_tree, &next->dle_node) == NULL)
		return (1);

	/* This pair is not ready to condense but keep looking */
	if (!dsl_livelist_should_condense(first, next))
		return (0);

	/*
	 * Add a ref to prevent the dataset from being evicted while
	 * the condense zthr or synctask are running. Ref will be
	 * released at the end of the condense synctask
	 */
	dmu_buf_add_ref(ds->ds_dbuf, spa);

	spa->spa_to_condense.ds = ds;
	spa->spa_to_condense.first = first;
	spa->spa_to_condense.next = next;
	spa->spa_to_condense.syncing = B_FALSE;
	spa->spa_to_condense.cancelled = B_FALSE;

	zthr_wakeup(spa->spa_livelist_condense_zthr);
	return (1);
}
static void
dsl_flush_pending_livelist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_dir_t *dd = ds->ds_dir;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dsl_deadlist_entry_t *last = dsl_deadlist_last(&dd->dd_livelist);

	/* Check if we need to add a new sub-livelist */
	if (last == NULL) {
		/* The livelist is empty */
		dsl_deadlist_add_key(&dd->dd_livelist,
		    tx->tx_txg - 1, tx);
	} else if (spa_sync_pass(spa) == 1) {
		/*
		 * Check if the newest entry is full. If it is, make a new one.
		 * We only do this once per sync because we could overfill a
		 * sublist in one sync pass and don't want to add another entry
		 * for a txg that is already represented. This ensures that
		 * blkptrs born in the same txg are stored in the same sublist.
		 */
		bpobj_t bpobj = last->dle_bpobj;
		uint64_t all = bpobj.bpo_phys->bpo_num_blkptrs;
		uint64_t free = bpobj.bpo_phys->bpo_num_freed;
		uint64_t alloc = all - free;
		if (alloc > zfs_livelist_max_entries) {
			dsl_deadlist_add_key(&dd->dd_livelist,
			    tx->tx_txg - 1, tx);
		}
	}

	/* Insert each entry into the on-disk livelist */
	bplist_iterate(&dd->dd_pending_allocs,
	    dsl_deadlist_insert_alloc_cb, &dd->dd_livelist, tx);
	bplist_iterate(&dd->dd_pending_frees,
	    dsl_deadlist_insert_free_cb, &dd->dd_livelist, tx);

	/* Attempt to condense every pair of adjacent entries */
	try_condense_arg_t arg = {
	    .spa = spa,
	    .ds = ds
	};
	dsl_deadlist_iterate(&dd->dd_livelist, dsl_livelist_try_condense,
	    &arg);
}
void
dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	objset_t *os = ds->ds_objset;

	bplist_iterate(&ds->ds_pending_deadlist,
	    dsl_deadlist_insert_alloc_cb, &ds->ds_deadlist, tx);

	if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist)) {
		dsl_flush_pending_livelist(ds, tx);
		if (dsl_livelist_should_disable(ds)) {
			dsl_dir_remove_livelist(ds->ds_dir, tx, B_TRUE);
		}
	}

	dsl_bookmark_sync_done(ds, tx);

	multilist_destroy(&os->os_synced_dnodes);

	if (os->os_encrypted)
		os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE;
	else
		ASSERT0(os->os_next_write_raw[tx->tx_txg & TXG_MASK]);

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (zfeature_active(f,
		    ds->ds_feature_activation[f])) {
			if (zfeature_active(f, ds->ds_feature[f]))
				continue;
			dsl_dataset_activate_feature(ds->ds_object, f,
			    ds->ds_feature_activation[f], tx);
			ds->ds_feature[f] = ds->ds_feature_activation[f];
		}
	}

	ASSERT(!dmu_objset_is_dirty(os, dmu_tx_get_txg(tx)));
}
static int
get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val)
{
	uint64_t count = 0;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t *za;

	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));

	/*
	 * There may be missing entries in ds_next_clones_obj
	 * due to a bug in a previous version of the code.
	 * Only trust it if it has the right number of entries.
	 */
	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		VERIFY0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
		    &count));
	}
	if (count != dsl_dataset_phys(ds)->ds_num_children - 1) {
		return (SET_ERROR(ENOENT));
	}
	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, mos,
	    dsl_dataset_phys(ds)->ds_next_clones_obj);
	    zap_cursor_retrieve(&zc, za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za->za_first_integer, FTAG, &clone));
		dsl_dir_name(clone->ds_dir, buf);
		fnvlist_add_boolean(val, buf);
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	return (0);
}
static void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
	nvlist_t *propval = fnvlist_alloc();
	nvlist_t *val = fnvlist_alloc();

	if (get_clones_stat_impl(ds, val) == 0) {
		fnvlist_add_nvlist(propval, ZPROP_VALUE, val);
		fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
		    propval);
	}

	nvlist_free(val);
	nvlist_free(propval);
}
static char *
get_receive_resume_token_impl(dsl_dataset_t *ds)
{
	if (!dsl_dataset_has_resume_receive_state(ds))
		return (NULL);

	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	char *str;
	void *packed;
	uint8_t *compressed;
	uint64_t val;
	nvlist_t *token_nv = fnvlist_alloc();
	size_t packed_size, compressed_size;

	if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) {
		fnvlist_add_uint64(token_nv, "fromguid", val);
	}
	if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val) == 0) {
		fnvlist_add_uint64(token_nv, "object", val);
	}
	if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val) == 0) {
		fnvlist_add_uint64(token_nv, "offset", val);
	}
	if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_BYTES, sizeof (val), 1, &val) == 0) {
		fnvlist_add_uint64(token_nv, "bytes", val);
	}
	if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) {
		fnvlist_add_uint64(token_nv, "toguid", val);
	}
	char buf[MAXNAMELEN];
	if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) {
		fnvlist_add_string(token_nv, "toname", buf);
	}
	if (zap_contains(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_LARGEBLOCK) == 0) {
		fnvlist_add_boolean(token_nv, "largeblockok");
	}
	if (zap_contains(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_EMBEDOK) == 0) {
		fnvlist_add_boolean(token_nv, "embedok");
	}
	if (zap_contains(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_COMPRESSOK) == 0) {
		fnvlist_add_boolean(token_nv, "compressok");
	}
	if (zap_contains(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_RAWOK) == 0) {
		fnvlist_add_boolean(token_nv, "rawok");
	}
	if (dsl_dataset_feature_is_active(ds,
	    SPA_FEATURE_REDACTED_DATASETS)) {
		uint64_t num_redact_snaps = 0;
		uint64_t *redact_snaps = NULL;
		VERIFY3B(dsl_dataset_get_uint64_array_feature(ds,
		    SPA_FEATURE_REDACTED_DATASETS, &num_redact_snaps,
		    &redact_snaps), ==, B_TRUE);
		fnvlist_add_uint64_array(token_nv, "redact_snaps",
		    redact_snaps, num_redact_snaps);
	}
	if (zap_contains(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS) == 0) {
		uint64_t num_redact_snaps = 0, int_size = 0;
		uint64_t *redact_snaps = NULL;
		VERIFY0(zap_length(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, &int_size,
		    &num_redact_snaps));
		ASSERT3U(int_size, ==, sizeof (uint64_t));

		redact_snaps = kmem_alloc(int_size * num_redact_snaps,
		    KM_SLEEP);
		VERIFY0(zap_lookup(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, int_size,
		    num_redact_snaps, redact_snaps));
		fnvlist_add_uint64_array(token_nv, "book_redact_snaps",
		    redact_snaps, num_redact_snaps);
		kmem_free(redact_snaps, int_size * num_redact_snaps);
	}
	packed = fnvlist_pack(token_nv, &packed_size);
	fnvlist_free(token_nv);
	compressed = kmem_alloc(packed_size, KM_SLEEP);

	/* Call compress function directly to avoid hole detection. */
	abd_t pabd, cabd;
	abd_get_from_buf_struct(&pabd, packed, packed_size);
	abd_get_from_buf_struct(&cabd, compressed, packed_size);
	compressed_size = zfs_gzip_compress(&pabd, &cabd,
	    packed_size, packed_size, 6);
	abd_free(&cabd);
	abd_free(&pabd);

	zio_cksum_t cksum;
	fletcher_4_native_varsize(compressed, compressed_size, &cksum);

	size_t alloc_size = compressed_size * 2 + 1;
	str = kmem_alloc(alloc_size, KM_SLEEP);
	for (int i = 0; i < compressed_size; i++) {
		size_t offset = i * 2;
		(void) snprintf(str + offset, alloc_size - offset,
		    "%02x", compressed[i]);
	}
	str[compressed_size * 2] = '\0';
	char *propval = kmem_asprintf("%u-%llx-%llx-%s",
	    ZFS_SEND_RESUME_TOKEN_VERSION,
	    (longlong_t)cksum.zc_word[0],
	    (longlong_t)packed_size, str);
	kmem_free(packed, packed_size);
	kmem_free(str, alloc_size);
	kmem_free(compressed, packed_size);
	return (propval);
}
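
/*
 * Note on the format produced above (added commentary, inferred from the
 * kmem_asprintf() call; the sample token is hypothetical): the result is
 * "<version>-<cksum>-<size>-<payload>", where <version> is
 * ZFS_SEND_RESUME_TOKEN_VERSION, <cksum> is word 0 of the fletcher-4
 * checksum of the compressed payload, <size> is the size of the packed
 * nvlist, and <payload> is the gzip-compressed nvlist rendered as
 * lowercase hex. A token therefore looks like "1-f01cf9837-9e0-789c...".
 */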
/*
 * Returns a string that represents the receive resume state token. It should
 * be freed with strfree(). NULL is returned if no resume state is present.
 */
char *
get_receive_resume_token(dsl_dataset_t *ds)
{
	/*
	 * A failed "newfs" (e.g. full) resumable receive leaves
	 * the stats set on this dataset. Check here for the prop.
	 */
	char *token = get_receive_resume_token_impl(ds);
	if (token != NULL)
		return (token);
	/*
	 * A failed incremental resumable receive leaves the
	 * stats set on our child named "%recv". Check the child
	 * for the prop.
	 */
	/* 6 extra bytes for /%recv */
	char name[ZFS_MAX_DATASET_NAME_LEN + 6];
	dsl_dataset_t *recv_ds;
	dsl_dataset_name(ds, name);
	if (strlcat(name, "/", sizeof (name)) < sizeof (name) &&
	    strlcat(name, recv_clone_name, sizeof (name)) < sizeof (name) &&
	    dsl_dataset_hold(ds->ds_dir->dd_pool, name, FTAG, &recv_ds) == 0) {
		token = get_receive_resume_token_impl(recv_ds);
		dsl_dataset_rele(recv_ds, FTAG);
	}
	return (token);
}
uint64_t
dsl_get_refratio(dsl_dataset_t *ds)
{
	uint64_t ratio = dsl_dataset_phys(ds)->ds_compressed_bytes == 0 ? 100 :
	    (dsl_dataset_phys(ds)->ds_uncompressed_bytes * 100 /
	    dsl_dataset_phys(ds)->ds_compressed_bytes);
	return (ratio);
}
uint64_t
dsl_get_logicalreferenced(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_uncompressed_bytes);
}

uint64_t
dsl_get_compressratio(dsl_dataset_t *ds)
{
	if (ds->ds_is_snapshot) {
		return (dsl_get_refratio(ds));
	} else {
		dsl_dir_t *dd = ds->ds_dir;
		mutex_enter(&dd->dd_lock);
		uint64_t val = dsl_dir_get_compressratio(dd);
		mutex_exit(&dd->dd_lock);
		return (val);
	}
}
uint64_t
dsl_get_used(dsl_dataset_t *ds)
{
	if (ds->ds_is_snapshot) {
		return (dsl_dataset_phys(ds)->ds_unique_bytes);
	} else {
		dsl_dir_t *dd = ds->ds_dir;
		mutex_enter(&dd->dd_lock);
		uint64_t val = dsl_dir_get_used(dd);
		mutex_exit(&dd->dd_lock);
		return (val);
	}
}
uint64_t
dsl_get_creation(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_creation_time);
}

uint64_t
dsl_get_creationtxg(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_creation_txg);
}

uint64_t
dsl_get_refquota(dsl_dataset_t *ds)
{
	return (ds->ds_quota);
}

uint64_t
dsl_get_refreservation(dsl_dataset_t *ds)
{
	return (ds->ds_reserved);
}

uint64_t
dsl_get_guid(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_guid);
}

uint64_t
dsl_get_unique(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_unique_bytes);
}

uint64_t
dsl_get_objsetid(dsl_dataset_t *ds)
{
	return (ds->ds_object);
}

uint64_t
dsl_get_userrefs(dsl_dataset_t *ds)
{
	return (ds->ds_userrefs);
}

uint64_t
dsl_get_defer_destroy(dsl_dataset_t *ds)
{
	return (DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
}

uint64_t
dsl_get_referenced(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_referenced_bytes);
}

uint64_t
dsl_get_numclones(dsl_dataset_t *ds)
{
	ASSERT(ds->ds_is_snapshot);
	return (dsl_dataset_phys(ds)->ds_num_children - 1);
}

uint64_t
dsl_get_inconsistent(dsl_dataset_t *ds)
{
	return ((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT) ?
	    1 : 0);
}

uint64_t
dsl_get_redacted(dsl_dataset_t *ds)
{
	return (dsl_dataset_feature_is_active(ds,
	    SPA_FEATURE_REDACTED_DATASETS));
}
uint64_t
dsl_get_available(dsl_dataset_t *ds)
{
	uint64_t refdbytes = dsl_get_referenced(ds);
	uint64_t availbytes = dsl_dir_space_available(ds->ds_dir,
	    NULL, 0, TRUE);
	if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
		availbytes +=
		    ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
	}
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (refdbytes < ds->ds_quota) {
			availbytes = MIN(availbytes,
			    ds->ds_quota - refdbytes);
		} else {
			availbytes = 0;
		}
	}
	return (availbytes);
}
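
/*
 * Illustrative arithmetic (added sketch; the figures are hypothetical):
 * with a 10G refquota, 7G already referenced, and 50G free in the parent
 * dir, the refquota cap wins and "available" reports MIN(50G, 10G - 7G)
 * = 3G. Once referenced reaches the quota, available drops to 0
 * regardless of pool free space.
 */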
int
dsl_get_written(dsl_dataset_t *ds, uint64_t *written)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_dataset_t *prev;
	int err = dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
	if (err == 0) {
		uint64_t comp, uncomp;
		err = dsl_dataset_space_written(prev, ds, written,
		    &comp, &uncomp);
		dsl_dataset_rele(prev, FTAG);
	}
	return (err);
}
/*
 * 'snap' should be a buffer of size ZFS_MAX_DATASET_NAME_LEN.
 */
int
dsl_get_prev_snap(dsl_dataset_t *ds, char *snap)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	if (ds->ds_prev != NULL && ds->ds_prev != dp->dp_origin_snap) {
		dsl_dataset_name(ds->ds_prev, snap);
		return (0);
	} else {
		return (SET_ERROR(ENOENT));
	}
}
void
dsl_get_redact_snaps(dsl_dataset_t *ds, nvlist_t *propval)
{
	uint64_t nsnaps;
	uint64_t *snaps;
	if (dsl_dataset_get_uint64_array_feature(ds,
	    SPA_FEATURE_REDACTED_DATASETS, &nsnaps, &snaps)) {
		fnvlist_add_uint64_array(propval, ZPROP_VALUE, snaps,
		    nsnaps);
	}
}
/*
 * Returns the mountpoint property and source for the given dataset in the value
 * and source buffers. The value buffer must be at least as large as MAXPATHLEN
 * and the source buffer as least as large a ZFS_MAX_DATASET_NAME_LEN.
 * Returns 0 on success and an error on failure.
 */
int
dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
    char *source)
{
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Retrieve the mountpoint value stored in the zap object */
	error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
	    ZAP_MAXVALUELEN, value, source);
	if (error != 0) {
		return (error);
	}

	/*
	 * Process the dsname and source to find the full mountpoint string.
	 * Can be skipped for 'legacy' or 'none'.
	 */
	if (value[0] == '/') {
		char *buf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
		char *root = buf;
		const char *relpath;

		/*
		 * If we inherit the mountpoint, even from a dataset
		 * with a received value, the source will be the path of
		 * the dataset we inherit from. If source is
		 * ZPROP_SOURCE_VAL_RECVD, the received value is not
		 * inherited.
		 */
		if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
			relpath = "";
		} else {
			ASSERT0(strncmp(dsname, source, strlen(source)));
			relpath = dsname + strlen(source);
			if (relpath[0] == '/')
				relpath++;
		}

		spa_altroot(dp->dp_spa, root, ZAP_MAXVALUELEN);

		/*
		 * Special case an alternate root of '/'. This will
		 * avoid having multiple leading slashes in the
		 * mountpoint path.
		 */
		if (strcmp(root, "/") == 0)
			root++;

		/*
		 * If the mountpoint is '/' then skip over this
		 * if we are obtaining either an alternate root or
		 * an inherited mountpoint.
		 */
		char *mnt = value;
		if (value[1] == '\0' && (root[0] != '\0' ||
		    relpath[0] != '\0'))
			mnt = value + 1;

		mnt = kmem_strdup(mnt);

		if (relpath[0] == '\0') {
			(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s",
			    root, mnt);
		} else {
			(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s%s%s",
			    root, mnt, relpath[0] == '@' ? "" : "/",
			    relpath);
		}
		kmem_free(buf, ZAP_MAXVALUELEN);
		kmem_strfree(mnt);
	}

	return (0);
}
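
/*
 * Example composition (added sketch; values are hypothetical): with an
 * altroot of "/mnt", a mountpoint of "/export" inherited from "pool",
 * and dsname "pool/home", the pieces above are root = "/mnt",
 * mnt = "/export", relpath = "home", yielding "/mnt/export/home".
 */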
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;

	ASSERT(dsl_pool_config_held(dp));

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO,
	    dsl_get_refratio(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
	    dsl_get_logicalreferenced(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dsl_get_compressratio(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dsl_get_used(ds));

	if (ds->ds_is_snapshot) {
		get_clones_stat(ds, nv);
	} else {
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		if (dsl_get_prev_snap(ds, buf) == 0)
			dsl_prop_nvlist_add_string(nv, ZFS_PROP_PREV_SNAP,
			    buf);
		dsl_dir_stats(ds->ds_dir, nv);
	}

	nvlist_t *propval = fnvlist_alloc();
	dsl_get_redact_snaps(ds, propval);
	fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
	    propval);
	nvlist_free(propval);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE,
	    dsl_get_available(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED,
	    dsl_get_referenced(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
	    dsl_get_creation(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
	    dsl_get_creationtxg(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
	    dsl_get_refquota(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
	    dsl_get_refreservation(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
	    dsl_get_guid(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
	    dsl_get_unique(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
	    dsl_get_objsetid(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
	    dsl_get_userrefs(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
	    dsl_get_defer_destroy(ds));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOTS_CHANGED,
	    dsl_dir_snap_cmtime(ds->ds_dir).tv_sec);
	dsl_dataset_crypt_stats(ds, nv);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		uint64_t written;
		if (dsl_get_written(ds, &written) == 0) {
			dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
			    written);
		}
	}

	if (!dsl_dataset_is_snapshot(ds)) {
		char *token = get_receive_resume_token(ds);
		if (token != NULL) {
			dsl_prop_nvlist_add_string(nv,
			    ZFS_PROP_RECEIVE_RESUME_TOKEN, token);
			kmem_strfree(token);
		}
	}
}
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
	dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
	ASSERT(dsl_pool_config_held(dp));

	stat->dds_creation_txg = dsl_get_creationtxg(ds);
	stat->dds_inconsistent = dsl_get_inconsistent(ds);
	stat->dds_guid = dsl_get_guid(ds);
	stat->dds_redacted = dsl_get_redacted(ds);
	stat->dds_origin[0] = '\0';
	if (ds->ds_is_snapshot) {
		stat->dds_is_snapshot = B_TRUE;
		stat->dds_num_clones = dsl_get_numclones(ds);
	} else {
		stat->dds_is_snapshot = B_FALSE;
		stat->dds_num_clones = 0;

		if (dsl_dir_is_clone(ds->ds_dir)) {
			dsl_dir_get_origin(ds->ds_dir, stat->dds_origin);
		}
	}
}
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
	return (ds->ds_fsid_guid);
}
void
dsl_dataset_space(dsl_dataset_t *ds,
    uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	*refdbytesp = dsl_dataset_phys(ds)->ds_referenced_bytes;
	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
	if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes)
		*availbytesp +=
		    ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (*refdbytesp < ds->ds_quota)
			*availbytesp = MIN(*availbytesp,
			    ds->ds_quota - *refdbytesp);
		else
			*availbytesp = 0;
	}
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	*usedobjsp = BP_GET_FILL(&dsl_dataset_phys(ds)->ds_bp);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
boolean_t
dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
{
	dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
	uint64_t birth;

	ASSERT(dsl_pool_config_held(dp));
	if (snap == NULL)
		return (B_FALSE);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	birth = BP_GET_LOGICAL_BIRTH(dsl_dataset_get_blkptr(ds));
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	if (birth > dsl_dataset_phys(snap)->ds_creation_txg) {
		objset_t *os, *os_snap;
		/*
		 * It may be that only the ZIL differs, because it was
		 * reset in the head. Don't count that as being
		 * modified.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0)
			return (B_TRUE);
		if (dmu_objset_from_ds(snap, &os_snap) != 0)
			return (B_TRUE);
		return (memcmp(&os->os_phys->os_meta_dnode,
		    &os_snap->os_phys->os_meta_dnode,
		    sizeof (os->os_phys->os_meta_dnode)) != 0);
	}
	return (B_FALSE);
}
static int
dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
    dsl_dataset_t *hds, void *arg)
{
	(void) dp;
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	int error;
	uint64_t val;

	error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
	if (error != 0) {
		/* ignore nonexistent snapshots */
		return (error == ENOENT ? 0 : error);
	}

	/* new name should not exist */
	error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
	if (error == 0)
		error = SET_ERROR(EEXIST);
	else if (error == ENOENT)
		error = 0;

	/* dataset name + 1 for the "@" + the new snapshot name must fit */
	if (dsl_dir_namelen(hds->ds_dir) + 1 +
	    strlen(ddrsa->ddrsa_newsnapname) >= ZFS_MAX_DATASET_NAME_LEN)
		error = SET_ERROR(ENAMETOOLONG);

	return (error);
}
static int
dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	int error;

	error = dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds);
	if (error != 0)
		return (error);

	if (ddrsa->ddrsa_recursive) {
		error = dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
		    dsl_dataset_rename_snapshot_check_impl, ddrsa,
		    DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_rename_snapshot_check_impl(dp, hds, ddrsa);
	}
	dsl_dataset_rele(hds, FTAG);
	return (error);
}
static int
dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
    dsl_dataset_t *hds, void *arg)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	dsl_dataset_t *ds;
	uint64_t val;
	dmu_tx_t *tx = ddrsa->ddrsa_tx;
	char *oldname, *newname;
	int error;

	error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
	ASSERT(error == 0 || error == ENOENT);
	if (error == ENOENT) {
		/* ignore nonexistent snapshots */
		return (0);
	}

	VERIFY0(dsl_dataset_hold_obj(dp, val, FTAG, &ds));

	/* log before we change the name */
	spa_history_log_internal_ds(ds, "rename", tx,
	    "-> @%s", ddrsa->ddrsa_newsnapname);

	VERIFY0(dsl_dataset_snap_remove(hds, ddrsa->ddrsa_oldsnapname, tx,
	    B_FALSE));
	mutex_enter(&ds->ds_lock);
	(void) strlcpy(ds->ds_snapname, ddrsa->ddrsa_newsnapname,
	    sizeof (ds->ds_snapname));
	mutex_exit(&ds->ds_lock);
	VERIFY0(zap_add(dp->dp_meta_objset,
	    dsl_dataset_phys(hds)->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx));

	oldname = kmem_asprintf("%s@%s", ddrsa->ddrsa_fsname,
	    ddrsa->ddrsa_oldsnapname);
	newname = kmem_asprintf("%s@%s", ddrsa->ddrsa_fsname,
	    ddrsa->ddrsa_newsnapname);
	zvol_rename_minors(dp->dp_spa, oldname, newname, B_TRUE);
	kmem_strfree(oldname);
	kmem_strfree(newname);

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds = NULL;

	VERIFY0(dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds));
	ddrsa->ddrsa_tx = tx;
	if (ddrsa->ddrsa_recursive) {
		VERIFY0(dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
		    dsl_dataset_rename_snapshot_sync_impl, ddrsa,
		    DS_FIND_CHILDREN));
	} else {
		VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp, hds, ddrsa));
	}
	dsl_dataset_rele(hds, FTAG);
}
int
dsl_dataset_rename_snapshot(const char *fsname,
    const char *oldsnapname, const char *newsnapname, boolean_t recursive)
{
	dsl_dataset_rename_snapshot_arg_t ddrsa;

	ddrsa.ddrsa_fsname = fsname;
	ddrsa.ddrsa_oldsnapname = oldsnapname;
	ddrsa.ddrsa_newsnapname = newsnapname;
	ddrsa.ddrsa_recursive = recursive;

	return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
	    dsl_dataset_rename_snapshot_sync, &ddrsa,
	    1, ZFS_SPACE_CHECK_RESERVED));
}
/*
 * If we're doing an ownership handoff, we need to make sure that there is
 * only one long hold on the dataset. We're not allowed to change anything here
 * so we don't permanently release the long hold or regular hold here. We want
 * to do this only when syncing to avoid the dataset unexpectedly going away
 * when we release the long hold.
 */
static int
dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
{
	boolean_t held = B_FALSE;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	dsl_dir_t *dd = ds->ds_dir;
	mutex_enter(&dd->dd_activity_lock);
	uint64_t holds = zfs_refcount_count(&ds->ds_longholds) -
	    (owner != NULL ? 1 : 0);
	/*
	 * The value of dd_activity_waiters can change as soon as we drop the
	 * lock, but we're fine with that; new waiters coming in or old
	 * waiters leaving doesn't cause problems, since we're going to cancel
	 * waiters later anyway. The goal of this check is to verify that no
	 * non-waiters have long-holds, and all new long-holds will be
	 * prevented because we're holding the pool config as writer.
	 */
	if (holds != dd->dd_activity_waiters)
		held = B_TRUE;
	mutex_exit(&dd->dd_activity_lock);

	if (held)
		return (SET_ERROR(EBUSY));

	return (0);
}
static int
dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rollback_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int64_t unused_refres_delta;
	int error;

	error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
	if (error != 0)
		return (error);

	/* must not be a snapshot */
	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* must have a most recent snapshot */
	if (dsl_dataset_phys(ds)->ds_prev_snap_txg < TXG_INITIAL) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ESRCH));
	}

	/*
	 * No rollback to a snapshot created in the current txg, because
	 * the rollback may dirty the dataset and create blocks that are
	 * not reachable from the rootbp while having a birth txg that
	 * falls into the snapshot's range.
	 */
	if (dmu_tx_is_syncing(tx) &&
	    dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EAGAIN));
	}

	/*
	 * If the expected target snapshot is specified, then check that
	 * the latest snapshot is it.
	 */
	if (ddra->ddra_tosnap != NULL) {
		dsl_dataset_t *snapds;

		/* Check if the target snapshot exists at all. */
		error = dsl_dataset_hold(dp, ddra->ddra_tosnap, FTAG, &snapds);
		if (error != 0) {
			/*
			 * ESRCH is used to signal that the target snapshot does
			 * not exist, while ENOENT is used to report that
			 * the rolled back dataset does not exist.
			 * ESRCH is also used to cover other cases where the
			 * target snapshot is not related to the dataset being
			 * rolled back such as being in a different pool.
			 */
			if (error == ENOENT || error == EXDEV)
				error = SET_ERROR(ESRCH);
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		ASSERT(snapds->ds_is_snapshot);

		/* Check if the snapshot is the latest snapshot indeed. */
		if (snapds != ds->ds_prev) {
			/*
			 * Distinguish between the case where the only problem
			 * is intervening snapshots (EEXIST) vs the snapshot
			 * not being a valid target for rollback (ESRCH).
			 */
			if (snapds->ds_dir == ds->ds_dir ||
			    (dsl_dir_is_clone(ds->ds_dir) &&
			    dsl_dir_phys(ds->ds_dir)->dd_origin_obj ==
			    snapds->ds_object)) {
				error = SET_ERROR(EEXIST);
			} else {
				error = SET_ERROR(ESRCH);
			}
			dsl_dataset_rele(snapds, FTAG);
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		dsl_dataset_rele(snapds, FTAG);
	}

	/* must not have any bookmarks after the most recent snapshot */
	if (dsl_bookmark_latest_txg(ds) >
	    dsl_dataset_phys(ds)->ds_prev_snap_txg) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EEXIST));
	}

	error = dsl_dataset_handoff_check(ds, ddra->ddra_owner, tx);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	/*
	 * Check if the snap we are rolling back to uses more than
	 * the refquota.
	 */
	if (ds->ds_quota != 0 &&
	    dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes > ds->ds_quota) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EDQUOT));
	}

	/*
	 * When we do the clone swap, we will temporarily use more space
	 * due to the refreservation (the head will no longer have any
	 * unique space, so the entire amount of the refreservation will need
	 * to be free). We will immediately destroy the clone, freeing
	 * this space, but the freeing happens over many txg's.
	 */
	unused_refres_delta = (int64_t)MIN(ds->ds_reserved,
	    dsl_dataset_phys(ds)->ds_unique_bytes);

	if (unused_refres_delta > 0 &&
	    unused_refres_delta >
	    dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rollback_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds, *clone;
	uint64_t cloneobj;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY0(dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds));

	dsl_dataset_name(ds->ds_prev, namebuf);
	fnvlist_add_string(ddra->ddra_result, "target", namebuf);

	cloneobj = dsl_dataset_create_sync(ds->ds_dir, "%rollback",
	    ds->ds_prev, DS_CREATE_FLAG_NODIRTY, kcred, NULL, tx);

	VERIFY0(dsl_dataset_hold_obj(dp, cloneobj, FTAG, &clone));

	dsl_dataset_clone_swap_sync_impl(clone, ds, tx);
	dsl_dataset_zero_zil(ds, tx);

	dsl_destroy_head_sync_impl(clone, tx);

	dsl_dataset_rele(clone, FTAG);
	dsl_dataset_rele(ds, FTAG);
}
/*
 * Rolls back the given filesystem or volume to the most recent snapshot.
 * The name of the most recent snapshot will be returned under key "target"
 * in the result nvlist.
 *
 * If owner != NULL:
 * - The existing dataset MUST be owned by the specified owner at entry
 * - Upon return, dataset will still be held by the same owner, whether we
 *   succeed or not.
 *
 * This mode is required any time the existing filesystem is mounted. See
 * notes above zfs_suspend_fs() for further details.
 */
int
dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
    nvlist_t *result)
{
	dsl_dataset_rollback_arg_t ddra;

	ddra.ddra_fsname = fsname;
	ddra.ddra_tosnap = tosnap;
	ddra.ddra_owner = owner;
	ddra.ddra_result = result;

	return (dsl_sync_task(fsname, dsl_dataset_rollback_check,
	    dsl_dataset_rollback_sync, &ddra,
	    1, ZFS_SPACE_CHECK_RESERVED));
}
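
/*
 * Usage sketch (illustrative only; names and the owner cookie are
 * hypothetical). The caller owns the dataset and reads the snapshot
 * actually rolled back to from the "target" key of the result nvlist:
 *
 *	nvlist_t *result = fnvlist_alloc();
 *	int err = dsl_dataset_rollback("pool/fs", "pool/fs@latest",
 *	    owner_cookie, result);
 *	if (err == 0) {
 *		const char *target =
 *		    fnvlist_lookup_string(result, "target");
 *	}
 *	fnvlist_free(result);
 */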
struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
    const void *tag);
static void promote_rele(dsl_dataset_promote_arg_t *ddpa, const void *tag);
static int
dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_promote_arg_t *ddpa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	struct promotenode *snap;
	int err;
	uint64_t unused;
	uint64_t ss_mv_cnt;
	size_t max_snap_len;
	boolean_t conflicting_snaps;

	err = promote_hold(ddpa, dp, FTAG);
	if (err != 0)
		return (err);

	hds = ddpa->ddpa_clone;
	max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;

	if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
		promote_rele(ddpa, FTAG);
		return (SET_ERROR(EXDEV));
	}

	snap = list_head(&ddpa->shared_snaps);
	if (snap == NULL) {
		err = SET_ERROR(ENOENT);
		goto out;
	}
	dsl_dataset_t *const origin_ds = snap->ds;

	/*
	 * Encrypted clones share a DSL Crypto Key with their origin's dsl dir.
	 * When doing a promote we must make sure the encryption root for
	 * both the target and the target's origin does not change to avoid
	 * needing to rewrap encryption keys
	 */
	err = dsl_dataset_promote_crypt_check(hds->ds_dir, origin_ds->ds_dir);
	if (err != 0)
		goto out;

	/*
	 * Compute and check the amount of space to transfer. Since this is
	 * so expensive, don't do the preliminary check.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		promote_rele(ddpa, FTAG);
		return (0);
	}

	/* compute origin's new unique space */
	snap = list_tail(&ddpa->clone_snaps);
	ASSERT(snap != NULL);
	ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
	    origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
	    &ddpa->unique, &unused, &unused);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer. Consider the incremental changes
	 * to used by each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
	conflicting_snaps = B_FALSE;
	ss_mv_cnt = 0;
	ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
	ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
	ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
	for (snap = list_head(&ddpa->shared_snaps); snap;
	    snap = list_next(&ddpa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		ss_mv_cnt++;

		/*
		 * If there are long holds, we won't be able to evict
		 * the objset.
		 */
		if (dsl_dataset_long_held(ds)) {
			err = SET_ERROR(EBUSY);
			goto out;
		}

		/* Check that the snapshot name does not conflict */
		VERIFY0(dsl_dataset_get_snapname(ds));
		if (strlen(ds->ds_snapname) >= max_snap_len) {
			err = SET_ERROR(ENAMETOOLONG);
			goto out;
		}
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			fnvlist_add_boolean(ddpa->err_ds,
			    snap->ds->ds_snapname);
			conflicting_snaps = B_TRUE;
		} else if (err != ENOENT) {
			goto out;
		}

		/* The very first snapshot does not have a deadlist */
		if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		ddpa->used += dlused;
		ddpa->comp += dlcomp;
		ddpa->uncomp += dluncomp;
	}

	/*
	 * Check that bookmarks that are being transferred don't have
	 * name conflicts.
	 */
	for (dsl_bookmark_node_t *dbn = avl_first(&origin_ds->ds_bookmarks);
	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
	    dsl_dataset_phys(origin_ds)->ds_creation_txg;
	    dbn = AVL_NEXT(&origin_ds->ds_bookmarks, dbn)) {
		if (strlen(dbn->dbn_name) >= max_snap_len) {
			err = SET_ERROR(ENAMETOOLONG);
			goto out;
		}
		zfs_bookmark_phys_t bm;
		err = dsl_bookmark_lookup_impl(ddpa->ddpa_clone,
		    dbn->dbn_name, &bm);
		if (err == 0) {
			fnvlist_add_boolean(ddpa->err_ds, dbn->dbn_name);
			conflicting_snaps = B_TRUE;
		} else if (err == ESRCH) {
			err = 0;
		}
		if (err != 0)
			goto out;
	}

	/*
	 * In order to return the full list of conflicting snapshots, we check
	 * whether there was a conflict after traversing all of them.
	 */
	if (conflicting_snaps) {
		err = SET_ERROR(EEXIST);
		goto out;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (ddpa->origin_origin) {
		ddpa->used -=
		    dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
		ddpa->comp -=
		    dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
		ddpa->uncomp -=
		    dsl_dataset_phys(ddpa->origin_origin)->
		    ds_uncompressed_bytes;
	}

	/* Check that there is enough space and limit headroom here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    0, ss_mv_cnt, ddpa->used, ddpa->cr, ddpa->proc);
	if (err != 0)
		goto out;

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone). For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&ddpa->origin_snaps);
		if (snap == NULL) {
			err = SET_ERROR(ENOENT);
			goto out;
		}
		err = snaplist_space(&ddpa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
		if (err != 0)
			goto out;

		err = snaplist_space(&ddpa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err != 0)
			goto out;
		ddpa->cloneusedsnap += space;
	}
	if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
	    DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&ddpa->origin_snaps,
		    dsl_dataset_phys(origin_ds)->ds_creation_txg,
		    &ddpa->originusedsnap);
	}

out:
	promote_rele(ddpa, FTAG);
	return (err);
}
static void
dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_promote_arg_t *ddpa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	struct promotenode *snap;
	dsl_dataset_t *origin_ds;
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd;
	dsl_dir_t *odd = NULL;
	uint64_t oldnext_obj;
	int64_t delta;

	ASSERT(nvlist_empty(ddpa->err_ds));

	VERIFY0(promote_hold(ddpa, dp, FTAG));
	hds = ddpa->ddpa_clone;

	ASSERT0(dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE);

	snap = list_head(&ddpa->shared_snaps);
	origin_ds = snap->ds;
	dd = hds->ds_dir;

	snap = list_head(&ddpa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY0(dsl_dir_hold_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	dsl_dataset_promote_crypt_sync(hds->ds_dir, odd, tx);

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = dsl_dataset_phys(origin_ds)->ds_next_snap_obj;
	snap = list_tail(&ddpa->clone_snaps);
	ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
	    origin_ds->ds_object);
	dsl_dataset_phys(origin_ds)->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone */
	if (dsl_dataset_phys(origin_ds)->ds_next_clones_obj) {
		dsl_dataset_remove_from_next_clones(origin_ds,
		    snap->ds->ds_object, tx);
		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dataset_phys(origin_ds)->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dsl_dir_phys(dd)->dd_origin_obj, ==, origin_ds->ds_object);
	dsl_dir_phys(dd)->dd_origin_obj = dsl_dir_phys(odd)->dd_origin_obj;
	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	dsl_dir_phys(odd)->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_dir->dd_origin_txg =
	    dsl_dataset_phys(origin_ds)->ds_creation_txg;

	/* change dd_clone entries */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
		VERIFY0(zap_remove_int(dp->dp_meta_objset,
		    dsl_dir_phys(odd)->dd_clones, hds->ds_object, tx));
		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
		    hds->ds_object, tx));

		VERIFY0(zap_remove_int(dp->dp_meta_objset,
		    dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
		    origin_head->ds_object, tx));
		if (dsl_dir_phys(dd)->dd_clones == 0) {
			dsl_dir_phys(dd)->dd_clones =
			    zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES,
			    DMU_OT_NONE, 0, tx);
		}
		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_clones, origin_head->ds_object, tx));
	}

	/*
	 * Move bookmarks to this dir.
	 */
	dsl_bookmark_node_t *dbn_next;
	for (dsl_bookmark_node_t *dbn = avl_first(&origin_head->ds_bookmarks);
	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
	    dsl_dataset_phys(origin_ds)->ds_creation_txg;
	    dbn = dbn_next) {
		dbn_next = AVL_NEXT(&origin_head->ds_bookmarks, dbn);

		avl_remove(&origin_head->ds_bookmarks, dbn);
		VERIFY0(zap_remove(dp->dp_meta_objset,
		    origin_head->ds_bookmarks_obj, dbn->dbn_name, tx));

		dsl_bookmark_node_add(hds, dbn, tx);
	}

	dsl_bookmark_next_changed(hds, origin_ds, tx);

	/* move snapshots to this dir */
	for (snap = list_head(&ddpa->shared_snaps); snap;
	    snap = list_next(&ddpa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/*
		 * Property callbacks are registered to a particular
		 * dsl_dir. Since ours is changing, evict the objset
		 * so that they will be unregistered from the old dsl_dir.
		 */
		if (ds->ds_objset) {
			dmu_objset_evict(ds->ds_objset);
			ds->ds_objset = NULL;
		}

		/* move snap name entry */
		VERIFY0(dsl_dataset_get_snapname(ds));
		VERIFY0(dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx, B_TRUE));
		VERIFY0(zap_add(dp->dp_meta_objset,
		    dsl_dataset_phys(hds)->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));
		dsl_fs_ss_count_adjust(hds->ds_dir, 1,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(dsl_dataset_phys(ds)->ds_dir_obj, ==, odd->dd_object);
		dsl_dataset_phys(ds)->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_rele(ds->ds_dir, ds);
		VERIFY0(dsl_dir_hold_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		/* move any clone references */
		if (dsl_dataset_phys(ds)->ds_next_clones_obj &&
		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			zap_cursor_t zc;
			zap_attribute_t *za = zap_attribute_alloc();

			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, za) == 0;
			    zap_cursor_advance(&zc)) {
				dsl_dataset_t *cnds;
				uint64_t o;

				if (za->za_first_integer == oldnext_obj) {
					/*
					 * We've already moved the
					 * origin's reference.
					 */
					continue;
				}

				VERIFY0(dsl_dataset_hold_obj(dp,
				    za->za_first_integer, FTAG, &cnds));
				o = dsl_dir_phys(cnds->ds_dir)->
				    dd_head_dataset_obj;

				VERIFY0(zap_remove_int(dp->dp_meta_objset,
				    dsl_dir_phys(odd)->dd_clones, o, tx));
				VERIFY0(zap_add_int(dp->dp_meta_objset,
				    dsl_dir_phys(dd)->dd_clones, o, tx));
				dsl_dataset_rele(cnds, FTAG);
			}
			zap_cursor_fini(&zc);
			zap_attribute_free(za);
		}

		ASSERT(!dsl_prop_hascb(ds));
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0). This
	 * is true for each of {clone,origin} independently.
	 */

	delta = ddpa->cloneusedsnap -
	    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(ddpa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    ddpa->used - delta, ddpa->comp, ddpa->uncomp, tx);

	delta = ddpa->originusedsnap -
	    dsl_dir_phys(odd)->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(ddpa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -ddpa->used - delta, -ddpa->comp, -ddpa->uncomp, tx);

	dsl_dataset_phys(origin_ds)->ds_unique_bytes = ddpa->unique;

	/*
	 * Since livelists are specific to a clone's origin txg, they
	 * are no longer accurate. Destroy the livelist from the clone being
	 * promoted. If the origin dataset is a clone, destroy its livelist
	 * as well.
	 */
	dsl_dir_remove_livelist(dd, tx, B_TRUE);
	dsl_dir_remove_livelist(odd, tx, B_TRUE);

	/* log history record */
	spa_history_log_internal_ds(hds, "promote", tx, " ");

	dsl_dir_rele(odd, FTAG);

	/*
	 * Transfer common error blocks from old head to new head, before
	 * calling promote_rele() on ddpa since we need to dereference
	 * origin_head and hds.
	 */
	if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_HEAD_ERRLOG)) {
		uint64_t old_head = origin_head->ds_object;
		uint64_t new_head = hds->ds_object;
		spa_swap_errlog(dp->dp_spa, new_head, old_head, tx);
	}

	promote_rele(ddpa, FTAG);
}
/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive). The list will be in reverse
 * order (last_obj will be the list_head()). If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
static int
snaplist_make(dsl_pool_t *dp,
    uint64_t first_obj, uint64_t last_obj, list_t *l, const void *tag)
{
	uint64_t obj = last_obj;

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
		ASSERT(err != ENOENT);
		if (err != 0)
			return (err);

		if (first_obj == 0)
			first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;

		snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}

	return (0);
}
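
/*
 * Illustrative note (added sketch; the snapshot objects are hypothetical):
 * for a chain @a -> @b -> @c, snaplist_make(dp, obj(@a), obj(@c), l, tag)
 * yields a list containing only @c and @b (@a is exclusive), with @c at
 * list_head() and @b at list_tail(); each dataset stays held with 'tag'
 * until snaplist_destroy() releases it.
 */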
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
	struct promotenode *snap;

	*spacep = 0;
	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
		*spacep += used;
	}
	return (0);
}
static void
snaplist_destroy(list_t *l, const void *tag)
{
	struct promotenode *snap;

	if (l == NULL || !list_link_active(&l->list_head))
		return;

	while ((snap = list_remove_tail(l)) != NULL) {
		dsl_dataset_rele(snap->ds, tag);
		kmem_free(snap, sizeof (*snap));
	}
	list_destroy(l);
}
static int
promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, const void *tag)
{
	int error;
	dsl_dir_t *dd;
	struct promotenode *snap;

	error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
	    &ddpa->ddpa_clone);
	if (error != 0)
		return (error);
	dd = ddpa->ddpa_clone->ds_dir;

	if (ddpa->ddpa_clone->ds_is_snapshot ||
	    !dsl_dir_is_clone(dd)) {
		dsl_dataset_rele(ddpa->ddpa_clone, tag);
		return (SET_ERROR(EINVAL));
	}

	error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
	    &ddpa->shared_snaps, tag);
	if (error != 0)
		goto out;

	error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
	    &ddpa->clone_snaps, tag);
	if (error != 0)
		goto out;

	snap = list_head(&ddpa->shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
	error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
	    dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
	    &ddpa->origin_snaps, tag);
	if (error != 0)
		goto out;

	if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
		error = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
		    tag, &ddpa->origin_origin);
		if (error != 0)
			goto out;
	}
out:
	if (error != 0)
		promote_rele(ddpa, tag);
	return (error);
}
static void
promote_rele(dsl_dataset_promote_arg_t *ddpa, const void *tag)
{
	snaplist_destroy(&ddpa->shared_snaps, tag);
	snaplist_destroy(&ddpa->clone_snaps, tag);
	snaplist_destroy(&ddpa->origin_snaps, tag);
	if (ddpa->origin_origin != NULL)
		dsl_dataset_rele(ddpa->origin_origin, tag);
	dsl_dataset_rele(ddpa->ddpa_clone, tag);
}
/*
 * Promote a clone.
 *
 * If it fails due to a conflicting snapshot name, "conflsnap" will be filled
 * in with the name. (It must be at least ZFS_MAX_DATASET_NAME_LEN bytes long.)
 */
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
	dsl_dataset_promote_arg_t ddpa = { 0 };
	uint64_t numsnaps;
	int error;
	nvpair_t *snap_pair;
	objset_t *os;

	/*
	 * We will modify space proportional to the number of
	 * snapshots. Compute numsnaps.
	 */
	error = dmu_objset_hold(name, FTAG, &os);
	if (error != 0)
		return (error);
	error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
	    dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
	    &numsnaps);
	dmu_objset_rele(os, FTAG);
	if (error != 0)
		return (error);

	ddpa.ddpa_clonename = name;
	ddpa.err_ds = fnvlist_alloc();
	ddpa.cr = CRED();
	ddpa.proc = curproc;

	error = dsl_sync_task(name, dsl_dataset_promote_check,
	    dsl_dataset_promote_sync, &ddpa,
	    2 + numsnaps, ZFS_SPACE_CHECK_RESERVED);

	/*
	 * Return the first conflicting snapshot found.
	 */
	snap_pair = nvlist_next_nvpair(ddpa.err_ds, NULL);
	if (snap_pair != NULL && conflsnap != NULL)
		(void) strlcpy(conflsnap, nvpair_name(snap_pair),
		    ZFS_MAX_DATASET_NAME_LEN);

	fnvlist_free(ddpa.err_ds);
	return (error);
}
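
/*
 * Usage sketch (illustrative only; the dataset name is hypothetical):
 *
 *	char confl[ZFS_MAX_DATASET_NAME_LEN];
 *	int err = dsl_dataset_promote("pool/clone", confl);
 *	if (err == EEXIST)
 *		cmn_err(CE_NOTE, "conflicting snapshot: %s", confl);
 *
 * On EEXIST, confl names the first snapshot whose name collides in the
 * promoted dataset.
 */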
int
dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
    dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
{
	/*
	 * "slack" factor for received datasets with refquota set on them.
	 * See the bottom of this function for details on its use.
	 */
	uint64_t refquota_slack = (uint64_t)DMU_MAX_ACCESS *
	    spa_asize_inflation;
	int64_t unused_refres_delta;

	/* they should both be heads */
	if (clone->ds_is_snapshot ||
	    origin_head->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* if we are not forcing, the branch point should be just before them */
	if (!force && clone->ds_prev != origin_head->ds_prev)
		return (SET_ERROR(EINVAL));

	/* clone should be the clone (unless they are unrelated) */
	if (clone->ds_prev != NULL &&
	    clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
	    origin_head->ds_dir != clone->ds_prev->ds_dir)
		return (SET_ERROR(EINVAL));

	/* the clone should be a child of the origin */
	if (clone->ds_dir->dd_parent != origin_head->ds_dir)
		return (SET_ERROR(EINVAL));

	/* origin_head shouldn't be modified unless 'force' */
	if (!force &&
	    dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
		return (SET_ERROR(ETXTBSY));

	/* origin_head should have no long holds (e.g. is not mounted) */
	if (dsl_dataset_handoff_check(origin_head, owner, tx))
		return (SET_ERROR(EBUSY));

	/* check amount of any unconsumed refreservation */
	unused_refres_delta =
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(origin_head)->ds_unique_bytes) -
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	if (unused_refres_delta > 0 &&
	    unused_refres_delta >
	    dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
		return (SET_ERROR(ENOSPC));

	/*
	 * The clone can't be too much over the head's refquota.
	 *
	 * To ensure that the entire refquota can be used, we allow one
	 * transaction to exceed the refquota. Therefore, this check
	 * needs to also allow for the space referenced to be more than the
	 * refquota. The maximum amount of space that one transaction can use
	 * on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
	 * overage ensures that we are able to receive a filesystem that
	 * exceeds the refquota on the source system.
	 *
	 * So that overage is the refquota_slack we use below.
	 */
	if (origin_head->ds_quota != 0 &&
	    dsl_dataset_phys(clone)->ds_referenced_bytes >
	    origin_head->ds_quota + refquota_slack)
		return (SET_ERROR(EDQUOT));

	return (0);
}
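
/*
 * Illustrative arithmetic (added sketch; assumes DMU_MAX_ACCESS is its
 * usual 64MB definition and spa_asize_inflation is at its default of 24):
 * refquota_slack works out to 64MB * 24 = 1.5GB, so under a 10G refquota
 * a received clone referencing up to 11.5G still passes the check above.
 */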
static void
dsl_dataset_swap_remap_deadlists(dsl_dataset_t *clone,
    dsl_dataset_t *origin, dmu_tx_t *tx)
{
	uint64_t clone_remap_dl_obj, origin_remap_dl_obj;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	ASSERT(dsl_pool_sync_context(dp));

	clone_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(clone);
	origin_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(origin);

	if (clone_remap_dl_obj != 0) {
		dsl_deadlist_close(&clone->ds_remap_deadlist);
		dsl_dataset_unset_remap_deadlist_object(clone, tx);
	}
	if (origin_remap_dl_obj != 0) {
		dsl_deadlist_close(&origin->ds_remap_deadlist);
		dsl_dataset_unset_remap_deadlist_object(origin, tx);
	}

	if (clone_remap_dl_obj != 0) {
		dsl_dataset_set_remap_deadlist_object(origin,
		    clone_remap_dl_obj, tx);
		VERIFY0(dsl_deadlist_open(&origin->ds_remap_deadlist,
		    dp->dp_meta_objset, clone_remap_dl_obj));
	}
	if (origin_remap_dl_obj != 0) {
		dsl_dataset_set_remap_deadlist_object(clone,
		    origin_remap_dl_obj, tx);
		VERIFY0(dsl_deadlist_open(&clone->ds_remap_deadlist,
		    dp->dp_meta_objset, origin_remap_dl_obj));
	}
}
void
dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
    dsl_dataset_t *origin_head, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int64_t unused_refres_delta;

	ASSERT(clone->ds_reserved == 0);
	/*
	 * NOTE: On DEBUG kernels there could be a race between this and
	 * the check function if spa_asize_inflation is adjusted...
	 */
	ASSERT(origin_head->ds_quota == 0 ||
	    dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota +
	    DMU_MAX_ACCESS * spa_asize_inflation);
	ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);

	dsl_dir_cancel_waiters(origin_head->ds_dir);

	/*
	 * Swap per-dataset feature flags.
	 */
	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (!(spa_feature_table[f].fi_flags &
		    ZFEATURE_FLAG_PER_DATASET)) {
			ASSERT(!dsl_dataset_feature_is_active(clone, f));
			ASSERT(!dsl_dataset_feature_is_active(origin_head, f));
			continue;
		}

		boolean_t clone_inuse =
		    dsl_dataset_feature_is_active(clone, f);
		void *clone_feature = clone->ds_feature[f];
		boolean_t origin_head_inuse =
		    dsl_dataset_feature_is_active(origin_head, f);
		void *origin_head_feature = origin_head->ds_feature[f];

		if (clone_inuse)
			dsl_dataset_deactivate_feature_impl(clone, f, tx);
		if (origin_head_inuse)
			dsl_dataset_deactivate_feature_impl(origin_head, f, tx);

		if (clone_inuse) {
			dsl_dataset_activate_feature(origin_head->ds_object, f,
			    clone_feature, tx);
			origin_head->ds_feature[f] = clone_feature;
		}
		if (origin_head_inuse) {
			dsl_dataset_activate_feature(clone->ds_object, f,
			    origin_head_feature, tx);
			clone->ds_feature[f] = origin_head_feature;
		}
	}

	dmu_buf_will_dirty(clone->ds_dbuf, tx);
	dmu_buf_will_dirty(origin_head->ds_dbuf, tx);

	if (clone->ds_objset != NULL) {
		dmu_objset_evict(clone->ds_objset);
		clone->ds_objset = NULL;
	}

	if (origin_head->ds_objset != NULL) {
		dmu_objset_evict(origin_head->ds_objset);
		origin_head->ds_objset = NULL;
	}

	unused_refres_delta =
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(origin_head)->ds_unique_bytes) -
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	/*
	 * Reset origin's unique bytes.
	 */
	{
		dsl_dataset_t *origin = clone->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_deadlist_space_range(&clone->ds_deadlist,
		    dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
		    &dsl_dataset_phys(origin)->ds_unique_bytes, &comp,
		    &uncomp);
	}

	/* swap blkptrs */
	{
		rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
		rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);
		blkptr_t tmp;
		tmp = dsl_dataset_phys(origin_head)->ds_bp;
		dsl_dataset_phys(origin_head)->ds_bp =
		    dsl_dataset_phys(clone)->ds_bp;
		dsl_dataset_phys(clone)->ds_bp = tmp;
		rrw_exit(&origin_head->ds_bp_rwlock, FTAG);
		rrw_exit(&clone->ds_bp_rwlock, FTAG);
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(dsl_dir_phys(clone->ds_dir)->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&clone->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&origin_head->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
		    cdl_used -
		    (dsl_dataset_phys(origin_head)->ds_referenced_bytes +
		    odl_used);
		dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
		    cdl_comp -
		    (dsl_dataset_phys(origin_head)->ds_compressed_bytes +
		    odl_comp);
		duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
		    odl_uncomp);

		dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&clone->ds_deadlist,
		    origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&origin_head->ds_deadlist,
		    origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(origin_head->ds_dir,
		    cdl_used - odl_used, DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
	    dsl_dataset_phys(clone)->ds_referenced_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
	    dsl_dataset_phys(clone)->ds_compressed_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
	    dsl_dataset_phys(clone)->ds_uncompressed_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
	    unused_refres_delta, 0, 0, tx);

	/*
	 * Swap deadlists.
	 */
	dsl_deadlist_close(&clone->ds_deadlist);
	dsl_deadlist_close(&origin_head->ds_deadlist);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
	    dsl_dataset_phys(clone)->ds_deadlist_obj);
	VERIFY0(dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
	    dsl_dataset_phys(clone)->ds_deadlist_obj));
	VERIFY0(dsl_deadlist_open(&origin_head->ds_deadlist,
	    dp->dp_meta_objset,
	    dsl_dataset_phys(origin_head)->ds_deadlist_obj));
	dsl_dataset_swap_remap_deadlists(clone, origin_head, tx);

	/*
	 * If there is a bookmark at the origin, its "next dataset" is
	 * changing, so we need to reset its FBN.
	 */
	dsl_bookmark_next_changed(origin_head, origin_head->ds_prev, tx);

	dsl_scan_ds_clone_swapped(origin_head, clone, tx);

	/*
	 * Destroy any livelists associated with the clone or the origin,
	 * since after the swap the corresponding livelists are no longer
	 * valid.
	 */
	dsl_dir_remove_livelist(clone->ds_dir, tx, B_TRUE);
	dsl_dir_remove_livelist(origin_head->ds_dir, tx, B_TRUE);

	spa_history_log_internal_ds(clone, "clone swap", tx,
	    "parent=%s", origin_head->ds_dir->dd_myname);
}
/*
 * Given a pool name and a dataset object number in that pool,
 * return the name of that dataset.
 */
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(pname, FTAG, &dp);
	if (error != 0)
		return (error);

	error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
	if (error == 0) {
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_pool_rele(dp, FTAG);

	return (error);
}
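/*
 * Usage sketch (illustrative only; the logging call and variables are
 * assumptions, not taken from this file): a caller holding a dataset
 * object number, e.g. from an error report, could resolve it to a name
 * using a buffer of at least ZFS_MAX_DATASET_NAME_LEN bytes:
 *
 *	char name[ZFS_MAX_DATASET_NAME_LEN];
 *	if (dsl_dsobj_to_dsname(spa_name(spa), dsobj, name) == 0)
 *		zfs_dbgmsg("dsobj %llu is %s", (u_longlong_t)dsobj, name);
 */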
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
		*used -=
		    (ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
	    ds->ds_quota) {
		if (inflight > 0 ||
		    dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
			error = SET_ERROR(ERESTART);
		else
			error = SET_ERROR(EDQUOT);
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}
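/*
 * Caller sketch (an illustrative assumption, not a caller from this file):
 * write-path consumers typically treat ERESTART as "the estimate is over
 * quota but pending frees may help; retry in a later txg" and EDQUOT as a
 * hard failure:
 *
 *	error = dsl_dataset_check_quota(ds, B_TRUE, asize, inflight,
 *	    &used, &ref_rsrv);
 *	if (error == ERESTART)
 *		... wait for the next txg and retry ...
 */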
typedef struct dsl_dataset_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dataset_set_qr_arg_t;
static int
dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t newval;

	if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (newval == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
	    newval < ds->ds_reserved) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds = NULL;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	dsl_prop_set_sync_impl(ds,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
	    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
	    &ddsqra->ddsqra_value, tx);

	VERIFY0(dsl_prop_get_int_ds(ds,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));

	if (ds->ds_quota != newval) {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_quota = newval;
	}
	dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
    uint64_t refquota)
{
	dsl_dataset_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = dsname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = refquota;

	return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
	    dsl_dataset_set_refquota_sync, &ddsqra, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
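/*
 * Example (illustrative; the dataset name and value are made up): setting
 * a 10GB refquota runs the check/sync pair above as a single sync task:
 *
 *	error = dsl_dataset_set_refquota("tank/fs", ZPROP_SRC_LOCAL,
 *	    10ULL << 30);
 */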
static int
dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t newval, unique;

	if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = dsl_dataset_phys(ds)->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, newval) -
		    MAX(unique, ds->ds_reserved);

		if (delta >
		    dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
		    (ds->ds_quota > 0 && newval > ds->ds_quota)) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ENOSPC));
		}
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
void
dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
    zprop_source_t source, uint64_t value, dmu_tx_t *tx)
{
	uint64_t newval;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
	    source, sizeof (value), 1, &value, tx);

	VERIFY0(dsl_prop_get_int_ds(ds,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = dsl_dataset_phys(ds)->ds_unique_bytes;
	delta = MAX(0, (int64_t)(newval - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = newval;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
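/*
 * Worked example for the delta above (illustrative numbers): with
 * unique = 1G, an old refreservation of 2G, and a new value of 5G,
 * delta = MAX(0, 5G - 1G) - MAX(0, 2G - 1G) = 3G; the parent dsl_dir's
 * DD_USED_REFRSRV accounting grows by the change in the unconsumed
 * portion of the reservation.
 */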
static void
dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds = NULL;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
	dsl_dataset_set_refreservation_sync_impl(ds,
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
	dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
    uint64_t refreservation)
{
	dsl_dataset_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = dsname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = refreservation;

	return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
	    dsl_dataset_set_refreservation_sync, &ddsqra, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
typedef struct dsl_dataset_set_compression_arg {
	const char *ddsca_name;
	zprop_source_t ddsca_source;
	uint64_t ddsca_value;
} dsl_dataset_set_compression_arg_t;
static int
dsl_dataset_set_compression_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_compression_arg_t *ddsca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
	spa_feature_t f = zio_compress_to_feature(compval);

	if (f == SPA_FEATURE_NONE)
		return (SET_ERROR(EINVAL));

	if (!spa_feature_is_enabled(dp->dp_spa, f))
		return (SET_ERROR(ENOTSUP));

	return (0);
}
static void
dsl_dataset_set_compression_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_compression_arg_t *ddsca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds = NULL;

	uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
	spa_feature_t f = zio_compress_to_feature(compval);
	ASSERT3S(f, !=, SPA_FEATURE_NONE);
	ASSERT3S(spa_feature_table[f].fi_type, ==, ZFEATURE_TYPE_BOOLEAN);

	VERIFY0(dsl_dataset_hold(dp, ddsca->ddsca_name, FTAG, &ds));
	if (zfeature_active(f, ds->ds_feature[f]) != B_TRUE) {
		ds->ds_feature_activation[f] = (void *)B_TRUE;
		dsl_dataset_activate_feature(ds->ds_object, f,
		    ds->ds_feature_activation[f], tx);
		ds->ds_feature[f] = ds->ds_feature_activation[f];
	}
	dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_compression(const char *dsname, zprop_source_t source,
    uint64_t compression)
{
	dsl_dataset_set_compression_arg_t ddsca;

	/*
	 * The sync task is only required for zstd in order to activate
	 * the feature flag when the property is first set.
	 */
	if (ZIO_COMPRESS_ALGO(compression) != ZIO_COMPRESS_ZSTD)
		return (0);

	ddsca.ddsca_name = dsname;
	ddsca.ddsca_source = source;
	ddsca.ddsca_value = compression;

	return (dsl_sync_task(dsname, dsl_dataset_set_compression_check,
	    dsl_dataset_set_compression_sync, &ddsca, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
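/*
 * Illustrative example (the dataset name is made up): for algorithms
 * other than zstd this function returns 0 without running a sync task;
 * for zstd, the sync task activates SPA_FEATURE_ZSTD_COMPRESS on first
 * use:
 *
 *	error = dsl_dataset_set_compression("tank/fs", ZPROP_SRC_LOCAL,
 *	    ZIO_COMPRESS_ZSTD);
 */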
/*
 * Return (in *usedp) the amount of space referenced by "new" that was not
 * referenced at the time the bookmark corresponds to.  "New" may be a
 * snapshot or a head.  The bookmark must be before new, in
 * new's filesystem (or its origin) -- caller verifies this.
 *
 * The written space is calculated by considering two components: First, we
 * ignore any freed space, and calculate the written as new's used space
 * minus old's used space.  Next, we add in the amount of space that was freed
 * between the two time points, thus reducing new's used space relative to
 * old's.  Specifically, this is the space that was born before
 * zbm_creation_txg, and freed before new (ie. on new's deadlist or a
 * previous deadlist).
 *
 * space freed                         [---------------------]
 * snapshots                       ---O-------O--------O-------O------
 *                                         bookmark           new
 *
 * Note, the bookmark's zbm_*_bytes_refd must be valid, but if the HAS_FBN
 * flag is not set, we will calculate the freed_before_next based on the
 * next snapshot's deadlist, rather than using zbm_*_freed_before_next_snap.
 */
static int
dsl_dataset_space_written_impl(zfs_bookmark_phys_t *bmp,
    dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	dsl_pool_t *dp = new->ds_dir->dd_pool;

	ASSERT(dsl_pool_config_held(dp));
	if (dsl_dataset_is_snapshot(new)) {
		ASSERT3U(bmp->zbm_creation_txg, <,
		    dsl_dataset_phys(new)->ds_creation_txg);
	}

	*usedp = 0;
	*usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
	*usedp -= bmp->zbm_referenced_bytes_refd;

	*compp = 0;
	*compp += dsl_dataset_phys(new)->ds_compressed_bytes;
	*compp -= bmp->zbm_compressed_bytes_refd;

	*uncompp = 0;
	*uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
	*uncompp -= bmp->zbm_uncompressed_bytes_refd;

	dsl_dataset_t *snap = new;

	while (dsl_dataset_phys(snap)->ds_prev_snap_txg >
	    bmp->zbm_creation_txg) {
		uint64_t used, comp, uncomp;

		dsl_deadlist_space_range(&snap->ds_deadlist,
		    0, bmp->zbm_creation_txg,
		    &used, &comp, &uncomp);
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		uint64_t snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
		if (snap != new)
			dsl_dataset_rele(snap, FTAG);
		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
		if (err != 0)
			break;
	}

	/*
	 * We might not have the FBN if we are calculating written from
	 * a snapshot (because we didn't know the correct "next" snapshot
	 * until now).
	 */
	if (bmp->zbm_flags & ZBM_FLAG_HAS_FBN) {
		*usedp += bmp->zbm_referenced_freed_before_next_snap;
		*compp += bmp->zbm_compressed_freed_before_next_snap;
		*uncompp += bmp->zbm_uncompressed_freed_before_next_snap;
	} else {
		ASSERT3U(dsl_dataset_phys(snap)->ds_prev_snap_txg, ==,
		    bmp->zbm_creation_txg);
		uint64_t used, comp, uncomp;
		dsl_deadlist_space(&snap->ds_deadlist, &used, &comp, &uncomp);
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;
	}
	if (snap != new)
		dsl_dataset_rele(snap, FTAG);
	return (err);
}
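/*
 * Worked example (illustrative numbers): if new references 10G, the
 * bookmark recorded 8G referenced, and 1G born before the bookmark's
 * creation txg was later freed onto the deadlists walked above, then
 * written = 10G - 8G + 1G = 3G.
 */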
/*
 * Return (in *usedp) the amount of space written in new that was not
 * present at the time the bookmark corresponds to.  New may be a
 * snapshot or the head.  Old must be a bookmark before new, in
 * new's filesystem (or its origin) -- caller verifies this.
 */
int
dsl_dataset_space_written_bookmark(zfs_bookmark_phys_t *bmp,
    dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	if (!(bmp->zbm_flags & ZBM_FLAG_HAS_FBN))
		return (SET_ERROR(ENOTSUP));
	return (dsl_dataset_space_written_impl(bmp, new,
	    usedp, compp, uncompp));
}
/*
 * Return (in *usedp) the amount of space written in new that is not
 * present in oldsnap.  New may be a snapshot or the head.  Old must be
 * a snapshot before new, in new's filesystem (or its origin).  If not then
 * fail and return EINVAL.
 */
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	if (!dsl_dataset_is_before(new, oldsnap, 0))
		return (SET_ERROR(EINVAL));

	zfs_bookmark_phys_t zbm = { 0 };
	dsl_dataset_phys_t *dsp = dsl_dataset_phys(oldsnap);
	zbm.zbm_guid = dsp->ds_guid;
	zbm.zbm_creation_txg = dsp->ds_creation_txg;
	zbm.zbm_creation_time = dsp->ds_creation_time;
	zbm.zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
	zbm.zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
	zbm.zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;

	/*
	 * If oldsnap is the origin (or origin's origin, ...) of new,
	 * we can't easily calculate the effective FBN.  Therefore,
	 * we do not set ZBM_FLAG_HAS_FBN, so that the _impl will calculate
	 * it relative to the correct "next": the next snapshot towards "new",
	 * rather than the next snapshot in oldsnap's dsl_dir.
	 */
	return (dsl_dataset_space_written_impl(&zbm, new,
	    usedp, compp, uncompp));
}
/*
 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
 * lastsnap, and all snapshots in between are deleted.
 *
 * blocks that would be freed            [---------------------------]
 * snapshots                       ---O-------O--------O-------O--------O
 *                                        firstsnap        lastsnap
 *
 * This is the set of blocks that were born after the snap before firstsnap,
 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
 * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
 * We calculate this by iterating over the relevant deadlists (from the snap
 * after lastsnap, backward to the snap after firstsnap), summing up the
 * space on the deadlist that was born after the snap before firstsnap.
 */
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
    dsl_dataset_t *lastsnap,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;

	ASSERT(firstsnap->ds_is_snapshot);
	ASSERT(lastsnap->ds_is_snapshot);

	/*
	 * Check that the snapshots are in the same dsl_dir, and firstsnap
	 * is before lastsnap.
	 */
	if (firstsnap->ds_dir != lastsnap->ds_dir ||
	    dsl_dataset_phys(firstsnap)->ds_creation_txg >
	    dsl_dataset_phys(lastsnap)->ds_creation_txg)
		return (SET_ERROR(EINVAL));

	*usedp = *compp = *uncompp = 0;

	snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
	while (snapobj != firstsnap->ds_object) {
		dsl_dataset_t *ds;
		uint64_t used, comp, uncomp;

		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
		if (err != 0)
			break;

		dsl_deadlist_space_range(&ds->ds_deadlist,
		    dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		ASSERT3U(snapobj, !=, 0);
		dsl_dataset_rele(ds, FTAG);
	}
	return (err);
}
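/*
 * Worked example (illustrative): for snapshots A-B-C-D of one filesystem,
 * dsl_dataset_space_wouldfree(B, C, ...) walks D's and C's deadlists
 * (from the snap after C back to the snap after B), summing only space
 * born after A (B's previous snapshot); blocks still referenced by A or
 * D are excluded from the total.
 */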
/*
 * Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
 * For example, they could both be snapshots of the same filesystem, and
 * 'earlier' is before 'later'.  Or 'earlier' could be the origin of
 * 'later's filesystem.  Or 'earlier' could be an older snapshot in the origin's
 * filesystem.  Or 'earlier' could be the origin's origin.
 *
 * If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
 */
boolean_t
dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
    uint64_t earlier_txg)
{
	dsl_pool_t *dp = later->ds_dir->dd_pool;
	int error;
	boolean_t ret;

	ASSERT(dsl_pool_config_held(dp));
	ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);

	if (earlier_txg == 0)
		earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;

	if (later->ds_is_snapshot &&
	    earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
		return (B_FALSE);

	if (later->ds_dir == earlier->ds_dir)
		return (B_TRUE);

	/*
	 * We check dd_origin_obj explicitly here rather than using
	 * dsl_dir_is_clone() so that we will return TRUE if "earlier"
	 * is $ORIGIN@$ORIGIN.  dsl_dataset_space_written() depends on
	 * this behavior.
	 */
	if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == 0)
		return (B_FALSE);

	dsl_dataset_t *origin;
	error = dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
	if (error != 0)
		return (B_FALSE);
	if (dsl_dataset_phys(origin)->ds_creation_txg == earlier_txg &&
	    origin->ds_dir == earlier->ds_dir) {
		dsl_dataset_rele(origin, FTAG);
		return (B_TRUE);
	}
	ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
	dsl_dataset_rele(origin, FTAG);
	return (ret);
}
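/*
 * Example (illustrative names): if "tank/clone" was cloned from
 * "tank/fs@snap", then dsl_dataset_is_before(clone_head, snap, 0) returns
 * TRUE via the origin recursion above, even though the two datasets live
 * in different dsl_dirs.
 */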
void
dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;

	dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
}
boolean_t
dsl_dataset_is_zapified(dsl_dataset_t *ds)
{
	dmu_object_info_t doi;

	dmu_object_info_from_db(ds->ds_dbuf, &doi);
	return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
boolean_t
dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds)
{
	return (dsl_dataset_is_zapified(ds) &&
	    zap_contains(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_object, DS_FIELD_RESUME_TOGUID) == 0);
}
uint64_t
dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds)
{
	uint64_t remap_deadlist_obj;
	int err;

	if (!dsl_dataset_is_zapified(ds))
		return (0);

	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
	    DS_FIELD_REMAP_DEADLIST, sizeof (remap_deadlist_obj), 1,
	    &remap_deadlist_obj);

	if (err != 0) {
		VERIFY3S(err, ==, ENOENT);
		return (0);
	}

	ASSERT(remap_deadlist_obj != 0);
	return (remap_deadlist_obj);
}
boolean_t
dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds)
{
	EQUIV(dsl_deadlist_is_open(&ds->ds_remap_deadlist),
	    dsl_dataset_get_remap_deadlist_object(ds) != 0);
	return (dsl_deadlist_is_open(&ds->ds_remap_deadlist));
}
static void
dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds, uint64_t obj,
    dmu_tx_t *tx)
{
	ASSERT(obj != 0);
	dsl_dataset_zapify(ds, tx);
	VERIFY0(zap_add(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
	    DS_FIELD_REMAP_DEADLIST, sizeof (obj), 1, &obj, tx));
}
static void
dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	VERIFY0(zap_remove(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_object, DS_FIELD_REMAP_DEADLIST, tx));
}
void
dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t remap_deadlist_object;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dsl_dataset_remap_deadlist_exists(ds));

	remap_deadlist_object = ds->ds_remap_deadlist.dl_object;
	dsl_deadlist_close(&ds->ds_remap_deadlist);
	dsl_deadlist_free(spa_meta_objset(spa), remap_deadlist_object, tx);
	dsl_dataset_unset_remap_deadlist_object(ds, tx);
	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t remap_deadlist_obj;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(MUTEX_HELD(&ds->ds_remap_deadlist_lock));
	/*
	 * Currently we only create remap deadlists when there are indirect
	 * vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	remap_deadlist_obj = dsl_deadlist_clone(
	    &ds->ds_deadlist, UINT64_MAX,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
	dsl_dataset_set_remap_deadlist_object(ds,
	    remap_deadlist_obj, tx);
	VERIFY0(dsl_deadlist_open(&ds->ds_remap_deadlist, spa_meta_objset(spa),
	    remap_deadlist_obj));
	spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_dataset_activate_redaction(dsl_dataset_t *ds, uint64_t *redact_snaps,
    uint64_t num_redact_snaps, dmu_tx_t *tx)
{
	uint64_t dsobj = ds->ds_object;
	struct feature_type_uint64_array_arg *ftuaa =
	    kmem_zalloc(sizeof (*ftuaa), KM_SLEEP);
	ftuaa->length = (int64_t)num_redact_snaps;
	if (num_redact_snaps > 0) {
		ftuaa->array = kmem_alloc(num_redact_snaps * sizeof (uint64_t),
		    KM_SLEEP);
		memcpy(ftuaa->array, redact_snaps, num_redact_snaps *
		    sizeof (uint64_t));
	}
	dsl_dataset_activate_feature(dsobj, SPA_FEATURE_REDACTED_DATASETS,
	    ftuaa, tx);
	ds->ds_feature[SPA_FEATURE_REDACTED_DATASETS] = ftuaa;
}
/*
 * Find and return (in *oldest_dsobj) the oldest snapshot of the dsobj
 * dataset whose birth time is >= min_txg.
 */
int
dsl_dataset_oldest_snapshot(spa_t *spa, uint64_t head_ds, uint64_t min_txg,
    uint64_t *oldest_dsobj)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj(dp, head_ds, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	while (prev_obj != 0 && min_txg < prev_obj_txg) {
		dsl_dataset_rele(ds, FTAG);
		if ((error = dsl_dataset_hold_obj(dp, prev_obj,
		    FTAG, &ds)) != 0)
			return (error);
		prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	*oldest_dsobj = ds->ds_object;
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
ZFS_MODULE_PARAM(zfs, zfs_, max_recordsize, UINT, ZMOD_RW,
	"Max allowed record size");

ZFS_MODULE_PARAM(zfs, zfs_, allow_redacted_dataset_mount, INT, ZMOD_RW,
	"Allow mounting of redacted datasets");

ZFS_MODULE_PARAM(zfs, zfs_, snapshot_history_enabled, INT, ZMOD_RW,
	"Include snapshot events in pool history/events");
EXPORT_SYMBOL(dsl_dataset_hold);
EXPORT_SYMBOL(dsl_dataset_hold_flags);
EXPORT_SYMBOL(dsl_dataset_hold_obj);
EXPORT_SYMBOL(dsl_dataset_hold_obj_flags);
EXPORT_SYMBOL(dsl_dataset_own);
EXPORT_SYMBOL(dsl_dataset_own_obj);
EXPORT_SYMBOL(dsl_dataset_name);
EXPORT_SYMBOL(dsl_dataset_rele);
EXPORT_SYMBOL(dsl_dataset_rele_flags);
EXPORT_SYMBOL(dsl_dataset_disown);
EXPORT_SYMBOL(dsl_dataset_tryown);
EXPORT_SYMBOL(dsl_dataset_create_sync);
EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
EXPORT_SYMBOL(dsl_dataset_snapshot_check);
EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
EXPORT_SYMBOL(dsl_dataset_promote);
EXPORT_SYMBOL(dsl_dataset_user_hold);
EXPORT_SYMBOL(dsl_dataset_user_release);
EXPORT_SYMBOL(dsl_dataset_get_holds);
EXPORT_SYMBOL(dsl_dataset_get_blkptr);
EXPORT_SYMBOL(dsl_dataset_get_spa);
EXPORT_SYMBOL(dsl_dataset_modified_since_snap);
EXPORT_SYMBOL(dsl_dataset_space_written);
EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
EXPORT_SYMBOL(dsl_dataset_sync);
EXPORT_SYMBOL(dsl_dataset_block_born);
EXPORT_SYMBOL(dsl_dataset_block_kill);
EXPORT_SYMBOL(dsl_dataset_dirty);
EXPORT_SYMBOL(dsl_dataset_stats);
EXPORT_SYMBOL(dsl_dataset_fast_stat);
EXPORT_SYMBOL(dsl_dataset_space);
EXPORT_SYMBOL(dsl_dataset_fsid_guid);
EXPORT_SYMBOL(dsl_dsobj_to_dsname);
EXPORT_SYMBOL(dsl_dataset_check_quota);
EXPORT_SYMBOL(dsl_dataset_clone_swap_check_impl);
EXPORT_SYMBOL(dsl_dataset_clone_swap_sync_impl);