/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2013, 2018 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright 2019, 2020 by Christian Schwarz. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_destroy.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/spa.h>
#include <sys/dsl_bookmark.h>
#include <zfs_namecheck.h>
#include <sys/dmu_send.h>
40 dsl_bookmark_hold_ds(dsl_pool_t
*dp
, const char *fullname
,
41 dsl_dataset_t
**dsp
, const void *tag
, char **shortnamep
)
43 char buf
[ZFS_MAX_DATASET_NAME_LEN
];
46 if (strlen(fullname
) >= ZFS_MAX_DATASET_NAME_LEN
)
47 return (SET_ERROR(ENAMETOOLONG
));
48 hashp
= strchr(fullname
, '#');
50 return (SET_ERROR(EINVAL
));
52 *shortnamep
= hashp
+ 1;
53 if (zfs_component_namecheck(*shortnamep
, NULL
, NULL
))
54 return (SET_ERROR(EINVAL
));
55 (void) strlcpy(buf
, fullname
, hashp
- fullname
+ 1);
56 return (dsl_dataset_hold(dp
, buf
, tag
, dsp
));
60 * When reading BOOKMARK_V1 bookmarks, the BOOKMARK_V2 fields are guaranteed
63 * Returns ESRCH if bookmark is not found.
64 * Note, we need to use the ZAP rather than the AVL to look up bookmarks
65 * by name, because only the ZAP honors the casesensitivity setting.
68 dsl_bookmark_lookup_impl(dsl_dataset_t
*ds
, const char *shortname
,
69 zfs_bookmark_phys_t
*bmark_phys
)
71 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
72 uint64_t bmark_zapobj
= ds
->ds_bookmarks_obj
;
76 if (bmark_zapobj
== 0)
77 return (SET_ERROR(ESRCH
));
79 if (dsl_dataset_phys(ds
)->ds_flags
& DS_FLAG_CI_DATASET
)
83 * Zero out the bookmark in case the one stored on disk
84 * is in an older, shorter format.
86 memset(bmark_phys
, 0, sizeof (*bmark_phys
));
88 err
= zap_lookup_norm(mos
, bmark_zapobj
, shortname
, sizeof (uint64_t),
89 sizeof (*bmark_phys
) / sizeof (uint64_t), bmark_phys
, mt
, NULL
, 0,
92 return (err
== ENOENT
? SET_ERROR(ESRCH
) : err
);
96 * If later_ds is non-NULL, this will return EXDEV if the specified bookmark
97 * does not represents an earlier point in later_ds's timeline. However,
98 * bmp will still be filled in if we return EXDEV.
100 * Returns ENOENT if the dataset containing the bookmark does not exist.
101 * Returns ESRCH if the dataset exists but the bookmark was not found in it.
104 dsl_bookmark_lookup(dsl_pool_t
*dp
, const char *fullname
,
105 dsl_dataset_t
*later_ds
, zfs_bookmark_phys_t
*bmp
)
111 error
= dsl_bookmark_hold_ds(dp
, fullname
, &ds
, FTAG
, &shortname
);
115 error
= dsl_bookmark_lookup_impl(ds
, shortname
, bmp
);
116 if (error
== 0 && later_ds
!= NULL
) {
117 if (!dsl_dataset_is_before(later_ds
, ds
, bmp
->zbm_creation_txg
))
118 error
= SET_ERROR(EXDEV
);
120 dsl_dataset_rele(ds
, FTAG
);
126 * - bmark is a full dataset path of a bookmark (bookmark_namecheck)
127 * - source is a full path of a snapshot or bookmark
128 * ({bookmark,snapshot}_namecheck)
130 * Returns 0 if valid, -1 otherwise.
133 dsl_bookmark_create_nvl_validate_pair(const char *bmark
, const char *source
)
135 if (bookmark_namecheck(bmark
, NULL
, NULL
) != 0)
138 int is_bmark
, is_snap
;
139 is_bmark
= bookmark_namecheck(source
, NULL
, NULL
) == 0;
140 is_snap
= snapshot_namecheck(source
, NULL
, NULL
) == 0;
141 if (!is_bmark
&& !is_snap
)
148 * Check that the given nvlist corresponds to the following schema:
149 * { newbookmark -> source, ... }
151 * - each pair passes dsl_bookmark_create_nvl_validate_pair
152 * - all newbookmarks are in the same pool
153 * - all newbookmarks have unique names
155 * Note that this function is only validates above schema. Callers must ensure
156 * that the bookmarks can be created, e.g. that sources exist.
158 * Returns 0 if the nvlist adheres to above schema.
159 * Returns -1 if it doesn't.
162 dsl_bookmark_create_nvl_validate(nvlist_t
*bmarks
)
164 const char *first
= NULL
;
165 size_t first_len
= 0;
167 for (nvpair_t
*pair
= nvlist_next_nvpair(bmarks
, NULL
);
168 pair
!= NULL
; pair
= nvlist_next_nvpair(bmarks
, pair
)) {
170 const char *bmark
= nvpair_name(pair
);
173 /* list structure: values must be snapshots XOR bookmarks */
174 if (nvpair_value_string(pair
, &source
) != 0)
176 if (dsl_bookmark_create_nvl_validate_pair(bmark
, source
) != 0)
179 /* same pool check */
181 const char *cp
= strpbrk(bmark
, "/#");
185 first_len
= cp
- bmark
;
187 if (strncmp(first
, bmark
, first_len
) != 0)
189 switch (*(bmark
+ first_len
)) {
190 case '/': /* fallthrough */
197 /* unique newbookmark names; todo: O(n^2) */
198 for (nvpair_t
*pair2
= nvlist_next_nvpair(bmarks
, pair
);
199 pair2
!= NULL
; pair2
= nvlist_next_nvpair(bmarks
, pair2
)) {
200 if (strcmp(nvpair_name(pair
), nvpair_name(pair2
)) == 0)
209 * expects that newbm and source have been validated using
210 * dsl_bookmark_create_nvl_validate_pair
213 dsl_bookmark_create_check_impl(dsl_pool_t
*dp
,
214 const char *newbm
, const char *source
)
216 ASSERT0(dsl_bookmark_create_nvl_validate_pair(newbm
, source
));
217 /* defer source namecheck until we know it's a snapshot or bookmark */
220 dsl_dataset_t
*newbm_ds
;
222 zfs_bookmark_phys_t bmark_phys
;
224 error
= dsl_bookmark_hold_ds(dp
, newbm
, &newbm_ds
, FTAG
, &newbm_short
);
228 /* Verify that the new bookmark does not already exist */
229 error
= dsl_bookmark_lookup_impl(newbm_ds
, newbm_short
, &bmark_phys
);
232 /* happy path: new bmark doesn't exist, proceed after switch */
235 error
= SET_ERROR(EEXIST
);
238 /* dsl_bookmark_lookup_impl already did SET_ERROR */
242 /* error is retval of the following if-cascade */
243 if (strchr(source
, '@') != NULL
) {
244 dsl_dataset_t
*source_snap_ds
;
245 ASSERT3S(snapshot_namecheck(source
, NULL
, NULL
), ==, 0);
246 error
= dsl_dataset_hold(dp
, source
, FTAG
, &source_snap_ds
);
248 VERIFY(source_snap_ds
->ds_is_snapshot
);
250 * Verify that source snapshot is an earlier point in
251 * newbm_ds's timeline (source may be newbm_ds's origin)
253 if (!dsl_dataset_is_before(newbm_ds
, source_snap_ds
, 0))
255 ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR
);
256 dsl_dataset_rele(source_snap_ds
, FTAG
);
258 } else if (strchr(source
, '#') != NULL
) {
259 zfs_bookmark_phys_t source_phys
;
260 ASSERT3S(bookmark_namecheck(source
, NULL
, NULL
), ==, 0);
262 * Source must exists and be an earlier point in newbm_ds's
263 * timeline (newbm_ds's origin may be a snap of source's ds)
265 error
= dsl_bookmark_lookup(dp
, source
, newbm_ds
, &source_phys
);
268 break; /* happy path */
270 error
= SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR
);
273 /* dsl_bookmark_lookup already did SET_ERROR */
278 * dsl_bookmark_create_nvl_validate validates that source is
279 * either snapshot or bookmark
281 panic("unreachable code: %s", source
);
285 dsl_dataset_rele(newbm_ds
, FTAG
);
290 dsl_bookmark_create_check(void *arg
, dmu_tx_t
*tx
)
292 dsl_bookmark_create_arg_t
*dbca
= arg
;
295 ASSERT3P(dbca
, !=, NULL
);
296 ASSERT3P(dbca
->dbca_bmarks
, !=, NULL
);
297 /* dbca->dbca_errors is allowed to be NULL */
299 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
301 if (!spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_BOOKMARKS
))
302 return (SET_ERROR(ENOTSUP
));
304 if (dsl_bookmark_create_nvl_validate(dbca
->dbca_bmarks
) != 0)
305 rv
= schema_err
= SET_ERROR(EINVAL
);
307 for (nvpair_t
*pair
= nvlist_next_nvpair(dbca
->dbca_bmarks
, NULL
);
308 pair
!= NULL
; pair
= nvlist_next_nvpair(dbca
->dbca_bmarks
, pair
)) {
309 const char *new = nvpair_name(pair
);
311 int error
= schema_err
;
313 const char *source
= fnvpair_value_string(pair
);
314 error
= dsl_bookmark_create_check_impl(dp
, new, source
);
316 error
= SET_ERROR(error
);
321 if (dbca
->dbca_errors
!= NULL
)
322 fnvlist_add_int32(dbca
->dbca_errors
,
330 static dsl_bookmark_node_t
*
331 dsl_bookmark_node_alloc(char *shortname
)
333 dsl_bookmark_node_t
*dbn
= kmem_alloc(sizeof (*dbn
), KM_SLEEP
);
334 dbn
->dbn_name
= spa_strdup(shortname
);
335 dbn
->dbn_dirty
= B_FALSE
;
336 mutex_init(&dbn
->dbn_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
341 * Set the fields in the zfs_bookmark_phys_t based on the specified snapshot.
344 dsl_bookmark_set_phys(zfs_bookmark_phys_t
*zbm
, dsl_dataset_t
*snap
)
346 spa_t
*spa
= dsl_dataset_get_spa(snap
);
347 objset_t
*mos
= spa_get_dsl(spa
)->dp_meta_objset
;
348 dsl_dataset_phys_t
*dsp
= dsl_dataset_phys(snap
);
350 memset(zbm
, 0, sizeof (zfs_bookmark_phys_t
));
351 zbm
->zbm_guid
= dsp
->ds_guid
;
352 zbm
->zbm_creation_txg
= dsp
->ds_creation_txg
;
353 zbm
->zbm_creation_time
= dsp
->ds_creation_time
;
354 zbm
->zbm_redaction_obj
= 0;
357 * If the dataset is encrypted create a larger bookmark to
358 * accommodate the IVset guid. The IVset guid was added
359 * after the encryption feature to prevent a problem with
360 * raw sends. If we encounter an encrypted dataset without
361 * an IVset guid we fall back to a normal bookmark.
363 if (snap
->ds_dir
->dd_crypto_obj
!= 0 &&
364 spa_feature_is_enabled(spa
, SPA_FEATURE_BOOKMARK_V2
)) {
365 (void) zap_lookup(mos
, snap
->ds_object
,
366 DS_FIELD_IVSET_GUID
, sizeof (uint64_t), 1,
367 &zbm
->zbm_ivset_guid
);
370 if (spa_feature_is_enabled(spa
, SPA_FEATURE_BOOKMARK_WRITTEN
)) {
371 zbm
->zbm_flags
= ZBM_FLAG_SNAPSHOT_EXISTS
| ZBM_FLAG_HAS_FBN
;
372 zbm
->zbm_referenced_bytes_refd
= dsp
->ds_referenced_bytes
;
373 zbm
->zbm_compressed_bytes_refd
= dsp
->ds_compressed_bytes
;
374 zbm
->zbm_uncompressed_bytes_refd
= dsp
->ds_uncompressed_bytes
;
376 dsl_dataset_t
*nextds
;
377 VERIFY0(dsl_dataset_hold_obj(snap
->ds_dir
->dd_pool
,
378 dsp
->ds_next_snap_obj
, FTAG
, &nextds
));
379 dsl_deadlist_space(&nextds
->ds_deadlist
,
380 &zbm
->zbm_referenced_freed_before_next_snap
,
381 &zbm
->zbm_compressed_freed_before_next_snap
,
382 &zbm
->zbm_uncompressed_freed_before_next_snap
);
383 dsl_dataset_rele(nextds
, FTAG
);
388 * Add dsl_bookmark_node_t `dbn` to the given dataset and increment appropriate
389 * SPA feature counters.
392 dsl_bookmark_node_add(dsl_dataset_t
*hds
, dsl_bookmark_node_t
*dbn
,
395 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
396 objset_t
*mos
= dp
->dp_meta_objset
;
398 if (hds
->ds_bookmarks_obj
== 0) {
399 hds
->ds_bookmarks_obj
= zap_create_norm(mos
,
400 U8_TEXTPREP_TOUPPER
, DMU_OTN_ZAP_METADATA
, DMU_OT_NONE
, 0,
402 spa_feature_incr(dp
->dp_spa
, SPA_FEATURE_BOOKMARKS
, tx
);
404 dsl_dataset_zapify(hds
, tx
);
405 VERIFY0(zap_add(mos
, hds
->ds_object
,
406 DS_FIELD_BOOKMARK_NAMES
,
407 sizeof (hds
->ds_bookmarks_obj
), 1,
408 &hds
->ds_bookmarks_obj
, tx
));
411 avl_add(&hds
->ds_bookmarks
, dbn
);
414 * To maintain backwards compatibility with software that doesn't
415 * understand SPA_FEATURE_BOOKMARK_V2, we need to use the smallest
416 * possible bookmark size.
418 uint64_t bookmark_phys_size
= BOOKMARK_PHYS_SIZE_V1
;
419 if (spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_BOOKMARK_V2
) &&
420 (dbn
->dbn_phys
.zbm_ivset_guid
!= 0 || dbn
->dbn_phys
.zbm_flags
&
421 ZBM_FLAG_HAS_FBN
|| dbn
->dbn_phys
.zbm_redaction_obj
!= 0)) {
422 bookmark_phys_size
= BOOKMARK_PHYS_SIZE_V2
;
423 spa_feature_incr(dp
->dp_spa
, SPA_FEATURE_BOOKMARK_V2
, tx
);
426 zfs_bookmark_phys_t zero_phys
= { 0 };
427 ASSERT0(memcmp(((char *)&dbn
->dbn_phys
) + bookmark_phys_size
,
428 &zero_phys
, sizeof (zfs_bookmark_phys_t
) - bookmark_phys_size
));
430 VERIFY0(zap_add(mos
, hds
->ds_bookmarks_obj
, dbn
->dbn_name
,
431 sizeof (uint64_t), bookmark_phys_size
/ sizeof (uint64_t),
432 &dbn
->dbn_phys
, tx
));
436 * If redaction_list is non-null, we create a redacted bookmark and redaction
437 * list, and store the object number of the redaction list in redact_obj.
440 dsl_bookmark_create_sync_impl_snap(const char *bookmark
, const char *snapshot
,
441 dmu_tx_t
*tx
, uint64_t num_redact_snaps
, uint64_t *redact_snaps
,
442 const void *tag
, redaction_list_t
**redaction_list
)
444 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
445 objset_t
*mos
= dp
->dp_meta_objset
;
446 dsl_dataset_t
*snapds
, *bmark_fs
;
448 boolean_t bookmark_redacted
;
449 uint64_t *dsredactsnaps
;
452 VERIFY0(dsl_dataset_hold(dp
, snapshot
, FTAG
, &snapds
));
453 VERIFY0(dsl_bookmark_hold_ds(dp
, bookmark
, &bmark_fs
, FTAG
,
456 dsl_bookmark_node_t
*dbn
= dsl_bookmark_node_alloc(shortname
);
457 dsl_bookmark_set_phys(&dbn
->dbn_phys
, snapds
);
459 bookmark_redacted
= dsl_dataset_get_uint64_array_feature(snapds
,
460 SPA_FEATURE_REDACTED_DATASETS
, &dsnumsnaps
, &dsredactsnaps
);
461 if (redaction_list
!= NULL
|| bookmark_redacted
) {
462 redaction_list_t
*local_rl
;
463 boolean_t spill
= B_FALSE
;
464 if (bookmark_redacted
) {
465 redact_snaps
= dsredactsnaps
;
466 num_redact_snaps
= dsnumsnaps
;
468 int bonuslen
= sizeof (redaction_list_phys_t
) +
469 num_redact_snaps
* sizeof (uint64_t);
470 if (bonuslen
> dmu_bonus_max())
472 dbn
->dbn_phys
.zbm_redaction_obj
= dmu_object_alloc(mos
,
473 DMU_OTN_UINT64_METADATA
, SPA_OLD_MAXBLOCKSIZE
,
474 DMU_OTN_UINT64_METADATA
, spill
? 0 : bonuslen
, tx
);
475 spa_feature_incr(dp
->dp_spa
,
476 SPA_FEATURE_REDACTION_BOOKMARKS
, tx
);
478 spa_feature_incr(dp
->dp_spa
,
479 SPA_FEATURE_REDACTION_LIST_SPILL
, tx
);
482 VERIFY0(dsl_redaction_list_hold_obj(dp
,
483 dbn
->dbn_phys
.zbm_redaction_obj
, tag
, &local_rl
));
484 dsl_redaction_list_long_hold(dp
, local_rl
, tag
);
487 ASSERT3U(local_rl
->rl_bonus
->db_size
, >=, bonuslen
);
488 dmu_buf_will_dirty(local_rl
->rl_bonus
, tx
);
491 VERIFY0(dmu_spill_hold_by_bonus(local_rl
->rl_bonus
,
492 DB_RF_MUST_SUCCEED
, FTAG
, &db
));
493 dmu_buf_will_fill(db
, tx
, B_FALSE
);
494 VERIFY0(dbuf_spill_set_blksz(db
, P2ROUNDUP(bonuslen
,
495 SPA_MINBLOCKSIZE
), tx
));
496 local_rl
->rl_phys
= db
->db_data
;
497 local_rl
->rl_dbuf
= db
;
499 memcpy(local_rl
->rl_phys
->rlp_snaps
, redact_snaps
,
500 sizeof (uint64_t) * num_redact_snaps
);
501 local_rl
->rl_phys
->rlp_num_snaps
= num_redact_snaps
;
502 if (bookmark_redacted
) {
503 ASSERT3P(redaction_list
, ==, NULL
);
504 local_rl
->rl_phys
->rlp_last_blkid
= UINT64_MAX
;
505 local_rl
->rl_phys
->rlp_last_object
= UINT64_MAX
;
506 dsl_redaction_list_long_rele(local_rl
, tag
);
507 dsl_redaction_list_rele(local_rl
, tag
);
509 *redaction_list
= local_rl
;
513 if (dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
) {
514 spa_feature_incr(dp
->dp_spa
,
515 SPA_FEATURE_BOOKMARK_WRITTEN
, tx
);
518 dsl_bookmark_node_add(bmark_fs
, dbn
, tx
);
520 spa_history_log_internal_ds(bmark_fs
, "bookmark", tx
,
521 "name=%s creation_txg=%llu target_snap=%llu redact_obj=%llu",
522 shortname
, (longlong_t
)dbn
->dbn_phys
.zbm_creation_txg
,
523 (longlong_t
)snapds
->ds_object
,
524 (longlong_t
)dbn
->dbn_phys
.zbm_redaction_obj
);
526 dsl_dataset_rele(bmark_fs
, FTAG
);
527 dsl_dataset_rele(snapds
, FTAG
);
532 dsl_bookmark_create_sync_impl_book(
533 const char *new_name
, const char *source_name
, dmu_tx_t
*tx
)
535 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
536 dsl_dataset_t
*bmark_fs_source
, *bmark_fs_new
;
537 char *source_shortname
, *new_shortname
;
538 zfs_bookmark_phys_t source_phys
;
540 VERIFY0(dsl_bookmark_hold_ds(dp
, source_name
, &bmark_fs_source
, FTAG
,
542 VERIFY0(dsl_bookmark_hold_ds(dp
, new_name
, &bmark_fs_new
, FTAG
,
546 * create a copy of the source bookmark by copying most of its members
548 * Caveat: bookmarking a redaction bookmark yields a normal bookmark
549 * -----------------------------------------------------------------
551 * - The zbm_redaction_obj would be referred to by both source and new
552 * bookmark, but would be destroyed once either source or new is
553 * destroyed, resulting in use-after-free of the referred object.
554 * - User expectation when issuing the `zfs bookmark` command is that
555 * a normal bookmark of the source is created
557 * Design Alternatives For Full Redaction Bookmark Copying:
558 * - reference-count the redaction object => would require on-disk
559 * format change for existing redaction objects
560 * - Copy the redaction object => cannot be done in syncing context
561 * because the redaction object might be too large
564 VERIFY0(dsl_bookmark_lookup_impl(bmark_fs_source
, source_shortname
,
566 dsl_bookmark_node_t
*new_dbn
= dsl_bookmark_node_alloc(new_shortname
);
568 memcpy(&new_dbn
->dbn_phys
, &source_phys
, sizeof (source_phys
));
569 new_dbn
->dbn_phys
.zbm_redaction_obj
= 0;
571 /* update feature counters */
572 if (new_dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
) {
573 spa_feature_incr(dp
->dp_spa
,
574 SPA_FEATURE_BOOKMARK_WRITTEN
, tx
);
576 /* no need for redaction bookmark counter; nulled zbm_redaction_obj */
577 /* dsl_bookmark_node_add bumps bookmarks and v2-bookmarks counter */
582 * Note that dsl_bookmark_lookup_impl guarantees that, if source is a
583 * v1 bookmark, the v2-only fields are zeroed.
584 * And dsl_bookmark_node_add writes back a v1-sized bookmark if
585 * v2 bookmarks are disabled and/or v2-only fields are zeroed.
586 * => bookmark copying works on pre-bookmark-v2 pools
588 dsl_bookmark_node_add(bmark_fs_new
, new_dbn
, tx
);
590 spa_history_log_internal_ds(bmark_fs_source
, "bookmark", tx
,
591 "name=%s creation_txg=%llu source_guid=%llu",
592 new_shortname
, (longlong_t
)new_dbn
->dbn_phys
.zbm_creation_txg
,
593 (longlong_t
)source_phys
.zbm_guid
);
595 dsl_dataset_rele(bmark_fs_source
, FTAG
);
596 dsl_dataset_rele(bmark_fs_new
, FTAG
);
600 dsl_bookmark_create_sync(void *arg
, dmu_tx_t
*tx
)
602 dsl_bookmark_create_arg_t
*dbca
= arg
;
604 ASSERT(spa_feature_is_enabled(dmu_tx_pool(tx
)->dp_spa
,
605 SPA_FEATURE_BOOKMARKS
));
607 for (nvpair_t
*pair
= nvlist_next_nvpair(dbca
->dbca_bmarks
, NULL
);
608 pair
!= NULL
; pair
= nvlist_next_nvpair(dbca
->dbca_bmarks
, pair
)) {
610 const char *new = nvpair_name(pair
);
611 const char *source
= fnvpair_value_string(pair
);
613 if (strchr(source
, '@') != NULL
) {
614 dsl_bookmark_create_sync_impl_snap(new, source
, tx
,
615 0, NULL
, NULL
, NULL
);
616 } else if (strchr(source
, '#') != NULL
) {
617 dsl_bookmark_create_sync_impl_book(new, source
, tx
);
619 panic("unreachable code");
626 * The bookmarks must all be in the same pool.
629 dsl_bookmark_create(nvlist_t
*bmarks
, nvlist_t
*errors
)
632 dsl_bookmark_create_arg_t dbca
;
634 pair
= nvlist_next_nvpair(bmarks
, NULL
);
638 dbca
.dbca_bmarks
= bmarks
;
639 dbca
.dbca_errors
= errors
;
641 return (dsl_sync_task(nvpair_name(pair
), dsl_bookmark_create_check
,
642 dsl_bookmark_create_sync
, &dbca
,
643 fnvlist_num_pairs(bmarks
), ZFS_SPACE_CHECK_NORMAL
));
647 dsl_bookmark_create_redacted_check(void *arg
, dmu_tx_t
*tx
)
649 dsl_bookmark_create_redacted_arg_t
*dbcra
= arg
;
650 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
653 if (!spa_feature_is_enabled(dp
->dp_spa
,
654 SPA_FEATURE_REDACTION_BOOKMARKS
))
655 return (SET_ERROR(ENOTSUP
));
657 * If the list of redact snaps will not fit in the bonus buffer (or
658 * spill block, with the REDACTION_LIST_SPILL feature) with the
659 * furthest reached object and offset, fail.
661 uint64_t snaplimit
= ((spa_feature_is_enabled(dp
->dp_spa
,
662 SPA_FEATURE_REDACTION_LIST_SPILL
) ? spa_maxblocksize(dp
->dp_spa
) :
664 sizeof (redaction_list_phys_t
)) / sizeof (uint64_t);
665 if (dbcra
->dbcra_numsnaps
> snaplimit
)
666 return (SET_ERROR(E2BIG
));
668 if (dsl_bookmark_create_nvl_validate_pair(
669 dbcra
->dbcra_bmark
, dbcra
->dbcra_snap
) != 0)
670 return (SET_ERROR(EINVAL
));
672 rv
= dsl_bookmark_create_check_impl(dp
,
673 dbcra
->dbcra_bmark
, dbcra
->dbcra_snap
);
678 dsl_bookmark_create_redacted_sync(void *arg
, dmu_tx_t
*tx
)
680 dsl_bookmark_create_redacted_arg_t
*dbcra
= arg
;
681 dsl_bookmark_create_sync_impl_snap(dbcra
->dbcra_bmark
,
682 dbcra
->dbcra_snap
, tx
, dbcra
->dbcra_numsnaps
, dbcra
->dbcra_snaps
,
683 dbcra
->dbcra_tag
, dbcra
->dbcra_rl
);
687 dsl_bookmark_create_redacted(const char *bookmark
, const char *snapshot
,
688 uint64_t numsnaps
, uint64_t *snapguids
, const void *tag
,
689 redaction_list_t
**rl
)
691 dsl_bookmark_create_redacted_arg_t dbcra
;
693 dbcra
.dbcra_bmark
= bookmark
;
694 dbcra
.dbcra_snap
= snapshot
;
696 dbcra
.dbcra_numsnaps
= numsnaps
;
697 dbcra
.dbcra_snaps
= snapguids
;
698 dbcra
.dbcra_tag
= tag
;
700 return (dsl_sync_task(bookmark
, dsl_bookmark_create_redacted_check
,
701 dsl_bookmark_create_redacted_sync
, &dbcra
, 5,
702 ZFS_SPACE_CHECK_NORMAL
));
706 * Retrieve the list of properties given in the 'props' nvlist for a bookmark.
707 * If 'props' is NULL, retrieves all properties.
710 dsl_bookmark_fetch_props(dsl_pool_t
*dp
, zfs_bookmark_phys_t
*bmark_phys
,
711 nvlist_t
*props
, nvlist_t
*out_props
)
713 ASSERT3P(dp
, !=, NULL
);
714 ASSERT3P(bmark_phys
, !=, NULL
);
715 ASSERT3P(out_props
, !=, NULL
);
716 ASSERT(RRW_LOCK_HELD(&dp
->dp_config_rwlock
));
718 if (props
== NULL
|| nvlist_exists(props
,
719 zfs_prop_to_name(ZFS_PROP_GUID
))) {
720 dsl_prop_nvlist_add_uint64(out_props
,
721 ZFS_PROP_GUID
, bmark_phys
->zbm_guid
);
723 if (props
== NULL
|| nvlist_exists(props
,
724 zfs_prop_to_name(ZFS_PROP_CREATETXG
))) {
725 dsl_prop_nvlist_add_uint64(out_props
,
726 ZFS_PROP_CREATETXG
, bmark_phys
->zbm_creation_txg
);
728 if (props
== NULL
|| nvlist_exists(props
,
729 zfs_prop_to_name(ZFS_PROP_CREATION
))) {
730 dsl_prop_nvlist_add_uint64(out_props
,
731 ZFS_PROP_CREATION
, bmark_phys
->zbm_creation_time
);
733 if (props
== NULL
|| nvlist_exists(props
,
734 zfs_prop_to_name(ZFS_PROP_IVSET_GUID
))) {
735 dsl_prop_nvlist_add_uint64(out_props
,
736 ZFS_PROP_IVSET_GUID
, bmark_phys
->zbm_ivset_guid
);
738 if (bmark_phys
->zbm_flags
& ZBM_FLAG_HAS_FBN
) {
739 if (props
== NULL
|| nvlist_exists(props
,
740 zfs_prop_to_name(ZFS_PROP_REFERENCED
))) {
741 dsl_prop_nvlist_add_uint64(out_props
,
743 bmark_phys
->zbm_referenced_bytes_refd
);
745 if (props
== NULL
|| nvlist_exists(props
,
746 zfs_prop_to_name(ZFS_PROP_LOGICALREFERENCED
))) {
747 dsl_prop_nvlist_add_uint64(out_props
,
748 ZFS_PROP_LOGICALREFERENCED
,
749 bmark_phys
->zbm_uncompressed_bytes_refd
);
751 if (props
== NULL
|| nvlist_exists(props
,
752 zfs_prop_to_name(ZFS_PROP_REFRATIO
))) {
754 bmark_phys
->zbm_compressed_bytes_refd
== 0 ? 100 :
755 bmark_phys
->zbm_uncompressed_bytes_refd
* 100 /
756 bmark_phys
->zbm_compressed_bytes_refd
;
757 dsl_prop_nvlist_add_uint64(out_props
,
758 ZFS_PROP_REFRATIO
, ratio
);
762 if ((props
== NULL
|| nvlist_exists(props
, "redact_snaps") ||
763 nvlist_exists(props
, "redact_complete")) &&
764 bmark_phys
->zbm_redaction_obj
!= 0) {
765 redaction_list_t
*rl
;
766 int err
= dsl_redaction_list_hold_obj(dp
,
767 bmark_phys
->zbm_redaction_obj
, FTAG
, &rl
);
769 if (nvlist_exists(props
, "redact_snaps")) {
771 nvl
= fnvlist_alloc();
772 fnvlist_add_uint64_array(nvl
, ZPROP_VALUE
,
773 rl
->rl_phys
->rlp_snaps
,
774 rl
->rl_phys
->rlp_num_snaps
);
775 fnvlist_add_nvlist(out_props
, "redact_snaps",
779 if (nvlist_exists(props
, "redact_complete")) {
781 nvl
= fnvlist_alloc();
782 fnvlist_add_boolean_value(nvl
, ZPROP_VALUE
,
783 rl
->rl_phys
->rlp_last_blkid
== UINT64_MAX
&&
784 rl
->rl_phys
->rlp_last_object
== UINT64_MAX
);
785 fnvlist_add_nvlist(out_props
, "redact_complete",
789 dsl_redaction_list_rele(rl
, FTAG
);
795 dsl_get_bookmarks_impl(dsl_dataset_t
*ds
, nvlist_t
*props
, nvlist_t
*outnvl
)
797 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
799 ASSERT(dsl_pool_config_held(dp
));
801 if (dsl_dataset_is_snapshot(ds
))
802 return (SET_ERROR(EINVAL
));
804 for (dsl_bookmark_node_t
*dbn
= avl_first(&ds
->ds_bookmarks
);
805 dbn
!= NULL
; dbn
= AVL_NEXT(&ds
->ds_bookmarks
, dbn
)) {
806 nvlist_t
*out_props
= fnvlist_alloc();
808 dsl_bookmark_fetch_props(dp
, &dbn
->dbn_phys
, props
, out_props
);
810 fnvlist_add_nvlist(outnvl
, dbn
->dbn_name
, out_props
);
811 fnvlist_free(out_props
);
817 * Comparison func for ds_bookmarks AVL tree. We sort the bookmarks by
818 * their TXG, then by their FBN-ness. The "FBN-ness" component ensures
819 * that all bookmarks at the same TXG that HAS_FBN are adjacent, which
820 * dsl_bookmark_destroy_sync_impl() depends on. Note that there may be
821 * multiple bookmarks at the same TXG (with the same FBN-ness). In this
822 * case we differentiate them by an arbitrary metric (in this case,
826 dsl_bookmark_compare(const void *l
, const void *r
)
828 const dsl_bookmark_node_t
*ldbn
= l
;
829 const dsl_bookmark_node_t
*rdbn
= r
;
831 int64_t cmp
= TREE_CMP(ldbn
->dbn_phys
.zbm_creation_txg
,
832 rdbn
->dbn_phys
.zbm_creation_txg
);
835 cmp
= TREE_CMP((ldbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
),
836 (rdbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
));
839 cmp
= strcmp(ldbn
->dbn_name
, rdbn
->dbn_name
);
840 return (TREE_ISIGN(cmp
));
844 * Cache this (head) dataset's bookmarks in the ds_bookmarks AVL tree.
847 dsl_bookmark_init_ds(dsl_dataset_t
*ds
)
849 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
850 objset_t
*mos
= dp
->dp_meta_objset
;
852 ASSERT(!ds
->ds_is_snapshot
);
854 avl_create(&ds
->ds_bookmarks
, dsl_bookmark_compare
,
855 sizeof (dsl_bookmark_node_t
),
856 offsetof(dsl_bookmark_node_t
, dbn_node
));
858 if (!dsl_dataset_is_zapified(ds
))
861 int zaperr
= zap_lookup(mos
, ds
->ds_object
, DS_FIELD_BOOKMARK_NAMES
,
862 sizeof (ds
->ds_bookmarks_obj
), 1, &ds
->ds_bookmarks_obj
);
863 if (zaperr
== ENOENT
)
868 if (ds
->ds_bookmarks_obj
== 0)
873 zap_attribute_t
*attr
;
875 attr
= zap_attribute_alloc();
876 for (zap_cursor_init(&zc
, mos
, ds
->ds_bookmarks_obj
);
877 (err
= zap_cursor_retrieve(&zc
, attr
)) == 0;
878 zap_cursor_advance(&zc
)) {
879 dsl_bookmark_node_t
*dbn
=
880 dsl_bookmark_node_alloc(attr
->za_name
);
882 err
= dsl_bookmark_lookup_impl(ds
,
883 dbn
->dbn_name
, &dbn
->dbn_phys
);
884 ASSERT3U(err
, !=, ENOENT
);
886 kmem_free(dbn
, sizeof (*dbn
));
889 avl_add(&ds
->ds_bookmarks
, dbn
);
891 zap_cursor_fini(&zc
);
892 zap_attribute_free(attr
);
899 dsl_bookmark_fini_ds(dsl_dataset_t
*ds
)
902 dsl_bookmark_node_t
*dbn
;
904 if (ds
->ds_is_snapshot
)
907 while ((dbn
= avl_destroy_nodes(&ds
->ds_bookmarks
, &cookie
)) != NULL
) {
908 spa_strfree(dbn
->dbn_name
);
909 mutex_destroy(&dbn
->dbn_lock
);
910 kmem_free(dbn
, sizeof (*dbn
));
912 avl_destroy(&ds
->ds_bookmarks
);
916 * Retrieve the bookmarks that exist in the specified dataset, and the
917 * requested properties of each bookmark.
919 * The "props" nvlist specifies which properties are requested.
920 * See lzc_get_bookmarks() for the list of valid properties.
923 dsl_get_bookmarks(const char *dsname
, nvlist_t
*props
, nvlist_t
*outnvl
)
929 err
= dsl_pool_hold(dsname
, FTAG
, &dp
);
932 err
= dsl_dataset_hold(dp
, dsname
, FTAG
, &ds
);
934 dsl_pool_rele(dp
, FTAG
);
938 err
= dsl_get_bookmarks_impl(ds
, props
, outnvl
);
940 dsl_dataset_rele(ds
, FTAG
);
941 dsl_pool_rele(dp
, FTAG
);
946 * Retrieve all properties for a single bookmark in the given dataset.
949 dsl_get_bookmark_props(const char *dsname
, const char *bmname
, nvlist_t
*props
)
953 zfs_bookmark_phys_t bmark_phys
= { 0 };
956 err
= dsl_pool_hold(dsname
, FTAG
, &dp
);
959 err
= dsl_dataset_hold(dp
, dsname
, FTAG
, &ds
);
961 dsl_pool_rele(dp
, FTAG
);
965 err
= dsl_bookmark_lookup_impl(ds
, bmname
, &bmark_phys
);
969 dsl_bookmark_fetch_props(dp
, &bmark_phys
, NULL
, props
);
971 dsl_dataset_rele(ds
, FTAG
);
972 dsl_pool_rele(dp
, FTAG
);
/* Argument bundle for the bookmark-destroy sync task. */
typedef struct dsl_bookmark_destroy_arg {
	nvlist_t *dbda_bmarks;	/* in: full names of bookmarks to destroy */
	nvlist_t *dbda_success;	/* out: bookmarks validated for destruction */
	nvlist_t *dbda_errors;	/* out: bookmark name -> errno */
} dsl_bookmark_destroy_arg_t;
983 dsl_bookmark_destroy_sync_impl(dsl_dataset_t
*ds
, const char *name
,
986 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
987 uint64_t bmark_zapobj
= ds
->ds_bookmarks_obj
;
989 uint64_t int_size
, num_ints
;
991 * 'search' must be zeroed so that dbn_flags (which is used in
992 * dsl_bookmark_compare()) will be zeroed even if the on-disk
993 * (in ZAP) bookmark is shorter than offsetof(dbn_flags).
995 dsl_bookmark_node_t search
= { 0 };
996 char realname
[ZFS_MAX_DATASET_NAME_LEN
];
999 * Find the real name of this bookmark, which may be different
1000 * from the given name if the dataset is case-insensitive. Then
1001 * use the real name to find the node in the ds_bookmarks AVL tree.
1004 if (dsl_dataset_phys(ds
)->ds_flags
& DS_FLAG_CI_DATASET
)
1007 VERIFY0(zap_length(mos
, bmark_zapobj
, name
, &int_size
, &num_ints
));
1009 ASSERT3U(int_size
, ==, sizeof (uint64_t));
1011 if (num_ints
* int_size
> BOOKMARK_PHYS_SIZE_V1
) {
1012 spa_feature_decr(dmu_objset_spa(mos
),
1013 SPA_FEATURE_BOOKMARK_V2
, tx
);
1015 VERIFY0(zap_lookup_norm(mos
, bmark_zapobj
, name
, sizeof (uint64_t),
1016 num_ints
, &search
.dbn_phys
, mt
, realname
, sizeof (realname
), NULL
));
1018 search
.dbn_name
= realname
;
1019 dsl_bookmark_node_t
*dbn
= avl_find(&ds
->ds_bookmarks
, &search
, NULL
);
1020 ASSERT(dbn
!= NULL
);
1022 if (dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
) {
1024 * If this bookmark HAS_FBN, and it is before the most
1025 * recent snapshot, then its TXG is a key in the head's
1026 * deadlist (and all clones' heads' deadlists). If this is
1027 * the last thing keeping the key (i.e. there are no more
1028 * bookmarks with HAS_FBN at this TXG, and there is no
1029 * snapshot at this TXG), then remove the key.
1031 * Note that this algorithm depends on ds_bookmarks being
1032 * sorted such that all bookmarks at the same TXG with
1033 * HAS_FBN are adjacent (with no non-HAS_FBN bookmarks
1034 * at the same TXG in between them). If this were not
1035 * the case, we would need to examine *all* bookmarks
1036 * at this TXG, rather than just the adjacent ones.
1039 dsl_bookmark_node_t
*dbn_prev
=
1040 AVL_PREV(&ds
->ds_bookmarks
, dbn
);
1041 dsl_bookmark_node_t
*dbn_next
=
1042 AVL_NEXT(&ds
->ds_bookmarks
, dbn
);
1044 boolean_t more_bookmarks_at_this_txg
=
1045 (dbn_prev
!= NULL
&& dbn_prev
->dbn_phys
.zbm_creation_txg
==
1046 dbn
->dbn_phys
.zbm_creation_txg
&&
1047 (dbn_prev
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
)) ||
1048 (dbn_next
!= NULL
&& dbn_next
->dbn_phys
.zbm_creation_txg
==
1049 dbn
->dbn_phys
.zbm_creation_txg
&&
1050 (dbn_next
->dbn_phys
.zbm_flags
& ZBM_FLAG_HAS_FBN
));
1052 if (!(dbn
->dbn_phys
.zbm_flags
& ZBM_FLAG_SNAPSHOT_EXISTS
) &&
1053 !more_bookmarks_at_this_txg
&&
1054 dbn
->dbn_phys
.zbm_creation_txg
<
1055 dsl_dataset_phys(ds
)->ds_prev_snap_txg
) {
1056 dsl_dir_remove_clones_key(ds
->ds_dir
,
1057 dbn
->dbn_phys
.zbm_creation_txg
, tx
);
1058 dsl_deadlist_remove_key(&ds
->ds_deadlist
,
1059 dbn
->dbn_phys
.zbm_creation_txg
, tx
);
1062 spa_feature_decr(dmu_objset_spa(mos
),
1063 SPA_FEATURE_BOOKMARK_WRITTEN
, tx
);
1066 if (dbn
->dbn_phys
.zbm_redaction_obj
!= 0) {
1068 VERIFY0(dnode_hold(mos
,
1069 dbn
->dbn_phys
.zbm_redaction_obj
, FTAG
, &rl
));
1070 if (rl
->dn_have_spill
) {
1071 spa_feature_decr(dmu_objset_spa(mos
),
1072 SPA_FEATURE_REDACTION_LIST_SPILL
, tx
);
1074 dnode_rele(rl
, FTAG
);
1075 VERIFY0(dmu_object_free(mos
,
1076 dbn
->dbn_phys
.zbm_redaction_obj
, tx
));
1077 spa_feature_decr(dmu_objset_spa(mos
),
1078 SPA_FEATURE_REDACTION_BOOKMARKS
, tx
);
1081 avl_remove(&ds
->ds_bookmarks
, dbn
);
1082 spa_strfree(dbn
->dbn_name
);
1083 mutex_destroy(&dbn
->dbn_lock
);
1084 kmem_free(dbn
, sizeof (*dbn
));
1086 VERIFY0(zap_remove_norm(mos
, bmark_zapobj
, name
, mt
, tx
));
/*
 * Check phase of the bookmark-destroy sync task.  Validates every bookmark
 * named in dbda_bmarks; bookmarks that pass are recorded in dbda_success
 * (only when actually syncing), failures are recorded in dbda_errors.
 * Returns 0 if all bookmarks are destroyable, otherwise the last error seen.
 */
static int
dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
{
	dsl_bookmark_destroy_arg_t *dbda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int rv = 0;

	ASSERT(nvlist_empty(dbda->dbda_success));
	ASSERT(nvlist_empty(dbda->dbda_errors));

	/* If the feature was never enabled there can be no bookmarks. */
	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
		return (0);

	for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
		const char *fullname = nvpair_name(pair);
		dsl_dataset_t *ds;
		zfs_bookmark_phys_t bm;
		int error = 0;
		char *shortname;

		error = dsl_bookmark_hold_ds(dp, fullname, &ds,
		    FTAG, &shortname);
		if (error == ENOENT) {
			/* ignore it; the bookmark is "already destroyed" */
			continue;
		}
		if (error == 0) {
			error = dsl_bookmark_lookup_impl(ds, shortname, &bm);
			dsl_dataset_rele(ds, FTAG);
			if (error == ESRCH) {
				/*
				 * ignore it; the bookmark is
				 * "already destroyed"
				 */
				continue;
			}
			if (error == 0 && bm.zbm_redaction_obj != 0) {
				/*
				 * A redaction bookmark can't be destroyed
				 * while its redaction list is long-held
				 * (e.g. by an in-progress redacted send).
				 */
				redaction_list_t *rl = NULL;
				error = dsl_redaction_list_hold_obj(tx->tx_pool,
				    bm.zbm_redaction_obj, FTAG, &rl);
				if (error == ENOENT) {
					error = 0;
				} else if (error == 0 &&
				    dsl_redaction_list_long_held(rl)) {
					error = SET_ERROR(EBUSY);
				}
				if (rl != NULL) {
					dsl_redaction_list_rele(rl, FTAG);
				}
			}
		}
		if (error == 0) {
			if (dmu_tx_is_syncing(tx)) {
				fnvlist_add_boolean(dbda->dbda_success,
				    fullname);
			}
		} else {
			fnvlist_add_int32(dbda->dbda_errors, fullname, error);
			rv = error;
		}
	}
	return (rv);
}
/*
 * Sync phase of the bookmark-destroy sync task.  Destroys every bookmark
 * that the check phase recorded in dbda_success, cleaning up the per-dataset
 * bookmark ZAP when the last bookmark of a dataset goes away.
 */
static void
dsl_bookmark_destroy_sync(void *arg, dmu_tx_t *tx)
{
	dsl_bookmark_destroy_arg_t *dbda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;

	for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_success, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) {
		dsl_dataset_t *ds;
		char *shortname;
		uint64_t zap_cnt;

		VERIFY0(dsl_bookmark_hold_ds(dp, nvpair_name(pair),
		    &ds, FTAG, &shortname));
		dsl_bookmark_destroy_sync_impl(ds, shortname, tx);

		/*
		 * If all of this dataset's bookmarks have been destroyed,
		 * free the zap object and decrement the feature's use count.
		 */
		VERIFY0(zap_count(mos, ds->ds_bookmarks_obj, &zap_cnt));
		if (zap_cnt == 0) {
			dmu_buf_will_dirty(ds->ds_dbuf, tx);
			VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
			ds->ds_bookmarks_obj = 0;
			spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
			VERIFY0(zap_remove(mos, ds->ds_object,
			    DS_FIELD_BOOKMARK_NAMES, tx));
		}

		spa_history_log_internal_ds(ds, "remove bookmark", tx,
		    "name=%s", shortname);

		dsl_dataset_rele(ds, FTAG);
	}
}
1193 * The bookmarks must all be in the same pool.
1196 dsl_bookmark_destroy(nvlist_t
*bmarks
, nvlist_t
*errors
)
1199 dsl_bookmark_destroy_arg_t dbda
;
1200 nvpair_t
*pair
= nvlist_next_nvpair(bmarks
, NULL
);
1204 dbda
.dbda_bmarks
= bmarks
;
1205 dbda
.dbda_errors
= errors
;
1206 dbda
.dbda_success
= fnvlist_alloc();
1208 rv
= dsl_sync_task(nvpair_name(pair
), dsl_bookmark_destroy_check
,
1209 dsl_bookmark_destroy_sync
, &dbda
, fnvlist_num_pairs(bmarks
),
1210 ZFS_SPACE_CHECK_RESERVED
);
1211 fnvlist_free(dbda
.dbda_success
);
1215 /* Return B_TRUE if there are any long holds on this dataset. */
1217 dsl_redaction_list_long_held(redaction_list_t
*rl
)
1219 return (!zfs_refcount_is_zero(&rl
->rl_longholds
));
/*
 * Take a long hold on the redaction list; while held,
 * dsl_redaction_list_long_held() reports B_TRUE for it.
 */
void
dsl_redaction_list_long_hold(dsl_pool_t *dp, redaction_list_t *rl,
    const void *tag)
{
	ASSERT(dsl_pool_config_held(dp));
	(void) zfs_refcount_add(&rl->rl_longholds, tag);
}
/*
 * Release a long hold taken by dsl_redaction_list_long_hold().
 */
void
dsl_redaction_list_long_rele(redaction_list_t *rl, const void *tag)
{
	(void) zfs_refcount_remove(&rl->rl_longholds, tag);
}
1237 redaction_list_evict_sync(void *rlu
)
1239 redaction_list_t
*rl
= rlu
;
1240 zfs_refcount_destroy(&rl
->rl_longholds
);
1242 kmem_free(rl
, sizeof (redaction_list_t
));
1246 dsl_redaction_list_rele(redaction_list_t
*rl
, const void *tag
)
1248 if (rl
->rl_bonus
!= rl
->rl_dbuf
)
1249 dmu_buf_rele(rl
->rl_dbuf
, tag
);
1250 dmu_buf_rele(rl
->rl_bonus
, tag
);
/*
 * Hold the redaction list stored in MOS object rlobj, constructing the
 * in-core redaction_list_t on first use (subsequent holders share it via
 * the dmu_buf user mechanism).  On success *rlp is set and the caller owns
 * a hold which must be dropped with dsl_redaction_list_rele().
 */
int
dsl_redaction_list_hold_obj(dsl_pool_t *dp, uint64_t rlobj, const void *tag,
    redaction_list_t **rlp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf, *spill_dbuf;
	redaction_list_t *rl;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dmu_bonus_hold(mos, rlobj, tag, &dbuf);
	if (err != 0)
		return (err);

	rl = dmu_buf_get_user(dbuf);
	if (rl == NULL) {
		redaction_list_t *winner = NULL;

		rl = kmem_zalloc(sizeof (redaction_list_t), KM_SLEEP);
		rl->rl_bonus = dbuf;
		/*
		 * The phys data lives in the spill block when one exists,
		 * otherwise in the bonus buffer itself.
		 */
		if (dmu_spill_hold_existing(dbuf, tag, &spill_dbuf) == 0) {
			rl->rl_dbuf = spill_dbuf;
		} else {
			rl->rl_dbuf = dbuf;
		}
		rl->rl_object = rlobj;
		rl->rl_phys = rl->rl_dbuf->db_data;
		rl->rl_mos = dp->dp_meta_objset;
		zfs_refcount_create(&rl->rl_longholds);
		dmu_buf_init_user(&rl->rl_dbu, redaction_list_evict_sync, NULL,
		    &rl->rl_bonus);
		/*
		 * Another thread may have set the user concurrently; if so,
		 * discard our copy and use the winner's.
		 */
		if ((winner = dmu_buf_set_user_ie(dbuf, &rl->rl_dbu)) != NULL) {
			kmem_free(rl, sizeof (*rl));
			rl = winner;
		}
	}
	*rlp = rl;
	return (0);
}
/*
 * Snapshot ds is being destroyed.
 *
 * Adjust the "freed_before_next" of any bookmarks between this snap
 * and the previous snapshot, because their "next snapshot" is changing.
 *
 * If there are any bookmarks with HAS_FBN at this snapshot, remove
 * their HAS_SNAP flag (note: there can be at most one snapshot of
 * each filesystem at a given txg), and return B_TRUE.  In this case
 * the caller can not remove the key in the deadlist at this TXG, because
 * the HAS_FBN bookmarks require the key be there.
 *
 * Returns B_FALSE if there are no bookmarks with HAS_FBN at this
 * snapshot's TXG.  In this case the caller can remove the key in the
 * deadlist at this TXG.
 */
boolean_t
dsl_bookmark_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	dsl_dataset_t *head, *next;
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &head));
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &next));

	/*
	 * Find the first bookmark that HAS_FBN at or after the
	 * previous snapshot.
	 */
	dsl_bookmark_node_t search = { 0 };
	avl_index_t idx;
	search.dbn_phys.zbm_creation_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
	/*
	 * The empty-string name can't be in the AVL, and it compares
	 * before any entries with this TXG.
	 */
	search.dbn_name = (char *)"";
	VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
	dsl_bookmark_node_t *dbn =
	    avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);

	/*
	 * Iterate over all bookmarks that are at or after the previous
	 * snapshot, and before this (being deleted) snapshot.  Adjust
	 * their FBN based on their new next snapshot.
	 */
	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg <
	    dsl_dataset_phys(ds)->ds_creation_txg;
	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN))
			continue;
		/*
		 * Increase our FBN by the amount of space that was live
		 * (referenced) at the time of this bookmark (i.e.
		 * birth <= zbm_creation_txg), and killed between this
		 * (being deleted) snapshot and the next snapshot (i.e.
		 * on the next snapshot's deadlist).  (Space killed before
		 * this are already on our FBN.)
		 */
		uint64_t referenced, compressed, uncompressed;
		dsl_deadlist_space_range(&next->ds_deadlist,
		    0, dbn->dbn_phys.zbm_creation_txg,
		    &referenced, &compressed, &uncompressed);
		dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
		    referenced;
		dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
		    compressed;
		dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
		    uncompressed;
		/* Persist the updated phys to the bookmark ZAP. */
		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
		    dbn->dbn_name, sizeof (uint64_t),
		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
		    &dbn->dbn_phys, tx));
	}
	dsl_dataset_rele(next, FTAG);

	/*
	 * There may be several bookmarks at this txg (the TXG of the
	 * snapshot being deleted).  We need to clear the SNAPSHOT_EXISTS
	 * flag on all of them, and return TRUE if there is at least 1
	 * bookmark here with HAS_FBN (thus preventing the deadlist
	 * key from being removed).
	 */
	boolean_t rv = B_FALSE;
	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
	    dsl_dataset_phys(ds)->ds_creation_txg;
	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
			ASSERT(!(dbn->dbn_phys.zbm_flags &
			    ZBM_FLAG_SNAPSHOT_EXISTS));
			continue;
		}
		ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS);
		dbn->dbn_phys.zbm_flags &= ~ZBM_FLAG_SNAPSHOT_EXISTS;
		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
		    dbn->dbn_name, sizeof (uint64_t),
		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
		    &dbn->dbn_phys, tx));
		rv = B_TRUE;
	}
	dsl_dataset_rele(head, FTAG);
	return (rv);
}
/*
 * A snapshot is being created of this (head) dataset.
 *
 * We don't keep keys in the deadlist for the most recent snapshot, or any
 * bookmarks at or after it, because there can't be any blocks on the
 * deadlist in this range.  Now that the most recent snapshot is after
 * all bookmarks, we need to add these keys.  Note that the caller always
 * adds a key at the previous snapshot, so we only add keys for bookmarks
 * after that.
 */
void
dsl_bookmark_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t last_key_added = UINT64_MAX;
	/* Walk bookmarks newest-first until we reach the previous snap. */
	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
		uint64_t creation_txg = dbn->dbn_phys.zbm_creation_txg;
		ASSERT3U(creation_txg, <=, last_key_added);
		/*
		 * Note, there may be multiple bookmarks at this TXG,
		 * and we only want to add the key for this TXG once.
		 * The ds_bookmarks AVL is sorted by TXG, so we will visit
		 * these bookmarks in sequence.
		 */
		if ((dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) &&
		    creation_txg != last_key_added) {
			dsl_deadlist_add_key(&ds->ds_deadlist,
			    creation_txg, tx);
			last_key_added = creation_txg;
		}
	}
}
/*
 * The next snapshot of the origin dataset has changed, due to
 * promote or clone swap.  If there are any bookmarks at this dataset,
 * we need to update their zbm_*_freed_before_next_snap to reflect this.
 * The head dataset has the relevant bookmarks in ds_bookmarks.
 */
void
dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);

	/*
	 * Find the first bookmark that HAS_FBN at the origin snapshot.
	 */
	dsl_bookmark_node_t search = { 0 };
	avl_index_t idx;
	search.dbn_phys.zbm_creation_txg =
	    dsl_dataset_phys(origin)->ds_creation_txg;
	search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
	/*
	 * The empty-string name can't be in the AVL, and it compares
	 * before any entries with this TXG.
	 */
	search.dbn_name = (char *)"";
	VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
	dsl_bookmark_node_t *dbn =
	    avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);

	/*
	 * Iterate over all bookmarks that are at the origin txg.
	 * Adjust their FBN based on their new next snapshot.
	 */
	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
	    dsl_dataset_phys(origin)->ds_creation_txg &&
	    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
		/*
		 * Bookmark is at the origin, therefore its
		 * "next dataset" is changing, so we need
		 * to reset its FBN by recomputing it in
		 * dsl_bookmark_set_phys().
		 */
		ASSERT3U(dbn->dbn_phys.zbm_guid, ==,
		    dsl_dataset_phys(origin)->ds_guid);
		ASSERT3U(dbn->dbn_phys.zbm_referenced_bytes_refd, ==,
		    dsl_dataset_phys(origin)->ds_referenced_bytes);
		ASSERT(dbn->dbn_phys.zbm_flags &
		    ZBM_FLAG_SNAPSHOT_EXISTS);
		/*
		 * Save and restore the zbm_redaction_obj, which
		 * is zeroed by dsl_bookmark_set_phys().
		 */
		uint64_t redaction_obj =
		    dbn->dbn_phys.zbm_redaction_obj;
		dsl_bookmark_set_phys(&dbn->dbn_phys, origin);
		dbn->dbn_phys.zbm_redaction_obj = redaction_obj;

		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
		    dbn->dbn_name, sizeof (uint64_t),
		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
		    &dbn->dbn_phys, tx));
	}
}
/*
 * This block is no longer referenced by this (head) dataset.
 *
 * Adjust the FBN of any bookmarks that reference this block, whose "next"
 * is the head dataset.
 */
void
dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	(void) tx;

	/*
	 * Iterate over bookmarks whose "next" is the head dataset.
	 */
	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
		/*
		 * If the block was live (referenced) at the time of this
		 * bookmark, add its space to the bookmark's FBN.
		 */
		if (BP_GET_LOGICAL_BIRTH(bp) <=
		    dbn->dbn_phys.zbm_creation_txg &&
		    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
			mutex_enter(&dbn->dbn_lock);
			dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
			    bp_get_dsize_sync(dsl_dataset_get_spa(ds), bp);
			dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
			    BP_GET_PSIZE(bp);
			dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
			    BP_GET_UCSIZE(bp);
			/*
			 * Changing the ZAP object here would be too
			 * expensive.  Also, we may be called from the zio
			 * interrupt thread, which can't block on i/o.
			 * Therefore, we mark this bookmark as dirty and
			 * modify the ZAP once per txg, in
			 * dsl_bookmark_sync_done().
			 */
			dbn->dbn_dirty = B_TRUE;
			mutex_exit(&dbn->dbn_lock);
		}
	}
}
/*
 * Once per txg, flush any bookmarks that dsl_bookmark_block_killed()
 * marked dirty out to the bookmark ZAP.
 */
void
dsl_bookmark_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (dsl_dataset_is_snapshot(ds))
		return;

	/*
	 * We only dirty bookmarks that are at or after the most recent
	 * snapshot.  We can't create snapshots between
	 * dsl_bookmark_block_killed() and dsl_bookmark_sync_done(), so we
	 * don't need to look at any bookmarks before ds_prev_snap_txg.
	 */
	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
		if (dbn->dbn_dirty) {
			/*
			 * We only dirty nodes with HAS_FBN, therefore
			 * we can always use the current bookmark struct size.
			 */
			ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
			VERIFY0(zap_update(dp->dp_meta_objset,
			    ds->ds_bookmarks_obj,
			    dbn->dbn_name, sizeof (uint64_t),
			    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
			    &dbn->dbn_phys, tx));
			dbn->dbn_dirty = B_FALSE;
		}
	}
#ifdef ZFS_DEBUG
	/* Verify no bookmark anywhere in this dataset is still dirty. */
	for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
	    dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
		ASSERT(!dbn->dbn_dirty);
	}
#endif
}
1590 * Return the TXG of the most recent bookmark (or 0 if there are no bookmarks).
1593 dsl_bookmark_latest_txg(dsl_dataset_t
*ds
)
1595 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
1596 dsl_bookmark_node_t
*dbn
= avl_last(&ds
->ds_bookmarks
);
1599 return (dbn
->dbn_phys
.zbm_creation_txg
);
1603 * Compare the redact_block_phys_t to the bookmark. If the last block in the
1604 * redact_block_phys_t is before the bookmark, return -1. If the first block in
1605 * the redact_block_phys_t is after the bookmark, return 1. Otherwise, the
1606 * bookmark is inside the range of the redact_block_phys_t, and we return 0.
1609 redact_block_zb_compare(redact_block_phys_t
*first
,
1610 zbookmark_phys_t
*second
)
1613 * If the block_phys is for a previous object, or the last block in the
1614 * block_phys is strictly before the block in the bookmark, the
1615 * block_phys is earlier.
1617 if (first
->rbp_object
< second
->zb_object
||
1618 (first
->rbp_object
== second
->zb_object
&&
1619 first
->rbp_blkid
+ (redact_block_get_count(first
) - 1) <
1620 second
->zb_blkid
)) {
1625 * If the bookmark is for a previous object, or the block in the
1626 * bookmark is strictly before the first block in the block_phys, the
1627 * bookmark is earlier.
1629 if (first
->rbp_object
> second
->zb_object
||
1630 (first
->rbp_object
== second
->zb_object
&&
1631 first
->rbp_blkid
> second
->zb_blkid
)) {
1639 * Traverse the redaction list in the provided object, and call the callback for
1640 * each entry we find. Don't call the callback for any records before resume.
1643 dsl_redaction_list_traverse(redaction_list_t
*rl
, zbookmark_phys_t
*resume
,
1644 rl_traverse_callback_t cb
, void *arg
)
1646 objset_t
*mos
= rl
->rl_mos
;
1649 if (rl
->rl_phys
->rlp_last_object
!= UINT64_MAX
||
1650 rl
->rl_phys
->rlp_last_blkid
!= UINT64_MAX
) {
1652 * When we finish a send, we update the last object and offset
1653 * to UINT64_MAX. If a send fails partway through, the last
1654 * object and offset will have some other value, indicating how
1655 * far the send got. The redaction list must be complete before
1656 * it can be traversed, so return EINVAL if the last object and
1657 * blkid are not set to UINT64_MAX.
1659 return (SET_ERROR(EINVAL
));
1663 * This allows us to skip the binary search and resume checking logic
1664 * below, if we're not resuming a redacted send.
1666 if (ZB_IS_ZERO(resume
))
1670 * Binary search for the point to resume from.
1672 uint64_t maxidx
= rl
->rl_phys
->rlp_num_entries
- 1;
1673 uint64_t minidx
= 0;
1674 while (resume
!= NULL
&& maxidx
> minidx
) {
1675 redact_block_phys_t rbp
= { 0 };
1676 ASSERT3U(maxidx
, >, minidx
);
1677 uint64_t mididx
= minidx
+ ((maxidx
- minidx
) / 2);
1678 err
= dmu_read(mos
, rl
->rl_object
, mididx
* sizeof (rbp
),
1679 sizeof (rbp
), &rbp
, DMU_READ_NO_PREFETCH
);
1683 int cmp
= redact_block_zb_compare(&rbp
, resume
);
1688 } else if (cmp
> 0) {
1690 (mididx
== minidx
? minidx
: mididx
- 1);
1692 minidx
= mididx
+ 1;
1696 unsigned int bufsize
= SPA_OLD_MAXBLOCKSIZE
;
1697 redact_block_phys_t
*buf
= zio_data_buf_alloc(bufsize
);
1699 unsigned int entries_per_buf
= bufsize
/ sizeof (redact_block_phys_t
);
1700 uint64_t start_block
= minidx
/ entries_per_buf
;
1701 err
= dmu_read(mos
, rl
->rl_object
, start_block
* bufsize
, bufsize
, buf
,
1704 for (uint64_t curidx
= minidx
;
1705 err
== 0 && curidx
< rl
->rl_phys
->rlp_num_entries
;
1708 * We read in the redaction list one block at a time. Once we
1709 * finish with all the entries in a given block, we read in a
1710 * new one. The predictive prefetcher will take care of any
1711 * prefetching, and this code shouldn't be the bottleneck, so we
1712 * don't need to do manual prefetching.
1714 if (curidx
% entries_per_buf
== 0) {
1715 err
= dmu_read(mos
, rl
->rl_object
, curidx
*
1716 sizeof (*buf
), bufsize
, buf
,
1721 redact_block_phys_t
*rb
= &buf
[curidx
% entries_per_buf
];
1723 * If resume is non-null, we should either not send the data, or
1724 * null out resume so we don't have to keep doing these
1727 if (resume
!= NULL
) {
1729 * It is possible that after the binary search we got
1730 * a record before the resume point. There's two cases
1731 * where this can occur. If the record is the last
1732 * redaction record, and the resume point is after the
1733 * end of the redacted data, curidx will be the last
1734 * redaction record. In that case, the loop will end
1735 * after this iteration. The second case is if the
1736 * resume point is between two redaction records, the
1737 * binary search can return either the record before
1738 * or after the resume point. In that case, the next
1739 * iteration will be greater than the resume point.
1741 if (redact_block_zb_compare(rb
, resume
) < 0) {
1742 ASSERT3U(curidx
, ==, minidx
);
1746 * If the place to resume is in the middle of
1747 * the range described by this
1748 * redact_block_phys, then modify the
1749 * redact_block_phys in memory so we generate
1750 * the right records.
1752 if (resume
->zb_object
== rb
->rbp_object
&&
1753 resume
->zb_blkid
> rb
->rbp_blkid
) {
1754 uint64_t diff
= resume
->zb_blkid
-
1756 rb
->rbp_blkid
= resume
->zb_blkid
;
1757 redact_block_set_count(rb
,
1758 redact_block_get_count(rb
) - diff
);
1764 if (cb(rb
, arg
) != 0) {
1770 zio_data_buf_free(buf
, bufsize
);