/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2019, 2024, Klara, Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2022 Axcient.
 */

#include <sys/spa_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_file.h>

static uint_t zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
static uint_t zfs_recv_queue_ff = 20;
static uint_t zfs_recv_write_batch_size = 1024 * 1024;
static int zfs_recv_best_effort_corrective = 0;

static const void *const dmu_recv_tag = "dmu_recv_tag";
const char *const recv_clone_name = "%recv";
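
/*
 * Note on the tunables above: zfs_recv_best_effort_corrective is consulted
 * by the corrective ("healing") receive path further down; when it is
 * nonzero, a healing receive tolerates checksum mismatches and failed
 * verification reads instead of failing, and may heal from a stream whose
 * toguid does not match the snapshot being healed.  The queue/batch tunables
 * shape the receive pipeline and are exposed as module parameters in the
 * full source file.
 */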

static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
    void *buf);

struct receive_record_arg {
    dmu_replay_record_t header;
    void *payload; /* Pointer to a buffer containing the payload */
    /*
     * If the record is a WRITE or SPILL, pointer to the abd containing the
     * payload.
     */
    abd_t *abd;
    uint64_t bytes_read; /* bytes read from stream when record created */
    boolean_t eos_marker; /* Marks the end of the stream */
};

struct receive_writer_arg {
    /*
     * These three members are used to signal to the main thread when
     * we're done.
     */
    boolean_t raw;   /* DMU_BACKUP_FEATURE_RAW set */
    boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
    boolean_t full;  /* this is a full send stream */
    uint64_t last_object;
    uint64_t last_offset;
    uint64_t max_object; /* highest object ID referenced in stream */
    uint64_t bytes_read; /* bytes read when current record created */

    /* Encryption parameters for the last received DRR_OBJECT_RANGE */
    boolean_t or_crypt_params_present;
    uint64_t or_firstobj;
    uint64_t or_numslots;
    uint8_t or_salt[ZIO_DATA_SALT_LEN];
    uint8_t or_iv[ZIO_DATA_IV_LEN];
    uint8_t or_mac[ZIO_DATA_MAC_LEN];
    boolean_t or_byteorder;

    /* Keep track of DRR_FREEOBJECTS right after DRR_OBJECT_RANGE */
    or_need_sync_t or_need_sync;
};

typedef struct dmu_recv_begin_arg {
    const char *drba_origin;
    dmu_recv_cookie_t *drba_cookie;
    cred_t *drba_cred;
    proc_t *drba_proc;
    dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;
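
/*
 * A dmu_recv_begin_arg_t is the argument handed to the
 * dmu_recv_begin_check()/dmu_recv_begin_sync() sync-task pair (and their
 * resume variants) below: it carries the optional clone origin, the receive
 * cookie, the caller's credentials/process, and the crypto parameters used
 * when a new dataset has to be created.
 */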

static void
byteswap_record(dmu_replay_record_t *drr)
{
#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
    drr->drr_type = BSWAP_32(drr->drr_type);
    drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

    switch (drr->drr_type) {
    case DRR_BEGIN:
        DO64(drr_begin.drr_magic);
        DO64(drr_begin.drr_versioninfo);
        DO64(drr_begin.drr_creation_time);
        DO32(drr_begin.drr_type);
        DO32(drr_begin.drr_flags);
        DO64(drr_begin.drr_toguid);
        DO64(drr_begin.drr_fromguid);
        break;
    case DRR_OBJECT:
        DO64(drr_object.drr_object);
        DO32(drr_object.drr_type);
        DO32(drr_object.drr_bonustype);
        DO32(drr_object.drr_blksz);
        DO32(drr_object.drr_bonuslen);
        DO32(drr_object.drr_raw_bonuslen);
        DO64(drr_object.drr_toguid);
        DO64(drr_object.drr_maxblkid);
        break;
    case DRR_FREEOBJECTS:
        DO64(drr_freeobjects.drr_firstobj);
        DO64(drr_freeobjects.drr_numobjs);
        DO64(drr_freeobjects.drr_toguid);
        break;
    case DRR_WRITE:
        DO64(drr_write.drr_object);
        DO32(drr_write.drr_type);
        DO64(drr_write.drr_offset);
        DO64(drr_write.drr_logical_size);
        DO64(drr_write.drr_toguid);
        ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
        DO64(drr_write.drr_key.ddk_prop);
        DO64(drr_write.drr_compressed_size);
        break;
    case DRR_WRITE_EMBEDDED:
        DO64(drr_write_embedded.drr_object);
        DO64(drr_write_embedded.drr_offset);
        DO64(drr_write_embedded.drr_length);
        DO64(drr_write_embedded.drr_toguid);
        DO32(drr_write_embedded.drr_lsize);
        DO32(drr_write_embedded.drr_psize);
        break;
    case DRR_FREE:
        DO64(drr_free.drr_object);
        DO64(drr_free.drr_offset);
        DO64(drr_free.drr_length);
        DO64(drr_free.drr_toguid);
        break;
    case DRR_SPILL:
        DO64(drr_spill.drr_object);
        DO64(drr_spill.drr_length);
        DO64(drr_spill.drr_toguid);
        DO64(drr_spill.drr_compressed_size);
        DO32(drr_spill.drr_type);
        break;
    case DRR_OBJECT_RANGE:
        DO64(drr_object_range.drr_firstobj);
        DO64(drr_object_range.drr_numslots);
        DO64(drr_object_range.drr_toguid);
        break;
    case DRR_REDACT:
        DO64(drr_redact.drr_object);
        DO64(drr_redact.drr_offset);
        DO64(drr_redact.drr_length);
        DO64(drr_redact.drr_toguid);
        break;
    case DRR_END:
        DO64(drr_end.drr_toguid);
        ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
        break;
    default:
        break;
    }

    if (drr->drr_type != DRR_BEGIN) {
        ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
    }
#undef DO64
#undef DO32
}
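
/*
 * byteswap_record() is called from dmu_recv_begin() when the BEGIN record
 * arrives with drr_magic == BSWAP_64(DMU_BACKUP_MAGIC), i.e. the stream was
 * generated on a host of the opposite endianness; each later record in the
 * stream is byteswapped the same way before it is processed.
 */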

static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
    for (int i = 0; i < num_snaps; i++) {
        if (snaps[i] == guid)
            return (B_TRUE);
    }
    return (B_FALSE);
}

/*
 * Check that the new stream we're trying to receive is redacted with respect to
 * a subset of the snapshots that the origin was redacted with respect to. For
 * the reasons behind this, see the man page on redacted zfs sends and receives.
 */
static boolean_t
compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
    uint64_t *redact_snaps, uint64_t num_redact_snaps)
{
    /*
     * Short circuit the comparison; if we are redacted with respect to
     * more snapshots than the origin, we can't be redacted with respect
     * to a subset.
     */
    if (num_redact_snaps > origin_num_snaps) {
        return (B_FALSE);
    }

    for (int i = 0; i < num_redact_snaps; i++) {
        if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
            redact_snaps[i])) {
            return (B_FALSE);
        }
    }
    return (B_TRUE);
}

static boolean_t
redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
{
    uint64_t *origin_snaps;
    uint64_t origin_num_snaps;
    dmu_recv_cookie_t *drc = drba->drba_cookie;
    struct drr_begin *drrb = drc->drc_drrb;
    int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
    int err = 0;
    boolean_t ret = B_TRUE;
    uint64_t *redact_snaps;
    uint_t numredactsnaps;

    /*
     * If this is a full send stream, we're safe no matter what.
     */
    if (drrb->drr_fromguid == 0)
        return (B_TRUE);

    VERIFY(dsl_dataset_get_uint64_array_feature(origin,
        SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));

    if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
        BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) == 0) {
        /*
         * If the send stream was sent from the redaction bookmark or
         * the redacted version of the dataset, then we're safe. Verify
         * that this is from a compatible redaction bookmark or
         * redacted dataset.
         */
        if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
            redact_snaps, numredactsnaps)) {
            ret = B_FALSE;
        }
    } else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
        /*
         * If the stream is redacted, it must be redacted with respect
         * to a subset of what the origin is redacted with respect to.
         * See case number 2 in the zfs man page section on redacted zfs
         * streams.
         */
        err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
            BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);

        if (err != 0 || !compatible_redact_snaps(origin_snaps,
            origin_num_snaps, redact_snaps, numredactsnaps)) {
            ret = B_FALSE;
        }
    } else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
        drrb->drr_toguid)) {
        /*
         * If the stream isn't redacted but the origin is, this must be
         * one of the snapshots the origin is redacted with respect to.
         * See case number 1 in the zfs man page section on redacted zfs
         * streams.
         */
        ret = B_FALSE;
    }

    return (ret);
}

/*
 * If we previously received a stream with --large-block, we don't support
 * receiving an incremental on top of it without --large-block. This avoids
 * forcing a read-modify-write or trying to re-aggregate a string of WRITE
 * records.
 */
static int
recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
{
    if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
        !(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
        return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
    return (0);
}
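
/*
 * The begin checks below use recv_check_large_blocks() to refuse an
 * incremental stream sent without --large-block on top of a dataset that
 * already has SPA_FEATURE_LARGE_BLOCKS active.
 */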

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid, uint64_t featureflags)
{
    int error;
    uint64_t obj;
    uint64_t children;
    dsl_dataset_t *snap;
    dsl_pool_t *dp = ds->ds_dir->dd_pool;
    boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
    boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
    boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;

    /* Temporary clone name must not exist. */
    error = zap_lookup(dp->dp_meta_objset,
        dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
        8, 1, &obj);
    if (error != ENOENT)
        return (error == 0 ? SET_ERROR(EBUSY) : error);

    /* Resume state must not be set. */
    if (dsl_dataset_has_resume_receive_state(ds))
        return (SET_ERROR(EBUSY));

    /* New snapshot name must not exist if we're not healing it. */
    error = zap_lookup(dp->dp_meta_objset,
        dsl_dataset_phys(ds)->ds_snapnames_zapobj,
        drba->drba_cookie->drc_tosnap, 8, 1, &obj);
    if (drba->drba_cookie->drc_heal) {
        /* Destination snapshot must exist when healing it. */
        if (error != 0)
            return (error);
    } else if (error != ENOENT) {
        return (error == 0 ? SET_ERROR(EEXIST) : error);
    }

    /* Must not have children if receiving a ZVOL. */
    error = zap_count(dp->dp_meta_objset,
        dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
    if (error != 0)
        return (error);
    if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
        children > 0)
        return (SET_ERROR(ZFS_ERR_WRONG_PARENT));

    /*
     * Check snapshot limit before receiving. We'll recheck again at the
     * end, but might as well abort before receiving if we're already over
     * the limit.
     *
     * Note that we do not check the file system limit with
     * dsl_dir_fscount_check because the temporary %clones don't count
     * against that limit.
     */
    error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
        NULL, drba->drba_cred, drba->drba_proc);
    if (error != 0)
        return (error);

    if (drba->drba_cookie->drc_heal) {
        /* Encryption is incompatible with embedded data. */
        if (encrypted && embed)
            return (SET_ERROR(EINVAL));

        /* Healing is not supported when in 'force' mode. */
        if (drba->drba_cookie->drc_force)
            return (SET_ERROR(EINVAL));

        /* Must have keys loaded if doing encrypted non-raw recv. */
        if (encrypted && !raw) {
            if (spa_keystore_lookup_key(dp->dp_spa, ds->ds_object,
                NULL, NULL) != 0)
                return (SET_ERROR(EACCES));
        }

        error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap);
        if (error != 0)
            return (error);

        /*
         * When not doing best effort corrective recv healing can only
         * be done if the send stream is for the same snapshot as the
         * one we are trying to heal.
         */
        if (zfs_recv_best_effort_corrective == 0 &&
            drba->drba_cookie->drc_drrb->drr_toguid !=
            dsl_dataset_phys(snap)->ds_guid) {
            dsl_dataset_rele(snap, FTAG);
            return (SET_ERROR(ENOTSUP));
        }
        dsl_dataset_rele(snap, FTAG);
    } else if (fromguid != 0) {
        /* Sanity check the incremental recv */
        uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

        /* Can't perform a raw receive on top of a non-raw receive */
        if (!encrypted && raw)
            return (SET_ERROR(EINVAL));

        /* Encryption is incompatible with embedded data */
        if (encrypted && embed)
            return (SET_ERROR(EINVAL));

        /* Find snapshot in this dir that matches fromguid. */
        while (obj != 0) {
            error = dsl_dataset_hold_obj(dp, obj, FTAG,
                &snap);
            if (error != 0)
                return (SET_ERROR(ENODEV));
            if (snap->ds_dir != ds->ds_dir) {
                dsl_dataset_rele(snap, FTAG);
                return (SET_ERROR(ENODEV));
            }
            if (dsl_dataset_phys(snap)->ds_guid == fromguid)
                break;
            obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
            dsl_dataset_rele(snap, FTAG);
        }
        if (obj == 0)
            return (SET_ERROR(ENODEV));

        if (drba->drba_cookie->drc_force) {
            drba->drba_cookie->drc_fromsnapobj = obj;
        } else {
            /*
             * If we are not forcing, there must be no
             * changes since fromsnap. Raw sends have an
             * additional constraint that requires that
             * no "noop" snapshots exist between fromsnap
             * and tosnap for the IVset checking code to
             * work properly.
             */
            if (dsl_dataset_modified_since_snap(ds, snap) ||
                (raw &&
                dsl_dataset_phys(ds)->ds_prev_snap_obj !=
                snap->ds_object)) {
                dsl_dataset_rele(snap, FTAG);
                return (SET_ERROR(ETXTBSY));
            }
            drba->drba_cookie->drc_fromsnapobj =
                ds->ds_prev->ds_object;
        }

        if (dsl_dataset_feature_is_active(snap,
            SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
            snap)) {
            dsl_dataset_rele(snap, FTAG);
            return (SET_ERROR(EINVAL));
        }

        error = recv_check_large_blocks(snap, featureflags);
        if (error != 0) {
            dsl_dataset_rele(snap, FTAG);
            return (error);
        }

        dsl_dataset_rele(snap, FTAG);
    } else {
        /* If full and not healing then must be forced. */
        if (!drba->drba_cookie->drc_force)
            return (SET_ERROR(EEXIST));

        /*
         * We don't support using zfs recv -F to blow away
         * encrypted filesystems. This would require the
         * dsl dir to point to the old encryption key and
         * the new one at the same time during the receive.
         */
        if ((!encrypted && raw) || encrypted)
            return (SET_ERROR(EINVAL));

        /*
         * Perform the same encryption checks we would if
         * we were creating a new dataset from scratch.
         */
        if (!raw) {
            boolean_t will_encrypt;

            error = dmu_objset_create_crypt_check(
                ds->ds_dir->dd_parent, drba->drba_dcp,
                &will_encrypt);
            if (error != 0)
                return (error);

            if (will_encrypt && embed)
                return (SET_ERROR(EINVAL));
        }
    }

    return (0);
}

/*
 * Check that any feature flags used in the data stream we're receiving are
 * supported by the pool we are receiving into.
 *
 * Note that some of the features we explicitly check here have additional
 * (implicit) features they depend on, but those dependencies are enforced
 * through the zfeature_register() calls declaring the features that we
 * explicitly check.
 */
static int
recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
{
    /*
     * Check if there are any unsupported feature flags.
     */
    if (!DMU_STREAM_SUPPORTED(featureflags)) {
        return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
    }

    /* Verify pool version supports SA if SA_SPILL feature set */
    if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
        spa_version(spa) < SPA_VERSION_SA)
        return (SET_ERROR(ENOTSUP));

    /*
     * LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
     * and large_dnodes in the stream can only be used if those pool
     * features are enabled because we don't attempt to decompress /
     * un-embed / un-mooch / split up the blocks / dnodes during the
     * receive process.
     */
    if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
        !spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
        return (SET_ERROR(ENOTSUP));
    if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
        !spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
        return (SET_ERROR(ENOTSUP));
    if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
        !spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
        return (SET_ERROR(ENOTSUP));
    if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
        !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
        return (SET_ERROR(ENOTSUP));
    if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
        !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
        return (SET_ERROR(ENOTSUP));
    if ((featureflags & DMU_BACKUP_FEATURE_LARGE_MICROZAP) &&
        !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_MICROZAP))
        return (SET_ERROR(ENOTSUP));

    /*
     * Receiving redacted streams requires that redacted datasets are
     * enabled.
     */
    if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
        !spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
        return (SET_ERROR(ENOTSUP));

    /*
     * If the LONGNAME is not enabled on the target, fail that request.
     */
    if ((featureflags & DMU_BACKUP_FEATURE_LONGNAME) &&
        !spa_feature_is_enabled(spa, SPA_FEATURE_LONGNAME))
        return (SET_ERROR(ENOTSUP));

    return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
    dmu_recv_begin_arg_t *drba = arg;
    dsl_pool_t *dp = dmu_tx_pool(tx);
    struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
    uint64_t fromguid = drrb->drr_fromguid;
    int flags = drrb->drr_flags;
    ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
    int error;
    uint64_t featureflags = drba->drba_cookie->drc_featureflags;
    dsl_dataset_t *ds;
    const char *tofs = drba->drba_cookie->drc_tofs;

    /* already checked */
    ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
    ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

    if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
        DMU_COMPOUNDSTREAM ||
        drrb->drr_type >= DMU_OST_NUMTYPES ||
        ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
        return (SET_ERROR(EINVAL));

    error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
    if (error != 0)
        return (error);

    /* Resumable receives require extensible datasets */
    if (drba->drba_cookie->drc_resumable &&
        !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
        return (SET_ERROR(ENOTSUP));

    if (featureflags & DMU_BACKUP_FEATURE_RAW) {
        /* raw receives require the encryption feature */
        if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
            return (SET_ERROR(ENOTSUP));

        /* embedded data is incompatible with encryption and raw recv */
        if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
            return (SET_ERROR(EINVAL));

        /* raw receives require spill block allocation flag */
        if (!(flags & DRR_FLAG_SPILL_BLOCK))
            return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
    } else {
        /*
         * We support unencrypted datasets below encrypted ones now,
         * so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
         * with a dataset we may encrypt.
         */
        if (drba->drba_dcp == NULL ||
            drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
            dsflags |= DS_HOLD_FLAG_DECRYPT;
        }
    }

    error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
    if (error == 0) {
        /* target fs already exists; recv into temp clone */

        /* Can't recv a clone into an existing fs */
        if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
            dsl_dataset_rele_flags(ds, dsflags, FTAG);
            return (SET_ERROR(EINVAL));
        }

        error = recv_begin_check_existing_impl(drba, ds, fromguid,
            featureflags);
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
    } else if (error == ENOENT) {
        /* target fs does not exist; must be a full backup or clone */
        char buf[ZFS_MAX_DATASET_NAME_LEN];
        objset_t *os;

        /* healing recv must be done "into" an existing snapshot */
        if (drba->drba_cookie->drc_heal == B_TRUE)
            return (SET_ERROR(ENOTSUP));

        /*
         * If it's a non-clone incremental, we are missing the
         * target fs, so fail the recv.
         */
        if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
            drba->drba_origin))
            return (SET_ERROR(ENOENT));

        /*
         * If we're receiving a full send as a clone, and it doesn't
         * contain all the necessary free records and freeobject
         * records, reject it.
         */
        if (fromguid == 0 && drba->drba_origin != NULL &&
            !(flags & DRR_FLAG_FREERECORDS))
            return (SET_ERROR(EINVAL));

        /* Open the parent of tofs */
        ASSERT3U(strlen(tofs), <, sizeof (buf));
        (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
        error = dsl_dataset_hold(dp, buf, FTAG, &ds);
        if (error != 0)
            return (error);

        if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
            drba->drba_origin == NULL) {
            boolean_t will_encrypt;

            /*
             * Check that we aren't breaking any encryption rules
             * and that we have all the parameters we need to
             * create an encrypted dataset if necessary. If we are
             * making an encrypted dataset the stream can't have
             * embedded data.
             */
            error = dmu_objset_create_crypt_check(ds->ds_dir,
                drba->drba_dcp, &will_encrypt);
            if (error != 0) {
                dsl_dataset_rele(ds, FTAG);
                return (error);
            }

            if (will_encrypt &&
                (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
                dsl_dataset_rele(ds, FTAG);
                return (SET_ERROR(EINVAL));
            }
        }

        /*
         * Check filesystem and snapshot limits before receiving. We'll
         * recheck snapshot limits again at the end (we create the
         * filesystems and increment those counts during begin_sync).
         */
        error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
            ZFS_PROP_FILESYSTEM_LIMIT, NULL,
            drba->drba_cred, drba->drba_proc);
        if (error != 0) {
            dsl_dataset_rele(ds, FTAG);
            return (error);
        }

        error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
            ZFS_PROP_SNAPSHOT_LIMIT, NULL,
            drba->drba_cred, drba->drba_proc);
        if (error != 0) {
            dsl_dataset_rele(ds, FTAG);
            return (error);
        }

        /* can't recv below anything but filesystems (eg. no ZVOLs) */
        error = dmu_objset_from_ds(ds, &os);
        if (error != 0) {
            dsl_dataset_rele(ds, FTAG);
            return (error);
        }
        if (dmu_objset_type(os) != DMU_OST_ZFS) {
            dsl_dataset_rele(ds, FTAG);
            return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
        }

        if (drba->drba_origin != NULL) {
            dsl_dataset_t *origin;
            error = dsl_dataset_hold_flags(dp, drba->drba_origin,
                dsflags, FTAG, &origin);
            if (error != 0) {
                dsl_dataset_rele(ds, FTAG);
                return (error);
            }
            if (!origin->ds_is_snapshot) {
                dsl_dataset_rele_flags(origin, dsflags, FTAG);
                dsl_dataset_rele(ds, FTAG);
                return (SET_ERROR(EINVAL));
            }
            if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
                fromguid != 0) {
                dsl_dataset_rele_flags(origin, dsflags, FTAG);
                dsl_dataset_rele(ds, FTAG);
                return (SET_ERROR(ENODEV));
            }

            if (origin->ds_dir->dd_crypto_obj != 0 &&
                (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
                dsl_dataset_rele_flags(origin, dsflags, FTAG);
                dsl_dataset_rele(ds, FTAG);
                return (SET_ERROR(EINVAL));
            }

            /*
             * If the origin is redacted we need to verify that this
             * send stream can safely be received on top of the
             * origin.
             */
            if (dsl_dataset_feature_is_active(origin,
                SPA_FEATURE_REDACTED_DATASETS)) {
                if (!redact_check(drba, origin)) {
                    dsl_dataset_rele_flags(origin, dsflags,
                        FTAG);
                    dsl_dataset_rele_flags(ds, dsflags,
                        FTAG);
                    return (SET_ERROR(EINVAL));
                }
            }

            error = recv_check_large_blocks(ds, featureflags);
            if (error != 0) {
                dsl_dataset_rele_flags(origin, dsflags, FTAG);
                dsl_dataset_rele_flags(ds, dsflags, FTAG);
                return (error);
            }

            dsl_dataset_rele_flags(origin, dsflags, FTAG);
        }

        dsl_dataset_rele(ds, FTAG);
        error = 0;
    }
    return (error);
}
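
/*
 * Summary of dmu_recv_begin_check(): when the target filesystem exists, the
 * stream is validated against it via recv_begin_check_existing_impl() (the
 * data will later land in a temporary %recv clone); when it does not exist
 * (ENOENT), the parent is opened instead and the full-send/clone rules,
 * limits, and encryption constraints above are applied.
 */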

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
    dmu_recv_begin_arg_t *drba = arg;
    dsl_pool_t *dp = dmu_tx_pool(tx);
    objset_t *mos = dp->dp_meta_objset;
    dmu_recv_cookie_t *drc = drba->drba_cookie;
    struct drr_begin *drrb = drc->drc_drrb;
    const char *tofs = drc->drc_tofs;
    uint64_t featureflags = drc->drc_featureflags;
    dsl_dataset_t *ds, *newds;
    objset_t *os;
    uint64_t dsobj;
    int error;
    ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
    uint64_t crflags = 0;
    dsl_crypto_params_t dummy_dcp = { 0 };
    dsl_crypto_params_t *dcp = drba->drba_dcp;

    if (drrb->drr_flags & DRR_FLAG_CI_DATA)
        crflags |= DS_FLAG_CI_DATASET;

    if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
        dsflags |= DS_HOLD_FLAG_DECRYPT;

    /*
     * Raw, non-incremental recvs always use a dummy dcp with
     * the raw cmd set. Raw incremental recvs do not use a dcp
     * since the encryption parameters are already set in stone.
     */
    if (dcp == NULL && drrb->drr_fromguid == 0 &&
        drba->drba_origin == NULL) {
        ASSERT3P(dcp, ==, NULL);
        dcp = &dummy_dcp;

        if (featureflags & DMU_BACKUP_FEATURE_RAW)
            dcp->cp_cmd = DCP_CMD_RAW_RECV;
    }

    error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
    if (error == 0) {
        /* Create temporary clone unless we're doing corrective recv */
        dsl_dataset_t *snap = NULL;

        if (drba->drba_cookie->drc_fromsnapobj != 0) {
            VERIFY0(dsl_dataset_hold_obj(dp,
                drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
            ASSERT3P(dcp, ==, NULL);
        }
        if (drc->drc_heal) {
            /* When healing we want to use the provided snapshot */
            VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap,
                &dsobj));
        } else {
            dsobj = dsl_dataset_create_sync(ds->ds_dir,
                recv_clone_name, snap, crflags, drba->drba_cred,
                dcp, tx);
        }
        if (drba->drba_cookie->drc_fromsnapobj != 0)
            dsl_dataset_rele(snap, FTAG);
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
    } else {
        dsl_dir_t *dd;
        const char *tail;
        dsl_dataset_t *origin = NULL;

        VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

        if (drba->drba_origin != NULL) {
            VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
                FTAG, &origin));
            ASSERT3P(dcp, ==, NULL);
        }

        /* Create new dataset. */
        dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
            origin, crflags, drba->drba_cred, dcp, tx);
        if (origin != NULL)
            dsl_dataset_rele(origin, FTAG);
        dsl_dir_rele(dd, FTAG);
        drc->drc_newfs = B_TRUE;
    }
    VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
        &newds));
    if (dsl_dataset_feature_is_active(newds,
        SPA_FEATURE_REDACTED_DATASETS)) {
        /*
         * If the origin dataset is redacted, the child will be redacted
         * when we create it. We clear the new dataset's
         * redaction info; if it should be redacted, we'll fill
         * in its information later.
         */
        dsl_dataset_deactivate_feature(newds,
            SPA_FEATURE_REDACTED_DATASETS, tx);
    }
    VERIFY0(dmu_objset_from_ds(newds, &os));

    if (drc->drc_resumable) {
        dsl_dataset_zapify(newds, tx);
        if (drrb->drr_fromguid != 0) {
            VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
                8, 1, &drrb->drr_fromguid, tx));
        }
        VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
            8, 1, &drrb->drr_toguid, tx));
        VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
            1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
        uint64_t one = 1;
        uint64_t zero = 0;
        VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
            8, 1, &one, tx));
        VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
            8, 1, &zero, tx));
        VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
            8, 1, &zero, tx));
        if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
            VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
                8, 1, &one, tx));
        }
        if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
            VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
                8, 1, &one, tx));
        }
        if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
            VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
                8, 1, &one, tx));
        }
        if (featureflags & DMU_BACKUP_FEATURE_RAW) {
            VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
                8, 1, &one, tx));
        }

        uint64_t *redact_snaps;
        uint_t numredactsnaps;
        if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
            BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
            &numredactsnaps) == 0) {
            VERIFY0(zap_add(mos, dsobj,
                DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
                sizeof (*redact_snaps), numredactsnaps,
                redact_snaps, tx));
        }
    }

    /*
     * Usually the os->os_encrypted value is tied to the presence of a
     * DSL Crypto Key object in the dd. However, that will not be received
     * until dmu_recv_stream(), so we set the value manually for now.
     */
    if (featureflags & DMU_BACKUP_FEATURE_RAW) {
        os->os_encrypted = B_TRUE;
        drba->drba_cookie->drc_raw = B_TRUE;
    }

    if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
        uint64_t *redact_snaps;
        uint_t numredactsnaps;
        VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
            BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
        dsl_dataset_activate_redaction(newds, redact_snaps,
            numredactsnaps, tx);
    }

    if (featureflags & DMU_BACKUP_FEATURE_LARGE_MICROZAP) {
        /*
         * The source has seen a large microzap at least once in its
         * life, so we activate the feature here to match. It's not
         * strictly necessary since a large microzap is usable without
         * the feature active, but if that object is sent on from here,
         * we need this info to know to add the stream feature.
         *
         * There may be no large microzap in the incoming stream, or
         * ever again, but this is a very niche feature and it's very
         * difficult to spot a large microzap in the stream, so it's
         * not worth the effort of trying harder to activate the
         * feature at first use.
         */
        dsl_dataset_activate_feature(dsobj, SPA_FEATURE_LARGE_MICROZAP,
            (void *)B_TRUE, tx);
    }

    dmu_buf_will_dirty(newds->ds_dbuf, tx);
    dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

    /*
     * Activate longname feature if received
     */
    if (featureflags & DMU_BACKUP_FEATURE_LONGNAME &&
        !dsl_dataset_feature_is_active(newds, SPA_FEATURE_LONGNAME)) {
        dsl_dataset_activate_feature(newds->ds_object,
            SPA_FEATURE_LONGNAME, (void *)B_TRUE, tx);
        newds->ds_feature[SPA_FEATURE_LONGNAME] = (void *)B_TRUE;
    }

    /*
     * If we actually created a non-clone, we need to create the objset
     * in our new dataset. If this is a raw send we postpone this until
     * dmu_recv_stream() so that we can allocate the metadnode with the
     * properties from the DRR_BEGIN payload.
     */
    rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
    if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
        (featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
        !drc->drc_heal) {
        (void) dmu_objset_create_impl(dp->dp_spa,
            newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
    }
    rrw_exit(&newds->ds_bp_rwlock, FTAG);

    drba->drba_cookie->drc_ds = newds;
    drba->drba_cookie->drc_os = os;

    spa_history_log_internal_ds(newds, "receive", tx, " ");
}
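
/*
 * Note that dmu_recv_begin_sync() leaves the dataset it creates (or the
 * temporary %recv clone) marked DS_FLAG_INCONSISTENT; the resume path in
 * dmu_recv_resume_begin_check() relies on that flag, and it is only cleared
 * once the receive completes.
 */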

static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
    dmu_recv_begin_arg_t *drba = arg;
    dmu_recv_cookie_t *drc = drba->drba_cookie;
    dsl_pool_t *dp = dmu_tx_pool(tx);
    struct drr_begin *drrb = drc->drc_drrb;
    int error;
    ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
    dsl_dataset_t *ds;
    const char *tofs = drc->drc_tofs;

    /* already checked */
    ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
    ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);

    if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
        DMU_COMPOUNDSTREAM ||
        drrb->drr_type >= DMU_OST_NUMTYPES)
        return (SET_ERROR(EINVAL));

    /*
     * This is mostly a sanity check since we should have already done these
     * checks during a previous attempt to receive the data.
     */
    error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
        dp->dp_spa);
    if (error != 0)
        return (error);

    /* 6 extra bytes for /%recv */
    char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

    (void) snprintf(recvname, sizeof (recvname), "%s/%s",
        tofs, recv_clone_name);

    if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
        /* raw receives require spill block allocation flag */
        if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
            return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
    } else {
        dsflags |= DS_HOLD_FLAG_DECRYPT;
    }

    boolean_t recvexist = B_TRUE;
    if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
        /* %recv does not exist; continue in tofs */
        recvexist = B_FALSE;
        error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
        if (error != 0)
            return (error);
    }

    /*
     * Resume of full/newfs recv on existing dataset should be done with
     * force flag
     */
    if (recvexist && drrb->drr_fromguid == 0 && !drc->drc_force) {
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
        return (SET_ERROR(ZFS_ERR_RESUME_EXISTS));
    }

    /* check that ds is marked inconsistent */
    if (!DS_IS_INCONSISTENT(ds)) {
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
        return (SET_ERROR(EINVAL));
    }

    /* check that there is resuming data, and that the toguid matches */
    if (!dsl_dataset_is_zapified(ds)) {
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
        return (SET_ERROR(EINVAL));
    }
    uint64_t val;
    error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
        DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
    if (error != 0 || drrb->drr_toguid != val) {
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
        return (SET_ERROR(EINVAL));
    }

    /*
     * Check if the receive is still running. If so, it will be owned.
     * Note that nothing else can own the dataset (e.g. after the receive
     * fails) because it will be marked inconsistent.
     */
    if (dsl_dataset_has_owner(ds)) {
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
        return (SET_ERROR(EBUSY));
    }

    /* There should not be any snapshots of this fs yet. */
    if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
        return (SET_ERROR(EINVAL));
    }

    /*
     * Note: resume point will be checked when we process the first WRITE
     * record.
     */

    /* check that the origin matches */
    val = 0;
    (void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
        DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
    if (drrb->drr_fromguid != val) {
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
        return (SET_ERROR(EINVAL));
    }

    if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
        drc->drc_fromsnapobj = ds->ds_prev->ds_object;

    /*
     * If we're resuming, and the send is redacted, then the original send
     * must have been redacted, and must have been redacted with respect to
     * the same snapshots.
     */
    if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
        uint64_t num_ds_redact_snaps;
        uint64_t *ds_redact_snaps;

        uint_t num_stream_redact_snaps;
        uint64_t *stream_redact_snaps;

        if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
            BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
            &num_stream_redact_snaps) != 0) {
            dsl_dataset_rele_flags(ds, dsflags, FTAG);
            return (SET_ERROR(EINVAL));
        }

        if (!dsl_dataset_get_uint64_array_feature(ds,
            SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
            &ds_redact_snaps)) {
            dsl_dataset_rele_flags(ds, dsflags, FTAG);
            return (SET_ERROR(EINVAL));
        }

        for (int i = 0; i < num_ds_redact_snaps; i++) {
            if (!redact_snaps_contains(ds_redact_snaps,
                num_ds_redact_snaps, stream_redact_snaps[i])) {
                dsl_dataset_rele_flags(ds, dsflags, FTAG);
                return (SET_ERROR(EINVAL));
            }
        }
    }

    error = recv_check_large_blocks(ds, drc->drc_featureflags);
    if (error != 0) {
        dsl_dataset_rele_flags(ds, dsflags, FTAG);
        return (error);
    }

    dsl_dataset_rele_flags(ds, dsflags, FTAG);
    return (0);
}

static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
    dmu_recv_begin_arg_t *drba = arg;
    dsl_pool_t *dp = dmu_tx_pool(tx);
    const char *tofs = drba->drba_cookie->drc_tofs;
    uint64_t featureflags = drba->drba_cookie->drc_featureflags;
    dsl_dataset_t *ds;
    ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
    /* 6 extra bytes for /%recv */
    char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

    (void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
        recv_clone_name);

    if (featureflags & DMU_BACKUP_FEATURE_RAW) {
        drba->drba_cookie->drc_raw = B_TRUE;
    } else {
        dsflags |= DS_HOLD_FLAG_DECRYPT;
    }

    if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
        != 0) {
        /* %recv does not exist; continue in tofs */
        VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
            &ds));
        drba->drba_cookie->drc_newfs = B_TRUE;
    }

    ASSERT(DS_IS_INCONSISTENT(ds));
    rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
    ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
        drba->drba_cookie->drc_raw);
    rrw_exit(&ds->ds_bp_rwlock, FTAG);

    drba->drba_cookie->drc_ds = ds;
    VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
    drba->drba_cookie->drc_should_save = B_TRUE;

    spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(const char *tofs, const char *tosnap,
    dmu_replay_record_t *drr_begin, boolean_t force, boolean_t heal,
    boolean_t resumable, nvlist_t *localprops, nvlist_t *hidden_args,
    const char *origin, dmu_recv_cookie_t *drc, zfs_file_t *fp,
    offset_t *voffp)
{
    dmu_recv_begin_arg_t drba = { 0 };
    int err = 0;

    memset(drc, 0, sizeof (dmu_recv_cookie_t));
    drc->drc_drr_begin = drr_begin;
    drc->drc_drrb = &drr_begin->drr_u.drr_begin;
    drc->drc_tosnap = tosnap;
    drc->drc_tofs = tofs;
    drc->drc_force = force;
    drc->drc_heal = heal;
    drc->drc_resumable = resumable;
    drc->drc_cred = CRED();
    drc->drc_proc = curproc;
    drc->drc_clone = (origin != NULL);

    if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
        drc->drc_byteswap = B_TRUE;
        (void) fletcher_4_incremental_byteswap(drr_begin,
            sizeof (dmu_replay_record_t), &drc->drc_cksum);
        byteswap_record(drr_begin);
    } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
        (void) fletcher_4_incremental_native(drr_begin,
            sizeof (dmu_replay_record_t), &drc->drc_cksum);
    } else {
        return (SET_ERROR(EINVAL));
    }

    drc->drc_fp = fp;
    drc->drc_voff = *voffp;
    drc->drc_featureflags =
        DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

    uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;

    /*
     * Since OpenZFS 2.0.0, we have enforced a 64MB limit in userspace
     * configurable via ZFS_SENDRECV_MAX_NVLIST. We enforce 256MB as a hard
     * upper limit. Systems with less than 1GB of RAM will see a lower
     * limit from `arc_all_memory() / 4`.
     */
    if (payloadlen > (MIN((1U << 28), arc_all_memory() / 4)))
        return (SET_ERROR(E2BIG));

    if (payloadlen != 0) {
        void *payload = vmem_alloc(payloadlen, KM_SLEEP);
        /*
         * For compatibility with recursive send streams, we don't do
         * this here if the stream could be part of a package. Instead,
         * we'll do it in dmu_recv_stream. If we pull the next header
         * too early, and it's the END record, we break the `recv_skip`
         * logic.
         */
        err = receive_read_payload_and_next_header(drc, payloadlen,
            payload);
        if (err != 0) {
            vmem_free(payload, payloadlen);
            return (err);
        }
        err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
            KM_SLEEP);
        vmem_free(payload, payloadlen);
        if (err != 0) {
            kmem_free(drc->drc_next_rrd,
                sizeof (*drc->drc_next_rrd));
            return (err);
        }
    }

    if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
        drc->drc_spill = B_TRUE;

    drba.drba_origin = origin;
    drba.drba_cookie = drc;
    drba.drba_cred = CRED();
    drba.drba_proc = curproc;

    if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
        err = dsl_sync_task(tofs,
            dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
            &drba, 5, ZFS_SPACE_CHECK_NORMAL);
    } else {
        /*
         * For non-raw, non-incremental, non-resuming receives the
         * user can specify encryption parameters on the command line
         * with "zfs recv -o". For these receives we create a dcp and
         * pass it to the sync task. Creating the dcp will implicitly
         * remove the encryption params from the localprops nvlist,
         * which avoids errors when trying to set these normally
         * read-only properties. Any other kind of receive that
         * attempts to set these properties will fail as a result.
         */
        if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
            DMU_BACKUP_FEATURE_RAW) == 0 &&
            origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
            err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
                localprops, hidden_args, &drba.drba_dcp);
        }

        if (err == 0) {
            err = dsl_sync_task(tofs,
                dmu_recv_begin_check, dmu_recv_begin_sync,
                &drba, 5, ZFS_SPACE_CHECK_NORMAL);
            dsl_crypto_params_free(drba.drba_dcp, !!err);
        }
    }

    if (err != 0) {
        kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
        nvlist_free(drc->drc_begin_nvl);
    }
    return (err);
}
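
/*
 * dmu_recv_begin() itself only validates the BEGIN record and runs the
 * appropriate check/sync pair as a dsl_sync_task(); the record payload (an
 * nvlist) is read here so the sync task can inspect e.g. the
 * BEGINNV_REDACT_* arrays, while the bulk of the stream is consumed later by
 * dmu_recv_stream().
 */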

/*
 * Holds data needed for corrective recv callback
 */
typedef struct cr_cb_data {
    uint64_t size;
    zbookmark_phys_t zb;
    spa_t *spa;
} cr_cb_data_t;

static void
corrective_read_done(zio_t *zio)
{
    cr_cb_data_t *data = zio->io_private;
    /* Corruption corrected; update error log if needed */
    if (zio->io_error == 0) {
        spa_remove_error(data->spa, &data->zb,
            BP_GET_LOGICAL_BIRTH(zio->io_bp));
    }
    kmem_free(data, sizeof (cr_cb_data_t));
    abd_free(zio->io_abd);
}
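
/*
 * corrective_read_done() is the completion callback for the verification
 * read issued at the end of do_corrective_recv(); a successful re-read of
 * the healed block also clears the block's entry in the persistent error
 * log via spa_remove_error().
 */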

/*
 * zio_rewrite the data pointed to by bp with the data from the rrd's abd.
 */
static int
do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw,
    struct receive_record_arg *rrd, blkptr_t *bp)
{
    int err;
    zio_t *io;
    zbookmark_phys_t zb;
    dnode_t *dn;
    abd_t *abd = rrd->abd;
    zio_cksum_t bp_cksum = bp->blk_cksum;
    zio_flag_t flags = ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_RETRY |
        ZIO_FLAG_CANFAIL;

    if (rwa->raw)
        flags |= ZIO_FLAG_RAW;

    err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn);
    if (err != 0)
        return (err);
    SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0,
        dbuf_whichblock(dn, 0, drrw->drr_offset));
    dnode_rele(dn, FTAG);

    if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) {
        /* Decompress the stream data */
        abd_t *dabd = abd_alloc_linear(
            drrw->drr_logical_size, B_FALSE);
        err = zio_decompress_data(drrw->drr_compressiontype,
            abd, dabd, abd_get_size(abd),
            abd_get_size(dabd), NULL);
        if (err != 0) {
            abd_free(dabd);
            return (err);
        }
        /* Swap in the newly decompressed data into the abd */
        abd_free(abd);
        abd = dabd;
    }

    if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
        /* Recompress the data */
        abd_t *cabd = abd_alloc_linear(BP_GET_PSIZE(bp),
            B_FALSE);
        uint64_t csize = zio_compress_data(BP_GET_COMPRESS(bp),
            abd, &cabd, abd_get_size(abd), BP_GET_PSIZE(bp),
            rwa->os->os_complevel);
        abd_zero_off(cabd, csize, BP_GET_PSIZE(bp) - csize);
        /* Swap in newly compressed data into the abd */
        abd_free(abd);
        abd = cabd;
        flags |= ZIO_FLAG_RAW_COMPRESS;
    }

    /*
     * The stream is not encrypted but the data on-disk is.
     * We need to re-encrypt the buf using the same
     * encryption type, salt, iv, and mac that was used to encrypt
     * the block previously.
     */
    if (!rwa->raw && BP_USES_CRYPT(bp)) {
        dsl_dataset_t *ds;
        dsl_crypto_key_t *dck = NULL;
        uint8_t salt[ZIO_DATA_SALT_LEN];
        uint8_t iv[ZIO_DATA_IV_LEN];
        uint8_t mac[ZIO_DATA_MAC_LEN];
        boolean_t no_crypt = B_FALSE;
        dsl_pool_t *dp = dmu_objset_pool(rwa->os);
        abd_t *eabd = abd_alloc_linear(BP_GET_PSIZE(bp), B_FALSE);

        zio_crypt_decode_params_bp(bp, salt, iv);
        zio_crypt_decode_mac_bp(bp, mac);

        dsl_pool_config_enter(dp, FTAG);
        err = dsl_dataset_hold_flags(dp, rwa->tofs,
            DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
        if (err != 0) {
            dsl_pool_config_exit(dp, FTAG);
            abd_free(eabd);
            return (SET_ERROR(EACCES));
        }

        /* Look up the key from the spa's keystore */
        err = spa_keystore_lookup_key(rwa->os->os_spa,
            zb.zb_objset, FTAG, &dck);
        if (err != 0) {
            dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT,
                FTAG);
            dsl_pool_config_exit(dp, FTAG);
            abd_free(eabd);
            return (SET_ERROR(EACCES));
        }

        err = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
            BP_GET_TYPE(bp), BP_SHOULD_BYTESWAP(bp), salt, iv,
            mac, abd_get_size(abd), abd, eabd, &no_crypt);

        spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG);
        dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
        dsl_pool_config_exit(dp, FTAG);

        if (err != 0) {
            abd_free(eabd);
            return (err);
        }
        /* Swap in the newly encrypted data into the abd */
        abd_free(abd);
        abd = eabd;

        /*
         * We want to prevent zio_rewrite() from trying to
         * encrypt the data again
         */
        flags |= ZIO_FLAG_RAW_ENCRYPT;
    }

    io = zio_rewrite(NULL, rwa->os->os_spa, BP_GET_LOGICAL_BIRTH(bp), bp,
        abd, BP_GET_PSIZE(bp), NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, flags,
        &zb);

    ASSERT(abd_get_size(abd) == BP_GET_LSIZE(bp) ||
        abd_get_size(abd) == BP_GET_PSIZE(bp));

    /* compute new bp checksum value and make sure it matches the old one */
    zio_checksum_compute(io, BP_GET_CHECKSUM(bp), abd, abd_get_size(abd));
    if (!ZIO_CHECKSUM_EQUAL(bp_cksum, io->io_bp->blk_cksum)) {
        zio_destroy(io);
        if (zfs_recv_best_effort_corrective != 0)
            return (0);
        return (SET_ERROR(ECKSUM));
    }

    /* Correct the corruption in place */
    err = zio_wait(io);
    if (err == 0) {
        cr_cb_data_t *cb_data =
            kmem_alloc(sizeof (cr_cb_data_t), KM_SLEEP);
        cb_data->spa = rwa->os->os_spa;
        cb_data->size = drrw->drr_logical_size;
        cb_data->zb = zb;
        /* Test if healing worked by re-reading the bp */
        err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp,
            abd_alloc_for_io(drrw->drr_logical_size, B_FALSE),
            drrw->drr_logical_size, corrective_read_done,
            cb_data, ZIO_PRIORITY_ASYNC_READ, flags, NULL));
    }
    if (err != 0 && zfs_recv_best_effort_corrective != 0)
        err = 0;

    return (err);
}

static int
receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
{
    int done = 0;

    /*
     * The code doesn't rely on this (lengths being multiples of 8). See
     * comment in dump_bytes.
     */
    ASSERT(len % 8 == 0 ||
        (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

    while (done < len) {
        ssize_t resid = len - done;
        zfs_file_t *fp = drc->drc_fp;
        int err = zfs_file_read(fp, (char *)buf + done,
            len - done, &resid);
        if (err == 0 && resid == len - done) {
            /*
             * Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
             * that the receive was interrupted and can
             * potentially be resumed.
             */
            err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
        }
        drc->drc_voff += len - done - resid;
        done = len - resid;
        if (err != 0)
            return (err);
    }

    drc->drc_bytes_read += len;

    ASSERT3U(done, ==, len);
    return (0);
}
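
/*
 * receive_read() loops until the requested length has been consumed; a read
 * that returns success but no data at all is translated into
 * ZFS_ERR_STREAM_TRUNCATED so the caller can treat the stream as interrupted
 * (and therefore potentially resumable) rather than corrupt.
 */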

static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
    if (bonus_type == DMU_OT_SA) {
        return (1);
    } else {
        return (1 +
            ((DN_OLD_MAX_BONUSLEN -
            MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
    }
}
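
/*
 * Worked example for deduce_nblkptr(), assuming the classic 512-byte dnode
 * layout where DN_OLD_MAX_BONUSLEN is 320 and SPA_BLKPTRSHIFT is 7
 * (128-byte block pointers): a 64-byte bonus buffer leaves
 * (320 - 64) >> 7 == 2 spare slots, so the dnode gets 1 + 2 == 3 block
 * pointers, while a full 320-byte bonus leaves only the single mandatory
 * block pointer.  An SA bonus always implies exactly one block pointer.
 */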

static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
    int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

    if (!rwa->resumable)
        return;

    /*
     * We use ds_resume_bytes[] != 0 to indicate that we need to
     * update this on disk, so it must not be 0.
     */
    ASSERT(rwa->bytes_read != 0);

    /*
     * We only resume from write records, which have a valid
     * (non-meta-dnode) object number.
     */
    ASSERT(object != 0);

    /*
     * For resuming to work correctly, we must receive records in order,
     * sorted by object,offset. This is checked by the callers, but
     * assert it here for good measure.
     */
    ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
    ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
        offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
    ASSERT3U(rwa->bytes_read, >=,
        rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

    rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
    rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
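
/*
 * The ds_resume_object/offset/bytes arrays updated in save_resume_state()
 * are per-txg staging that ultimately backs the DS_FIELD_RESUME_* ZAP
 * entries created in dmu_recv_begin_sync(); a later resumed receive (and the
 * receive_resume_token) is rebuilt from that state.
 */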

static int
receive_object_is_same_generation(objset_t *os, uint64_t object,
    dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
    const void *new_bonus, boolean_t *samegenp)
{
    zfs_file_info_t zoi;
    int err;

    dmu_buf_t *old_bonus_dbuf;
    err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
    if (err != 0)
        return (err);
    err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
        &zoi);
    dmu_buf_rele(old_bonus_dbuf, FTAG);
    if (err != 0)
        return (err);
    uint64_t old_gen = zoi.zfi_generation;

    err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
    if (err != 0)
        return (err);
    uint64_t new_gen = zoi.zfi_generation;

    *samegenp = (old_gen == new_gen);
    return (0);
}
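
/*
 * receive_object_is_same_generation() lets receive_handle_existing_object()
 * distinguish "same file whose block size grew because --large-block was
 * turned on" from "object number reused for a brand new file", which decides
 * whether the existing contents may be kept or must be freed.
 */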

static int
receive_handle_existing_object(const struct receive_writer_arg *rwa,
    const struct drr_object *drro, const dmu_object_info_t *doi,
    const void *bonus_data,
    uint64_t *object_to_hold, uint32_t *new_blksz)
{
    uint32_t indblksz = drro->drr_indblkshift ?
        1ULL << drro->drr_indblkshift : 0;
    int nblkptr = deduce_nblkptr(drro->drr_bonustype,
        drro->drr_bonuslen);
    uint8_t dn_slots = drro->drr_dn_slots != 0 ?
        drro->drr_dn_slots : DNODE_MIN_SLOTS;
    boolean_t do_free_range = B_FALSE;
    int err;

    *object_to_hold = drro->drr_object;

    /* nblkptr should be bounded by the bonus size and type */
    if (rwa->raw && nblkptr != drro->drr_nblkptr)
        return (SET_ERROR(EINVAL));

    /*
     * After the previous send stream, the sending system may
     * have freed this object, and then happened to re-allocate
     * this object number in a later txg. In this case, we are
     * receiving a different logical file, and the block size may
     * appear to be different. i.e. we may have a different
     * block size for this object than what the send stream says.
     * In this case we need to remove the object's contents,
     * so that its structure can be changed and then its contents
     * entirely replaced by subsequent WRITE records.
     *
     * If this is a -L (--large-block) incremental stream, and
     * the previous stream was not -L, the block size may appear
     * to increase. i.e. we may have a smaller block size for
     * this object than what the send stream says. In this case
     * we need to keep the object's contents and block size
     * intact, so that we don't lose parts of the object's
     * contents that are not changed by this incremental send
     * stream.
     *
     * We can distinguish between the two above cases by using
     * the ZPL's generation number (see
     * receive_object_is_same_generation()). However, we only
     * want to rely on the generation number when absolutely
     * necessary, because with raw receives, the generation is
     * encrypted. We also want to minimize dependence on the
     * ZPL, so that other types of datasets can also be received
     * (e.g. ZVOLs, although note that ZVOLS currently do not
     * reallocate their objects or change their structure).
     * Therefore, we check a number of different cases where we
     * know it is safe to discard the object's contents, before
     * using the ZPL's generation number to make the above
     * distinction.
     */
    if (drro->drr_blksz != doi->doi_data_block_size) {
        if (rwa->raw) {
            /*
             * RAW streams always have large blocks, so
             * we are sure that the data is not needed
             * due to changing --large-block to be on.
             * Which is fortunate since the bonus buffer
             * (which contains the ZPL generation) is
             * encrypted, and the key might not be
             * loaded.
             */
            do_free_range = B_TRUE;
        } else if (rwa->full) {
            /*
             * This is a full send stream, so it always
             * replaces what we have. Even if the
             * generation numbers happen to match, this
             * can not actually be the same logical file.
             * This is relevant when receiving a full
             * send as a clone.
             */
            do_free_range = B_TRUE;
        } else if (drro->drr_type !=
            DMU_OT_PLAIN_FILE_CONTENTS ||
            doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
            /*
             * PLAIN_FILE_CONTENTS are the only type of
             * objects that have ever been stored with
             * large blocks, so we don't need the special
             * logic below. ZAP blocks can shrink (when
             * there's only one block), so we don't want
             * to hit the error below about block size
             * only increasing.
             */
            do_free_range = B_TRUE;
        } else if (doi->doi_max_offset <=
            doi->doi_data_block_size) {
            /*
             * There is only one block. We can free it,
             * because its contents will be replaced by a
             * WRITE record. This can not be the no-L ->
             * -L case, because the no-L case would have
             * resulted in multiple blocks. If we
             * supported -L -> no-L, it would not be safe
             * to free the file's contents. Fortunately,
             * that is not allowed (see
             * recv_check_large_blocks()).
             */
            do_free_range = B_TRUE;
        } else {
            boolean_t is_same_gen;
            err = receive_object_is_same_generation(rwa->os,
                drro->drr_object, doi->doi_bonus_type,
                drro->drr_bonustype, bonus_data, &is_same_gen);
            if (err != 0)
                return (SET_ERROR(EINVAL));

            if (is_same_gen) {
                /*
                 * This is the same logical file, and
                 * the block size must be increasing.
                 * It could only decrease if
                 * --large-block was changed to be
                 * off, which is checked in
                 * recv_check_large_blocks().
                 */
                if (drro->drr_blksz <=
                    doi->doi_data_block_size)
                    return (SET_ERROR(EINVAL));
                /*
                 * We keep the existing blocksize and
                 * contents.
                 */
                *new_blksz =
                    doi->doi_data_block_size;
            } else {
                do_free_range = B_TRUE;
            }
        }
    }

    /* nblkptr can only decrease if the object was reallocated */
    if (nblkptr < doi->doi_nblkptr)
        do_free_range = B_TRUE;

    /* number of slots can only change on reallocation */
    if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
        do_free_range = B_TRUE;

    /*
     * For raw sends we also check a few other fields to
     * ensure we are preserving the objset structure exactly
     * as it was on the receive side:
     * - A changed indirect block size
     * - A smaller nlevels
     */
    if (rwa->raw) {
        if (indblksz != doi->doi_metadata_block_size)
            do_free_range = B_TRUE;
        if (drro->drr_nlevels < doi->doi_indirection)
            do_free_range = B_TRUE;
    }

    if (do_free_range) {
        err = dmu_free_long_range(rwa->os, drro->drr_object,
            0, DMU_OBJECT_END);
        if (err != 0)
            return (SET_ERROR(EINVAL));
    }

    /*
     * The dmu does not currently support decreasing nlevels or changing
     * indirect block size if there is already one, same as changing the
     * number of dnode slots on an object. For non-raw sends this
     * does not matter and the new object can just use the previous one's
     * parameters. For raw sends, however, the structure of the received
     * dnode (including indirects and dnode slots) must match that of the
     * send side. Therefore, instead of using dmu_object_reclaim(), we
     * must free the object completely and call dmu_object_claim_dnsize()
     * instead.
     */
    if ((rwa->raw && ((doi->doi_indirection > 1 &&
        indblksz != doi->doi_metadata_block_size) ||
        drro->drr_nlevels < doi->doi_indirection)) ||
        dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
        err = dmu_free_long_object(rwa->os, drro->drr_object);
        if (err != 0)
            return (SET_ERROR(EINVAL));

        txg_wait_synced(dmu_objset_pool(rwa->os), 0);
        *object_to_hold = DMU_NEW_OBJECT;
    }

    /*
     * For raw receives, free everything beyond the new incoming
     * maxblkid. Normally this would be done with a DRR_FREE
     * record that would come after this DRR_OBJECT record is
     * processed. However, for raw receives we manually set the
     * maxblkid from the drr_maxblkid and so we must first free
     * everything above that blkid to ensure the DMU is always
     * consistent with itself. We will never free the first block
     * of the object here because a maxblkid of 0 could indicate
     * an object with a single block or one with no blocks. This
     * free may be skipped when dmu_free_long_range() was called
     * above since it covers the entire object's contents.
     */
    if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
        err = dmu_free_long_range(rwa->os, drro->drr_object,
            (drro->drr_maxblkid + 1) * doi->doi_data_block_size,
            DMU_OBJECT_END);
        if (err != 0)
            return (SET_ERROR(EINVAL));
    }

    return (0);
}
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	int err;
	uint32_t new_blksz = drro->drr_blksz;
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen >
	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
	    dn_slots >
	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
		return (SET_ERROR(EINVAL));
	}

	if (rwa->raw) {
		/*
		 * We should have received a DRR_OBJECT_RANGE record
		 * containing this block and stored it in rwa.
		 */
		if (drro->drr_object < rwa->or_firstobj ||
		    drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
		    drro->drr_raw_bonuslen < drro->drr_bonuslen ||
		    drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
		    drro->drr_nlevels > DN_MAX_LEVELS ||
		    drro->drr_nblkptr > DN_MAX_NBLKPTR ||
		    DN_SLOTS_TO_BONUSLEN(dn_slots) <
		    drro->drr_raw_bonuslen)
			return (SET_ERROR(EINVAL));
	} else {
		/*
		 * The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN
		 * record indicates this by setting DRR_FLAG_SPILL_BLOCK.
		 */
		if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
		    (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
			return (SET_ERROR(EINVAL));
		}

		if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
		    drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
			return (SET_ERROR(EINVAL));
		}
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);

	if (err != 0 && err != ENOENT && err != EEXIST)
		return (SET_ERROR(EINVAL));

	if (drro->drr_object > rwa->max_object)
		rwa->max_object = drro->drr_object;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance. We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 * Raw receives will also check that the indirect structure of the
	 * dnode hasn't changed.
	 */
	uint64_t object_to_hold;
	if (err == 0) {
		err = receive_handle_existing_object(rwa, drro, &doi, data,
		    &object_to_hold, &new_blksz);
		if (err != 0)
			return (err);
	} else if (err == EEXIST) {
		/*
		 * The object requested is currently an interior slot of a
		 * multi-slot dnode. This will be resolved when the next txg
		 * is synced out, since the send stream will have told us
		 * to free this slot when we freed the associated dnode
		 * earlier in the stream.
		 */
		txg_wait_synced(dmu_objset_pool(rwa->os), 0);

		if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
			return (SET_ERROR(EINVAL));

		/* object was freed and we are about to allocate a new one */
		object_to_hold = DMU_NEW_OBJECT;
	} else {
		/*
		 * If the only record in this range so far was DRR_FREEOBJECTS
		 * with at least one actually freed object, it's possible that
		 * the block will now be converted to a hole. We need to wait
		 * for the txg to sync to prevent races.
		 */
		if (rwa->or_need_sync == ORNS_YES)
			txg_wait_synced(dmu_objset_pool(rwa->os), 0);

		/* object is free and we are about to allocate a new one */
		object_to_hold = DMU_NEW_OBJECT;
	}

	/* Only relevant for the first object in the range */
	rwa->or_need_sync = ORNS_NO;

	/*
	 * If this is a multi-slot dnode there is a chance that this
	 * object will expand into a slot that is already used by
	 * another object from the previous snapshot. We must free
	 * these objects before we attempt to allocate the new dnode.
	 */
	if (dn_slots > 1) {
		boolean_t need_sync = B_FALSE;

		for (uint64_t slot = drro->drr_object + 1;
		    slot < drro->drr_object + dn_slots;
		    slot++) {
			dmu_object_info_t slot_doi;

			err = dmu_object_info(rwa->os, slot, &slot_doi);
			if (err == ENOENT || err == EEXIST)
				continue;
			else if (err != 0)
				return (err);

			err = dmu_free_long_object(rwa->os, slot);
			if (err != 0)
				return (err);

			need_sync = B_TRUE;
		}

		if (need_sync)
			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object_to_hold);
	dmu_tx_hold_write(tx, object_to_hold, 0, 0);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object_to_hold == DMU_NEW_OBJECT) {
		/* Currently free, wants to be allocated */
		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, new_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    new_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* Currently allocated, but with different properties */
		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, new_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, rwa->spill ?
		    DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
	} else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
		/*
		 * Currently allocated, the existing version of this object
		 * may reference a spill block that is no longer allocated
		 * at the source and needs to be freed.
		 */
		err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
	}

	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	if (rwa->or_crypt_params_present) {
		/*
		 * Set the crypt params for the buffer associated with this
		 * range of dnodes. This causes the blkptr_t to have the
		 * same crypt params (byteorder, salt, iv, mac) as on the
		 * sending side.
		 *
		 * Since we are committing this tx now, it is possible for
		 * the dnode block to end up on-disk with the incorrect MAC,
		 * if subsequent objects in this block are received in a
		 * different txg. However, since the dataset is marked as
		 * inconsistent, no code paths will do a non-raw read (or
		 * decrypt the block / verify the MAC). The receive code and
		 * scrub code can safely do raw reads and verify the
		 * checksum. They don't need to verify the MAC.
		 */
		dmu_buf_t *db = NULL;
		uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;

		err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
		    offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
		if (err != 0) {
			dmu_tx_commit(tx);
			return (SET_ERROR(EINVAL));
		}

		dmu_buf_set_crypt_params(db, rwa->or_byteorder,
		    rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);

		dmu_buf_rele(db, FTAG);

		rwa->or_crypt_params_present = B_FALSE;
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	/* handle more restrictive dnode structuring for raw recvs */
	if (rwa->raw) {
		/*
		 * Set the indirect block size, block shift, nlevels.
		 * This will not fail because we ensured all of the
		 * blocks were freed earlier if this is a new object.
		 * For non-new objects block size and indirect block
		 * shift cannot change and nlevels can only increase.
		 */
		ASSERT3U(new_blksz, ==, drro->drr_blksz);
		VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
		    drro->drr_blksz, drro->drr_indblkshift, tx));
		VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
		    drro->drr_nlevels, tx));

		/*
		 * Set the maxblkid. This will always succeed because
		 * we freed all blocks beyond the new maxblkid above.
		 */
		VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
		    drro->drr_maxblkid, tx));
	}

	if (data != NULL) {
		dmu_buf_t *db;
		dnode_t *dn;
		uint32_t flags = DMU_READ_NO_PREFETCH;

		if (rwa->raw)
			flags |= DMU_READ_NO_DECRYPT;

		VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
		VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));

		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));

		/*
		 * Raw bonus buffers have their byteorder determined by the
		 * DRR_OBJECT_RANGE record.
		 */
		if (rwa->byteswap && !rwa->raw) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    DRR_OBJECT_PAYLOAD_SIZE(drro));
		}
		dmu_buf_rele(db, FTAG);
		dnode_rele(dn, FTAG);
	}

	/*
	 * If the receive fails, we want the resume stream to start with the
	 * same record that we last successfully received. There is no way to
	 * request resume from the object record, but we can benefit from the
	 * fact that sender always sends object record before anything else,
	 * after which it will "resend" data at offset 0 and resume normally.
	 */
	save_resume_state(rwa, drro->drr_object, 0, tx);

	dmu_tx_commit(tx);

	return (0);
}
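/*
 * Note on the flow above: all dnode-level changes for a DRR_OBJECT record
 * (claim/reclaim, the checksum and compression properties, the raw-only
 * blocksize/nlevels/maxblkid updates, and the bonus buffer copy) are applied
 * under a single transaction, and save_resume_state() records (object, 0) so
 * that an interrupted receive resumes at this object record.
 */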
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
	    obj < DN_MAX_OBJECT && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		dmu_object_info_t doi;
		int err;

		err = dmu_object_info(rwa->os, obj, &doi);
		if (err == ENOENT)
			continue;
		else if (err != 0)
			return (err);

		err = dmu_free_long_object(rwa->os, obj);
		if (err != 0)
			return (err);

		if (rwa->or_need_sync == ORNS_MAYBE)
			rwa->or_need_sync = ORNS_YES;
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}
/*
 * Note: if this fails, the caller will clean up any records left on the
 * rwa->write_batch list.
 */
static int
flush_write_batch_impl(struct receive_writer_arg *rwa)
{
	dnode_t *dn;
	int err;

	if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
		return (SET_ERROR(EINVAL));

	struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
	struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;

	struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
	struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;

	ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
	ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);

	dmu_tx_t *tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
	    last_drrw->drr_offset - first_drrw->drr_offset +
	    last_drrw->drr_logical_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		dnode_rele(dn, FTAG);
		return (err);
	}

	struct receive_record_arg *rrd;
	while ((rrd = list_head(&rwa->write_batch)) != NULL) {
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		abd_t *abd = rrd->abd;

		ASSERT3U(drrw->drr_object, ==, rwa->last_object);

		if (drrw->drr_logical_size != dn->dn_datablksz) {
			/*
			 * The WRITE record is larger than the object's block
			 * size. We must be receiving an incremental
			 * large-block stream into a dataset that previously did
			 * a non-large-block receive. Lightweight writes must
			 * be exactly one block, so we need to decompress the
			 * data (if compressed) and do a normal dmu_write().
			 */
			ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
			if (DRR_WRITE_COMPRESSED(drrw)) {
				abd_t *decomp_abd =
				    abd_alloc_linear(drrw->drr_logical_size,
				    B_FALSE);

				err = zio_decompress_data(
				    drrw->drr_compressiontype,
				    abd, abd_to_buf(decomp_abd),
				    abd_get_size(abd),
				    abd_get_size(decomp_abd), NULL);

				if (err == 0) {
					dmu_write_by_dnode(dn,
					    drrw->drr_offset,
					    drrw->drr_logical_size,
					    abd_to_buf(decomp_abd), tx);
				}
				abd_free(decomp_abd);
			} else {
				dmu_write_by_dnode(dn,
				    drrw->drr_offset,
				    drrw->drr_logical_size,
				    abd_to_buf(abd), tx);
			}
			if (err == 0)
				abd_free(abd);
		} else {
			zio_prop_t zp = {0};
			dmu_write_policy(rwa->os, dn, 0, 0, &zp);

			zio_flag_t zio_flags = 0;

			if (rwa->raw) {
				zp.zp_encrypt = B_TRUE;
				zp.zp_compress = drrw->drr_compressiontype;
				zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
				    !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
				    rwa->byteswap;
				memcpy(zp.zp_salt, drrw->drr_salt,
				    ZIO_DATA_SALT_LEN);
				memcpy(zp.zp_iv, drrw->drr_iv,
				    ZIO_DATA_IV_LEN);
				memcpy(zp.zp_mac, drrw->drr_mac,
				    ZIO_DATA_MAC_LEN);
				if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
					zp.zp_nopwrite = B_FALSE;
					zp.zp_copies = MIN(zp.zp_copies,
					    SPA_DVAS_PER_BP - 1);
				}
				zio_flags |= ZIO_FLAG_RAW;
			} else if (DRR_WRITE_COMPRESSED(drrw)) {
				ASSERT3U(drrw->drr_compressed_size, >, 0);
				ASSERT3U(drrw->drr_logical_size, >=,
				    drrw->drr_compressed_size);
				zp.zp_compress = drrw->drr_compressiontype;
				zio_flags |= ZIO_FLAG_RAW_COMPRESS;
			} else if (rwa->byteswap) {
				/*
				 * Note: compressed blocks never need to be
				 * byteswapped, because WRITE records for
				 * metadata blocks are never compressed. The
				 * exception is raw streams, which are written
				 * in the original byteorder, and the byteorder
				 * bit is preserved in the BP by setting
				 * zp_byteorder above.
				 */
				dmu_object_byteswap_t byteswap =
				    DMU_OT_BYTESWAP(drrw->drr_type);
				dmu_ot_byteswap[byteswap].ob_func(
				    abd_to_buf(abd),
				    DRR_WRITE_PAYLOAD_SIZE(drrw));
			}

			/*
			 * Since this data can't be read until the receive
			 * completes, we can do a "lightweight" write for
			 * improved performance.
			 */
			err = dmu_lightweight_write_by_dnode(dn,
			    drrw->drr_offset, abd, &zp, zio_flags, tx);
		}

		if (err != 0) {
			/*
			 * This rrd is left on the list, so the caller will
			 * free it (and the abd).
			 */
			break;
		}

		/*
		 * Note: If the receive fails, we want the resume stream to
		 * start with the same record that we last successfully
		 * received (as opposed to the next record), so that we can
		 * verify that we are resuming from the correct location.
		 */
		save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);

		list_remove(&rwa->write_batch, rrd);
		kmem_free(rrd, sizeof (*rrd));
	}

	dmu_tx_commit(tx);
	dnode_rele(dn, FTAG);
	return (err);
}
static int
flush_write_batch(struct receive_writer_arg *rwa)
{
	if (list_is_empty(&rwa->write_batch))
		return (0);
	int err = rwa->err;
	if (err == 0)
		err = flush_write_batch_impl(rwa);
	if (err != 0) {
		struct receive_record_arg *rrd;
		while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
			abd_free(rrd->abd);
			kmem_free(rrd, sizeof (*rrd));
		}
	}
	ASSERT(list_is_empty(&rwa->write_batch));
	return (err);
}
noinline static int
receive_process_write_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err = 0;

	ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
	struct drr_write *drrw = &rrd->header.drr_u.drr_write;

	if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	if (rwa->heal) {
		blkptr_t *bp;
		dmu_buf_t *dbp;
		int flags = DB_RF_CANFAIL;

		if (rwa->raw)
			flags |= DB_RF_NO_DECRYPT;

		if (rwa->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drrw->drr_type);
			dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(rrd->abd),
			    DRR_WRITE_PAYLOAD_SIZE(drrw));
		}

		err = dmu_buf_hold_noread(rwa->os, drrw->drr_object,
		    drrw->drr_offset, FTAG, &dbp);
		if (err != 0)
			return (err);

		/* Try to read the object to see if it needs healing */
		err = dbuf_read((dmu_buf_impl_t *)dbp, NULL, flags);
		/*
		 * We only try to heal when dbuf_read() returns a ECKSUMs.
		 * Other errors (even EIO) get returned to caller.
		 * EIO indicates that the device is not present/accessible,
		 * so writing to it will likely fail.
		 * If the block is healthy, we don't want to overwrite it
		 * unnecessarily.
		 */
		if (err != ECKSUM) {
			dmu_buf_rele(dbp, FTAG);
			return (err);
		}
		/* Make sure the on-disk block and recv record sizes match */
		if (drrw->drr_logical_size != dbp->db_size) {
			err = SET_ERROR(EINVAL);
			dmu_buf_rele(dbp, FTAG);
			return (err);
		}
		/* Get the block pointer for the corrupted block */
		bp = dmu_buf_get_blkptr(dbp);
		err = do_corrective_recv(rwa, drrw, rrd, bp);
		dmu_buf_rele(dbp, FTAG);
		return (err);
	}

	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
	if (drrw->drr_object < rwa->last_object ||
	    (drrw->drr_object == rwa->last_object &&
	    drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}

	struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
	struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
	uint64_t batch_size =
	    MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
	if (first_rrd != NULL &&
	    (drrw->drr_object != first_drrw->drr_object ||
	    drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
		err = flush_write_batch(rwa);
		if (err != 0)
			return (err);
	}

	rwa->last_object = drrw->drr_object;
	rwa->last_offset = drrw->drr_offset;

	if (rwa->last_object > rwa->max_object)
		rwa->max_object = rwa->last_object;

	list_insert_tail(&rwa->write_batch, rrd);
	/*
	 * Return EAGAIN to indicate that we will use this rrd again,
	 * so the caller should not free it.
	 */
	return (EAGAIN);
}
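/*
 * Note on batching: WRITE records accumulate on rwa->write_batch and are
 * committed in one transaction by flush_write_batch_impl(). The check above
 * starts a new batch whenever an incoming record targets a different object
 * or lies at least batch_size bytes past the first record in the batch,
 * where batch_size is capped at DMU_MAX_ACCESS / 2 to bound a single tx.
 */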
noinline static int
receive_write_embedded(struct receive_writer_arg *rwa,
    struct drr_write_embedded *drrwe, void *data)
{
	dmu_tx_t *tx;
	int err;

	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));
	if (rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_object > rwa->max_object)
		rwa->max_object = drrwe->drr_object;

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwe->drr_object,
	    drrwe->drr_offset, drrwe->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(rwa->os, drrwe->drr_object,
	    drrwe->drr_offset, data, drrwe->drr_etype,
	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
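/*
 * Note: DRR_WRITE_EMBEDDED payloads are at most BPE_PAYLOAD_SIZE bytes and
 * are stored directly inside the block pointer by dmu_write_embedded().
 * Embedded data is never part of a raw (encrypted) stream, which is why
 * rwa->raw is rejected above.
 */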
noinline static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    abd_t *abd)
{
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	/*
	 * This is an unmodified spill block which was added to the stream
	 * to resolve an issue with incorrectly removing spill blocks. It
	 * should be ignored by current versions of the code which support
	 * the DRR_FLAG_SPILL_BLOCK flag.
	 */
	if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
		abd_free(abd);
		return (0);
	}

	if (rwa->raw) {
		if (!DMU_OT_IS_VALID(drrs->drr_type) ||
		    drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
		    drrs->drr_compressed_size == 0)
			return (SET_ERROR(EINVAL));
	}

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrs->drr_object > rwa->max_object)
		rwa->max_object = drrs->drr_object;

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
	    &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	dmu_tx_t *tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}

	/*
	 * Spill blocks may both grow and shrink. When a change in size
	 * occurs any existing dbuf must be updated to match the logical
	 * size of the provided arc_buf_t.
	 */
	if (db_spill->db_size != drrs->drr_length) {
		dmu_buf_will_fill(db_spill, tx, B_FALSE);
		VERIFY0(dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	}

	arc_buf_t *abuf;
	if (rwa->raw) {
		boolean_t byteorder = ZFS_HOST_BYTEORDER ^
		    !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
		    rwa->byteswap;

		abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
		    drrs->drr_object, byteorder, drrs->drr_salt,
		    drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
		    drrs->drr_compressed_size, drrs->drr_length,
		    drrs->drr_compressiontype, 0);
	} else {
		abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
		    DMU_OT_IS_METADATA(drrs->drr_type),
		    drrs->drr_length);
		if (rwa->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drrs->drr_type);
			dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
			    DRR_SPILL_PAYLOAD_SIZE(drrs));
		}
	}

	memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
	abd_free(abd);
	dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);
	dmu_tx_commit(tx);

	return (0);
}
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrf->drr_object > rwa->max_object)
		rwa->max_object = drrf->drr_object;

	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);

	return (err);
}
noinline static int
receive_object_range(struct receive_writer_arg *rwa,
    struct drr_object_range *drror)
{
	/*
	 * By default, we assume this block is in our native format
	 * (ZFS_HOST_BYTEORDER). We then take into account whether
	 * the send stream is byteswapped (rwa->byteswap). Finally,
	 * we need to byteswap again if this particular block was
	 * in non-native format on the send side.
	 */
	boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
	    !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);

	/*
	 * Since dnode block sizes are constant, we should not need to worry
	 * about making sure that the dnode block size is the same on the
	 * sending and receiving sides for the time being. For non-raw sends,
	 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
	 * record at all). Raw sends require this record type because the
	 * encryption parameters are used to protect an entire block of bonus
	 * buffers. If the size of dnode blocks ever becomes variable,
	 * handling will need to be added to ensure that dnode block sizes
	 * match on the sending and receiving side.
	 */
	if (drror->drr_numslots != DNODES_PER_BLOCK ||
	    P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
	    !rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drror->drr_firstobj > rwa->max_object)
		rwa->max_object = drror->drr_firstobj;

	/*
	 * The DRR_OBJECT_RANGE handling must be deferred to receive_object()
	 * so that the block of dnodes is not written out when it's empty,
	 * and converted to a HOLE BP.
	 */
	rwa->or_crypt_params_present = B_TRUE;
	rwa->or_firstobj = drror->drr_firstobj;
	rwa->or_numslots = drror->drr_numslots;
	memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
	memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
	memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
	rwa->or_byteorder = byteorder;

	rwa->or_need_sync = ORNS_MAYBE;

	return (0);
}
/*
 * Until we have the ability to redact large ranges of data efficiently, we
 * process these records as frees.
 */
noinline static int
receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
{
	struct drr_free drrf = {0};
	drrf.drr_length = drrr->drr_length;
	drrf.drr_object = drrr->drr_object;
	drrf.drr_offset = drrr->drr_offset;
	drrf.drr_toguid = drrr->drr_toguid;
	return (receive_free(rwa, &drrf));
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	dsl_dataset_t *ds = drc->drc_ds;
	ds_hold_flags_t dsflags;

	dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
	/*
	 * Wait for the txg sync before cleaning up the receive. For
	 * resumable receives, this ensures that our resume state has
	 * been written out to disk. For raw receives, this ensures
	 * that the user accounting code will not attempt to do anything
	 * after we stopped receiving the dataset.
	 */
	txg_wait_synced(ds->ds_dir->dd_pool, 0);
	ds->ds_objset->os_raw_receive = B_FALSE;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	if (drc->drc_resumable && drc->drc_should_save &&
	    !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
	} else {
		char name[ZFS_MAX_DATASET_NAME_LEN];
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_name(ds, name);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
		(void) dsl_destroy_head(name);
	}
}
static void
receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
{
	if (drc->drc_byteswap) {
		(void) fletcher_4_incremental_byteswap(buf, len,
		    &drc->drc_cksum);
	} else {
		(void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
	}
}
/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate drc->drc_next_rrd and read the next record's header into
 * drc->drc_next_rrd->header.
 * Verify checksum of payload and next record.
 */
static int
receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
{
	int err;

	if (len != 0) {
		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
		err = receive_read(drc, len, buf);
		if (err != 0)
			return (err);
		receive_cksum(drc, len, buf);

		/* note: rrd is NULL when reading the begin record's payload */
		if (drc->drc_rrd != NULL) {
			drc->drc_rrd->payload = buf;
			drc->drc_rrd->payload_size = len;
			drc->drc_rrd->bytes_read = drc->drc_bytes_read;
		}
	} else {
		ASSERT3P(buf, ==, NULL);
	}

	drc->drc_prev_cksum = drc->drc_cksum;

	drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
	err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
	    &drc->drc_next_rrd->header);
	drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;

	if (err != 0) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		drc->drc_next_rrd = NULL;
		return (err);
	}
	if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		drc->drc_next_rrd = NULL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(drc,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &drc->drc_next_rrd->header);

	zio_cksum_t cksum_orig =
	    drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
	zio_cksum_t *cksump =
	    &drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;

	if (drc->drc_byteswap)
		byteswap_record(&drc->drc_next_rrd->header);

	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
	    !ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		drc->drc_next_rrd = NULL;
		return (SET_ERROR(ECKSUM));
	}

	receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);

	return (0);
}
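/*
 * Note on checksum chaining: drc_cksum is a fletcher-4 checksum accumulated
 * over the entire stream, and each record's embedded checksum covers
 * everything up to (but not including) its own checksum field. drc_prev_cksum
 * is saved before the next header is folded in so that the DRR_END record can
 * be verified against it in receive_read_record().
 */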
/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue prefetches
 * for a given object. We do this for both correctness (in case the blocksize
 * of an object has changed) and performance (if the object doesn't exist, don't
 * needlessly try to issue prefetches). We also trim the list as we go through
 * the stream to prevent it from growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records). As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list. In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
static void
receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
    uint64_t length)
{
	if (!objlist_exists(drc->drc_ignore_objlist, object)) {
		dmu_prefetch(drc->drc_os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}
/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(dmu_recv_cookie_t *drc)
{
	int err;

	switch (drc->drc_rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro =
		    &drc->drc_rrd->header.drr_u.drr_object;
		uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
		void *buf = NULL;
		dmu_object_info_t doi;

		if (size != 0)
			buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(drc, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
		 */
		if (err == ENOENT || err == EEXIST ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			objlist_insert(drc->drc_ignore_objlist,
			    drro->drr_object);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(drc, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
		int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
		abd_t *abd = abd_alloc_linear(size, B_FALSE);
		err = receive_read_payload_and_next_header(drc, size,
		    abd_to_buf(abd));
		if (err != 0) {
			abd_free(abd);
			return (err);
		}
		drc->drc_rrd->abd = abd;
		receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_logical_size);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &drc->drc_rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(drc, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}

		receive_read_prefetch(drc, drrwe->drr_object,
		    drrwe->drr_offset, drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	case DRR_REDACT:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(drc, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
		    drre->drr_checksum))
			return (SET_ERROR(ECKSUM));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
		int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
		abd_t *abd = abd_alloc_linear(size, B_FALSE);
		err = receive_read_payload_and_next_header(drc, size,
		    abd_to_buf(abd));
		if (err != 0)
			abd_free(abd);
		else
			drc->drc_rrd->abd = abd;
		return (err);
	}
	case DRR_OBJECT_RANGE:
	{
		err = receive_read_payload_and_next_header(drc, 0, NULL);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
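/*
 * Note: WRITE and SPILL payloads are read into linear ABDs above so that the
 * writer thread can hand them on without reshaping the data (either directly
 * to dmu_lightweight_write_by_dnode() or, for spill blocks, by copying into a
 * loaned arc buffer), while the other record types use plain kmem buffers
 * attached to the record as its payload.
 */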
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
#ifdef ZFS_DEBUG
	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		dprintf("drr_type = OBJECT obj = %llu type = %u "
		    "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
		    "compress = %u dn_slots = %u err = %d\n",
		    (u_longlong_t)drro->drr_object, drro->drr_type,
		    drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
		    drro->drr_checksumtype, drro->drr_compress,
		    drro->drr_dn_slots, err);
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		dprintf("drr_type = FREEOBJECTS firstobj = %llu "
		    "numobjs = %llu err = %d\n",
		    (u_longlong_t)drrfo->drr_firstobj,
		    (u_longlong_t)drrfo->drr_numobjs, err);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
		    "lsize = %llu cksumtype = %u flags = %u "
		    "compress = %u psize = %llu err = %d\n",
		    (u_longlong_t)drrw->drr_object, drrw->drr_type,
		    (u_longlong_t)drrw->drr_offset,
		    (u_longlong_t)drrw->drr_logical_size,
		    drrw->drr_checksumtype, drrw->drr_flags,
		    drrw->drr_compressiontype,
		    (u_longlong_t)drrw->drr_compressed_size, err);
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
		    "length = %llu toguid = %llx refguid = %llx "
		    "refobject = %llu refoffset = %llu cksumtype = %u "
		    "flags = %u err = %d\n",
		    (u_longlong_t)drrwbr->drr_object,
		    (u_longlong_t)drrwbr->drr_offset,
		    (u_longlong_t)drrwbr->drr_length,
		    (u_longlong_t)drrwbr->drr_toguid,
		    (u_longlong_t)drrwbr->drr_refguid,
		    (u_longlong_t)drrwbr->drr_refobject,
		    (u_longlong_t)drrwbr->drr_refoffset,
		    drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
		    "length = %llu compress = %u etype = %u lsize = %u "
		    "psize = %u err = %d\n",
		    (u_longlong_t)drrwe->drr_object,
		    (u_longlong_t)drrwe->drr_offset,
		    (u_longlong_t)drrwe->drr_length,
		    drrwe->drr_compression, drrwe->drr_etype,
		    drrwe->drr_lsize, drrwe->drr_psize, err);
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		dprintf("drr_type = FREE obj = %llu offset = %llu "
		    "length = %lld err = %d\n",
		    (u_longlong_t)drrf->drr_object,
		    (u_longlong_t)drrf->drr_offset,
		    (longlong_t)drrf->drr_length,
		    err);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		dprintf("drr_type = SPILL obj = %llu length = %llu "
		    "err = %d\n", (u_longlong_t)drrs->drr_object,
		    (u_longlong_t)drrs->drr_length, err);
		break;
	}
	case DRR_OBJECT_RANGE:
	{
		struct drr_object_range *drror =
		    &rrd->header.drr_u.drr_object_range;
		dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
		    "numslots = %llu flags = %u err = %d\n",
		    (u_longlong_t)drror->drr_firstobj,
		    (u_longlong_t)drror->drr_numslots,
		    drror->drr_flags, err);
		break;
	}
	default:
		return;
	}
#endif
}
/*
 * Commit the records to the pool.
 */
static int
receive_process_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err;

	/* Processing in order, therefore bytes_read should be increasing. */
	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
	rwa->bytes_read = rrd->bytes_read;

	/* We can only heal write records; other ones get ignored */
	if (rwa->heal && rrd->header.drr_type != DRR_WRITE) {
		if (rrd->abd != NULL) {
			abd_free(rrd->abd);
			rrd->abd = NULL;
		} else if (rrd->payload != NULL) {
			kmem_free(rrd->payload, rrd->payload_size);
			rrd->payload = NULL;
		}
		return (0);
	}

	if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) {
		err = flush_write_batch(rwa);
		if (err != 0) {
			if (rrd->abd != NULL) {
				abd_free(rrd->abd);
				rrd->abd = NULL;
				rrd->payload = NULL;
			} else if (rrd->payload != NULL) {
				kmem_free(rrd->payload, rrd->payload_size);
				rrd->payload = NULL;
			}

			return (err);
		}
	}

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		err = receive_freeobjects(rwa, drrfo);
		break;
	}
	case DRR_WRITE:
	{
		err = receive_process_write_record(rwa, rrd);
		if (rwa->heal) {
			/*
			 * If healing - always free the abd after processing
			 */
			abd_free(rrd->abd);
			rrd->abd = NULL;
		} else if (err != EAGAIN) {
			/*
			 * On success, a non-healing
			 * receive_process_write_record() returns
			 * EAGAIN to indicate that we do not want to free
			 * the rrd or arc_buf.
			 */
			ASSERT(err != 0);
			abd_free(rrd->abd);
			rrd->abd = NULL;
		}
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		err = receive_free(rwa, drrf);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->abd);
		if (err != 0)
			abd_free(rrd->abd);
		rrd->abd = NULL;
		rrd->payload = NULL;
		break;
	}
	case DRR_OBJECT_RANGE:
	{
		struct drr_object_range *drror =
		    &rrd->header.drr_u.drr_object_range;
		err = receive_object_range(rwa, drror);
		break;
	}
	case DRR_REDACT:
	{
		struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
		err = receive_redact(rwa, drrr);
		break;
	}
	default:
		err = (SET_ERROR(EINVAL));
	}

	if (err != 0)
		dprintf_drr(rrd, err);

	return (err);
}
/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record. When we're done, signal the main thread and exit.
 */
static __attribute__((noreturn)) void
receive_writer_thread(void *arg)
{
	struct receive_writer_arg *rwa = arg;
	struct receive_record_arg *rrd;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
	    rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting things
		 * on the queue, but we need to clear everything in it before we
		 * can exit.
		 */
		int err = 0;
		if (rwa->err == 0) {
			err = receive_process_record(rwa, rrd);
		} else if (rrd->abd != NULL) {
			abd_free(rrd->abd);
			rrd->abd = NULL;
			rrd->payload = NULL;
		} else if (rrd->payload != NULL) {
			kmem_free(rrd->payload, rrd->payload_size);
			rrd->payload = NULL;
		}
		/*
		 * EAGAIN indicates that this record has been saved (on
		 * rwa->write_batch), and will be used again, so we don't
		 * free it.
		 * When healing data we always need to free the record.
		 */
		if (err != EAGAIN || rwa->heal) {
			if (rwa->err == 0)
				rwa->err = err;
			kmem_free(rrd, sizeof (*rrd));
		}
	}
	kmem_free(rrd, sizeof (*rrd));

	if (rwa->heal) {
		zio_wait(rwa->heal_pio);
	} else {
		int err = flush_write_batch(rwa);
		if (rwa->err == 0)
			rwa->err = err;
	}
	mutex_enter(&rwa->mutex);
	rwa->done = B_TRUE;
	cv_signal(&rwa->cv);
	mutex_exit(&rwa->mutex);
	spl_fstrans_unmark(cookie);
	thread_exit();
}
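/*
 * Note on error propagation: rwa->err records the first failure seen by the
 * writer thread. Once it is set, later records pulled off the queue are only
 * freed, and the main thread stops enqueueing new work as soon as it observes
 * the non-zero value.
 */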
static int
resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
{
	uint64_t val;
	objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
	uint64_t dsobj = dmu_objset_id(drc->drc_os);
	uint64_t resume_obj, resume_off;

	if (nvlist_lookup_uint64(begin_nvl,
	    "resume_object", &resume_obj) != 0 ||
	    nvlist_lookup_uint64(begin_nvl,
	    "resume_offset", &resume_off) != 0) {
		return (SET_ERROR(EINVAL));
	}
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
	if (resume_obj != val)
		return (SET_ERROR(EINVAL));
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
	if (resume_off != val)
		return (SET_ERROR(EINVAL));

	return (0);
}
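/*
 * Note: for a resuming receive, the "resume_object" and "resume_offset"
 * values in the DRR_BEGIN payload nvlist must match the DS_FIELD_RESUME_*
 * entries previously persisted on the dataset; any mismatch fails the stream
 * with EINVAL before any records are applied.
 */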
/*
 * Read in the stream's records, one by one, and apply them to the pool. There
 * are two threads involved; the thread that calls this function will spin up a
 * worker thread, read the records off the stream one by one, and issue
 * prefetches for any necessary indirect blocks. It will then push the records
 * onto an internal blocking queue. The worker thread will pull the records off
 * the queue, and actually write the data into the DMU. This way, the worker
 * thread doesn't have to wait for reads to complete, since everything it needs
 * (the indirect blocks) will be prefetched.
 *
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
{
	int err = 0;
	struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);

	if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
		uint64_t bytes = 0;
		(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
		    drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
		    sizeof (bytes), 1, &bytes);
		drc->drc_bytes_read += bytes;
	}

	drc->drc_ignore_objlist = objlist_create();

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
	ASSERT0(drc->drc_os->os_encrypted &&
	    (drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));

	/* handle DSL encryption key payload */
	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
		nvlist_t *keynvl = NULL;

		ASSERT(drc->drc_os->os_encrypted);
		ASSERT(drc->drc_raw);

		err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
		    &keynvl);
		if (err != 0)
			goto out;

		if (!drc->drc_heal) {
			/*
			 * If this is a new dataset we set the key immediately.
			 * Otherwise we don't want to change the key until we
			 * are sure the rest of the receive succeeded so we
			 * stash the keynvl away until then.
			 */
			err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
			    drc->drc_ds->ds_object, drc->drc_fromsnapobj,
			    drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
			if (err != 0)
				goto out;
		}

		/* see comment in dmu_recv_end_sync() */
		drc->drc_ivset_guid = 0;
		(void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
		    &drc->drc_ivset_guid);

		if (!drc->drc_newfs)
			drc->drc_keynvl = fnvlist_dup(keynvl);
	}

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = resume_check(drc, drc->drc_begin_nvl);
		if (err != 0)
			goto out;
	}

	/*
	 * For compatibility with recursive send streams, we do this here,
	 * rather than in dmu_recv_begin. If we pull the next header too
	 * early, and it's the END record, we break the `recv_skip` logic.
	 */
	if (drc->drc_drr_begin->drr_payloadlen == 0) {
		err = receive_read_payload_and_next_header(drc, 0, NULL);
		if (err != 0)
			goto out;
	}

	/*
	 * If we failed before this point we will clean up any new resume
	 * state that was created. Now that we've gotten past the initial
	 * checks we are ok to retain that resume state.
	 */
	drc->drc_should_save = B_TRUE;

	(void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
	    MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
	    offsetof(struct receive_record_arg, node));
	cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
	rwa->os = drc->drc_os;
	rwa->byteswap = drc->drc_byteswap;
	rwa->heal = drc->drc_heal;
	rwa->tofs = drc->drc_tofs;
	rwa->resumable = drc->drc_resumable;
	rwa->raw = drc->drc_raw;
	rwa->spill = drc->drc_spill;
	rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
	rwa->os->os_raw_receive = drc->drc_raw;
	if (drc->drc_heal) {
		rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL,
		    ZIO_FLAG_GODFATHER);
	}
	list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
	    offsetof(struct receive_record_arg, node.bqn_node));

	(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
	    TS_RUN, minclsyspri);
	/*
	 * We're reading rwa->err without locks, which is safe since we are the
	 * only reader, and the worker thread is the only writer. It's ok if we
	 * miss a write for an iteration or two of the loop, since the writer
	 * thread will keep freeing records we send it until we send it an eos
	 * marker.
	 *
	 * We can leave this loop in 3 ways: First, if rwa->err is
	 * non-zero. In that case, the writer thread will free the rrd we just
	 * pushed. Second, if we're interrupted; in that case, either it's the
	 * first loop and drc->drc_rrd was never allocated, or it's later, and
	 * drc->drc_rrd has been handed off to the writer thread who will free
	 * it. Finally, if receive_read_record fails or we're at the end of the
	 * stream, then we free drc->drc_rrd and exit.
	 */
	while (rwa->err == 0) {
		if (issig()) {
			err = SET_ERROR(EINTR);
			break;
		}

		ASSERT3P(drc->drc_rrd, ==, NULL);
		drc->drc_rrd = drc->drc_next_rrd;
		drc->drc_next_rrd = NULL;
		/* Allocates and loads header into drc->drc_next_rrd */
		err = receive_read_record(drc);

		if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
			kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
			drc->drc_rrd = NULL;
			break;
		}

		bqueue_enqueue(&rwa->q, drc->drc_rrd,
		    sizeof (struct receive_record_arg) +
		    drc->drc_rrd->payload_size);
		drc->drc_rrd = NULL;
	}

	ASSERT3P(drc->drc_rrd, ==, NULL);
	drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
	drc->drc_rrd->eos_marker = B_TRUE;
	bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);

	mutex_enter(&rwa->mutex);
	while (!rwa->done) {
		/*
		 * We need to use cv_wait_sig() so that any process that may
		 * be sleeping here can still fork.
		 */
		(void) cv_wait_sig(&rwa->cv, &rwa->mutex);
	}
	mutex_exit(&rwa->mutex);

	/*
	 * If we are receiving a full stream as a clone, all object IDs which
	 * are greater than the maximum ID referenced in the stream are
	 * by definition unused and must be freed.
	 */
	if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
		uint64_t obj = rwa->max_object + 1;
		int free_err = 0;
		int next_err = 0;

		while (next_err == 0) {
			free_err = dmu_free_long_object(rwa->os, obj);
			if (free_err != 0 && free_err != ENOENT)
				break;

			next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
		}

		if (err == 0) {
			if (free_err != 0 && free_err != ENOENT)
				err = free_err;
			else if (next_err != ESRCH)
				err = next_err;
		}
	}

	cv_destroy(&rwa->cv);
	mutex_destroy(&rwa->mutex);
	bqueue_destroy(&rwa->q);
	list_destroy(&rwa->write_batch);
	if (err == 0)
		err = rwa->err;

out:
	/*
	 * If we hit an error before we started the receive_writer_thread
	 * we need to clean up the next_rrd we create by processing the
	 * DRR_BEGIN record.
	 */
	if (drc->drc_next_rrd != NULL)
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));

	/*
	 * The objset will be invalidated by dmu_recv_end() when we do
	 * dsl_dataset_clone_swap_sync_impl().
	 */
	drc->drc_os = NULL;

	kmem_free(rwa, sizeof (*rwa));
	nvlist_free(drc->drc_begin_nvl);

	if (err != 0) {
		/*
		 * Clean up references. If receive is not resumable,
		 * destroy what we created, so we don't leave it in
		 * the inconsistent state.
		 */
		dmu_recv_cleanup_ds(drc);
		nvlist_free(drc->drc_keynvl);
		drc->drc_keynvl = NULL;
	}

	objlist_destroy(drc->drc_ignore_objlist);
	drc->drc_ignore_objlist = NULL;
	*voffp = drc->drc_voff;
	return (err);
}
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (drc->drc_heal) {
		error = 0;
	} else if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		if (drc->drc_keynvl != NULL) {
			error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
			    drc->drc_keynvl, tx);
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}

		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1,
		    drc->drc_cred, drc->drc_proc);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1,
		    drc->drc_cred, drc->drc_proc);
	}
	return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
	uint64_t newsnapobj = 0;

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);
	drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;

	if (drc->drc_heal) {
		if (drc->drc_keynvl != NULL) {
			nvlist_free(drc->drc_keynvl);
			drc->drc_keynvl = NULL;
		}
	} else if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		if (drc->drc_keynvl != NULL) {
			dsl_crypto_recv_raw_key_sync(drc->drc_ds,
			    drc->drc_keynvl, tx);
			nvlist_free(drc->drc_keynvl);
			drc->drc_keynvl = NULL;
		}

		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);

		/*
		 * The objset was evicted by dsl_dataset_clone_swap_sync_impl,
		 * so drc_os is no longer valid.
		 */
		drc->drc_os = NULL;

		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		newsnapobj =
		    dsl_dataset_phys(origin_head)->ds_prev_snap_obj;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
		if (dsl_dataset_has_resume_receive_state(ds)) {
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_FROMGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OBJECT, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OFFSET, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_BYTES, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TOGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TONAME, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
		}
		newsnapobj =
		    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	}

	/*
	 * If this is a raw receive, the crypt_keydata nvlist will include
	 * a to_ivset_guid for us to set on the new snapshot. This value
	 * will override the value generated by the snapshot code. However,
	 * this value may not be present, because older implementations of
	 * the raw send code did not include this value, and we are still
	 * allowed to receive them if the zfs_disable_ivset_guid_check
	 * tunable is set, in which case we will leave the newly-generated
	 * value.
	 */
	if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) {
		dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
		    DMU_OT_DSL_DATASET, tx);
		VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
		    DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
		    &drc->drc_ivset_guid, tx));
	}

	/*
	 * Release the hold from dmu_recv_begin. This must be done before
	 * we return to open context, so that when we free the dataset's dnode
	 * we can evict its bonus buffer. Since the dataset may be destroyed
	 * at this point (and therefore won't have a valid pointer to the spa)
	 * we release the key mapping manually here while we do have a valid
	 * pointer, if it exists.
	 */
	if (!drc->drc_raw && encrypted) {
		(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
		    drc->drc_ds->ds_object, drc->drc_ds);
	}
	dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
	drc->drc_ds = NULL;
}
static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	char name[ZFS_MAX_DATASET_NAME_LEN];
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	int error;

	drc->drc_owner = owner;

	if (drc->drc_newfs)
		error = dmu_recv_new_end(drc);
	else
		error = dmu_recv_existing_end(drc);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
		nvlist_free(drc->drc_keynvl);
	} else if (!drc->drc_heal) {
		if (drc->drc_newfs) {
			zvol_create_minor(drc->drc_tofs);
		}
		char *snapname = kmem_asprintf("%s@%s",
		    drc->drc_tofs, drc->drc_tosnap);
		zvol_create_minor(snapname);
		kmem_strfree(snapname);
	}
	return (error);
}
/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, UINT, ZMOD_RW,
	"Maximum receive queue length");

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, UINT, ZMOD_RW,
	"Receive queue fill fraction");

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, UINT, ZMOD_RW,
	"Maximum amount of writes to batch into one transaction");

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
	"Ignore errors during corrective receive");