/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>

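/*
 * Receive queue length in bytes.  This bounds the amount of record data
 * queued between the reader and writer threads; it is used to size the
 * bqueue in dmu_recv_stream(), which is not shown in this excerpt.
 */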
int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";

static void byteswap_record(dmu_replay_record_t *drr);

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;

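/*
 * Validate a receive into an existing target dataset: the temporary
 * %recv clone and the new snapshot name must not already exist, the
 * snapshot limit must not be exceeded, and for incrementals a snapshot
 * matching fromguid must exist in this dir.
 */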
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid, uint64_t featureflags)
{
	uint64_t val;
	uint64_t children;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
	boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
	boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;

	/* Temporary clone name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* Resume state must not be set. */
	if (dsl_dataset_has_resume_receive_state(ds))
		return (SET_ERROR(EBUSY));

	/* New snapshot name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/* Must not have children if receiving a ZVOL. */
	error = zap_count(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
	if (error != 0)
		return (error);
	if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
	    children > 0)
		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Can't raw receive on top of an unencrypted dataset */
		if (!encrypted && raw)
			return (SET_ERROR(EINVAL));

		/* Encryption is incompatible with embedded data */
		if (encrypted && embed)
			return (SET_ERROR(EINVAL));

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_cookie->drc_fromsnapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap. Raw sends have an
			 * additional constraint that requires that
			 * no "noop" snapshots exist between fromsnap
			 * and tosnap for the IVset checking code to
			 * work properly.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap) ||
			    (raw &&
			    dsl_dataset_phys(ds)->ds_prev_snap_obj !=
			    snap->ds_object)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_cookie->drc_fromsnapobj =
			    ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));

		/*
		 * We don't support using zfs recv -F to blow away
		 * encrypted filesystems. This would require the
		 * dsl dir to point to the old encryption key and
		 * the new one at the same time during the receive.
		 */
		if ((!encrypted && raw) || encrypted)
			return (SET_ERROR(EINVAL));

		/*
		 * Perform the same encryption checks we would if
		 * we were creating a new dataset from scratch.
		 */
		if (!raw) {
			boolean_t will_encrypt;

			error = dmu_objset_create_crypt_check(
			    ds->ds_dir->dd_parent, drba->drba_dcp,
			    &will_encrypt);
			if (error != 0)
				return (error);

			if (will_encrypt && embed)
				return (SET_ERROR(EINVAL));
		}

		drba->drba_cookie->drc_fromsnapobj = 0;
	}

	return (0);
}

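/*
 * Open-context check half of the dmu_recv_begin() sync task.  Verifies
 * that the pool supports every feature the stream requires and that the
 * receive target is in a valid state: an existing dataset, a new dataset
 * under an existing filesystem, or a clone of a matching origin.
 */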
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	ds_hold_flags_t dsflags = 0;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS. Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require the encryption feature */
		if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
			return (SET_ERROR(ENOTSUP));

		/* embedded data is incompatible with encryption and raw recv */
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (SET_ERROR(EINVAL));

		/* raw receives require spill block allocation flag */
		if (!(flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid,
		    featureflags);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		objset_t *os;

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
		    drba->drba_origin == NULL) {
			boolean_t will_encrypt;

			/*
			 * Check that we aren't breaking any encryption rules
			 * and that we have all the parameters we need to
			 * create an encrypted dataset if necessary. If we are
			 * making an encrypted dataset the stream can't have
			 * embedded data.
			 */
			error = dmu_objset_create_crypt_check(ds->ds_dir,
			    drba->drba_dcp, &will_encrypt);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}

			if (will_encrypt &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		/* can't recv below anything but filesystems (eg. no ZVOLs) */
		error = dmu_objset_from_ds(ds, &os);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		if (dmu_objset_type(os) != DMU_OST_ZFS) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;

			error = dsl_dataset_hold_flags(dp, drba->drba_origin,
			    dsflags, FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (origin->ds_dir->dd_crypto_obj != 0 &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			dsl_dataset_rele_flags(origin,
			    dsflags, FTAG);
		}

		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

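/*
 * Syncing-context half of dmu_recv_begin(): creates the temporary %recv
 * clone (or the new dataset), stores resume state if requested, marks the
 * dataset inconsistent, and takes ownership of it for the receive.
 */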
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds, *newds;
	objset_t *os;
	uint64_t dsobj;
	ds_hold_flags_t dsflags = 0;
	int error;
	uint64_t crflags = 0;
	dsl_crypto_params_t dummy_dcp = { 0 };
	dsl_crypto_params_t *dcp = drba->drba_dcp;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
		dsflags |= DS_HOLD_FLAG_DECRYPT;

	/*
	 * Raw, non-incremental recvs always use a dummy dcp with
	 * the raw cmd set. Raw incremental recvs do not use a dcp
	 * since the encryption parameters are already set in stone.
	 */
	if (dcp == NULL && drba->drba_cookie->drc_fromsnapobj == 0 &&
	    drba->drba_origin == NULL) {
		ASSERT3P(dcp, ==, NULL);
		dcp = &dummy_dcp;

		if (featureflags & DMU_BACKUP_FEATURE_RAW)
			dcp->cp_cmd = DCP_CMD_RAW_RECV;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;

		if (drba->drba_cookie->drc_fromsnapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
			ASSERT3P(dcp, ==, NULL);
		}

		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, dcp, tx);
		if (drba->drba_cookie->drc_fromsnapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
			ASSERT3P(dcp, ==, NULL);
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, dcp, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &newds));
	VERIFY0(dmu_objset_from_ds(newds, &os));

	if (drba->drba_cookie->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_RAW) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
			    8, 1, &one, tx));
		}
	}

	/*
	 * Usually the os->os_encrypted value is tied to the presence of a
	 * DSL Crypto Key object in the dd. However, that will not be received
	 * until dmu_recv_stream(), so we set the value manually for now.
	 */
	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		os->os_encrypted = B_TRUE;
		drba->drba_cookie->drc_raw = B_TRUE;
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the objset
	 * in our new dataset. If this is a raw send we postpone this until
	 * dmu_recv_stream() so that we can allocate the metadnode with the
	 * properties from the DRR_BEGIN payload.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
	    (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}

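/*
 * Check function for resuming an interrupted receive: the stream's
 * feature flags and the resume state stored on the dataset (toguid and
 * fromguid) must match, the dataset must be marked inconsistent, and
 * nothing else may own the half-received dataset.
 */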
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	ds_hold_flags_t dsflags = 0;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS. Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require spill block allocation flag */
		if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running.  If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele_flags(ds, dsflags, FTAG);
	return (0);
}

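/*
 * Sync half of the resume path: re-locate the %recv clone (or tofs
 * itself), briefly clear DS_FLAG_INCONSISTENT so the dataset can be
 * owned again, then re-mark it inconsistent for the duration of the
 * receive.
 */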
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	objset_t *os;
	ds_hold_flags_t dsflags = 0;
	uint64_t dsobj;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		drba->drba_cookie->drc_raw = B_TRUE;
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele_flags(ds, dsflags, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &ds));
	VERIFY0(dmu_objset_from_ds(ds, &os));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
	    drba->drba_cookie->drc_raw);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, nvlist_t *localprops,
    nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
		drc->drc_spill = B_TRUE;

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	} else {
		int err;

		/*
		 * For non-raw, non-incremental, non-resuming receives the
		 * user can specify encryption parameters on the command line
		 * with "zfs recv -o". For these receives we create a dcp and
		 * pass it to the sync task. Creating the dcp will implicitly
		 * remove the encryption params from the localprops nvlist,
		 * which avoids errors when trying to set these normally
		 * read-only properties. Any other kind of receive that
		 * attempts to set these properties will fail as a result.
		 */
		if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_RAW) == 0 &&
		    origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
			err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
			    localprops, hidden_args, &drba.drba_dcp);
			if (err != 0)
				return (err);
		}

		err = dsl_sync_task(tofs,
		    dmu_recv_begin_check, dmu_recv_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
		dsl_crypto_params_free(drba.drba_dcp, !!err);

		return (err);
	}
}

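/*
 * The structures below carry records between the reader thread, which
 * reads and checksums the stream, and the writer thread, which applies
 * the records to the objset; the two communicate through the bqueue in
 * receive_writer_arg.
 */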
struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *arc_buf;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
	boolean_t raw; /* DMU_BACKUP_FEATURE_RAW set */
	boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
	uint64_t last_object;
	uint64_t last_offset;
	uint64_t max_object; /* highest object ID referenced in stream */
	uint64_t bytes_read; /* bytes read when current record created */

	/* Encryption parameters for the last received DRR_OBJECT_RANGE */
	boolean_t or_crypt_params_present;
	uint64_t or_firstobj;
	uint64_t or_numslots;
	uint8_t or_salt[ZIO_DATA_SALT_LEN];
	uint8_t or_iv[ZIO_DATA_IV_LEN];
	uint8_t or_mac[ZIO_DATA_MAC_LEN];
	boolean_t or_byteorder;
};

struct objlist {
	list_t list; /* List of struct receive_objnode. */
	/*
	 * Last object looked up. Used to assert that objects are being looked
	 * up in ascending order.
	 */
	uint64_t last_lookup;
};

struct receive_objnode {
	list_node_t node;
	uint64_t object;
};

struct receive_arg {
	objset_t *os;
	vnode_t *vp; /* The vnode to read the stream from */
	uint64_t voff; /* The current offset in the stream */
	uint64_t bytes_read;
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	boolean_t raw;
	uint64_t featureflags;
	/* Sorted list of objects not to issue prefetches for. */
	struct objlist ignore_objlist;
};

typedef struct guid_map_entry {
	uint64_t guid;
	boolean_t raw;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

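/*
 * AVL comparator and cleanup hook for the guid->dataset map used by
 * dedup'd streams (see receive_write_byref()).  The map is torn down
 * through the onexit mechanism when the receiving process goes away.
 */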
static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
	const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;

	return (AVL_CMP(gmep1->guid, gmep2->guid));
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		ds_hold_flags_t dsflags = DS_HOLD_FLAG_DECRYPT;

		if (gmep->raw) {
			gmep->gme_ds->ds_objset->os_raw_receive = B_FALSE;
			dsflags &= ~DS_HOLD_FLAG_DECRYPT;
		}

		dsl_dataset_disown(gmep->gme_ds, dsflags, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}

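/*
 * Read len bytes of the stream into buf, looping over short reads.
 * Hitting EOF mid-record is reported as ECKSUM, which (as noted below)
 * marks the receive as interrupted and potentially resumable.
 */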
static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8). See
	 * comment in dump_bytes.
	 */
	ASSERT(len % 8 == 0 ||
	    (ra->featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (char *)buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done) {
			/*
			 * Note: ECKSUM indicates that the receive
			 * was interrupted and can potentially be resumed.
			 */
			ra->err = SET_ERROR(ECKSUM);
		}
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ra->bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}

noinline static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO32(drr_object.drr_raw_bonuslen);
		DO64(drr_object.drr_toguid);
		DO64(drr_object.drr_maxblkid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		DO64(drr_spill.drr_compressed_size);
		DO32(drr_spill.drr_type);
		break;
	case DRR_OBJECT_RANGE:
		DO64(drr_object_range.drr_firstobj);
		DO64(drr_object_range.drr_numslots);
		DO64(drr_object_range.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}

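/*
 * Derive the number of block pointers a dnode with the given bonus type
 * and size must have.  Worked example, assuming the legacy 512-byte
 * dnode layout (DN_OLD_MAX_BONUSLEN == 320, SPA_BLKPTRSHIFT == 7, i.e.
 * 128-byte blkptrs): an empty bonus leaves 1 + (320 >> 7) = 3 blkptrs,
 * while a full 320-byte bonus leaves only the single embedded blkptr.
 * SA bonus buffers always get a single blkptr, since their variably
 * sized payload can grow into the remaining space or spill.
 */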
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}

static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset.  This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}

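/*
 * Apply a DRR_OBJECT record: validate it against the stream type (raw
 * vs. non-raw), detect whether the object was freed and reallocated on
 * the sending side (and if so clear it here first), then allocate or
 * reclaim the dnode and install the bonus payload.
 */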
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object;
	int err;
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen >
	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
	    dn_slots >
	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
		return (SET_ERROR(EINVAL));
	}

	if (rwa->raw) {
		/*
		 * We should have received a DRR_OBJECT_RANGE record
		 * containing this block and stored it in rwa.
		 */
		if (drro->drr_object < rwa->or_firstobj ||
		    drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
		    drro->drr_raw_bonuslen < drro->drr_bonuslen ||
		    drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
		    drro->drr_nlevels > DN_MAX_LEVELS ||
		    drro->drr_nblkptr > DN_MAX_NBLKPTR ||
		    DN_SLOTS_TO_BONUSLEN(dn_slots) <
		    drro->drr_raw_bonuslen)
			return (SET_ERROR(EINVAL));
	} else {
		/*
		 * The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN
		 * record indicates this by setting DRR_FLAG_SPILL_BLOCK.
		 */
		if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
		    (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
			return (SET_ERROR(EINVAL));
		}

		if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
		    drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
			return (SET_ERROR(EINVAL));
		}
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);
	if (err != 0 && err != ENOENT && err != EEXIST)
		return (SET_ERROR(EINVAL));

	if (drro->drr_object > rwa->max_object)
		rwa->max_object = drro->drr_object;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 * Raw receives will also check that the indirect structure of the
	 * dnode hasn't changed.
	 */
	if (err == 0) {
		uint32_t indblksz = drro->drr_indblkshift ?
		    1ULL << drro->drr_indblkshift : 0;
		int nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);
		boolean_t did_free = B_FALSE;

		object = drro->drr_object;

		/* nblkptr should be bounded by the bonus size and type */
		if (rwa->raw && nblkptr != drro->drr_nblkptr)
			return (SET_ERROR(EINVAL));

		/*
		 * Check for indicators that the object was freed and
		 * reallocated. For all sends, these indicators are:
		 *	- A changed block size
		 *	- A smaller nblkptr
		 *	- A changed dnode size
		 * For raw sends we also check a few other fields to
		 * ensure we are preserving the objset structure exactly
		 * as it was on the receive side:
		 *	- A changed indirect block size
		 *	- A smaller nlevels
		 */
		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr ||
		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT ||
		    (rwa->raw &&
		    (indblksz != doi.doi_metadata_block_size ||
		    drro->drr_nlevels < doi.doi_indirection))) {
			err = dmu_free_long_range(rwa->os,
			    drro->drr_object, 0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
			else
				did_free = B_TRUE;
		}

		/*
		 * The dmu does not currently support decreasing nlevels
		 * or changing the number of dnode slots on an object. For
		 * non-raw sends, this does not matter and the new object
		 * can just use the previous one's nlevels. For raw sends,
		 * however, the structure of the received dnode (including
		 * nlevels and dnode slots) must match that of the send
		 * side. Therefore, instead of using dmu_object_reclaim(),
		 * we must free the object completely and call
		 * dmu_object_claim_dnsize() instead.
		 */
		if ((rwa->raw && drro->drr_nlevels < doi.doi_indirection) ||
		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
			err = dmu_free_long_object(rwa->os, drro->drr_object);
			if (err != 0)
				return (SET_ERROR(EINVAL));

			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
			object = DMU_NEW_OBJECT;
		}

		/*
		 * For raw receives, free everything beyond the new incoming
		 * maxblkid. Normally this would be done with a DRR_FREE
		 * record that would come after this DRR_OBJECT record is
		 * processed. However, for raw receives we manually set the
		 * maxblkid from the drr_maxblkid and so we must first free
		 * everything above that blkid to ensure the DMU is always
		 * consistent with itself. We will never free the first block
		 * of the object here because a maxblkid of 0 could indicate
		 * an object with a single block or one with no blocks. This
		 * free may be skipped when dmu_free_long_range() was called
		 * above since it covers the entire object's contents.
		 */
		if (rwa->raw && object != DMU_NEW_OBJECT && !did_free) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
			    (drro->drr_maxblkid + 1) * doi.doi_data_block_size,
			    DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}
	} else if (err == EEXIST) {
		/*
		 * The object requested is currently an interior slot of a
		 * multi-slot dnode. This will be resolved when the next txg
		 * is synced out, since the send stream will have told us
		 * to free this slot when we freed the associated dnode
		 * earlier in the stream.
		 */
		txg_wait_synced(dmu_objset_pool(rwa->os), 0);

		if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
			return (SET_ERROR(EINVAL));

		/* object was freed and we are about to allocate a new one */
		object = DMU_NEW_OBJECT;
	} else {
		/* object is free and we are about to allocate a new one */
		object = DMU_NEW_OBJECT;
	}

	/*
	 * If this is a multi-slot dnode there is a chance that this
	 * object will expand into a slot that is already used by
	 * another object from the previous snapshot. We must free
	 * these objects before we attempt to allocate the new dnode.
	 */
	if (dn_slots > 1) {
		boolean_t need_sync = B_FALSE;

		for (uint64_t slot = drro->drr_object + 1;
		    slot < drro->drr_object + dn_slots;
		    slot++) {
			dmu_object_info_t slot_doi;

			err = dmu_object_info(rwa->os, slot, &slot_doi);
			if (err == ENOENT || err == EEXIST)
				continue;
			else if (err != 0)
				return (err);

			err = dmu_free_long_object(rwa->os, slot);
			if (err != 0)
				return (err);

			need_sync = B_TRUE;
		}

		if (need_sync)
			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_write(tx, object, 0, 0);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object == DMU_NEW_OBJECT) {
		/* Currently free, wants to be allocated */
		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    drro->drr_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* Currently allocated, but with different properties */
		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, rwa->spill ?
		    DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
	} else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
		/*
		 * Currently allocated, the existing version of this object
		 * may reference a spill block that is no longer allocated
		 * at the source and needs to be freed.
		 */
		err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
	}

	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	if (rwa->or_crypt_params_present) {
		/*
		 * Set the crypt params for the buffer associated with this
		 * range of dnodes.  This causes the blkptr_t to have the
		 * same crypt params (byteorder, salt, iv, mac) as on the
		 * sending side.
		 *
		 * Since we are committing this tx now, it is possible for
		 * the dnode block to end up on-disk with the incorrect MAC,
		 * if subsequent objects in this block are received in a
		 * different txg. However, since the dataset is marked as
		 * inconsistent, no code paths will do a non-raw read (or
		 * decrypt the block / verify the MAC). The receive code and
		 * scrub code can safely do raw reads and verify the
		 * checksum. They don't need to verify the MAC.
		 */
		dmu_buf_t *db = NULL;
		uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;

		err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
		    offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
		if (err != 0) {
			dmu_tx_commit(tx);
			return (SET_ERROR(EINVAL));
		}

		dmu_buf_set_crypt_params(db, rwa->or_byteorder,
		    rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);

		dmu_buf_rele(db, FTAG);

		rwa->or_crypt_params_present = B_FALSE;
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	/* handle more restrictive dnode structuring for raw recvs */
	if (rwa->raw) {
		/*
		 * Set the indirect block size, block shift, nlevels.
		 * This will not fail because we ensured all of the
		 * blocks were freed earlier if this is a new object.
		 * For non-new objects block size and indirect block
		 * shift cannot change and nlevels can only increase.
		 */
		VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
		    drro->drr_blksz, drro->drr_indblkshift, tx));
		VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
		    drro->drr_nlevels, tx));

		/*
		 * Set the maxblkid. This will always succeed because
		 * we freed all blocks beyond the new maxblkid above.
		 */
		VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
		    drro->drr_maxblkid, tx));
	}

	if (data != NULL) {
		dmu_buf_t *db;
		dnode_t *dn;
		uint32_t flags = DMU_READ_NO_PREFETCH;

		if (rwa->raw)
			flags |= DMU_READ_NO_DECRYPT;

		VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
		VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));

		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));

		/*
		 * Raw bonus buffers have their byteorder determined by the
		 * DRR_OBJECT_RANGE record.
		 */
		if (rwa->byteswap && !rwa->raw) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    DRR_OBJECT_PAYLOAD_SIZE(drro));
		}

		dmu_buf_rele(db, FTAG);
		dnode_rele(dn, FTAG);
	}
	dmu_tx_commit(tx);

	return (0);
}

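/*
 * Apply a DRR_FREEOBJECTS record by freeing every allocated object in
 * [drr_firstobj, drr_firstobj + drr_numobjs); holes are skipped via
 * dmu_object_next().
 */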
/* ARGSUSED */
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		dmu_object_info_t doi;
		int err;

		err = dmu_object_info(rwa->os, obj, &doi);
		if (err == ENOENT)
			continue;
		else if (err != 0)
			return (err);

		err = dmu_free_long_object(rwa->os, obj);

		if (err != 0)
			return (err);

		if (obj > rwa->max_object)
			rwa->max_object = obj;
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}

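/*
 * Apply a DRR_WRITE record.  The payload arrives as an arc_buf_t, which
 * is assigned directly to the destination dbuf so that in the common
 * case no extra copy of the data is made.
 */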
noinline static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
    arc_buf_t *abuf)
{
	int err;
	dmu_tx_t *tx;
	dnode_t *dn;

	if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
	if (drrw->drr_object < rwa->last_object ||
	    (drrw->drr_object == rwa->last_object &&
	    drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}

	rwa->last_object = drrw->drr_object;
	rwa->last_offset = drrw->drr_offset;

	if (rwa->last_object > rwa->max_object)
		rwa->max_object = rwa->last_object;

	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_logical_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (rwa->byteswap && !arc_is_encrypted(abuf) &&
	    arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    DRR_WRITE_PAYLOAD_SIZE(drrw));
	}

	VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
	err = dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
	if (err != 0) {
		dnode_rele(dn, FTAG);
		dmu_tx_commit(tx);
		return (err);
	}
	dnode_rele(dn, FTAG);

	/*
	 * Note: If the receive fails, we want the resume stream to start
	 * with the same record that we last successfully received (as opposed
	 * to the next record), so that we can verify that we are
	 * resuming from the correct location.
	 */
	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
	dmu_tx_commit(tx);

	return (0);
}

/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
receive_write_byref(struct receive_writer_arg *rwa,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	int flags = DMU_READ_PREFETCH;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = rwa->os;
	}

	if (drrwbr->drr_object > rwa->max_object)
		rwa->max_object = drrwbr->drr_object;

	if (rwa->raw)
		flags |= DMU_READ_NO_DECRYPT;

	/* may return either a regular db or an encrypted one */
	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, flags);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (rwa->raw) {
		dmu_copy_from_buf(rwa->os, drrwbr->drr_object,
		    drrwbr->drr_offset, dbp, tx);
	} else {
		dmu_write(rwa->os, drrwbr->drr_object,
		    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	}
	dmu_buf_rele(dbp, FTAG);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}

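/*
 * Apply a DRR_WRITE_EMBEDDED record: the (tiny) payload is written back
 * out as an embedded block pointer rather than as an ordinary data
 * block.
 */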
static int
receive_write_embedded(struct receive_writer_arg *rwa,
    struct drr_write_embedded *drrwe, void *data)
{
	dmu_tx_t *tx;
	int err;

	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));
	if (rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_object > rwa->max_object)
		rwa->max_object = drrwe->drr_object;

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwe->drr_object,
	    drrwe->drr_offset, drrwe->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(rwa->os, drrwe->drr_object,
	    drrwe->drr_offset, data, drrwe->drr_etype,
	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}

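/*
 * Apply a DRR_SPILL record, resizing the existing spill dbuf first if
 * the incoming block's logical size differs from it.
 */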
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    arc_buf_t *abuf)
{
	dmu_tx_t *tx;
	dmu_buf_t *db, *db_spill;
	int err;
	uint32_t flags = 0;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	/*
	 * This is an unmodified spill block which was added to the stream
	 * to resolve an issue with incorrectly removing spill blocks.  It
	 * should be ignored by current versions of the code which support
	 * the DRR_FLAG_SPILL_BLOCK flag.
	 */
	if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
		dmu_return_arcbuf(abuf);
		return (0);
	}

	if (rwa->raw) {
		if (!DMU_OT_IS_VALID(drrs->drr_type) ||
		    drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
		    drrs->drr_compressed_size == 0)
			return (SET_ERROR(EINVAL));

		flags |= DMU_READ_NO_DECRYPT;
	}

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrs->drr_object > rwa->max_object)
		rwa->max_object = drrs->drr_object;

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
	    &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}

	/*
	 * Spill blocks may both grow and shrink.  When a change in size
	 * occurs any existing dbuf must be updated to match the logical
	 * size of the provided arc_buf_t.
	 */
	if (db_spill->db_size != drrs->drr_length) {
		dmu_buf_will_fill(db_spill, tx);
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	}

	if (rwa->byteswap && !arc_is_encrypted(abuf) &&
	    arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrs->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    DRR_SPILL_PAYLOAD_SIZE(drrs));
	}

	dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}

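/*
 * Apply a DRR_FREE record by punching a hole over the given range of
 * the object.
 */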
/* ARGSUSED */
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != DMU_OBJECT_END &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrf->drr_object > rwa->max_object)
		rwa->max_object = drrf->drr_object;

	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);

	return (err);
}

1804 static int
1805 receive_object_range(struct receive_writer_arg *rwa,
1806 struct drr_object_range *drror)
1809 * By default, we assume this block is in our native format
1810 * (ZFS_HOST_BYTEORDER). We then take into account whether
1811 * the send stream is byteswapped (rwa->byteswap). Finally,
1812 * we need to byteswap again if this particular block was
1813 * in non-native format on the send side.
1815 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
1816 !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
1819 * Since dnode block sizes are constant, we should not need to worry
1820 * about making sure that the dnode block size is the same on the
1821 * sending and receiving sides for the time being. For non-raw sends,
1822 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
1823 * record at all). Raw sends require this record type because the
1824 * encryption parameters are used to protect an entire block of bonus
1825 * buffers. If the size of dnode blocks ever becomes variable,
1826 * handling will need to be added to ensure that dnode block sizes
1827 * match on the sending and receiving side.
1828 */
1829 if (drror->drr_numslots != DNODES_PER_BLOCK ||
1830 P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
1831 !rwa->raw)
1832 return (SET_ERROR(EINVAL));
1834 if (drror->drr_firstobj > rwa->max_object)
1835 rwa->max_object = drror->drr_firstobj;
1837 /*
1838 * The DRR_OBJECT_RANGE handling must be deferred to receive_object()
1839 * so that the block of dnodes is not written out when it's empty,
1840 * and converted to a HOLE BP.
1841 */
1842 rwa->or_crypt_params_present = B_TRUE;
1843 rwa->or_firstobj = drror->drr_firstobj;
1844 rwa->or_numslots = drror->drr_numslots;
1845 bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
1846 bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
1847 bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
1848 rwa->or_byteorder = byteorder;
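/*
 * Note (added for clarity): a DRR_OBJECT_RANGE record only stashes
 * these crypto parameters in rwa; no I/O is issued here. They are
 * picked up later, when receive_object() processes an object that
 * lands in this block of dnodes, per the comment above.
 */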
1850 return (0);
1851 }
1853 /* used to destroy the drc_ds on error */
1854 static void
1855 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
1856 {
1857 dsl_dataset_t *ds = drc->drc_ds;
1858 ds_hold_flags_t dsflags = (drc->drc_raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
1860 /*
1861 * Wait for the txg sync before cleaning up the receive. For
1862 * resumable receives, this ensures that our resume state has
1863 * been written out to disk. For raw receives, this ensures
1864 * that the user accounting code will not attempt to do anything
1865 * after we stopped receiving the dataset.
1866 */
1867 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1868 ds->ds_objset->os_raw_receive = B_FALSE;
1870 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
1871 if (drc->drc_resumable && !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
1872 rrw_exit(&ds->ds_bp_rwlock, FTAG);
1873 dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
1874 } else {
1875 char name[ZFS_MAX_DATASET_NAME_LEN];
1876 rrw_exit(&ds->ds_bp_rwlock, FTAG);
1877 dsl_dataset_name(ds, name);
1878 dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
1879 (void) dsl_destroy_head(name);
1880 }
1881 }
1883 static void
1884 receive_cksum(struct receive_arg *ra, int len, void *buf)
1885 {
1886 if (ra->byteswap) {
1887 (void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
1888 } else {
1889 (void) fletcher_4_incremental_native(buf, len, &ra->cksum);
1890 }
1891 }
1893 /*
1894 * Read the payload into a buffer of size len, and update the current record's
1895 * payload field.
1896 * Allocate ra->next_rrd and read the next record's header into
1897 * ra->next_rrd->header.
1898 * Verify checksum of payload and next record.
1899 */
1900 static int
1901 receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
1902 {
1903 int err;
1904 zio_cksum_t cksum_orig;
1905 zio_cksum_t *cksump;
1907 if (len != 0) {
1908 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
1909 err = receive_read(ra, len, buf);
1910 if (err != 0)
1911 return (err);
1912 receive_cksum(ra, len, buf);
1914 /* note: rrd is NULL when reading the begin record's payload */
1915 if (ra->rrd != NULL) {
1916 ra->rrd->payload = buf;
1917 ra->rrd->payload_size = len;
1918 ra->rrd->bytes_read = ra->bytes_read;
1919 }
1920 } else {
1921 ASSERT3P(buf, ==, NULL);
1922 }
1924 ra->prev_cksum = ra->cksum;
1926 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
1927 err = receive_read(ra, sizeof (ra->next_rrd->header),
1928 &ra->next_rrd->header);
1929 ra->next_rrd->bytes_read = ra->bytes_read;
1931 if (err != 0) {
1932 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
1933 ra->next_rrd = NULL;
1934 return (err);
1935 }
1936 if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
1937 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
1938 ra->next_rrd = NULL;
1939 return (SET_ERROR(EINVAL));
1940 }
1942 /*
1943 * Note: checksum is of everything up to but not including the
1944 * checksum itself.
1945 */
1946 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
1947 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
1948 receive_cksum(ra,
1949 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
1950 &ra->next_rrd->header);
1952 cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
1953 cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
1955 if (ra->byteswap)
1956 byteswap_record(&ra->next_rrd->header);
1958 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
1959 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
1960 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
1961 ra->next_rrd = NULL;
1962 return (SET_ERROR(ECKSUM));
1963 }
1965 receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
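/*
 * Illustrative sketch of the running checksum (not from the original
 * source): for records R1, R2, ... the fletcher-4 state covers every
 * byte received so far, so each header's embedded checksum
 * authenticates the entire stream prefix:
 *
 *   cksum(R1) = fletcher4(R1 up to its checksum field)
 *   cksum(R2) = fletcher4(R1 || R2 up to its checksum field)
 *
 * Folding cksum_orig back into ra->cksum above keeps the receiver's
 * state in sync with what the sender computed.
 */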
1967 return (0);
1968 }
1970 static void
1971 objlist_create(struct objlist *list)
1972 {
1973 list_create(&list->list, sizeof (struct receive_objnode),
1974 offsetof(struct receive_objnode, node));
1975 list->last_lookup = 0;
1976 }
1978 static void
1979 objlist_destroy(struct objlist *list)
1980 {
1981 for (struct receive_objnode *n = list_remove_head(&list->list);
1982 n != NULL; n = list_remove_head(&list->list)) {
1983 kmem_free(n, sizeof (*n));
1984 }
1985 list_destroy(&list->list);
1986 }
1988 /*
1989 * This function looks through the objlist to see if it contains the
1990 * specified object number. In the process, it will remove all object
1991 * numbers in the list that are smaller than the specified object number.
1992 * Thus, any lookup of an object number smaller than a previously looked-up
1993 * object number will always return false; therefore, all lookups should
1994 * be done in ascending order.
1995 */
1996 static boolean_t
1997 objlist_exists(struct objlist *list, uint64_t object)
1998 {
1999 struct receive_objnode *node = list_head(&list->list);
2000 ASSERT3U(object, >=, list->last_lookup);
2001 list->last_lookup = object;
2002 while (node != NULL && node->object < object) {
2003 VERIFY3P(node, ==, list_remove_head(&list->list));
2004 kmem_free(node, sizeof (*node));
2005 node = list_head(&list->list);
2007 return (node != NULL && node->object == object);
2008 }
2010 /*
2011 * The objlist is a list of object numbers stored in ascending order. However,
2012 * the insertion of new object numbers does not seek out the correct location
2013 * for a new object number; instead, it simply appends it to the list. Thus,
2014 * any users must take care to only insert new object numbers in
2015 * ascending order.
2016 */
2017 static void
2018 objlist_insert(struct objlist *list, uint64_t object)
2019 {
2020 struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
2021 node->object = object;
2022 #ifdef ZFS_DEBUG
2023 {
2024 struct receive_objnode *last_object = list_tail(&list->list);
2025 uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
2026 ASSERT3U(node->object, >, last_objnum);
2027 }
2028 #endif
2029 list_insert_tail(&list->list, node);
2030 }
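/*
 * Usage sketch (illustrative, not part of the original source),
 * showing the ascending-order contract of the two routines above:
 *
 *   objlist_insert(&list, 10);
 *   objlist_insert(&list, 25);
 *   objlist_exists(&list, 20);   -> B_FALSE, and node 10 is trimmed
 *   objlist_exists(&list, 25);   -> B_TRUE
 *   objlist_exists(&list, 10);   -> would trip the ASSERT; lookups
 *                                   must never go backwards
 */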
2032 /*
2033 * Issue the prefetch reads for any necessary indirect blocks.
2034 *
2035 * We use the object ignore list to tell us whether or not to issue prefetches
2036 * for a given object. We do this for both correctness (in case the blocksize
2037 * of an object has changed) and performance (if the object doesn't exist, don't
2038 * needlessly try to issue prefetches). We also trim the list as we go through
2039 * the stream to prevent it from growing to an unbounded size.
2040 *
2041 * The object numbers within will always be in sorted order, and any write
2042 * records we see will also be in sorted order, but they're not sorted with
2043 * respect to each other (i.e. we can get several object records before
2044 * receiving each object's write records). As a result, once we've reached a
2045 * given object number, we can safely remove any reference to lower object
2046 * numbers in the ignore list. In practice, we receive up to 32 object records
2047 * before receiving write records, so the list can have up to 32 nodes in it.
2048 */
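/*
 * Example (illustrative): a stream might batch records as
 *
 *   OBJECT 5, OBJECT 9, WRITE(obj 5), WRITE(obj 9), ...
 *
 * If object 9's blocksize changed, 9 sits in the ignore list and its
 * write records skip prefetching; the first lookup for an object
 * number >= 9 then trims any remaining lower entries (e.g. 5).
 */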
2049 /* ARGSUSED */
2050 static void
2051 receive_read_prefetch(struct receive_arg *ra,
2052 uint64_t object, uint64_t offset, uint64_t length)
2053 {
2054 if (!objlist_exists(&ra->ignore_objlist, object)) {
2055 dmu_prefetch(ra->os, object, 1, offset, length,
2056 ZIO_PRIORITY_SYNC_READ);
2057 }
2058 }
2060 /*
2061 * Read records off the stream, issuing any necessary prefetches.
2062 */
2063 static int
2064 receive_read_record(struct receive_arg *ra)
2065 {
2066 int err;
2068 switch (ra->rrd->header.drr_type) {
2069 case DRR_OBJECT:
2070 {
2071 struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2072 uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
2073 void *buf = NULL;
2074 dmu_object_info_t doi;
2076 if (size != 0)
2077 buf = kmem_zalloc(size, KM_SLEEP);
2079 err = receive_read_payload_and_next_header(ra, size, buf);
2080 if (err != 0) {
2081 kmem_free(buf, size);
2082 return (err);
2083 }
2084 err = dmu_object_info(ra->os, drro->drr_object, &doi);
2085 /*
2086 * See receive_read_prefetch for an explanation of why we're
2087 * storing this object in the ignore_obj_list.
2088 */
2089 if (err == ENOENT || err == EEXIST ||
2090 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2091 objlist_insert(&ra->ignore_objlist, drro->drr_object);
2092 err = 0;
2093 }
2094 return (err);
2095 }
2096 case DRR_FREEOBJECTS:
2097 {
2098 err = receive_read_payload_and_next_header(ra, 0, NULL);
2099 return (err);
2100 }
2101 case DRR_WRITE:
2102 {
2103 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
2104 arc_buf_t *abuf;
2105 boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);
2107 if (ra->raw) {
2108 boolean_t byteorder = ZFS_HOST_BYTEORDER ^
2109 !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
2110 ra->byteswap;
2112 abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
2113 drrw->drr_object, byteorder, drrw->drr_salt,
2114 drrw->drr_iv, drrw->drr_mac, drrw->drr_type,
2115 drrw->drr_compressed_size, drrw->drr_logical_size,
2116 drrw->drr_compressiontype);
2117 } else if (DRR_WRITE_COMPRESSED(drrw)) {
2118 ASSERT3U(drrw->drr_compressed_size, >, 0);
2119 ASSERT3U(drrw->drr_logical_size, >=,
2120 drrw->drr_compressed_size);
2121 ASSERT(!is_meta);
2122 abuf = arc_loan_compressed_buf(
2123 dmu_objset_spa(ra->os),
2124 drrw->drr_compressed_size, drrw->drr_logical_size,
2125 drrw->drr_compressiontype);
2126 } else {
2127 abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2128 is_meta, drrw->drr_logical_size);
2129 }
2131 err = receive_read_payload_and_next_header(ra,
2132 DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
2133 if (err != 0) {
2134 dmu_return_arcbuf(abuf);
2135 return (err);
2136 }
2137 ra->rrd->arc_buf = abuf;
2138 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
2139 drrw->drr_logical_size);
2140 return (err);
2141 }
2142 case DRR_WRITE_BYREF:
2143 {
2144 struct drr_write_byref *drrwb =
2145 &ra->rrd->header.drr_u.drr_write_byref;
2146 err = receive_read_payload_and_next_header(ra, 0, NULL);
2147 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
2148 drrwb->drr_length);
2149 return (err);
2150 }
2151 case DRR_WRITE_EMBEDDED:
2152 {
2153 struct drr_write_embedded *drrwe =
2154 &ra->rrd->header.drr_u.drr_write_embedded;
2155 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2156 void *buf = kmem_zalloc(size, KM_SLEEP);
2158 err = receive_read_payload_and_next_header(ra, size, buf);
2159 if (err != 0) {
2160 kmem_free(buf, size);
2161 return (err);
2162 }
2164 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
2165 drrwe->drr_length);
2166 return (err);
2167 }
2168 case DRR_FREE:
2169 {
2170 /*
2171 * It might be beneficial to prefetch indirect blocks here, but
2172 * we don't really have the data to decide for sure.
2173 */
2174 err = receive_read_payload_and_next_header(ra, 0, NULL);
2175 return (err);
2176 }
2177 case DRR_END:
2178 {
2179 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
2180 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
2181 return (SET_ERROR(ECKSUM));
2182 return (0);
2183 }
2184 case DRR_SPILL:
2185 {
2186 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
2187 arc_buf_t *abuf;
2188 int len = DRR_SPILL_PAYLOAD_SIZE(drrs);
2190 /* DRR_SPILL records are either raw or uncompressed */
2191 if (ra->raw) {
2192 boolean_t byteorder = ZFS_HOST_BYTEORDER ^
2193 !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
2194 ra->byteswap;
2196 abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
2197 dmu_objset_id(ra->os), byteorder, drrs->drr_salt,
2198 drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
2199 drrs->drr_compressed_size, drrs->drr_length,
2200 drrs->drr_compressiontype);
2201 } else {
2202 abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2203 DMU_OT_IS_METADATA(drrs->drr_type),
2204 drrs->drr_length);
2205 }
2207 err = receive_read_payload_and_next_header(ra, len,
2208 abuf->b_data);
2209 if (err != 0) {
2210 dmu_return_arcbuf(abuf);
2211 return (err);
2212 }
2213 ra->rrd->arc_buf = abuf;
2214 return (err);
2215 }
2216 case DRR_OBJECT_RANGE:
2217 {
2218 err = receive_read_payload_and_next_header(ra, 0, NULL);
2219 return (err);
2220 }
2221 default:
2222 return (SET_ERROR(EINVAL));
2223 }
2224 }
2226 static void
2227 dprintf_drr(struct receive_record_arg *rrd, int err)
2228 {
2229 #ifdef ZFS_DEBUG
2230 switch (rrd->header.drr_type) {
2231 case DRR_OBJECT:
2232 {
2233 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2234 dprintf("drr_type = OBJECT obj = %llu type = %u "
2235 "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
2236 "compress = %u dn_slots = %u err = %d\n",
2237 drro->drr_object, drro->drr_type, drro->drr_bonustype,
2238 drro->drr_blksz, drro->drr_bonuslen,
2239 drro->drr_checksumtype, drro->drr_compress,
2240 drro->drr_dn_slots, err);
2241 break;
2242 }
2243 case DRR_FREEOBJECTS:
2244 {
2245 struct drr_freeobjects *drrfo =
2246 &rrd->header.drr_u.drr_freeobjects;
2247 dprintf("drr_type = FREEOBJECTS firstobj = %llu "
2248 "numobjs = %llu err = %d\n",
2249 drrfo->drr_firstobj, drrfo->drr_numobjs, err);
2250 break;
2251 }
2252 case DRR_WRITE:
2253 {
2254 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2255 dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
2256 "lsize = %llu cksumtype = %u flags = %u "
2257 "compress = %u psize = %llu err = %d\n",
2258 drrw->drr_object, drrw->drr_type, drrw->drr_offset,
2259 drrw->drr_logical_size, drrw->drr_checksumtype,
2260 drrw->drr_flags, drrw->drr_compressiontype,
2261 drrw->drr_compressed_size, err);
2262 break;
2263 }
2264 case DRR_WRITE_BYREF:
2265 {
2266 struct drr_write_byref *drrwbr =
2267 &rrd->header.drr_u.drr_write_byref;
2268 dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
2269 "length = %llu toguid = %llx refguid = %llx "
2270 "refobject = %llu refoffset = %llu cksumtype = %u "
2271 "flags = %u err = %d\n",
2272 drrwbr->drr_object, drrwbr->drr_offset,
2273 drrwbr->drr_length, drrwbr->drr_toguid,
2274 drrwbr->drr_refguid, drrwbr->drr_refobject,
2275 drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
2276 drrwbr->drr_flags, err);
2277 break;
2278 }
2279 case DRR_WRITE_EMBEDDED:
2280 {
2281 struct drr_write_embedded *drrwe =
2282 &rrd->header.drr_u.drr_write_embedded;
2283 dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
2284 "length = %llu compress = %u etype = %u lsize = %u "
2285 "psize = %u err = %d\n",
2286 drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
2287 drrwe->drr_compression, drrwe->drr_etype,
2288 drrwe->drr_lsize, drrwe->drr_psize, err);
2289 break;
2290 }
2291 case DRR_FREE:
2292 {
2293 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2294 dprintf("drr_type = FREE obj = %llu offset = %llu "
2295 "length = %lld err = %d\n",
2296 drrf->drr_object, drrf->drr_offset, drrf->drr_length,
2297 err);
2298 break;
2299 }
2300 case DRR_SPILL:
2301 {
2302 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2303 dprintf("drr_type = SPILL obj = %llu length = %llu "
2304 "err = %d\n", drrs->drr_object, drrs->drr_length, err);
2305 break;
2306 }
2307 case DRR_OBJECT_RANGE:
2308 {
2309 struct drr_object_range *drror =
2310 &rrd->header.drr_u.drr_object_range;
2311 dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
2312 "numslots = %llu flags = %u err = %d\n",
2313 drror->drr_firstobj, drror->drr_numslots,
2314 drror->drr_flags, err);
2315 break;
2316 }
2317 default:
2318 return;
2319 }
2320 #endif
2321 }
2323 /*
2324 * Commit the records to the pool.
2325 */
2326 static int
2327 receive_process_record(struct receive_writer_arg *rwa,
2328 struct receive_record_arg *rrd)
2329 {
2330 int err;
2332 /* Processing in order, therefore bytes_read should be increasing. */
2333 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2334 rwa->bytes_read = rrd->bytes_read;
2336 switch (rrd->header.drr_type) {
2337 case DRR_OBJECT:
2338 {
2339 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2340 err = receive_object(rwa, drro, rrd->payload);
2341 kmem_free(rrd->payload, rrd->payload_size);
2342 rrd->payload = NULL;
2343 break;
2344 }
2345 case DRR_FREEOBJECTS:
2346 {
2347 struct drr_freeobjects *drrfo =
2348 &rrd->header.drr_u.drr_freeobjects;
2349 err = receive_freeobjects(rwa, drrfo);
2350 break;
2351 }
2352 case DRR_WRITE:
2353 {
2354 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2355 err = receive_write(rwa, drrw, rrd->arc_buf);
2356 /* if receive_write() is successful, it consumes the arc_buf */
2357 if (err != 0)
2358 dmu_return_arcbuf(rrd->arc_buf);
2359 rrd->arc_buf = NULL;
2360 rrd->payload = NULL;
2361 break;
2362 }
2363 case DRR_WRITE_BYREF:
2364 {
2365 struct drr_write_byref *drrwbr =
2366 &rrd->header.drr_u.drr_write_byref;
2367 err = receive_write_byref(rwa, drrwbr);
2368 break;
2369 }
2370 case DRR_WRITE_EMBEDDED:
2371 {
2372 struct drr_write_embedded *drrwe =
2373 &rrd->header.drr_u.drr_write_embedded;
2374 err = receive_write_embedded(rwa, drrwe, rrd->payload);
2375 kmem_free(rrd->payload, rrd->payload_size);
2376 rrd->payload = NULL;
2377 break;
2378 }
2379 case DRR_FREE:
2380 {
2381 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2382 err = receive_free(rwa, drrf);
2383 break;
2384 }
2385 case DRR_SPILL:
2386 {
2387 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2388 err = receive_spill(rwa, drrs, rrd->arc_buf);
2389 /* if receive_spill() is successful, it consumes the arc_buf */
2390 if (err != 0)
2391 dmu_return_arcbuf(rrd->arc_buf);
2392 rrd->arc_buf = NULL;
2393 rrd->payload = NULL;
2394 break;
2395 }
2396 case DRR_OBJECT_RANGE:
2397 {
2398 struct drr_object_range *drror =
2399 &rrd->header.drr_u.drr_object_range;
2400 err = receive_object_range(rwa, drror);
2401 break;
2402 }
2403 default:
2404 err = (SET_ERROR(EINVAL));
2405 }
2407 if (err != 0)
2408 dprintf_drr(rrd, err);
2410 return (err);
2411 }
2413 /*
2414 * dmu_recv_stream's worker thread; pull records off the queue, and then call
2415 * receive_process_record. When we're done, signal the main thread and exit.
2416 */
2417 static void
2418 receive_writer_thread(void *arg)
2419 {
2420 struct receive_writer_arg *rwa = arg;
2421 struct receive_record_arg *rrd;
2422 fstrans_cookie_t cookie = spl_fstrans_mark();
2424 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2425 rrd = bqueue_dequeue(&rwa->q)) {
2426 /*
2427 * If there's an error, the main thread will stop putting things
2428 * on the queue, but we need to clear everything in it before we
2429 * can exit.
2430 */
2431 if (rwa->err == 0) {
2432 rwa->err = receive_process_record(rwa, rrd);
2433 } else if (rrd->arc_buf != NULL) {
2434 dmu_return_arcbuf(rrd->arc_buf);
2435 rrd->arc_buf = NULL;
2436 rrd->payload = NULL;
2437 } else if (rrd->payload != NULL) {
2438 kmem_free(rrd->payload, rrd->payload_size);
2439 rrd->payload = NULL;
2440 }
2441 kmem_free(rrd, sizeof (*rrd));
2442 }
2443 kmem_free(rrd, sizeof (*rrd));
2444 mutex_enter(&rwa->mutex);
2445 rwa->done = B_TRUE;
2446 cv_signal(&rwa->cv);
2447 mutex_exit(&rwa->mutex);
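/*
 * The matching side of this handshake lives in dmu_recv_stream(),
 * which enqueues an eos_marker record and then blocks:
 *
 *   mutex_enter(&rwa->mutex);
 *   while (!rwa->done)
 *           cv_wait(&rwa->cv, &rwa->mutex);
 *   mutex_exit(&rwa->mutex);
 */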
2448 spl_fstrans_unmark(cookie);
2449 thread_exit();
2450 }
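/*
 * For reference (illustrative): a resuming stream's DRR_BEGIN payload
 * carries an nvlist recording where the sender resumed from, e.g.
 *
 *   { "resume_object" = 193, "resume_offset" = 0x20000 }
 *
 * resume_check() below verifies that these match the resume state
 * saved on disk for this dataset.
 */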
2452 static int
2453 resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
2454 {
2455 uint64_t val;
2456 objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
2457 uint64_t dsobj = dmu_objset_id(ra->os);
2458 uint64_t resume_obj, resume_off;
2460 if (nvlist_lookup_uint64(begin_nvl,
2461 "resume_object", &resume_obj) != 0 ||
2462 nvlist_lookup_uint64(begin_nvl,
2463 "resume_offset", &resume_off) != 0) {
2464 return (SET_ERROR(EINVAL));
2465 }
2466 VERIFY0(zap_lookup(mos, dsobj,
2467 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
2468 if (resume_obj != val)
2469 return (SET_ERROR(EINVAL));
2470 VERIFY0(zap_lookup(mos, dsobj,
2471 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
2472 if (resume_off != val)
2473 return (SET_ERROR(EINVAL));
2475 return (0);
2476 }
2478 /*
2479 * Read in the stream's records, one by one, and apply them to the pool. There
2480 * are two threads involved; the thread that calls this function will spin up a
2481 * worker thread, read the records off the stream one by one, and issue
2482 * prefetches for any necessary indirect blocks. It will then push the records
2483 * onto an internal blocking queue. The worker thread will pull the records off
2484 * the queue, and actually write the data into the DMU. This way, the worker
2485 * thread doesn't have to wait for reads to complete, since everything it needs
2486 * (the indirect blocks) will be prefetched.
2487 *
2488 * NB: callers *must* call dmu_recv_end() if this succeeds.
2489 */
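/*
 * Rough shape of the pipeline described above (illustrative):
 *
 *   main thread                            receive_writer_thread
 *   -----------                            ---------------------
 *   receive_read_record()                  rrd = bqueue_dequeue(&rwa->q)
 *   receive_read_prefetch()   --rwa->q-->  receive_process_record()
 *   bqueue_enqueue(&rwa->q)                (loops until eos_marker)
 */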
2490 int
2491 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
2492 int cleanup_fd, uint64_t *action_handlep)
2493 {
2494 int err = 0;
2495 struct receive_arg *ra;
2496 struct receive_writer_arg *rwa;
2497 int featureflags;
2498 uint32_t payloadlen;
2499 void *payload;
2500 nvlist_t *begin_nvl = NULL;
2502 ra = kmem_zalloc(sizeof (*ra), KM_SLEEP);
2503 rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
2505 ra->byteswap = drc->drc_byteswap;
2506 ra->raw = drc->drc_raw;
2507 ra->cksum = drc->drc_cksum;
2508 ra->vp = vp;
2509 ra->voff = *voffp;
2511 if (dsl_dataset_is_zapified(drc->drc_ds)) {
2512 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
2513 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
2514 sizeof (ra->bytes_read), 1, &ra->bytes_read);
2515 }
2517 objlist_create(&ra->ignore_objlist);
2519 /* these were verified in dmu_recv_begin */
2520 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
2521 DMU_SUBSTREAM);
2522 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
2524 /*
2525 * Open the objset we are modifying.
2526 */
2527 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra->os));
2529 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2531 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
2532 ra->featureflags = featureflags;
2534 ASSERT0(ra->os->os_encrypted &&
2535 (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
2537 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
2538 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
2539 minor_t minor;
2541 if (cleanup_fd == -1) {
2542 err = SET_ERROR(EBADF);
2543 goto out;
2544 }
2545 err = zfs_onexit_fd_hold(cleanup_fd, &minor);
2546 if (err != 0) {
2547 cleanup_fd = -1;
2548 goto out;
2549 }
2551 if (*action_handlep == 0) {
2552 rwa->guid_to_ds_map =
2553 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
2554 avl_create(rwa->guid_to_ds_map, guid_compare,
2555 sizeof (guid_map_entry_t),
2556 offsetof(guid_map_entry_t, avlnode));
2557 err = zfs_onexit_add_cb(minor,
2558 free_guid_map_onexit, rwa->guid_to_ds_map,
2559 action_handlep);
2560 if (err != 0)
2561 goto out;
2562 } else {
2563 err = zfs_onexit_cb_data(minor, *action_handlep,
2564 (void **)&rwa->guid_to_ds_map);
2565 if (err != 0)
2566 goto out;
2567 }
2569 drc->drc_guid_to_ds_map = rwa->guid_to_ds_map;
2570 }
2572 payloadlen = drc->drc_drr_begin->drr_payloadlen;
2573 payload = NULL;
2574 if (payloadlen != 0)
2575 payload = kmem_alloc(payloadlen, KM_SLEEP);
2577 err = receive_read_payload_and_next_header(ra, payloadlen, payload);
2578 if (err != 0) {
2579 if (payloadlen != 0)
2580 kmem_free(payload, payloadlen);
2581 goto out;
2582 }
2583 if (payloadlen != 0) {
2584 err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
2585 kmem_free(payload, payloadlen);
2586 if (err != 0)
2587 goto out;
2588 }
2590 /* handle DSL encryption key payload */
2591 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
2592 nvlist_t *keynvl = NULL;
2594 ASSERT(ra->os->os_encrypted);
2595 ASSERT(drc->drc_raw);
2597 err = nvlist_lookup_nvlist(begin_nvl, "crypt_keydata", &keynvl);
2598 if (err != 0)
2599 goto out;
2601 /*
2602 * If this is a new dataset we set the key immediately.
2603 * Otherwise we don't want to change the key until we
2604 * are sure the rest of the receive succeeded, so we stash
2605 * the keynvl away until then.
2606 */
2607 err = dsl_crypto_recv_raw(spa_name(ra->os->os_spa),
2608 drc->drc_ds->ds_object, drc->drc_fromsnapobj,
2609 drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
2610 if (err != 0)
2611 goto out;
2613 /* see comment in dmu_recv_end_sync() */
2614 drc->drc_ivset_guid = 0;
2615 (void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
2616 &drc->drc_ivset_guid);
2618 if (!drc->drc_newfs)
2619 drc->drc_keynvl = fnvlist_dup(keynvl);
2620 }
2622 if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
2623 err = resume_check(ra, begin_nvl);
2624 if (err != 0)
2625 goto out;
2626 }
2628 (void) bqueue_init(&rwa->q,
2629 MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
2630 offsetof(struct receive_record_arg, node));
2631 cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
2632 mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
2633 rwa->os = ra->os;
2634 rwa->byteswap = drc->drc_byteswap;
2635 rwa->resumable = drc->drc_resumable;
2636 rwa->raw = drc->drc_raw;
2637 rwa->spill = drc->drc_spill;
2638 rwa->os->os_raw_receive = drc->drc_raw;
2640 (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
2641 TS_RUN, minclsyspri);
2642 /*
2643 * We're reading rwa->err without locks, which is safe since we are the
2644 * only reader, and the worker thread is the only writer. It's ok if we
2645 * miss a write for an iteration or two of the loop, since the writer
2646 * thread will keep freeing records we send it until we send it an eos
2647 * marker.
2648 *
2649 * We can leave this loop in 3 ways: First, if rwa->err is
2650 * non-zero. In that case, the writer thread will free the rrd we just
2651 * pushed. Second, if we're interrupted; in that case, either it's the
2652 * first loop and ra->rrd was never allocated, or it's later and ra->rrd
2653 * has been handed off to the writer thread who will free it. Finally,
2654 * if receive_read_record fails or we're at the end of the stream, then
2655 * we free ra->rrd and exit.
2656 */
2657 while (rwa->err == 0) {
2658 if (issig(JUSTLOOKING) && issig(FORREAL)) {
2659 err = SET_ERROR(EINTR);
2660 break;
2661 }
2663 ASSERT3P(ra->rrd, ==, NULL);
2664 ra->rrd = ra->next_rrd;
2665 ra->next_rrd = NULL;
2666 /* Allocates and loads header into ra->next_rrd */
2667 err = receive_read_record(ra);
2669 if (ra->rrd->header.drr_type == DRR_END || err != 0) {
2670 kmem_free(ra->rrd, sizeof (*ra->rrd));
2671 ra->rrd = NULL;
2672 break;
2673 }
2675 bqueue_enqueue(&rwa->q, ra->rrd,
2676 sizeof (struct receive_record_arg) + ra->rrd->payload_size);
2677 ra->rrd = NULL;
2678 }
2679 ASSERT3P(ra->rrd, ==, NULL);
2680 ra->rrd = kmem_zalloc(sizeof (*ra->rrd), KM_SLEEP);
2681 ra->rrd->eos_marker = B_TRUE;
2682 bqueue_enqueue(&rwa->q, ra->rrd, 1);
2684 mutex_enter(&rwa->mutex);
2685 while (!rwa->done) {
2686 cv_wait(&rwa->cv, &rwa->mutex);
2687 }
2688 mutex_exit(&rwa->mutex);
2690 /*
2691 * If we are receiving a full stream as a clone, all object IDs which
2692 * are greater than the maximum ID referenced in the stream are
2693 * by definition unused and must be freed.
2694 */
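/*
 * Example (illustrative): if the stream referenced objects only up to
 * rwa->max_object == 500, but the origin snapshot being cloned had
 * objects up to 800, then objects 501..800 exist in the clone yet were
 * never mentioned by the stream, so the loop below frees them.
 */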
2695 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
2696 uint64_t obj = rwa->max_object + 1;
2697 int free_err = 0;
2698 int next_err = 0;
2700 while (next_err == 0) {
2701 free_err = dmu_free_long_object(rwa->os, obj);
2702 if (free_err != 0 && free_err != ENOENT)
2703 break;
2705 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
2706 }
2708 if (err == 0) {
2709 if (free_err != 0 && free_err != ENOENT)
2710 err = free_err;
2711 else if (next_err != ESRCH)
2712 err = next_err;
2713 }
2714 }
2716 cv_destroy(&rwa->cv);
2717 mutex_destroy(&rwa->mutex);
2718 bqueue_destroy(&rwa->q);
2719 if (err == 0)
2720 err = rwa->err;
2722 out:
2723 /*
2724 * If we hit an error before we started the receive_writer_thread
2725 * we need to clean up the next_rrd we created by processing the
2726 * DRR_BEGIN record.
2727 */
2728 if (ra->next_rrd != NULL)
2729 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2731 nvlist_free(begin_nvl);
2732 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
2733 zfs_onexit_fd_rele(cleanup_fd);
2735 if (err != 0) {
2736 /*
2737 * Clean up references. If the receive is not resumable,
2738 * destroy what we created, so we don't leave it in
2739 * an inconsistent state.
2740 */
2741 dmu_recv_cleanup_ds(drc);
2742 nvlist_free(drc->drc_keynvl);
2743 }
2745 *voffp = ra->voff;
2746 objlist_destroy(&ra->ignore_objlist);
2747 kmem_free(ra, sizeof (*ra));
2748 kmem_free(rwa, sizeof (*rwa));
2749 return (err);
2750 }
2752 static int
2753 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
2754 {
2755 dmu_recv_cookie_t *drc = arg;
2756 dsl_pool_t *dp = dmu_tx_pool(tx);
2757 int error;
2759 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
2761 if (!drc->drc_newfs) {
2762 dsl_dataset_t *origin_head;
2764 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
2765 if (error != 0)
2766 return (error);
2767 if (drc->drc_force) {
2768 /*
2769 * We will destroy any snapshots in tofs (i.e. before
2770 * origin_head) that are after the origin (which is
2771 * the snap before drc_ds, because drc_ds cannot
2772 * have any snaps of its own).
2773 */
2774 uint64_t obj;
2776 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2777 while (obj !=
2778 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2779 dsl_dataset_t *snap;
2780 error = dsl_dataset_hold_obj(dp, obj, FTAG,
2781 &snap);
2782 if (error != 0)
2783 break;
2784 if (snap->ds_dir != origin_head->ds_dir)
2785 error = SET_ERROR(EINVAL);
2786 if (error == 0) {
2787 error = dsl_destroy_snapshot_check_impl(
2788 snap, B_FALSE);
2789 }
2790 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2791 dsl_dataset_rele(snap, FTAG);
2792 if (error != 0)
2793 break;
2794 }
2795 if (error != 0) {
2796 dsl_dataset_rele(origin_head, FTAG);
2797 return (error);
2798 }
2799 }
2800 if (drc->drc_keynvl != NULL) {
2801 error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
2802 drc->drc_keynvl, tx);
2803 if (error != 0) {
2804 dsl_dataset_rele(origin_head, FTAG);
2805 return (error);
2806 }
2807 }
2809 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
2810 origin_head, drc->drc_force, drc->drc_owner, tx);
2811 if (error != 0) {
2812 dsl_dataset_rele(origin_head, FTAG);
2813 return (error);
2814 }
2815 error = dsl_dataset_snapshot_check_impl(origin_head,
2816 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2817 dsl_dataset_rele(origin_head, FTAG);
2818 if (error != 0)
2819 return (error);
2821 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
2822 } else {
2823 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
2824 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2825 }
2826 return (error);
2827 }
2829 static void
2830 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
2831 {
2832 dmu_recv_cookie_t *drc = arg;
2833 dsl_pool_t *dp = dmu_tx_pool(tx);
2834 boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
2836 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
2837 tx, "snap=%s", drc->drc_tosnap);
2838 drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
2840 if (!drc->drc_newfs) {
2841 dsl_dataset_t *origin_head;
2843 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
2844 &origin_head));
2846 if (drc->drc_force) {
2847 /*
2848 * Destroy any snapshots of drc_tofs (origin_head)
2849 * after the origin (the snap before drc_ds).
2850 */
2851 uint64_t obj;
2853 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2854 while (obj !=
2855 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2856 dsl_dataset_t *snap;
2857 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
2858 &snap));
2859 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
2860 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2861 dsl_destroy_snapshot_sync_impl(snap,
2862 B_FALSE, tx);
2863 dsl_dataset_rele(snap, FTAG);
2864 }
2865 }
2866 if (drc->drc_keynvl != NULL) {
2867 dsl_crypto_recv_raw_key_sync(drc->drc_ds,
2868 drc->drc_keynvl, tx);
2869 nvlist_free(drc->drc_keynvl);
2870 drc->drc_keynvl = NULL;
2871 }
2873 VERIFY3P(drc->drc_ds->ds_prev, ==, origin_head->ds_prev);
2875 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
2876 origin_head, tx);
2877 dsl_dataset_snapshot_sync_impl(origin_head,
2878 drc->drc_tosnap, tx);
2880 /* set snapshot's creation time and guid */
2881 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
2882 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
2883 drc->drc_drrb->drr_creation_time;
2884 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
2885 drc->drc_drrb->drr_toguid;
2886 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
2887 ~DS_FLAG_INCONSISTENT;
2889 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
2890 dsl_dataset_phys(origin_head)->ds_flags &=
2891 ~DS_FLAG_INCONSISTENT;
2893 drc->drc_newsnapobj =
2894 dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2896 dsl_dataset_rele(origin_head, FTAG);
2897 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
2899 if (drc->drc_owner != NULL)
2900 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
2901 } else {
2902 dsl_dataset_t *ds = drc->drc_ds;
2904 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
2906 /* set snapshot's creation time and guid */
2907 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2908 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
2909 drc->drc_drrb->drr_creation_time;
2910 dsl_dataset_phys(ds->ds_prev)->ds_guid =
2911 drc->drc_drrb->drr_toguid;
2912 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
2913 ~DS_FLAG_INCONSISTENT;
2915 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2916 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
2917 if (dsl_dataset_has_resume_receive_state(ds)) {
2918 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
2919 DS_FIELD_RESUME_FROMGUID, tx);
2920 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
2921 DS_FIELD_RESUME_OBJECT, tx);
2922 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
2923 DS_FIELD_RESUME_OFFSET, tx);
2924 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
2925 DS_FIELD_RESUME_BYTES, tx);
2926 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
2927 DS_FIELD_RESUME_TOGUID, tx);
2928 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
2929 DS_FIELD_RESUME_TONAME, tx);
2930 }
2931 drc->drc_newsnapobj =
2932 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
2933 }
2935 /*
2936 * If this is a raw receive, the crypt_keydata nvlist will include
2937 * a to_ivset_guid for us to set on the new snapshot. This value
2938 * will override the value generated by the snapshot code. However,
2939 * this value may not be present, because older implementations of
2940 * the raw send code did not include this value, and we are still
2941 * allowed to receive them if the zfs_disable_ivset_guid_check
2942 * tunable is set, in which case we will leave the newly-generated
2943 * value.
2944 */
2945 if (drc->drc_raw && drc->drc_ivset_guid != 0) {
2946 dmu_object_zapify(dp->dp_meta_objset, drc->drc_newsnapobj,
2947 DMU_OT_DSL_DATASET, tx);
2948 VERIFY0(zap_update(dp->dp_meta_objset, drc->drc_newsnapobj,
2949 DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
2950 &drc->drc_ivset_guid, tx));
2951 }
2953 zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE);
2955 /*
2956 * Release the hold from dmu_recv_begin. This must be done before
2957 * we return to open context, so that when we free the dataset's dnode
2958 * we can evict its bonus buffer. Since the dataset may be destroyed
2959 * at this point (and therefore won't have a valid pointer to the spa)
2960 * we release the key mapping manually here while we do have a valid
2961 * pointer, if it exists.
2962 */
2963 if (!drc->drc_raw && encrypted) {
2964 (void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
2965 drc->drc_ds->ds_object, drc->drc_ds);
2966 }
2967 dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
2968 drc->drc_ds = NULL;
2969 }
2971 static int
2972 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj,
2973 boolean_t raw)
2974 {
2975 dsl_pool_t *dp;
2976 dsl_dataset_t *snapds;
2977 guid_map_entry_t *gmep;
2978 objset_t *os;
2979 ds_hold_flags_t dsflags = (raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
2980 int err;
2982 ASSERT(guid_map != NULL);
2984 err = dsl_pool_hold(name, FTAG, &dp);
2985 if (err != 0)
2986 return (err);
2987 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
2988 err = dsl_dataset_own_obj(dp, snapobj, dsflags, gmep, &snapds);
2989 if (err == 0) {
2990 /*
2991 * If this is a deduplicated raw send stream, we need
2992 * to make sure that we can still read raw blocks from
2993 * earlier datasets in the stream, so we set the
2994 * os_raw_receive flag now.
2995 */
2996 if (raw) {
2997 err = dmu_objset_from_ds(snapds, &os);
2998 if (err != 0) {
2999 dsl_dataset_disown(snapds, dsflags, FTAG);
3000 dsl_pool_rele(dp, FTAG);
3001 kmem_free(gmep, sizeof (*gmep));
3002 return (err);
3003 }
3004 os->os_raw_receive = B_TRUE;
3005 }
3007 gmep->raw = raw;
3008 gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
3009 gmep->gme_ds = snapds;
3010 avl_add(guid_map, gmep);
3011 } else {
3012 kmem_free(gmep, sizeof (*gmep));
3013 }
3015 dsl_pool_rele(dp, FTAG);
3016 return (err);
3017 }
3019 static int dmu_recv_end_modified_blocks = 3;
3021 static int
3022 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3023 {
3024 #ifdef _KERNEL
3025 /*
3026 * We will be destroying the ds; make sure its origin is unmounted if
3027 * necessary.
3028 */
3029 char name[ZFS_MAX_DATASET_NAME_LEN];
3030 dsl_dataset_name(drc->drc_ds, name);
3031 zfs_destroy_unmount_origin(name);
3032 #endif
3034 return (dsl_sync_task(drc->drc_tofs,
3035 dmu_recv_end_check, dmu_recv_end_sync, drc,
3036 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3037 }
3039 static int
3040 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3041 {
3042 return (dsl_sync_task(drc->drc_tofs,
3043 dmu_recv_end_check, dmu_recv_end_sync, drc,
3044 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3045 }
3047 int
3048 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3049 {
3050 int error;
3052 drc->drc_owner = owner;
3054 if (drc->drc_newfs)
3055 error = dmu_recv_new_end(drc);
3056 else
3057 error = dmu_recv_existing_end(drc);
3059 if (error != 0) {
3060 dmu_recv_cleanup_ds(drc);
3061 nvlist_free(drc->drc_keynvl);
3062 } else if (drc->drc_guid_to_ds_map != NULL) {
3063 (void) add_ds_to_guidmap(drc->drc_tofs, drc->drc_guid_to_ds_map,
3064 drc->drc_newsnapobj, drc->drc_raw);
3065 }
3066 return (error);
3067 }
3069 /*
3070 * Return TRUE if this objset is currently being received into.
3071 */
3072 boolean_t
3073 dmu_objset_is_receiving(objset_t *os)
3074 {
3075 return (os->os_dsl_dataset != NULL &&
3076 os->os_dsl_dataset->ds_owner == dmu_recv_tag);
3077 }
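/*
 * Hypothetical caller sketch (not from the original source): code that
 * must not operate on a dataset mid-receive can bail out early, e.g.
 *
 *   if (dmu_objset_is_receiving(os))
 *           return (SET_ERROR(EBUSY));
 */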
3079 #if defined(_KERNEL)
3080 module_param(zfs_recv_queue_length, int, 0644);
3081 MODULE_PARM_DESC(zfs_recv_queue_length, "Maximum receive queue length");
3082 #endif