/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/spa_impl.h>
#include <sys/dmu_recv.h>
#include <sys/zfs_project.h>
#include "zfs_namecheck.h"
#include <sys/vdev_impl.h>
/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

/*
 * Tunable to overwrite the maximum number of threads for the parallelization
 * of dmu_objset_find_dp, needed to speed up the import of pools with many
 * datasets.
 * Default is 4 times the number of leaf vdevs.
 */
static const int dmu_find_threads = 0;

/*
 * Backfill lower metadnode objects after this many have been freed.
 * Backfilling negatively impacts object creation rates, so only do it
 * if there are enough holes to fill.
 */
static const int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT;

static const char *upgrade_tag = "upgrade_tag";

static void dmu_objset_find_dp_cb(void *arg);

static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb);
static void dmu_objset_upgrade_stop(objset_t *os);
void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}
spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);

	return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

uint64_t
dmu_objset_dnodesize(objset_t *os)
{
	return (os->os_dnodesize);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}
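/*
 * Property change callbacks.  Each callback below is registered with
 * dsl_prop_register() when the objset is opened (see dmu_objset_open_impl())
 * and is invoked whenever the corresponding dataset property changes,
 * updating the cached value in the objset_t.
 */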
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(os->os_spa,
	    ZIO_COMPRESS_ALGO(newval), ZIO_COMPRESS_ON);
	os->os_complevel = zio_complevel_select(os->os_spa, os->os_compress,
	    ZIO_COMPRESS_LEVEL(newval), ZIO_COMPLEVEL_DEFAULT);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}
static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
prefetch_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval == ZFS_PREFETCH_ALL || newval == ZFS_PREFETCH_NONE ||
	    newval == ZFS_PREFETCH_METADATA);
	os->os_prefetch = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	zil_set_sync(os->os_zil, newval);
}

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
	    newval == ZFS_REDUNDANT_METADATA_MOST ||
	    newval == ZFS_REDUNDANT_METADATA_SOME ||
	    newval == ZFS_REDUNDANT_METADATA_NONE);

	os->os_redundant_metadata = newval;
}
static void
dnodesize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	switch (newval) {
	case ZFS_DNSIZE_LEGACY:
		os->os_dnodesize = DNODE_MIN_SIZE;
		break;
	case ZFS_DNSIZE_AUTO:
		/*
		 * Choose a dnode size that will work well for most
		 * workloads if the user specified "auto". Future code
		 * improvements could dynamically select a dnode size
		 * based on observed workload patterns.
		 */
		os->os_dnodesize = DNODE_MIN_SIZE * 2;
		break;
	default:
		os->os_dnodesize = newval;
		break;
	}
}

static void
smallblk_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval <= SPA_MAXBLOCKSIZE);
	ASSERT(ISP2(newval));

	os->os_zpl_special_smallblock = newval;
}

static void
direct_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_DIRECT_DISABLED || newval == ZFS_DIRECT_STANDARD ||
	    newval == ZFS_DIRECT_ALWAYS);

	os->os_direct = newval;
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	zil_set_logbias(os->os_zil, newval);
}

static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	os->os_recordsize = newval;
}
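/*
 * The on-disk objset_phys_t has grown over time: OBJSET_PHYS_SIZE_V1 predates
 * user/group accounting (pools older than SPA_VERSION_USERSPACE),
 * OBJSET_PHYS_SIZE_V2 adds the userused and groupused dnodes, and the full
 * sizeof (objset_phys_t) also includes the projectused dnode.
 * dmu_objset_byteswap() only swaps the portions present for the given size.
 */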
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_PHYS_SIZE_V1 || size == OBJSET_PHYS_SIZE_V2 ||
	    size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size >= OBJSET_PHYS_SIZE_V2) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
		if (size >= sizeof (objset_phys_t))
			dnode_byteswap(&osp->os_projectused_dnode);
	}
}
/*
 * Runs cityhash on the objset_t pointer and the object number.
 */
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
	uintptr_t osv = (uintptr_t)os;

	return (cityhash2((uint64_t)osv, obj));
}

static unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
	dnode_t *dn = obj;

	/*
	 * The low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublists' usage
	 * would not be evenly distributed. In this context full 64bit
	 * division would be a waste of time, so limit it to 32 bits.
	 */
	return ((unsigned int)dnode_hash(dn->dn_objset, dn->dn_object) %
	    multilist_get_num_sublists(ml));
}
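/*
 * Decide whether blocks of this objset may be cached in the L2ARC: the
 * secondarycache property must allow it, and when l2arc_exclude_special is
 * set we look up which top-level vdev holds the objset's root block so that
 * data already living on a special or dedup allocation class vdev is not
 * also cached.
 */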
static inline boolean_t
dmu_os_is_l2cacheable(objset_t *os)
{
	if (os->os_secondary_cache == ZFS_CACHE_ALL ||
	    os->os_secondary_cache == ZFS_CACHE_METADATA) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		blkptr_t *bp = os->os_rootbp;
		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);

		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = os->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}
462 * Instantiates the objset_t in-memory structure corresponding to the
463 * objset_phys_t that's pointed to by the specified blkptr_t.
466 dmu_objset_open_impl(spa_t
*spa
, dsl_dataset_t
*ds
, blkptr_t
*bp
,
472 ASSERT(ds
== NULL
|| MUTEX_HELD(&ds
->ds_opening_lock
));
473 ASSERT(!BP_IS_REDACTED(bp
));
476 * We need the pool config lock to get properties.
478 ASSERT(ds
== NULL
|| dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
481 * The $ORIGIN dataset (if it exists) doesn't have an associated
482 * objset, so there's no reason to open it. The $ORIGIN dataset
483 * will not exist on pools older than SPA_VERSION_ORIGIN.
485 if (ds
!= NULL
&& spa_get_dsl(spa
) != NULL
&&
486 spa_get_dsl(spa
)->dp_origin_snap
!= NULL
) {
487 ASSERT3P(ds
->ds_dir
, !=,
488 spa_get_dsl(spa
)->dp_origin_snap
->ds_dir
);
491 os
= kmem_zalloc(sizeof (objset_t
), KM_SLEEP
);
492 os
->os_dsl_dataset
= ds
;
495 if (!BP_IS_HOLE(os
->os_rootbp
)) {
496 arc_flags_t aflags
= ARC_FLAG_WAIT
;
499 zio_flag_t zio_flags
= ZIO_FLAG_CANFAIL
;
500 SET_BOOKMARK(&zb
, ds
? ds
->ds_object
: DMU_META_OBJSET
,
501 ZB_ROOT_OBJECT
, ZB_ROOT_LEVEL
, ZB_ROOT_BLKID
);
503 if (dmu_os_is_l2cacheable(os
))
504 aflags
|= ARC_FLAG_L2CACHE
;
506 if (ds
!= NULL
&& ds
->ds_dir
->dd_crypto_obj
!= 0) {
507 ASSERT3U(BP_GET_COMPRESS(bp
), ==, ZIO_COMPRESS_OFF
);
508 ASSERT(BP_IS_AUTHENTICATED(bp
));
509 zio_flags
|= ZIO_FLAG_RAW
;
512 dprintf_bp(os
->os_rootbp
, "reading %s", "");
513 err
= arc_read(NULL
, spa
, os
->os_rootbp
,
514 arc_getbuf_func
, &os
->os_phys_buf
,
515 ZIO_PRIORITY_SYNC_READ
, zio_flags
, &aflags
, &zb
);
517 kmem_free(os
, sizeof (objset_t
));
518 /* convert checksum errors into IO errors */
520 err
= SET_ERROR(EIO
);
524 if (spa_version(spa
) < SPA_VERSION_USERSPACE
)
525 size
= OBJSET_PHYS_SIZE_V1
;
526 else if (!spa_feature_is_enabled(spa
,
527 SPA_FEATURE_PROJECT_QUOTA
))
528 size
= OBJSET_PHYS_SIZE_V2
;
530 size
= sizeof (objset_phys_t
);
532 /* Increase the blocksize if we are permitted. */
533 if (arc_buf_size(os
->os_phys_buf
) < size
) {
534 arc_buf_t
*buf
= arc_alloc_buf(spa
, &os
->os_phys_buf
,
535 ARC_BUFC_METADATA
, size
);
536 memset(buf
->b_data
, 0, size
);
537 memcpy(buf
->b_data
, os
->os_phys_buf
->b_data
,
538 arc_buf_size(os
->os_phys_buf
));
539 arc_buf_destroy(os
->os_phys_buf
, &os
->os_phys_buf
);
540 os
->os_phys_buf
= buf
;
543 os
->os_phys
= os
->os_phys_buf
->b_data
;
544 os
->os_flags
= os
->os_phys
->os_flags
;
546 int size
= spa_version(spa
) >= SPA_VERSION_USERSPACE
?
547 sizeof (objset_phys_t
) : OBJSET_PHYS_SIZE_V1
;
548 os
->os_phys_buf
= arc_alloc_buf(spa
, &os
->os_phys_buf
,
549 ARC_BUFC_METADATA
, size
);
550 os
->os_phys
= os
->os_phys_buf
->b_data
;
551 memset(os
->os_phys
, 0, size
);
554 * These properties will be filled in by the logic in zfs_get_zplprop()
555 * when they are queried for the first time.
557 os
->os_version
= OBJSET_PROP_UNINITIALIZED
;
558 os
->os_normalization
= OBJSET_PROP_UNINITIALIZED
;
559 os
->os_utf8only
= OBJSET_PROP_UNINITIALIZED
;
560 os
->os_casesensitivity
= OBJSET_PROP_UNINITIALIZED
;
563 * Note: the changed_cb will be called once before the register
564 * func returns, thus changing the checksum/compression from the
565 * default (fletcher2/off). Snapshots don't need to know about
566 * checksum/compression/copies.
569 os
->os_encrypted
= (ds
->ds_dir
->dd_crypto_obj
!= 0);
571 err
= dsl_prop_register(ds
,
572 zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE
),
573 primary_cache_changed_cb
, os
);
575 err
= dsl_prop_register(ds
,
576 zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE
),
577 secondary_cache_changed_cb
, os
);
580 err
= dsl_prop_register(ds
,
581 zfs_prop_to_name(ZFS_PROP_PREFETCH
),
582 prefetch_changed_cb
, os
);
584 if (!ds
->ds_is_snapshot
) {
586 err
= dsl_prop_register(ds
,
587 zfs_prop_to_name(ZFS_PROP_CHECKSUM
),
588 checksum_changed_cb
, os
);
591 err
= dsl_prop_register(ds
,
592 zfs_prop_to_name(ZFS_PROP_COMPRESSION
),
593 compression_changed_cb
, os
);
596 err
= dsl_prop_register(ds
,
597 zfs_prop_to_name(ZFS_PROP_COPIES
),
598 copies_changed_cb
, os
);
601 err
= dsl_prop_register(ds
,
602 zfs_prop_to_name(ZFS_PROP_DEDUP
),
603 dedup_changed_cb
, os
);
606 err
= dsl_prop_register(ds
,
607 zfs_prop_to_name(ZFS_PROP_LOGBIAS
),
608 logbias_changed_cb
, os
);
611 err
= dsl_prop_register(ds
,
612 zfs_prop_to_name(ZFS_PROP_SYNC
),
613 sync_changed_cb
, os
);
616 err
= dsl_prop_register(ds
,
618 ZFS_PROP_REDUNDANT_METADATA
),
619 redundant_metadata_changed_cb
, os
);
622 err
= dsl_prop_register(ds
,
623 zfs_prop_to_name(ZFS_PROP_RECORDSIZE
),
624 recordsize_changed_cb
, os
);
627 err
= dsl_prop_register(ds
,
628 zfs_prop_to_name(ZFS_PROP_DNODESIZE
),
629 dnodesize_changed_cb
, os
);
632 err
= dsl_prop_register(ds
,
634 ZFS_PROP_SPECIAL_SMALL_BLOCKS
),
635 smallblk_changed_cb
, os
);
638 err
= dsl_prop_register(ds
,
639 zfs_prop_to_name(ZFS_PROP_DIRECT
),
640 direct_changed_cb
, os
);
644 arc_buf_destroy(os
->os_phys_buf
, &os
->os_phys_buf
);
645 kmem_free(os
, sizeof (objset_t
));
649 /* It's the meta-objset. */
650 os
->os_checksum
= ZIO_CHECKSUM_FLETCHER_4
;
651 os
->os_compress
= ZIO_COMPRESS_ON
;
652 os
->os_complevel
= ZIO_COMPLEVEL_DEFAULT
;
653 os
->os_encrypted
= B_FALSE
;
654 os
->os_copies
= spa_max_replication(spa
);
655 os
->os_dedup_checksum
= ZIO_CHECKSUM_OFF
;
656 os
->os_dedup_verify
= B_FALSE
;
657 os
->os_logbias
= ZFS_LOGBIAS_LATENCY
;
658 os
->os_sync
= ZFS_SYNC_STANDARD
;
659 os
->os_primary_cache
= ZFS_CACHE_ALL
;
660 os
->os_secondary_cache
= ZFS_CACHE_ALL
;
661 os
->os_dnodesize
= DNODE_MIN_SIZE
;
662 os
->os_prefetch
= ZFS_PREFETCH_ALL
;
665 if (ds
== NULL
|| !ds
->ds_is_snapshot
)
666 os
->os_zil_header
= os
->os_phys
->os_zil_header
;
667 os
->os_zil
= zil_alloc(os
, &os
->os_zil_header
);
669 for (i
= 0; i
< TXG_SIZE
; i
++) {
670 multilist_create(&os
->os_dirty_dnodes
[i
], sizeof (dnode_t
),
671 offsetof(dnode_t
, dn_dirty_link
[i
]),
672 dnode_multilist_index_func
);
674 list_create(&os
->os_dnodes
, sizeof (dnode_t
),
675 offsetof(dnode_t
, dn_link
));
676 list_create(&os
->os_downgraded_dbufs
, sizeof (dmu_buf_impl_t
),
677 offsetof(dmu_buf_impl_t
, db_link
));
679 list_link_init(&os
->os_evicting_node
);
681 mutex_init(&os
->os_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
682 mutex_init(&os
->os_userused_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
683 mutex_init(&os
->os_obj_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
684 mutex_init(&os
->os_user_ptr_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
685 os
->os_obj_next_percpu_len
= boot_ncpus
;
686 os
->os_obj_next_percpu
= kmem_zalloc(os
->os_obj_next_percpu_len
*
687 sizeof (os
->os_obj_next_percpu
[0]), KM_SLEEP
);
689 dnode_special_open(os
, &os
->os_phys
->os_meta_dnode
,
690 DMU_META_DNODE_OBJECT
, &os
->os_meta_dnode
);
691 if (OBJSET_BUF_HAS_USERUSED(os
->os_phys_buf
)) {
692 dnode_special_open(os
, &os
->os_phys
->os_userused_dnode
,
693 DMU_USERUSED_OBJECT
, &os
->os_userused_dnode
);
694 dnode_special_open(os
, &os
->os_phys
->os_groupused_dnode
,
695 DMU_GROUPUSED_OBJECT
, &os
->os_groupused_dnode
);
696 if (OBJSET_BUF_HAS_PROJECTUSED(os
->os_phys_buf
))
697 dnode_special_open(os
,
698 &os
->os_phys
->os_projectused_dnode
,
699 DMU_PROJECTUSED_OBJECT
, &os
->os_projectused_dnode
);
702 mutex_init(&os
->os_upgrade_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	/*
	 * We need the pool_config lock to manipulate the dsl_dataset_t.
	 * Even if the dataset is long-held, we need the pool_config lock
	 * to open the objset, as it needs to get properties.
	 */
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
/*
 * Holds the pool while the objset is held.  Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold_flags(const char *name, boolean_t decrypt, const void *tag,
    objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	ds_hold_flags_t flags;

	flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold_flags(dp, name, flags, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}

int
dmu_objset_hold(const char *name, const void *tag, objset_t **osp)
{
	return (dmu_objset_hold_flags(name, B_FALSE, tag, osp));
}
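/*
 * Typical short-term consumer pattern (an illustrative sketch only; the
 * dataset name below is a placeholder):
 *
 *	objset_t *os;
 *	if (dmu_objset_hold("pool/fs", FTAG, &os) == 0) {
 *		... read from the objset ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 *
 * Longer-term, exclusive users instead go through the "own" path below
 * (dmu_objset_own()/dmu_objset_disown()), which long-holds the dataset.
 */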
779 dmu_objset_own_impl(dsl_dataset_t
*ds
, dmu_objset_type_t type
,
780 boolean_t readonly
, boolean_t decrypt
, const void *tag
, objset_t
**osp
)
784 int err
= dmu_objset_from_ds(ds
, osp
);
787 } else if (type
!= DMU_OST_ANY
&& type
!= (*osp
)->os_phys
->os_type
) {
788 return (SET_ERROR(EINVAL
));
789 } else if (!readonly
&& dsl_dataset_is_snapshot(ds
)) {
790 return (SET_ERROR(EROFS
));
791 } else if (!readonly
&& decrypt
&&
792 dsl_dir_incompatible_encryption_version(ds
->ds_dir
)) {
793 return (SET_ERROR(EROFS
));
796 /* if we are decrypting, we can now check MACs in os->os_phys_buf */
797 if (decrypt
&& arc_is_unauthenticated((*osp
)->os_phys_buf
)) {
800 SET_BOOKMARK(&zb
, ds
->ds_object
, ZB_ROOT_OBJECT
,
801 ZB_ROOT_LEVEL
, ZB_ROOT_BLKID
);
802 err
= arc_untransform((*osp
)->os_phys_buf
, (*osp
)->os_spa
,
807 ASSERT0(arc_is_unauthenticated((*osp
)->os_phys_buf
));
814 * dsl_pool must not be held when this is called.
815 * Upon successful return, there will be a longhold on the dataset,
816 * and the dsl_pool will not be held.
819 dmu_objset_own(const char *name
, dmu_objset_type_t type
,
820 boolean_t readonly
, boolean_t decrypt
, const void *tag
, objset_t
**osp
)
825 ds_hold_flags_t flags
;
827 flags
= (decrypt
) ? DS_HOLD_FLAG_DECRYPT
: DS_HOLD_FLAG_NONE
;
828 err
= dsl_pool_hold(name
, FTAG
, &dp
);
831 err
= dsl_dataset_own(dp
, name
, flags
, tag
, &ds
);
833 dsl_pool_rele(dp
, FTAG
);
836 err
= dmu_objset_own_impl(ds
, type
, readonly
, decrypt
, tag
, osp
);
838 dsl_dataset_disown(ds
, flags
, tag
);
839 dsl_pool_rele(dp
, FTAG
);
844 * User accounting requires the dataset to be decrypted and rw.
845 * We also don't begin user accounting during claiming to help
846 * speed up pool import times and to keep this txg reserved
847 * completely for recovery work.
849 if (!readonly
&& !dp
->dp_spa
->spa_claiming
&&
850 (ds
->ds_dir
->dd_crypto_obj
== 0 || decrypt
)) {
851 if (dmu_objset_userobjspace_upgradable(*osp
) ||
852 dmu_objset_projectquota_upgradable(*osp
)) {
853 dmu_objset_id_quota_upgrade(*osp
);
854 } else if (dmu_objset_userused_enabled(*osp
)) {
855 dmu_objset_userspace_upgrade(*osp
);
859 dsl_pool_rele(dp
, FTAG
);
864 dmu_objset_own_obj(dsl_pool_t
*dp
, uint64_t obj
, dmu_objset_type_t type
,
865 boolean_t readonly
, boolean_t decrypt
, const void *tag
, objset_t
**osp
)
869 ds_hold_flags_t flags
;
871 flags
= (decrypt
) ? DS_HOLD_FLAG_DECRYPT
: DS_HOLD_FLAG_NONE
;
872 err
= dsl_dataset_own_obj(dp
, obj
, flags
, tag
, &ds
);
876 err
= dmu_objset_own_impl(ds
, type
, readonly
, decrypt
, tag
, osp
);
878 dsl_dataset_disown(ds
, flags
, tag
);
void
dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, const void *tag)
{
	ds_hold_flags_t flags;
	dsl_pool_t *dp = dmu_objset_pool(os);

	flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
	dsl_dataset_rele_flags(os->os_dsl_dataset, flags, tag);
	dsl_pool_rele(dp, tag);
}

void
dmu_objset_rele(objset_t *os, const void *tag)
{
	dmu_objset_rele_flags(os, B_FALSE, tag);
}
/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag.  We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock to avoid intervening namespace or ownership
 * changes.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
    boolean_t decrypt, const void *tag)
{
	dsl_pool_t *dp;
	char name[ZFS_MAX_DATASET_NAME_LEN];
	ds_hold_flags_t flags;

	flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
	VERIFY3P(ds, !=, NULL);
	VERIFY3P(ds->ds_owner, ==, tag);
	VERIFY(dsl_dataset_long_held(ds));

	dsl_dataset_name(ds, name);
	dp = ds->ds_dir->dd_pool;
	dsl_pool_config_enter(dp, FTAG);
	dsl_dataset_disown(ds, flags, tag);
	VERIFY0(dsl_dataset_own(dp, name, flags, tag, newds));
	dsl_pool_config_exit(dp, FTAG);
}
void
dmu_objset_disown(objset_t *os, boolean_t decrypt, const void *tag)
{
	ds_hold_flags_t flags;

	flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
	/*
	 * Stop upgrading thread
	 */
	dmu_objset_upgrade_stop(os);
	dsl_dataset_disown(os->os_dsl_dataset, flags, tag);
}
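/*
 * Evict all dbufs belonging to this objset.  The dn_marker dnode is inserted
 * into os_dnodes so that the list walk can drop os_lock while an individual
 * dnode's dbufs are being evicted, and then resume at the right place.
 */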
void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t *dn_marker;
	dnode_t *dn;

	dn_marker = kmem_alloc(sizeof (dnode_t), KM_SLEEP);

	mutex_enter(&os->os_lock);
	dn = list_head(&os->os_dnodes);
	while (dn != NULL) {
		/*
		 * Skip dnodes without holds.  We have to do this dance
		 * because dnode_add_ref() only works if there is already a
		 * hold.  If the dnode has no holds, then it has no dbufs.
		 */
		if (dnode_add_ref(dn, FTAG)) {
			list_insert_after(&os->os_dnodes, dn, dn_marker);
			mutex_exit(&os->os_lock);

			dnode_evict_dbufs(dn);
			dnode_rele(dn, FTAG);

			mutex_enter(&os->os_lock);
			dn = list_next(&os->os_dnodes, dn_marker);
			list_remove(&os->os_dnodes, dn_marker);
		} else {
			dn = list_next(&os->os_dnodes, dn);
		}
	}
	mutex_exit(&os->os_lock);

	kmem_free(dn_marker, sizeof (dnode_t));

	if (DMU_USERUSED_DNODE(os) != NULL) {
		if (DMU_PROJECTUSED_DNODE(os) != NULL)
			dnode_evict_dbufs(DMU_PROJECTUSED_DNODE(os));
		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
	}
	dnode_evict_dbufs(DMU_META_DNODE(os));
}
/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction.  Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 * dnode_buf_pageout()), it is possible for the meta dnode for the
 * objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds)
		dsl_prop_unregister_all(ds, os);

	dmu_objset_evict_dbufs(os);

	mutex_enter(&os->os_lock);
	spa_evicting_os_register(os->os_spa, os);
	if (list_is_empty(&os->os_dnodes)) {
		mutex_exit(&os->os_lock);
		dmu_objset_evict_done(os);
	} else {
		mutex_exit(&os->os_lock);
	}
}
void
dmu_objset_evict_done(objset_t *os)
{
	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		if (DMU_PROJECTUSED_DNODE(os))
			dnode_special_close(&os->os_projectused_dnode);
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use.  We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	kmem_free(os->os_obj_next_percpu,
	    os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_userused_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	mutex_destroy(&os->os_upgrade_lock);
	for (int i = 0; i < TXG_SIZE; i++)
		multilist_destroy(&os->os_dirty_dnodes[i]);
	spa_evicting_os_deregister(os->os_spa, os);
	kmem_free(os, sizeof (objset_t));
}
inode_timespec_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}
1077 dmu_objset_create_impl_dnstats(spa_t
*spa
, dsl_dataset_t
*ds
, blkptr_t
*bp
,
1078 dmu_objset_type_t type
, int levels
, int blksz
, int ibs
, dmu_tx_t
*tx
)
1083 ASSERT(dmu_tx_is_syncing(tx
));
1086 blksz
= DNODE_BLOCK_SIZE
;
1088 ibs
= DN_MAX_INDBLKSHIFT
;
1091 VERIFY0(dmu_objset_from_ds(ds
, &os
));
1093 VERIFY0(dmu_objset_open_impl(spa
, NULL
, bp
, &os
));
1095 mdn
= DMU_META_DNODE(os
);
1097 dnode_allocate(mdn
, DMU_OT_DNODE
, blksz
, ibs
, DMU_OT_NONE
, 0,
1098 DNODE_MIN_SLOTS
, tx
);
1101 * We don't want to have to increase the meta-dnode's nlevels
1102 * later, because then we could do it in quiescing context while
1103 * we are also accessing it in open context.
1105 * This precaution is not necessary for the MOS (ds == NULL),
1106 * because the MOS is only updated in syncing context.
1107 * This is most fortunate: the MOS is the only objset that
1108 * needs to be synced multiple times as spa_sync() iterates
1109 * to convergence, so minimizing its dn_nlevels matters.
1116 * Determine the number of levels necessary for the
1117 * meta-dnode to contain DN_MAX_OBJECT dnodes. Note
1118 * that in order to ensure that we do not overflow
1119 * 64 bits, there has to be a nlevels that gives us a
1120 * number of blocks > DN_MAX_OBJECT but < 2^64.
1121 * Therefore, (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)
1122 * (10) must be less than (64 - log2(DN_MAX_OBJECT))
1125 while ((uint64_t)mdn
->dn_nblkptr
<<
1126 (mdn
->dn_datablkshift
- DNODE_SHIFT
+ (levels
- 1) *
1127 (mdn
->dn_indblkshift
- SPA_BLKPTRSHIFT
)) <
1132 mdn
->dn_next_nlevels
[tx
->tx_txg
& TXG_MASK
] =
1133 mdn
->dn_nlevels
= levels
;
1136 ASSERT(type
!= DMU_OST_NONE
);
1137 ASSERT(type
!= DMU_OST_ANY
);
1138 ASSERT(type
< DMU_OST_NUMTYPES
);
1139 os
->os_phys
->os_type
= type
;
1142 * Enable user accounting if it is enabled and this is not an
1143 * encrypted receive.
1145 if (dmu_objset_userused_enabled(os
) &&
1146 (!os
->os_encrypted
|| !dmu_objset_is_receiving(os
))) {
1147 os
->os_phys
->os_flags
|= OBJSET_FLAG_USERACCOUNTING_COMPLETE
;
1148 if (dmu_objset_userobjused_enabled(os
)) {
1149 ASSERT3P(ds
, !=, NULL
);
1150 ds
->ds_feature_activation
[
1151 SPA_FEATURE_USEROBJ_ACCOUNTING
] = (void *)B_TRUE
;
1152 os
->os_phys
->os_flags
|=
1153 OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE
;
1155 if (dmu_objset_projectquota_enabled(os
)) {
1156 ASSERT3P(ds
, !=, NULL
);
1157 ds
->ds_feature_activation
[
1158 SPA_FEATURE_PROJECT_QUOTA
] = (void *)B_TRUE
;
1159 os
->os_phys
->os_flags
|=
1160 OBJSET_FLAG_PROJECTQUOTA_COMPLETE
;
1162 os
->os_flags
= os
->os_phys
->os_flags
;
1165 dsl_dataset_dirty(ds
, tx
);
1170 /* called from dsl for meta-objset */
1172 dmu_objset_create_impl(spa_t
*spa
, dsl_dataset_t
*ds
, blkptr_t
*bp
,
1173 dmu_objset_type_t type
, dmu_tx_t
*tx
)
1175 return (dmu_objset_create_impl_dnstats(spa
, ds
, bp
, type
, 0, 0, 0, tx
));
1178 typedef struct dmu_objset_create_arg
{
1179 const char *doca_name
;
1182 void (*doca_userfunc
)(objset_t
*os
, void *arg
,
1183 cred_t
*cr
, dmu_tx_t
*tx
);
1185 dmu_objset_type_t doca_type
;
1186 uint64_t doca_flags
;
1187 dsl_crypto_params_t
*doca_dcp
;
1188 } dmu_objset_create_arg_t
;
1191 dmu_objset_create_check(void *arg
, dmu_tx_t
*tx
)
1193 dmu_objset_create_arg_t
*doca
= arg
;
1194 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1196 dsl_dataset_t
*parentds
;
1201 if (strchr(doca
->doca_name
, '@') != NULL
)
1202 return (SET_ERROR(EINVAL
));
1204 if (strlen(doca
->doca_name
) >= ZFS_MAX_DATASET_NAME_LEN
)
1205 return (SET_ERROR(ENAMETOOLONG
));
1207 if (dataset_nestcheck(doca
->doca_name
) != 0)
1208 return (SET_ERROR(ENAMETOOLONG
));
1210 error
= dsl_dir_hold(dp
, doca
->doca_name
, FTAG
, &pdd
, &tail
);
1214 dsl_dir_rele(pdd
, FTAG
);
1215 return (SET_ERROR(EEXIST
));
1218 error
= dmu_objset_create_crypt_check(pdd
, doca
->doca_dcp
, NULL
);
1220 dsl_dir_rele(pdd
, FTAG
);
1224 error
= dsl_fs_ss_limit_check(pdd
, 1, ZFS_PROP_FILESYSTEM_LIMIT
, NULL
,
1225 doca
->doca_cred
, doca
->doca_proc
);
1227 dsl_dir_rele(pdd
, FTAG
);
1231 /* can't create below anything but filesystems (eg. no ZVOLs) */
1232 error
= dsl_dataset_hold_obj(pdd
->dd_pool
,
1233 dsl_dir_phys(pdd
)->dd_head_dataset_obj
, FTAG
, &parentds
);
1235 dsl_dir_rele(pdd
, FTAG
);
1238 error
= dmu_objset_from_ds(parentds
, &parentos
);
1240 dsl_dataset_rele(parentds
, FTAG
);
1241 dsl_dir_rele(pdd
, FTAG
);
1244 if (dmu_objset_type(parentos
) != DMU_OST_ZFS
) {
1245 dsl_dataset_rele(parentds
, FTAG
);
1246 dsl_dir_rele(pdd
, FTAG
);
1247 return (SET_ERROR(ZFS_ERR_WRONG_PARENT
));
1249 dsl_dataset_rele(parentds
, FTAG
);
1250 dsl_dir_rele(pdd
, FTAG
);
1256 dmu_objset_create_sync(void *arg
, dmu_tx_t
*tx
)
1258 dmu_objset_create_arg_t
*doca
= arg
;
1259 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1260 spa_t
*spa
= dp
->dp_spa
;
1269 VERIFY0(dsl_dir_hold(dp
, doca
->doca_name
, FTAG
, &pdd
, &tail
));
1271 obj
= dsl_dataset_create_sync(pdd
, tail
, NULL
, doca
->doca_flags
,
1272 doca
->doca_cred
, doca
->doca_dcp
, tx
);
1274 VERIFY0(dsl_dataset_hold_obj_flags(pdd
->dd_pool
, obj
,
1275 DS_HOLD_FLAG_DECRYPT
, FTAG
, &ds
));
1276 rrw_enter(&ds
->ds_bp_rwlock
, RW_READER
, FTAG
);
1277 bp
= dsl_dataset_get_blkptr(ds
);
1278 os
= dmu_objset_create_impl(spa
, ds
, bp
, doca
->doca_type
, tx
);
1279 rrw_exit(&ds
->ds_bp_rwlock
, FTAG
);
1281 if (doca
->doca_userfunc
!= NULL
) {
1282 doca
->doca_userfunc(os
, doca
->doca_userarg
,
1283 doca
->doca_cred
, tx
);
1287 * The doca_userfunc() may write out some data that needs to be
1288 * encrypted if the dataset is encrypted (specifically the root
1289 * directory). This data must be written out before the encryption
1290 * key mapping is removed by dsl_dataset_rele_flags(). Force the
1291 * I/O to occur immediately by invoking the relevant sections of
1294 if (os
->os_encrypted
) {
1295 dsl_dataset_t
*tmpds
= NULL
;
1296 boolean_t need_sync_done
= B_FALSE
;
1298 mutex_enter(&ds
->ds_lock
);
1299 ds
->ds_owner
= FTAG
;
1300 mutex_exit(&ds
->ds_lock
);
1302 rzio
= zio_root(spa
, NULL
, NULL
, ZIO_FLAG_MUSTSUCCEED
);
1303 tmpds
= txg_list_remove_this(&dp
->dp_dirty_datasets
, ds
,
1305 if (tmpds
!= NULL
) {
1306 dsl_dataset_sync(ds
, rzio
, tx
);
1307 need_sync_done
= B_TRUE
;
1309 VERIFY0(zio_wait(rzio
));
1311 dmu_objset_sync_done(os
, tx
);
1312 taskq_wait(dp
->dp_sync_taskq
);
1313 if (txg_list_member(&dp
->dp_dirty_datasets
, ds
, tx
->tx_txg
)) {
1314 ASSERT3P(ds
->ds_key_mapping
, !=, NULL
);
1315 key_mapping_rele(spa
, ds
->ds_key_mapping
, ds
);
1318 rzio
= zio_root(spa
, NULL
, NULL
, ZIO_FLAG_MUSTSUCCEED
);
1319 tmpds
= txg_list_remove_this(&dp
->dp_dirty_datasets
, ds
,
1321 if (tmpds
!= NULL
) {
1322 dmu_buf_rele(ds
->ds_dbuf
, ds
);
1323 dsl_dataset_sync(ds
, rzio
, tx
);
1325 VERIFY0(zio_wait(rzio
));
1327 if (need_sync_done
) {
1328 ASSERT3P(ds
->ds_key_mapping
, !=, NULL
);
1329 key_mapping_rele(spa
, ds
->ds_key_mapping
, ds
);
1330 dsl_dataset_sync_done(ds
, tx
);
1331 dmu_buf_rele(ds
->ds_dbuf
, ds
);
1334 mutex_enter(&ds
->ds_lock
);
1335 ds
->ds_owner
= NULL
;
1336 mutex_exit(&ds
->ds_lock
);
1339 spa_history_log_internal_ds(ds
, "create", tx
, " ");
1341 dsl_dataset_rele_flags(ds
, DS_HOLD_FLAG_DECRYPT
, FTAG
);
1342 dsl_dir_rele(pdd
, FTAG
);
1346 dmu_objset_create(const char *name
, dmu_objset_type_t type
, uint64_t flags
,
1347 dsl_crypto_params_t
*dcp
, dmu_objset_create_sync_func_t func
, void *arg
)
1349 dmu_objset_create_arg_t doca
;
1350 dsl_crypto_params_t tmp_dcp
= { 0 };
1352 doca
.doca_name
= name
;
1353 doca
.doca_cred
= CRED();
1354 doca
.doca_proc
= curproc
;
1355 doca
.doca_flags
= flags
;
1356 doca
.doca_userfunc
= func
;
1357 doca
.doca_userarg
= arg
;
1358 doca
.doca_type
= type
;
1361 * Some callers (mostly for testing) do not provide a dcp on their
1362 * own but various code inside the sync task will require it to be
1363 * allocated. Rather than adding NULL checks throughout this code
1364 * or adding dummy dcp's to all of the callers we simply create a
1365 * dummy one here and use that. This zero dcp will have the same
1366 * effect as asking for inheritance of all encryption params.
1368 doca
.doca_dcp
= (dcp
!= NULL
) ? dcp
: &tmp_dcp
;
1370 int rv
= dsl_sync_task(name
,
1371 dmu_objset_create_check
, dmu_objset_create_sync
, &doca
,
1372 6, ZFS_SPACE_CHECK_NORMAL
);
1375 zvol_create_minor(name
);
1379 typedef struct dmu_objset_clone_arg
{
1380 const char *doca_clone
;
1381 const char *doca_origin
;
1384 } dmu_objset_clone_arg_t
;
1387 dmu_objset_clone_check(void *arg
, dmu_tx_t
*tx
)
1389 dmu_objset_clone_arg_t
*doca
= arg
;
1393 dsl_dataset_t
*origin
;
1394 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1396 if (strchr(doca
->doca_clone
, '@') != NULL
)
1397 return (SET_ERROR(EINVAL
));
1399 if (strlen(doca
->doca_clone
) >= ZFS_MAX_DATASET_NAME_LEN
)
1400 return (SET_ERROR(ENAMETOOLONG
));
1402 error
= dsl_dir_hold(dp
, doca
->doca_clone
, FTAG
, &pdd
, &tail
);
1406 dsl_dir_rele(pdd
, FTAG
);
1407 return (SET_ERROR(EEXIST
));
1410 error
= dsl_fs_ss_limit_check(pdd
, 1, ZFS_PROP_FILESYSTEM_LIMIT
, NULL
,
1411 doca
->doca_cred
, doca
->doca_proc
);
1413 dsl_dir_rele(pdd
, FTAG
);
1414 return (SET_ERROR(EDQUOT
));
1417 error
= dsl_dataset_hold(dp
, doca
->doca_origin
, FTAG
, &origin
);
1419 dsl_dir_rele(pdd
, FTAG
);
1423 /* You can only clone snapshots, not the head datasets. */
1424 if (!origin
->ds_is_snapshot
) {
1425 dsl_dataset_rele(origin
, FTAG
);
1426 dsl_dir_rele(pdd
, FTAG
);
1427 return (SET_ERROR(EINVAL
));
1430 dsl_dataset_rele(origin
, FTAG
);
1431 dsl_dir_rele(pdd
, FTAG
);
1437 dmu_objset_clone_sync(void *arg
, dmu_tx_t
*tx
)
1439 dmu_objset_clone_arg_t
*doca
= arg
;
1440 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1443 dsl_dataset_t
*origin
, *ds
;
1445 char namebuf
[ZFS_MAX_DATASET_NAME_LEN
];
1447 VERIFY0(dsl_dir_hold(dp
, doca
->doca_clone
, FTAG
, &pdd
, &tail
));
1448 VERIFY0(dsl_dataset_hold(dp
, doca
->doca_origin
, FTAG
, &origin
));
1450 obj
= dsl_dataset_create_sync(pdd
, tail
, origin
, 0,
1451 doca
->doca_cred
, NULL
, tx
);
1453 VERIFY0(dsl_dataset_hold_obj(pdd
->dd_pool
, obj
, FTAG
, &ds
));
1454 dsl_dataset_name(origin
, namebuf
);
1455 spa_history_log_internal_ds(ds
, "clone", tx
,
1456 "origin=%s (%llu)", namebuf
, (u_longlong_t
)origin
->ds_object
);
1457 dsl_dataset_rele(ds
, FTAG
);
1458 dsl_dataset_rele(origin
, FTAG
);
1459 dsl_dir_rele(pdd
, FTAG
);
1463 dmu_objset_clone(const char *clone
, const char *origin
)
1465 dmu_objset_clone_arg_t doca
;
1467 doca
.doca_clone
= clone
;
1468 doca
.doca_origin
= origin
;
1469 doca
.doca_cred
= CRED();
1470 doca
.doca_proc
= curproc
;
1472 int rv
= dsl_sync_task(clone
,
1473 dmu_objset_clone_check
, dmu_objset_clone_sync
, &doca
,
1474 6, ZFS_SPACE_CHECK_NORMAL
);
1477 zvol_create_minor(clone
);
int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	kmem_strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}
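/*
 * Objset upgrades (e.g. enabling userobj accounting or project quotas on an
 * existing dataset) run asynchronously on spa_upgrade_taskq.  The task takes
 * a long hold on the dataset (upgrade_tag) for its lifetime, and
 * dmu_objset_upgrade_stop() cancels or waits for it when the objset is
 * disowned.
 */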
1497 dmu_objset_upgrade_task_cb(void *data
)
1499 objset_t
*os
= data
;
1501 mutex_enter(&os
->os_upgrade_lock
);
1502 os
->os_upgrade_status
= EINTR
;
1503 if (!os
->os_upgrade_exit
) {
1506 mutex_exit(&os
->os_upgrade_lock
);
1508 status
= os
->os_upgrade_cb(os
);
1510 mutex_enter(&os
->os_upgrade_lock
);
1512 os
->os_upgrade_status
= status
;
1514 os
->os_upgrade_exit
= B_TRUE
;
1515 os
->os_upgrade_id
= 0;
1516 mutex_exit(&os
->os_upgrade_lock
);
1517 dsl_dataset_long_rele(dmu_objset_ds(os
), upgrade_tag
);
1521 dmu_objset_upgrade(objset_t
*os
, dmu_objset_upgrade_cb_t cb
)
1523 if (os
->os_upgrade_id
!= 0)
1526 ASSERT(dsl_pool_config_held(dmu_objset_pool(os
)));
1527 dsl_dataset_long_hold(dmu_objset_ds(os
), upgrade_tag
);
1529 mutex_enter(&os
->os_upgrade_lock
);
1530 if (os
->os_upgrade_id
== 0 && os
->os_upgrade_status
== 0) {
1531 os
->os_upgrade_exit
= B_FALSE
;
1532 os
->os_upgrade_cb
= cb
;
1533 os
->os_upgrade_id
= taskq_dispatch(
1534 os
->os_spa
->spa_upgrade_taskq
,
1535 dmu_objset_upgrade_task_cb
, os
, TQ_SLEEP
);
1536 if (os
->os_upgrade_id
== TASKQID_INVALID
) {
1537 dsl_dataset_long_rele(dmu_objset_ds(os
), upgrade_tag
);
1538 os
->os_upgrade_status
= ENOMEM
;
1541 dsl_dataset_long_rele(dmu_objset_ds(os
), upgrade_tag
);
1543 mutex_exit(&os
->os_upgrade_lock
);
1547 dmu_objset_upgrade_stop(objset_t
*os
)
1549 mutex_enter(&os
->os_upgrade_lock
);
1550 os
->os_upgrade_exit
= B_TRUE
;
1551 if (os
->os_upgrade_id
!= 0) {
1552 taskqid_t id
= os
->os_upgrade_id
;
1554 os
->os_upgrade_id
= 0;
1555 mutex_exit(&os
->os_upgrade_lock
);
1557 if ((taskq_cancel_id(os
->os_spa
->spa_upgrade_taskq
, id
)) == 0) {
1558 dsl_dataset_long_rele(dmu_objset_ds(os
), upgrade_tag
);
1560 txg_wait_synced(os
->os_spa
->spa_dsl_pool
, 0);
1562 mutex_exit(&os
->os_upgrade_lock
);
1567 dmu_objset_sync_dnodes(multilist_sublist_t
*list
, dmu_tx_t
*tx
)
1571 while ((dn
= multilist_sublist_head(list
)) != NULL
) {
1572 ASSERT(dn
->dn_object
!= DMU_META_DNODE_OBJECT
);
1573 ASSERT(dn
->dn_dbuf
->db_data_pending
);
1575 * Initialize dn_zio outside dnode_sync() because the
1576 * meta-dnode needs to set it outside dnode_sync().
1578 dn
->dn_zio
= dn
->dn_dbuf
->db_data_pending
->dr_zio
;
1581 ASSERT3U(dn
->dn_nlevels
, <=, DN_MAX_LEVELS
);
1582 multilist_sublist_remove(list
, dn
);
1585 * See the comment above dnode_rele_task() for an explanation
1586 * of why this dnode hold is always needed (even when not
1587 * doing user accounting).
1589 multilist_t
*newlist
= &dn
->dn_objset
->os_synced_dnodes
;
1590 (void) dnode_add_ref(dn
, newlist
);
1591 multilist_insert(newlist
, dn
);
1598 dmu_objset_write_ready(zio_t
*zio
, arc_buf_t
*abuf
, void *arg
)
1601 blkptr_t
*bp
= zio
->io_bp
;
1603 dnode_phys_t
*dnp
= &os
->os_phys
->os_meta_dnode
;
1606 ASSERT(!BP_IS_EMBEDDED(bp
));
1607 ASSERT3U(BP_GET_TYPE(bp
), ==, DMU_OT_OBJSET
);
1608 ASSERT0(BP_GET_LEVEL(bp
));
1611 * Update rootbp fill count: it should be the number of objects
1612 * allocated in the object set (not counting the "special"
1613 * objects that are stored in the objset_phys_t -- the meta
1614 * dnode and user/group/project accounting objects).
1616 for (int i
= 0; i
< dnp
->dn_nblkptr
; i
++)
1617 fill
+= BP_GET_FILL(&dnp
->dn_blkptr
[i
]);
1619 BP_SET_FILL(bp
, fill
);
1621 if (os
->os_dsl_dataset
!= NULL
)
1622 rrw_enter(&os
->os_dsl_dataset
->ds_bp_rwlock
, RW_WRITER
, FTAG
);
1623 *os
->os_rootbp
= *bp
;
1624 if (os
->os_dsl_dataset
!= NULL
)
1625 rrw_exit(&os
->os_dsl_dataset
->ds_bp_rwlock
, FTAG
);
1629 dmu_objset_write_done(zio_t
*zio
, arc_buf_t
*abuf
, void *arg
)
1632 blkptr_t
*bp
= zio
->io_bp
;
1633 blkptr_t
*bp_orig
= &zio
->io_bp_orig
;
1636 if (zio
->io_flags
& ZIO_FLAG_IO_REWRITE
) {
1637 ASSERT(BP_EQUAL(bp
, bp_orig
));
1639 dsl_dataset_t
*ds
= os
->os_dsl_dataset
;
1640 dmu_tx_t
*tx
= os
->os_synctx
;
1642 (void) dsl_dataset_block_kill(ds
, bp_orig
, tx
, B_TRUE
);
1643 dsl_dataset_block_born(ds
, bp
, tx
);
1645 kmem_free(bp
, sizeof (*bp
));
1648 typedef struct sync_objset_arg
{
1654 taskq_ent_t soa_tq_ent
;
1655 } sync_objset_arg_t
;
1657 typedef struct sync_dnodes_arg
{
1658 multilist_t
*sda_list
;
1659 int sda_sublist_idx
;
1660 multilist_t
*sda_newlist
;
1661 sync_objset_arg_t
*sda_soa
;
1662 } sync_dnodes_arg_t
;
1664 static void sync_meta_dnode_task(void *arg
);
1667 sync_dnodes_task(void *arg
)
1669 sync_dnodes_arg_t
*sda
= arg
;
1670 sync_objset_arg_t
*soa
= sda
->sda_soa
;
1671 objset_t
*os
= soa
->soa_os
;
1673 uint_t allocator
= spa_acq_allocator(os
->os_spa
);
1674 multilist_sublist_t
*ms
=
1675 multilist_sublist_lock_idx(sda
->sda_list
, sda
->sda_sublist_idx
);
1677 dmu_objset_sync_dnodes(ms
, soa
->soa_tx
);
1679 multilist_sublist_unlock(ms
);
1680 spa_rel_allocator(os
->os_spa
, allocator
);
1682 kmem_free(sda
, sizeof (*sda
));
1684 mutex_enter(&soa
->soa_mutex
);
1685 ASSERT(soa
->soa_count
!= 0);
1686 if (--soa
->soa_count
!= 0) {
1687 mutex_exit(&soa
->soa_mutex
);
1690 mutex_exit(&soa
->soa_mutex
);
1692 taskq_dispatch_ent(dmu_objset_pool(os
)->dp_sync_taskq
,
1693 sync_meta_dnode_task
, soa
, TQ_FRONT
, &soa
->soa_tq_ent
);
1697 * Issue the zio_nowait() for all dirty record zios on the meta dnode,
1698 * then trigger the callback for the zil_sync. This runs once for each
1699 * objset, only after any/all sublists in the objset have been synced.
1702 sync_meta_dnode_task(void *arg
)
1704 sync_objset_arg_t
*soa
= arg
;
1705 objset_t
*os
= soa
->soa_os
;
1706 dmu_tx_t
*tx
= soa
->soa_tx
;
1707 int txgoff
= tx
->tx_txg
& TXG_MASK
;
1708 dbuf_dirty_record_t
*dr
;
1710 ASSERT0(soa
->soa_count
);
1712 list_t
*list
= &DMU_META_DNODE(os
)->dn_dirty_records
[txgoff
];
1713 while ((dr
= list_remove_head(list
)) != NULL
) {
1714 ASSERT0(dr
->dr_dbuf
->db_level
);
1715 zio_nowait(dr
->dr_zio
);
1718 /* Enable dnode backfill if enough objects have been freed. */
1719 if (os
->os_freed_dnodes
>= dmu_rescan_dnode_threshold
) {
1720 os
->os_rescan_dnodes
= B_TRUE
;
1721 os
->os_freed_dnodes
= 0;
1725 * Free intent log blocks up to this tx.
1727 zil_sync(os
->os_zil
, tx
);
1728 os
->os_phys
->os_zil_header
= os
->os_zil_header
;
1729 zio_nowait(soa
->soa_zio
);
1731 mutex_destroy(&soa
->soa_mutex
);
1732 kmem_free(soa
, sizeof (*soa
));
1735 /* called from dsl */
1737 dmu_objset_sync(objset_t
*os
, zio_t
*pio
, dmu_tx_t
*tx
)
1740 zbookmark_phys_t zb
;
1745 blkptr_t
*blkptr_copy
= kmem_alloc(sizeof (*os
->os_rootbp
), KM_SLEEP
);
1746 *blkptr_copy
= *os
->os_rootbp
;
1748 dprintf_ds(os
->os_dsl_dataset
, "txg=%llu\n", (u_longlong_t
)tx
->tx_txg
);
1750 ASSERT(dmu_tx_is_syncing(tx
));
1751 /* XXX the write_done callback should really give us the tx... */
1754 if (os
->os_dsl_dataset
== NULL
) {
1756 * This is the MOS. If we have upgraded,
1757 * spa_max_replication() could change, so reset
1760 os
->os_copies
= spa_max_replication(os
->os_spa
);
1764 * Create the root block IO
1766 SET_BOOKMARK(&zb
, os
->os_dsl_dataset
?
1767 os
->os_dsl_dataset
->ds_object
: DMU_META_OBJSET
,
1768 ZB_ROOT_OBJECT
, ZB_ROOT_LEVEL
, ZB_ROOT_BLKID
);
1769 arc_release(os
->os_phys_buf
, &os
->os_phys_buf
);
1771 dmu_write_policy(os
, NULL
, 0, 0, &zp
);
1774 * If we are either claiming the ZIL or doing a raw receive, write
1775 * out the os_phys_buf raw. Neither of these actions will effect the
1776 * MAC at this point.
1778 if (os
->os_raw_receive
||
1779 os
->os_next_write_raw
[tx
->tx_txg
& TXG_MASK
]) {
1780 ASSERT(os
->os_encrypted
);
1781 arc_convert_to_raw(os
->os_phys_buf
,
1782 os
->os_dsl_dataset
->ds_object
, ZFS_HOST_BYTEORDER
,
1783 DMU_OT_OBJSET
, NULL
, NULL
, NULL
);
1786 zio
= arc_write(pio
, os
->os_spa
, tx
->tx_txg
,
1787 blkptr_copy
, os
->os_phys_buf
, B_FALSE
, dmu_os_is_l2cacheable(os
),
1788 &zp
, dmu_objset_write_ready
, NULL
, dmu_objset_write_done
,
1789 os
, ZIO_PRIORITY_ASYNC_WRITE
, ZIO_FLAG_MUSTSUCCEED
, &zb
);
1792 * Sync special dnodes - the parent IO for the sync is the root block
1794 DMU_META_DNODE(os
)->dn_zio
= zio
;
1795 dnode_sync(DMU_META_DNODE(os
), tx
);
1797 os
->os_phys
->os_flags
= os
->os_flags
;
1799 if (DMU_USERUSED_DNODE(os
) &&
1800 DMU_USERUSED_DNODE(os
)->dn_type
!= DMU_OT_NONE
) {
1801 DMU_USERUSED_DNODE(os
)->dn_zio
= zio
;
1802 dnode_sync(DMU_USERUSED_DNODE(os
), tx
);
1803 DMU_GROUPUSED_DNODE(os
)->dn_zio
= zio
;
1804 dnode_sync(DMU_GROUPUSED_DNODE(os
), tx
);
1807 if (DMU_PROJECTUSED_DNODE(os
) &&
1808 DMU_PROJECTUSED_DNODE(os
)->dn_type
!= DMU_OT_NONE
) {
1809 DMU_PROJECTUSED_DNODE(os
)->dn_zio
= zio
;
1810 dnode_sync(DMU_PROJECTUSED_DNODE(os
), tx
);
1813 txgoff
= tx
->tx_txg
& TXG_MASK
;
1816 * We must create the list here because it uses the
1817 * dn_dirty_link[] of this txg. But it may already
1818 * exist because we call dsl_dataset_sync() twice per txg.
1820 if (os
->os_synced_dnodes
.ml_sublists
== NULL
) {
1821 multilist_create(&os
->os_synced_dnodes
, sizeof (dnode_t
),
1822 offsetof(dnode_t
, dn_dirty_link
[txgoff
]),
1823 dnode_multilist_index_func
);
1825 ASSERT3U(os
->os_synced_dnodes
.ml_offset
, ==,
1826 offsetof(dnode_t
, dn_dirty_link
[txgoff
]));
1830 * zio_nowait(zio) is done after any/all sublist and meta dnode
1831 * zios have been nowaited, and the zil_sync() has been performed.
1832 * The soa is freed at the end of sync_meta_dnode_task.
1834 sync_objset_arg_t
*soa
= kmem_alloc(sizeof (*soa
), KM_SLEEP
);
1838 taskq_init_ent(&soa
->soa_tq_ent
);
1839 mutex_init(&soa
->soa_mutex
, NULL
, MUTEX_DEFAULT
, NULL
);
1841 ml
= &os
->os_dirty_dnodes
[txgoff
];
1842 soa
->soa_count
= num_sublists
= multilist_get_num_sublists(ml
);
1844 for (int i
= 0; i
< num_sublists
; i
++) {
1845 if (multilist_sublist_is_empty_idx(ml
, i
))
1849 if (soa
->soa_count
== 0) {
1850 taskq_dispatch_ent(dmu_objset_pool(os
)->dp_sync_taskq
,
1851 sync_meta_dnode_task
, soa
, TQ_FRONT
, &soa
->soa_tq_ent
);
1854 * Sync sublists in parallel. The last to finish
1855 * (i.e., when soa->soa_count reaches zero) must
1856 * dispatch sync_meta_dnode_task.
1858 for (int i
= 0; i
< num_sublists
; i
++) {
1859 if (multilist_sublist_is_empty_idx(ml
, i
))
1861 sync_dnodes_arg_t
*sda
=
1862 kmem_alloc(sizeof (*sda
), KM_SLEEP
);
1864 sda
->sda_sublist_idx
= i
;
1866 (void) taskq_dispatch(
1867 dmu_objset_pool(os
)->dp_sync_taskq
,
1868 sync_dnodes_task
, sda
, 0);
1869 /* sync_dnodes_task frees sda */
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!multilist_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]));
}
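/*
 * Per-objset-type callbacks (registered by consumers such as the ZPL via
 * dmu_objset_register_type()) used to extract uid/gid/project information
 * from a dnode's bonus data for the user/group/project accounting code below.
 */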
static file_info_cb_t *file_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, file_info_cb_t *cb)
{
	file_cbs[ost] = cb;
}

int
dmu_get_file_info(objset_t *os, dmu_object_type_t bonustype, const void *data,
    zfs_file_info_t *zfi)
{
	file_info_cb_t *cb = file_cbs[os->os_phys->os_type];
	if (cb == NULL)
		return (SET_ERROR(EINVAL));
	return (cb(bonustype, data, zfi));
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    file_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}

boolean_t
dmu_objset_userobjused_enabled(objset_t *os)
{
	return (dmu_objset_userused_enabled(os) &&
	    spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING));
}

boolean_t
dmu_objset_projectquota_enabled(objset_t *os)
{
	return (file_cbs[os->os_phys->os_type] != NULL &&
	    DMU_PROJECTUSED_DNODE(os) != NULL &&
	    spa_feature_is_enabled(os->os_spa, SPA_FEATURE_PROJECT_QUOTA));
}
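/*
 * The userquota cache accumulates per-user/group/project space and object
 * deltas in AVL trees while synced dnodes are processed, and then flushes
 * them into the DMU_USERUSED_OBJECT, DMU_GROUPUSED_OBJECT, and
 * DMU_PROJECTUSED_OBJECT ZAPs with one zap_increment() per id
 * (see do_userquota_cacheflush()).
 */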
typedef struct userquota_node {
	/* must be the first field, see userquota_update_cache() */
	char uqn_id[20 + DMU_OBJACCT_PREFIX_LEN];
	int64_t uqn_delta;
	avl_node_t uqn_node;
} userquota_node_t;

typedef struct userquota_cache {
	avl_tree_t uqc_user_deltas;
	avl_tree_t uqc_group_deltas;
	avl_tree_t uqc_project_deltas;
} userquota_cache_t;

static int
userquota_compare(const void *l, const void *r)
{
	const userquota_node_t *luqn = l;
	const userquota_node_t *ruqn = r;
	int rv;

	/*
	 * NB: can only access uqn_id because userquota_update_cache() doesn't
	 * pass in an entire userquota_node_t.
	 */
	rv = strcmp(luqn->uqn_id, ruqn->uqn_id);

	return (TREE_ISIGN(rv));
}
*os
, userquota_cache_t
*cache
, dmu_tx_t
*tx
)
1954 userquota_node_t
*uqn
;
1956 ASSERT(dmu_tx_is_syncing(tx
));
1959 while ((uqn
= avl_destroy_nodes(&cache
->uqc_user_deltas
,
1960 &cookie
)) != NULL
) {
1962 * os_userused_lock protects against concurrent calls to
1963 * zap_increment_int(). It's needed because zap_increment_int()
1964 * is not thread-safe (i.e. not atomic).
1966 mutex_enter(&os
->os_userused_lock
);
1967 VERIFY0(zap_increment(os
, DMU_USERUSED_OBJECT
,
1968 uqn
->uqn_id
, uqn
->uqn_delta
, tx
));
1969 mutex_exit(&os
->os_userused_lock
);
1970 kmem_free(uqn
, sizeof (*uqn
));
1972 avl_destroy(&cache
->uqc_user_deltas
);
1975 while ((uqn
= avl_destroy_nodes(&cache
->uqc_group_deltas
,
1976 &cookie
)) != NULL
) {
1977 mutex_enter(&os
->os_userused_lock
);
1978 VERIFY0(zap_increment(os
, DMU_GROUPUSED_OBJECT
,
1979 uqn
->uqn_id
, uqn
->uqn_delta
, tx
));
1980 mutex_exit(&os
->os_userused_lock
);
1981 kmem_free(uqn
, sizeof (*uqn
));
1983 avl_destroy(&cache
->uqc_group_deltas
);
1985 if (dmu_objset_projectquota_enabled(os
)) {
1987 while ((uqn
= avl_destroy_nodes(&cache
->uqc_project_deltas
,
1988 &cookie
)) != NULL
) {
1989 mutex_enter(&os
->os_userused_lock
);
1990 VERIFY0(zap_increment(os
, DMU_PROJECTUSED_OBJECT
,
1991 uqn
->uqn_id
, uqn
->uqn_delta
, tx
));
1992 mutex_exit(&os
->os_userused_lock
);
1993 kmem_free(uqn
, sizeof (*uqn
));
1995 avl_destroy(&cache
->uqc_project_deltas
);
2000 userquota_update_cache(avl_tree_t
*avl
, const char *id
, int64_t delta
)
2002 userquota_node_t
*uqn
;
2005 ASSERT(strlen(id
) < sizeof (uqn
->uqn_id
));
2007 * Use id directly for searching because uqn_id is the first field of
2008 * userquota_node_t and fields after uqn_id won't be accessed in
2011 uqn
= avl_find(avl
, (const void *)id
, &idx
);
2013 uqn
= kmem_zalloc(sizeof (*uqn
), KM_SLEEP
);
2014 strlcpy(uqn
->uqn_id
, id
, sizeof (uqn
->uqn_id
));
2015 avl_insert(avl
, uqn
, idx
);
2017 uqn
->uqn_delta
+= delta
;
2021 do_userquota_update(objset_t
*os
, userquota_cache_t
*cache
, uint64_t used
,
2022 uint64_t flags
, uint64_t user
, uint64_t group
, uint64_t project
,
2025 if (flags
& DNODE_FLAG_USERUSED_ACCOUNTED
) {
2026 int64_t delta
= DNODE_MIN_SIZE
+ used
;
2032 (void) snprintf(name
, sizeof (name
), "%llx", (longlong_t
)user
);
2033 userquota_update_cache(&cache
->uqc_user_deltas
, name
, delta
);
2035 (void) snprintf(name
, sizeof (name
), "%llx", (longlong_t
)group
);
2036 userquota_update_cache(&cache
->uqc_group_deltas
, name
, delta
);
2038 if (dmu_objset_projectquota_enabled(os
)) {
2039 (void) snprintf(name
, sizeof (name
), "%llx",
2040 (longlong_t
)project
);
2041 userquota_update_cache(&cache
->uqc_project_deltas
,
2048 do_userobjquota_update(objset_t
*os
, userquota_cache_t
*cache
, uint64_t flags
,
2049 uint64_t user
, uint64_t group
, uint64_t project
, boolean_t subtract
)
2051 if (flags
& DNODE_FLAG_USEROBJUSED_ACCOUNTED
) {
2052 char name
[20 + DMU_OBJACCT_PREFIX_LEN
];
2053 int delta
= subtract
? -1 : 1;
2055 (void) snprintf(name
, sizeof (name
), DMU_OBJACCT_PREFIX
"%llx",
2057 userquota_update_cache(&cache
->uqc_user_deltas
, name
, delta
);
2059 (void) snprintf(name
, sizeof (name
), DMU_OBJACCT_PREFIX
"%llx",
2061 userquota_update_cache(&cache
->uqc_group_deltas
, name
, delta
);
2063 if (dmu_objset_projectquota_enabled(os
)) {
2064 (void) snprintf(name
, sizeof (name
),
2065 DMU_OBJACCT_PREFIX
"%llx", (longlong_t
)project
);
2066 userquota_update_cache(&cache
->uqc_project_deltas
,
2072 typedef struct userquota_updates_arg
{
2074 int uua_sublist_idx
;
2076 } userquota_updates_arg_t
;
2079 userquota_updates_task(void *arg
)
2081 userquota_updates_arg_t
*uua
= arg
;
2082 objset_t
*os
= uua
->uua_os
;
2083 dmu_tx_t
*tx
= uua
->uua_tx
;
2085 userquota_cache_t cache
= { { 0 } };
2087 multilist_sublist_t
*list
= multilist_sublist_lock_idx(
2088 &os
->os_synced_dnodes
, uua
->uua_sublist_idx
);
2090 ASSERT(multilist_sublist_head(list
) == NULL
||
2091 dmu_objset_userused_enabled(os
));
2092 avl_create(&cache
.uqc_user_deltas
, userquota_compare
,
2093 sizeof (userquota_node_t
), offsetof(userquota_node_t
, uqn_node
));
2094 avl_create(&cache
.uqc_group_deltas
, userquota_compare
,
2095 sizeof (userquota_node_t
), offsetof(userquota_node_t
, uqn_node
));
2096 if (dmu_objset_projectquota_enabled(os
))
2097 avl_create(&cache
.uqc_project_deltas
, userquota_compare
,
2098 sizeof (userquota_node_t
), offsetof(userquota_node_t
,
2101 while ((dn
= multilist_sublist_head(list
)) != NULL
) {
2103 ASSERT(!DMU_OBJECT_IS_SPECIAL(dn
->dn_object
));
2104 ASSERT(dn
->dn_phys
->dn_type
== DMU_OT_NONE
||
2105 dn
->dn_phys
->dn_flags
&
2106 DNODE_FLAG_USERUSED_ACCOUNTED
);
2108 flags
= dn
->dn_id_flags
;
2110 if (flags
& DN_ID_OLD_EXIST
) {
2111 do_userquota_update(os
, &cache
, dn
->dn_oldused
,
2112 dn
->dn_oldflags
, dn
->dn_olduid
, dn
->dn_oldgid
,
2113 dn
->dn_oldprojid
, B_TRUE
);
2114 do_userobjquota_update(os
, &cache
, dn
->dn_oldflags
,
2115 dn
->dn_olduid
, dn
->dn_oldgid
,
2116 dn
->dn_oldprojid
, B_TRUE
);
2118 if (flags
& DN_ID_NEW_EXIST
) {
2119 do_userquota_update(os
, &cache
,
2120 DN_USED_BYTES(dn
->dn_phys
), dn
->dn_phys
->dn_flags
,
2121 dn
->dn_newuid
, dn
->dn_newgid
,
2122 dn
->dn_newprojid
, B_FALSE
);
2123 do_userobjquota_update(os
, &cache
,
2124 dn
->dn_phys
->dn_flags
, dn
->dn_newuid
, dn
->dn_newgid
,
2125 dn
->dn_newprojid
, B_FALSE
);
2128 mutex_enter(&dn
->dn_mtx
);
2130 dn
->dn_oldflags
= 0;
2131 if (dn
->dn_id_flags
& DN_ID_NEW_EXIST
) {
2132 dn
->dn_olduid
= dn
->dn_newuid
;
2133 dn
->dn_oldgid
= dn
->dn_newgid
;
2134 dn
->dn_oldprojid
= dn
->dn_newprojid
;
2135 dn
->dn_id_flags
|= DN_ID_OLD_EXIST
;
2136 if (dn
->dn_bonuslen
== 0)
2137 dn
->dn_id_flags
|= DN_ID_CHKED_SPILL
;
2139 dn
->dn_id_flags
|= DN_ID_CHKED_BONUS
;
2141 dn
->dn_id_flags
&= ~(DN_ID_NEW_EXIST
);
2142 mutex_exit(&dn
->dn_mtx
);
2144 multilist_sublist_remove(list
, dn
);
2145 dnode_rele(dn
, &os
->os_synced_dnodes
);
2147 do_userquota_cacheflush(os
, &cache
, tx
);
2148 multilist_sublist_unlock(list
);
2149 kmem_free(uua
, sizeof (*uua
));
/*
 * Release dnode holds from dmu_objset_sync_dnodes(). When the dnode is being
 * synced (i.e. we have issued the zio's for blocks in the dnode), it can't be
 * evicted because the block containing the dnode can't be evicted until it is
 * written out. However, this hold is necessary to prevent the dnode_t from
 * being moved (via dnode_move()) while it's still referenced by
 * dbuf_dirty_record_t:dr_dnode. And dr_dnode is needed for
 * dirty_lightweight_leaf-type dirty records.
 *
 * If we are doing user-object accounting, the dnode_rele() happens from
 * userquota_updates_task() instead.
 */
static void
dnode_rele_task(void *arg)
{
	userquota_updates_arg_t *uua = arg;
	objset_t *os = uua->uua_os;

	multilist_sublist_t *list = multilist_sublist_lock_idx(
	    &os->os_synced_dnodes, uua->uua_sublist_idx);

	dnode_t *dn;
	while ((dn = multilist_sublist_head(list)) != NULL) {
		multilist_sublist_remove(list, dn);
		dnode_rele(dn, &os->os_synced_dnodes);
	}
	multilist_sublist_unlock(list);
	kmem_free(uua, sizeof (*uua));
}

/*
 * Return TRUE if userquota updates are needed.
 */
static boolean_t
dmu_objset_do_userquota_updates_prep(objset_t *os, dmu_tx_t *tx)
{
	if (!dmu_objset_userused_enabled(os))
		return (B_FALSE);

	/*
	 * If this is a raw receive just return and handle accounting
	 * later when we have the keys loaded. We also don't do user
	 * accounting during claiming since the datasets are not owned
	 * for the duration of claiming and this txg should only be
	 * used for recovery.
	 */
	if (os->os_encrypted && dmu_objset_is_receiving(os))
		return (B_FALSE);

	if (tx->tx_txg <= os->os_spa->spa_claim_max_txg)
		return (B_FALSE);

	/* Allocate the user/group/project used objects if necessary. */
	if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
		VERIFY0(zap_create_claim(os, DMU_USERUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		VERIFY0(zap_create_claim(os, DMU_GROUPUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
	}

	if (dmu_objset_projectquota_enabled(os) &&
	    DMU_PROJECTUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
		VERIFY0(zap_create_claim(os, DMU_PROJECTUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
	}
	return (B_TRUE);
}

/*
 * Dispatch taskq tasks to dp_sync_taskq to update the user accounting, and
 * also release the holds on the dnodes from dmu_objset_sync_dnodes().
 * The caller must taskq_wait(dp_sync_taskq).
 */
void
dmu_objset_sync_done(objset_t *os, dmu_tx_t *tx)
{
	boolean_t need_userquota = dmu_objset_do_userquota_updates_prep(os, tx);

	int num_sublists = multilist_get_num_sublists(&os->os_synced_dnodes);
	for (int i = 0; i < num_sublists; i++) {
		userquota_updates_arg_t *uua =
		    kmem_alloc(sizeof (*uua), KM_SLEEP);
		uua->uua_os = os;
		uua->uua_sublist_idx = i;
		uua->uua_tx = tx;

		/*
		 * If we don't need to update userquotas, use
		 * dnode_rele_task() to call dnode_rele()
		 */
		(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
		    need_userquota ? userquota_updates_task : dnode_rele_task,
		    uua, 0);
		/* callback frees uua */
	}
}

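/*
 * Illustrative sketch, not part of this file: the calling pattern implied by
 * the comment above.  The caller dispatches the per-sublist tasks for each
 * dirty objset and then drains dp_sync_taskq before assuming the dnode holds
 * have been released.  The loop below is hypothetical pseudocode, not an
 * actual call site.
 *
 *	for each objset os synced in this txg:
 *		dmu_objset_sync_done(os, tx);
 *	taskq_wait(dp->dp_sync_taskq);
 */
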
/*
 * Returns a pointer to data to find uid/gid from.
 *
 * If a dirty record for the transaction group that is syncing can't
 * be found then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	dr = dbuf_find_dirty_eq(db, tx->tx_txg);

	if (dr == NULL) {
		data = NULL;
	} else {
		if (dr->dr_dnode->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;
	}

	return (data);
}

void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	void *data = NULL;
	dmu_buf_impl_t *db = NULL;
	int flags = dn->dn_id_flags;
	int error;
	boolean_t have_spill = B_FALSE;

	if (!dmu_objset_userused_enabled(dn->dn_objset))
		return;

	/*
	 * Raw receives introduce a problem with user accounting. Raw
	 * receives cannot update the user accounting info because the
	 * user ids and the sizes are encrypted. To guarantee that we
	 * never end up with bad user accounting, we simply disable it
	 * during raw receives. We also disable this for normal receives
	 * so that an incremental raw receive may be done on top of an
	 * existing non-raw receive.
	 */
	if (os->os_encrypted && dmu_objset_is_receiving(os))
		return;

	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
	    DN_ID_CHKED_SPILL)))
		return;

	if (before && dn->dn_bonuslen != 0)
		data = DN_BONUS(dn->dn_phys);
	else if (!before && dn->dn_bonuslen != 0) {
		if (dn->dn_bonus) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
			data = dmu_objset_userquota_find_data(db, tx);
		} else {
			data = DN_BONUS(dn->dn_phys);
		}
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
	} else {
		mutex_enter(&dn->dn_mtx);
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		mutex_exit(&dn->dn_mtx);
		return;
	}

	/*
	 * Must always call the callback in case the object
	 * type has changed and that type isn't an object type to track
	 */
	zfs_file_info_t zfi;
	error = file_cbs[os->os_phys->os_type](dn->dn_bonustype, data, &zfi);

	if (before) {
		ASSERT(data);
		dn->dn_olduid = zfi.zfi_user;
		dn->dn_oldgid = zfi.zfi_group;
		dn->dn_oldprojid = zfi.zfi_project;
	} else if (data) {
		dn->dn_newuid = zfi.zfi_user;
		dn->dn_newgid = zfi.zfi_group;
		dn->dn_newprojid = zfi.zfi_project;
	}

	/*
	 * Preserve existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since that is a new file being created.
	 */
	if (!before && data == NULL && error == EEXIST) {
		if (flags & DN_ID_OLD_EXIST) {
			dn->dn_newuid = dn->dn_olduid;
			dn->dn_newgid = dn->dn_oldgid;
			dn->dn_newprojid = dn->dn_oldprojid;
		} else {
			dn->dn_newuid = 0;
			dn->dn_newgid = 0;
			dn->dn_newprojid = ZFS_DEFAULT_PROJID;
		}
		error = 0;
	}

	if (db)
		mutex_exit(&db->db_mtx);

	mutex_enter(&dn->dn_mtx);
	if (error == 0 && before)
		dn->dn_id_flags |= DN_ID_OLD_EXIST;
	if (error == 0 && !before)
		dn->dn_id_flags |= DN_ID_NEW_EXIST;

	if (have_spill) {
		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
	} else {
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
	}
	mutex_exit(&dn->dn_mtx);
	if (have_spill)
		dmu_buf_rele((dmu_buf_t *)db, FTAG);
}

boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

boolean_t
dmu_objset_userobjspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE);
}

boolean_t
dmu_objset_projectquota_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_PROJECTQUOTA_COMPLETE);
}

static int
dmu_objset_space_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and thus accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		mutex_enter(&os->os_upgrade_lock);
		if (os->os_upgrade_exit)
			err = SET_ERROR(EINTR);
		mutex_exit(&os->os_upgrade_lock);
		if (err != 0)
			return (err);

		if (issig())
			return (SET_ERROR(EINTR));

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr != 0)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr != 0) {
			dmu_buf_rele(db, FTAG);
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}
	return (0);
}

static int
dmu_objset_userspace_upgrade_cb(objset_t *os)
{
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));
	if (!dmu_objset_userused_enabled(os))
		return (SET_ERROR(ENOTSUP));

	err = dmu_objset_space_upgrade(os);
	if (err)
		return (err);

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

void
dmu_objset_userspace_upgrade(objset_t *os)
{
	dmu_objset_upgrade(os, dmu_objset_userspace_upgrade_cb);
}

static int
dmu_objset_id_quota_upgrade_cb(objset_t *os)
{
	int err = 0;

	if (dmu_objset_userobjspace_present(os) &&
	    dmu_objset_projectquota_present(os))
		return (0);
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));
	if (!dmu_objset_userused_enabled(os))
		return (SET_ERROR(ENOTSUP));
	if (!dmu_objset_projectquota_enabled(os) &&
	    dmu_objset_userobjspace_present(os))
		return (SET_ERROR(ENOTSUP));

	if (dmu_objset_userobjused_enabled(os))
		dmu_objset_ds(os)->ds_feature_activation[
		    SPA_FEATURE_USEROBJ_ACCOUNTING] = (void *)B_TRUE;
	if (dmu_objset_projectquota_enabled(os))
		dmu_objset_ds(os)->ds_feature_activation[
		    SPA_FEATURE_PROJECT_QUOTA] = (void *)B_TRUE;

	err = dmu_objset_space_upgrade(os);
	if (err)
		return (err);

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	if (dmu_objset_userobjused_enabled(os))
		os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
	if (dmu_objset_projectquota_enabled(os))
		os->os_flags |= OBJSET_FLAG_PROJECTQUOTA_COMPLETE;

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

void
dmu_objset_id_quota_upgrade(objset_t *os)
{
	dmu_objset_upgrade(os, dmu_objset_id_quota_upgrade_cb);
}

boolean_t
dmu_objset_userobjspace_upgradable(objset_t *os)
{
	return (dmu_objset_type(os) == DMU_OST_ZFS &&
	    !dmu_objset_is_snapshot(os) &&
	    dmu_objset_userobjused_enabled(os) &&
	    !dmu_objset_userobjspace_present(os) &&
	    spa_writeable(dmu_objset_spa(os)));
}

boolean_t
dmu_objset_projectquota_upgradable(objset_t *os)
{
	return (dmu_objset_type(os) == DMU_OST_ZFS &&
	    !dmu_objset_is_snapshot(os) &&
	    dmu_objset_projectquota_enabled(os) &&
	    !dmu_objset_projectquota_present(os) &&
	    spa_writeable(dmu_objset_spa(os)));
}

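/*
 * Illustrative sketch, not part of this file: a consumer might use the two
 * predicates above to decide whether to kick off the deferred id/quota
 * upgrade on a writable, mounted filesystem.  The surrounding context is
 * hypothetical.
 *
 *	if (dmu_objset_userobjspace_upgradable(os) ||
 *	    dmu_objset_projectquota_upgradable(os))
 *		dmu_objset_id_quota_upgrade(os);
 */
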
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

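/*
 * Illustrative sketch, not part of this file: typical use of
 * dmu_objset_space() when filling in statfs-style numbers; the local
 * variable names are hypothetical.
 *
 *	uint64_t refd, avail, usedobjs, availobjs;
 *	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
 */
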
uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

boolean_t
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (os->os_dsl_dataset->ds_is_snapshot);
	else
		return (B_FALSE);
}

int
dmu_snapshot_realname(objset_t *os, const char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
	    MT_NORMALIZE, real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t *attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	attr = zap_attribute_alloc();
	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, attr) != 0) {
		zap_cursor_fini(&cursor);
		zap_attribute_free(attr);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr->za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		zap_attribute_free(attr);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strlcpy(name, attr->za_name, namelen);
	if (idp)
		*idp = attr->za_first_integer;
	if (case_conflict)
		*case_conflict = attr->za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);
	zap_attribute_free(attr);

	return (0);
}

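/*
 * Illustrative sketch, not part of this file: walking all snapshots of an
 * objset with the serialized cursor interface above.  The caller must hold
 * the pool config lock (as asserted in the function); the buffer and
 * variable names are hypothetical.
 *
 *	uint64_t id, off = 0;
 *	boolean_t conflict;
 *	char snapname[ZFS_MAX_DATASET_NAME_LEN];
 *
 *	while (dmu_snapshot_list_next(os, sizeof (snapname), snapname,
 *	    &id, &off, &conflict) == 0) {
 *		process snapname and id here
 *	}
 */
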
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
	return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value));
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t *attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dsl_dir_phys(dd)->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	attr = zap_attribute_alloc();
	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, attr) != 0) {
		zap_cursor_fini(&cursor);
		zap_attribute_free(attr);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr->za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		zap_attribute_free(attr);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strlcpy(name, attr->za_name, namelen);
	if (idp)
		*idp = attr->za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);
	zap_attribute_free(attr);

	return (0);
}

typedef struct dmu_objset_find_ctx {
	taskq_t		*dc_tq;
	dsl_pool_t	*dc_dp;
	uint64_t	dc_ddobj;
	char		*dc_ddname; /* last component of ddobj's name */
	int		(*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
	void		*dc_arg;
	int		dc_flags;
	kmutex_t	*dc_error_lock;
	int		*dc_error;
} dmu_objset_find_ctx_t;

static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
	dsl_pool_t *dp = dcp->dc_dp;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err = 0;

	/* don't process if there already was an error */
	if (*dcp->dc_error != 0)
		goto out;

	/*
	 * Note: passing the name (dc_ddname) here is optional, but it
	 * improves performance because we don't need to call
	 * zap_value_search() to determine the name.
	 */
	err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
	if (err != 0)
		goto out;

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = zap_attribute_alloc();

	/*
	 * Iterate over all children.
	 */
	if (dcp->dc_flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			dmu_objset_find_ctx_t *child_dcp =
			    kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
			*child_dcp = *dcp;
			child_dcp->dc_ddobj = attr->za_first_integer;
			child_dcp->dc_ddname = spa_strdup(attr->za_name);
			if (dcp->dc_tq != NULL)
				(void) taskq_dispatch(dcp->dc_tq,
				    dmu_objset_find_dp_cb, child_dcp,
				    TQ_SLEEP);
			else
				dmu_objset_find_dp_impl(child_dcp);
		}
		zap_cursor_fini(&zc);
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = dcp->dc_func(dp, ds, dcp->dc_arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	zap_attribute_free(attr);

	if (err != 0) {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

	/*
	 * Note: we hold the dir while calling dsl_dataset_hold_obj() so
	 * that the dir will remain cached, and we won't have to re-instantiate
	 * it (which could be expensive due to finding its name via
	 * zap_value_search()).
	 */
	dsl_dir_rele(dd, FTAG);
	if (err != 0)
		goto out;

	err = dcp->dc_func(dp, ds, dcp->dc_arg);
	dsl_dataset_rele(ds, FTAG);

out:
	if (err != 0) {
		mutex_enter(dcp->dc_error_lock);
		/* only keep first error */
		if (*dcp->dc_error == 0)
			*dcp->dc_error = err;
		mutex_exit(dcp->dc_error_lock);
	}

	if (dcp->dc_ddname != NULL)
		spa_strfree(dcp->dc_ddname);
	kmem_free(dcp, sizeof (*dcp));
}

static void
dmu_objset_find_dp_cb(void *arg)
{
	dmu_objset_find_ctx_t *dcp = arg;
	dsl_pool_t *dp = dcp->dc_dp;

	/*
	 * We need to get a pool_config_lock here, as there are several
	 * assert(pool_config_held) down the stack. Getting a lock via
	 * dsl_pool_config_enter is risky, as it might be stalled by a
	 * pending writer. This would deadlock, as the write lock can
	 * only be granted when our parent thread gives up the lock.
	 * The _prio interface gives us priority over a pending writer.
	 */
	dsl_pool_config_enter_prio(dp, FTAG);

	dmu_objset_find_dp_impl(dcp);

	dsl_pool_config_exit(dp, FTAG);
}

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 * The order for the enumeration is completely undefined.
 * func is called with dsl_pool_config held.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	int error = 0;
	taskq_t *tq = NULL;
	int ntasks;
	dmu_objset_find_ctx_t *dcp;
	kmutex_t err_lock;

	mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
	dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
	dcp->dc_tq = NULL;
	dcp->dc_dp = dp;
	dcp->dc_ddobj = ddobj;
	dcp->dc_ddname = NULL;
	dcp->dc_func = func;
	dcp->dc_arg = arg;
	dcp->dc_flags = flags;
	dcp->dc_error_lock = &err_lock;
	dcp->dc_error = &error;

	if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
		/*
		 * In case a write lock is held we can't make use of
		 * parallelism, as down the stack of the worker threads
		 * the lock is asserted via dsl_pool_config_held.
		 * In case of a read lock this is solved by getting a read
		 * lock in each worker thread, which isn't possible in case
		 * of a writer lock. So we fall back to the synchronous path
		 * here.
		 * In the future it might be possible to get some magic into
		 * dsl_pool_config_held in a way that it returns true for
		 * the worker threads so that a single lock held from this
		 * thread suffices. For now, stay single threaded.
		 */
		dmu_objset_find_dp_impl(dcp);
		mutex_destroy(&err_lock);

		return (error);
	}

	ntasks = dmu_find_threads;
	if (ntasks == 0)
		ntasks = vdev_count_leaves(dp->dp_spa) * 4;
	tq = taskq_create("dmu_objset_find", ntasks, maxclsyspri, ntasks,
	    INT_MAX, 0);
	if (tq == NULL) {
		kmem_free(dcp, sizeof (*dcp));
		mutex_destroy(&err_lock);

		return (SET_ERROR(ENOMEM));
	}
	dcp->dc_tq = tq;

	/* dcp will be freed by task */
	(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);

	/*
	 * PORTING: this code relies on the property of taskq_wait to wait
	 * until no more tasks are queued and no more tasks are active. As
	 * we always queue new tasks from within other tasks, taskq_wait
	 * reliably waits for the full recursion to finish, even though we
	 * enqueue new tasks after taskq_wait has been called.
	 * On platforms other than illumos, taskq_wait may not have this
	 * property.
	 */
	taskq_wait(tq);
	taskq_destroy(tq);
	mutex_destroy(&err_lock);

	return (error);
}

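/*
 * Illustrative sketch, not part of this file: a minimal callback for
 * dmu_objset_find_dp().  It runs with the pool config lock held, may be
 * invoked from multiple taskq threads, and must not depend on enumeration
 * order.  The callback and counter are hypothetical.
 *
 *	static int
 *	count_ds_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 *	{
 *		atomic_inc_64((uint64_t *)arg);
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	error = dmu_objset_find_dp(dp, ddobj, count_ds_cb, &count,
 *	    DS_FIND_CHILDREN);
 */
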
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = zap_attribute_alloc();

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			kmem_strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			zap_attribute_free(attr);
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				kmem_strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	zap_attribute_free(attr);
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(const char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}

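/*
 * Illustrative sketch, not part of this file: dmu_objset_find() takes a
 * name-based callback; a non-zero return from the callback stops the walk
 * and is propagated to the caller.  The callback below is hypothetical.
 *
 *	static int
 *	log_name_cb(const char *name, void *arg)
 *	{
 *		(void) arg;
 *		zfs_dbgmsg("visited %s", name);
 *		return (0);
 *	}
 *
 *	error = dmu_objset_find(poolname, log_name_cb, NULL,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */
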
boolean_t
dmu_objset_incompatible_encryption_version(objset_t *os)
{
	return (dsl_dir_incompatible_encryption_version(
	    os->os_dsl_dataset->ds_dir));
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}

/*
 * Determine name of filesystem, given name of snapshot.
 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes.
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}

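/*
 * Illustrative sketch, not part of this file: given "pool/fs@today",
 * dmu_fsname() copies "pool/fs" into the buffer and returns 0; a name with
 * no '@' yields EINVAL.  The buffer name is hypothetical.
 *
 *	char fsname[ZFS_MAX_DATASET_NAME_LEN];
 *	int err = dmu_fsname("pool/fs@today", fsname);
 */
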
/*
 * Call when we think we're going to write/free space in open context
 * to track the amount of dirty data in the open txg, which is also the
 * amount of memory that can not be evicted until this txg syncs.
 *
 * Note that there are two conditions where this can be called from
 * syncing context:
 *
 * [1] When we just created the dataset, in which case we go on with
 *     updating any accounting of dirty data as usual.
 * [2] When we are dirtying MOS data, in which case we only update the
 *     pool's accounting of dirty data.
 */
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);

	if (ds != NULL) {
		dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
	}

	dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_objset_zil);
EXPORT_SYMBOL(dmu_objset_pool);
EXPORT_SYMBOL(dmu_objset_ds);
EXPORT_SYMBOL(dmu_objset_type);
EXPORT_SYMBOL(dmu_objset_name);
EXPORT_SYMBOL(dmu_objset_hold);
EXPORT_SYMBOL(dmu_objset_hold_flags);
EXPORT_SYMBOL(dmu_objset_own);
EXPORT_SYMBOL(dmu_objset_rele);
EXPORT_SYMBOL(dmu_objset_rele_flags);
EXPORT_SYMBOL(dmu_objset_disown);
EXPORT_SYMBOL(dmu_objset_from_ds);
EXPORT_SYMBOL(dmu_objset_create);
EXPORT_SYMBOL(dmu_objset_clone);
EXPORT_SYMBOL(dmu_objset_stats);
EXPORT_SYMBOL(dmu_objset_fast_stat);
EXPORT_SYMBOL(dmu_objset_spa);
EXPORT_SYMBOL(dmu_objset_space);
EXPORT_SYMBOL(dmu_objset_fsid_guid);
EXPORT_SYMBOL(dmu_objset_find);
EXPORT_SYMBOL(dmu_objset_byteswap);
EXPORT_SYMBOL(dmu_objset_evict_dbufs);
EXPORT_SYMBOL(dmu_objset_snap_cmtime);
EXPORT_SYMBOL(dmu_objset_dnodesize);

EXPORT_SYMBOL(dmu_objset_sync);
EXPORT_SYMBOL(dmu_objset_is_dirty);
EXPORT_SYMBOL(dmu_objset_create_impl_dnstats);
EXPORT_SYMBOL(dmu_objset_create_impl);
EXPORT_SYMBOL(dmu_objset_open_impl);
EXPORT_SYMBOL(dmu_objset_evict);
EXPORT_SYMBOL(dmu_objset_register_type);
EXPORT_SYMBOL(dmu_objset_sync_done);
EXPORT_SYMBOL(dmu_objset_userquota_get_ids);
EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
EXPORT_SYMBOL(dmu_objset_userobjused_enabled);
EXPORT_SYMBOL(dmu_objset_userobjspace_upgradable);
EXPORT_SYMBOL(dmu_objset_userobjspace_present);
EXPORT_SYMBOL(dmu_objset_projectquota_enabled);
EXPORT_SYMBOL(dmu_objset_projectquota_present);
EXPORT_SYMBOL(dmu_objset_projectquota_upgradable);
EXPORT_SYMBOL(dmu_objset_id_quota_upgrade);
#endif