/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include "zfs_namecheck.h"
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync(void *arg1, void *arg2,
    cred_t *cr, dmu_tx_t *tx);
/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should be empty since they hold the
	 * dir open.
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}
int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
	}
#endif
	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err)
				goto errout;
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}
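/*
 * Concurrency sketch for the open path above (hypothetical interleaving):
 * two threads can race to instantiate the dsl_dir_t for the same ddobj.
 * Both allocate a speculative dd, but dmu_buf_set_user_ie() installs
 * only one; the loser gets the winner's pointer back, tears down its
 * own copy, and proceeds with the winner:
 *
 *	thread A: dd_a = kmem_zalloc(...); ... set_user_ie() == NULL
 *	thread B: dd_b = kmem_zalloc(...); ... set_user_ie() == dd_a
 *	thread B: kmem_free(dd_b, ...); dd = dd_a;
 */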
void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}
/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}
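/*
 * For example (hypothetical pool layout), with the directory chain
 * "tank" -> "home" -> "user", the recursion above builds the name
 * left to right:
 *
 *	dsl_dir_name(tank)  => "tank"
 *	dsl_dir_name(home)  => "tank/home"
 *	dsl_dir_name(user)  => "tank/home/user"
 */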
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}
int
dsl_dir_is_private(dsl_dir_t *dd)
{
	int rv = FALSE;

	if (dd->dd_parent && dsl_dir_is_private(dd->dd_parent))
		rv = TRUE;
	if (dataset_name_hidden(dd->dd_myname))
		rv = TRUE;
	return (rv);
}
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;
	if (path == NULL)
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}
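/*
 * Worked examples of getcomponent() (hypothetical paths):
 *
 *	"tank/home/user" => component "tank",  *nextp = "home/user"
 *	"home@snap"      => component "home",  *nextp = "@snap"
 *	"@snap"          => component "@snap", *nextp = NULL
 *	"tank//home"     => EINVAL (two separators in a row)
 *	"a@b/c"          => EINVAL (slash after the @)
 */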
/*
 * same as dsl_dir_open(), but ignore the first component of name and use the
 * spa instead
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}
/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail.  Return an error if the path is bogus, or if
 * tail==NULL and we couldn't parse the whole name.  (*tail)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}
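/*
 * Usage sketch (hypothetical caller; error handling abbreviated):
 *
 *	dsl_dir_t *dd;
 *	const char *tail;
 *
 *	if (dsl_dir_open("tank/home/newfs", FTAG, &dd, &tail) == 0) {
 *		(if "tank/home" exists but "newfs" does not, dd refers
 *		to "tank/home" and tail points at "newfs")
 *		dsl_dir_close(dd, FTAG);
 *	}
 */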
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *dsphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;

	dsphys->dd_creation_time = gethrestime_sec();
	if (pds)
		dsphys->dd_parent_obj = pds->dd_object;
	dsphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	dsphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		dsphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}
/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds.  Otherwise, someone is trying to lookup something
	 * inside this dir while we want to destroy it.  The
	 * config_rwlock ensures that nobody else opens it after we
	 * check.
	 */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}
void
dsl_dir_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t val, obj;
	dd_used_t t;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	val = 0;
	dsl_dir_set_reservation_sync(dd, &val, cr, tx);
	ASSERT3U(dd->dd_phys->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT3U(dd->dd_phys->dd_used_breakdown[t], ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}
boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dd->dd_phys->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dd->dd_phys->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dd->dd_phys->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
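	/*
	 * E.g. (hypothetical sizes): 10 GB of logical data stored in
	 * 4 GB on disk yields 10/4 * 100 = 250, which userland renders
	 * as a 2.50x compression ratio; 0 compressed bytes reports the
	 * neutral ratio 100 (1.00x) rather than dividing by zero.
	 */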
	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
	}
	mutex_exit(&dd->dd_lock);

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(dd)) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
	rw_exit(&dd->dd_pool->dp_config_rwlock);
}
void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}
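/*
 * E.g. (hypothetical numbers): with dd_reserved = 10M, going from
 * used = 4M to 7M (delta = +3M) changes nothing that the parent can
 * see, since the reservation already charged 10M to the parent:
 * MAX(7M, 10M) - MAX(4M, 10M) = 0.  Going from used = 9M to 12M
 * returns +2M, the portion that grows past the reservation.
 */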
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}
static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}
/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_phys->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;

		/*
		 * While it's OK to be a little over quota, if
		 * we think we are using more space than there
		 * is in the pool (which is already 1.6% more than
		 * dsl_pool_adjustedsize()), something is very
		 * wrong.
		 */
		ASSERT3U(used, <=, spa_get_space(dd->dd_pool->dp_spa));
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}
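/*
 * E.g. (hypothetical numbers): a dir with quota = 10G, used = 6G and a
 * parent offering 3G ends up with MIN(3G, 10G - 6G) = 3G available; a
 * reservation larger than current usage is added back into parentspace
 * first, since that space is already guaranteed to this dir.
 */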
struct tempreserve {
	list_node_t tr_node;
	dsl_pool_t *tr_dp;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	struct tempreserve *tr;
	int enospc = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_phys->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values.  Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		if (poolsize < quota) {
			quota = poolsize;
			enospc = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight > quota) {
		if (est_inflight > 0 || used_on_disk < quota)
			enospc = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, enospc);
		mutex_exit(&dd->dd_lock);
		return (enospc);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
	} else {
		return (0);
	}
}
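/*
 * E.g. (hypothetical numbers): with quota = 10G, used_on_disk = 9G and
 * est_inflight = 2G, a new reservation fails with ERESTART, since the
 * 2G still in flight may actually free space; with used_on_disk = 11G
 * and est_inflight = 0 nothing pending can save us, so the caller gets
 * a hard EDQUOT (or ENOSPC if the pool itself was the limit).
 */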
/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);

		err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx);
	} else {
		if (err == EAGAIN) {
			txg_delay(dd->dd_pool, tx->tx_txg, 1);
			err = ERESTART;
		}
		dsl_pool_memory_pressure(dd->dd_pool);
	}

	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_dp = dd->dd_pool;
		tr->tr_size = asize;
		list_insert_tail(tr_list, tr);

		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}
/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while (tr = list_head(tr_list)) {
		if (tr->tr_dp) {
			dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
		} else if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}
static void
dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx);
}
/*
 * Call in open context when we think we're going to write/free space,
 * eg. when dirtying data.  Be conservative (ie. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	dsl_pool_willuse_space(dd->dd_pool, space, tx);
	dsl_dir_willuse_space_impl(dd, space, tx);
}
/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dsl_dir_dirty(dd, tx);

	if (needlock)
		mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_phys->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;

	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dd->dd_phys->dd_used_breakdown[type] >= -used);
		dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
		dd_used_t t;
		uint64_t u = 0;
		for (t = 0; t < DD_USED_NUM; t++)
			u += dd->dd_phys->dd_used_breakdown[t];
		ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}
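/*
 * E.g. (hypothetical numbers): writing 1M to a child whose reservation
 * still covers it yields accounted_delta = 0, so the parent's
 * DD_USED_CHILD total does not grow; instead the full 1M moves from
 * DD_USED_CHILD_RSRV to DD_USED_CHILD via dsl_dir_transfer_space(),
 * leaving the parent's dd_used_bytes unchanged.
 */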
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	dsl_dir_dirty(dd, tx);
	if (needlock)
		mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
	dd->dd_phys->dd_used_breakdown[newtype] += delta;
	if (needlock)
		mutex_exit(&dd->dd_lock);
}
static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;
	int err = 0;
	uint64_t towrite;

	if (new_quota == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(dd);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (new_quota < dd->dd_phys->dd_reserved ||
	    new_quota < dd->dd_phys->dd_used_bytes + towrite)) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}
static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = new_quota;
	mutex_exit(&dd->dd_lock);

	spa_history_internal_log(LOG_DS_QUOTA, dd->dd_pool->dp_spa,
	    tx, cr, "%lld dataset = %llu ",
	    (longlong_t)new_quota, dd->dd_phys->dd_head_dataset_obj);
}
int
dsl_dir_set_quota(const char *ddname, uint64_t quota)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);

	if (quota != dd->dd_phys->dd_quota) {
		/*
		 * If someone removes a file, then tries to set the quota, we
		 * want to make sure the file freeing takes effect.
		 */
		txg_wait_open(dd->dd_pool, 0);

		err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
		    dsl_dir_set_quota_sync, dd, &quota, 0);
	}
	dsl_dir_close(dd, FTAG);
	return (err);
}
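/*
 * A sketch of the caller's view (hypothetical values): a userland
 * "zfs set quota=10G tank/home" roughly arrives here as
 * dsl_dir_set_quota("tank/home", 10737418240); the check/sync pair
 * then runs under dsl_sync_task_do(), so the quota test and the
 * update are atomic with respect to the txg sync.
 */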
static int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used, avail;
	int64_t delta;

	if (new_reservation > INT64_MAX)
		return (EOVERFLOW);

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (delta > 0 && delta > avail)
		return (ENOSPC);
	if (delta > 0 && dd->dd_phys->dd_quota > 0 &&
	    new_reservation > dd->dd_phys->dd_quota)
		return (ENOSPC);

	return (0);
}
static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = new_reservation;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);

	spa_history_internal_log(LOG_DS_RESERVATION, dd->dd_pool->dp_spa,
	    tx, cr, "%lld dataset = %llu",
	    (longlong_t)new_reservation, dd->dd_phys->dd_head_dataset_obj);
}
int
dsl_dir_set_reservation(const char *ddname, uint64_t reservation)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);
	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, dd, &reservation, 0);
	dsl_dir_close(dd, FTAG);
	return (err);
}
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}
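/*
 * E.g. (hypothetical tree): for "tank/a/b" and "tank/a/c" the walk
 * returns the dir for "tank/a"; for any two dirs in the same pool the
 * result is never NULL, since both parent chains end at the root dir.
 */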
/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}
struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
};
static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/* There should be 2 references: the open and the dirty */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if (err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, myspace))
			return (err);
	}

	return (0);
}
static void
dsl_dir_rename_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

	if (ra->newparent != dd->dd_parent) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dd->dd_phys->dd_used_bytes,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD,
		    dd->dd_phys->dd_used_bytes,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);

		if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) {
			uint64_t unused_rsrv = dd->dd_phys->dd_reserved -
			    dd->dd_phys->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT3U(err, ==, 0);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj);
}
int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	return (0);
}
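/*
 * E.g. (hypothetical rename): moving "tank/a/b" under "tank/c" charges
 * myspace = MAX(used, reserved) of "b" against "tank/c".  The common
 * ancestor here is the root dir of "tank"; would_change() reports how
 * much of the freed space actually propagates up from "tank/a", and
 * that adelta is credited when asking whether "tank/c" has room.
 */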