OpenZFS 7003 - zap_lockdir() should tag hold
[zfs.git] / module / zfs / dmu_object.c
blob e54043fc3e3a47a7ad78a2b65f8c5aeb3d64aa1a
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/dsl_dataset.h>

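/*
 * Allocate a new object number in this objset using the default (minimum)
 * dnode size.  Thin wrapper around dmu_object_alloc_dnsize() below.
 */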
uint64_t
dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return dmu_object_alloc_dnsize(os, ot, blocksize, bonustype, bonuslen,
	    0, tx);
}

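/*
 * Allocate a new object number, reserving enough consecutive metadnode slots
 * to hold a dnode of the requested size (dnodesize == 0 selects the default).
 * The search is serialized by os_obj_lock and starts from os_obj_next,
 * falling back to dnode_next_offset() to find a sparse region of the
 * metadnode.  The new object is added to the transaction and its number
 * returned.
 */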
uint64_t
dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	uint64_t object;
	uint64_t L1_dnode_count = DNODES_PER_BLOCK <<
	    (DMU_META_DNODE(os)->dn_indblkshift - SPA_BLKPTRSHIFT);
	dnode_t *dn = NULL;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	boolean_t restarted = B_FALSE;

	if (dn_slots == 0) {
		dn_slots = DNODE_MIN_SLOTS;
	} else {
		ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
		ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
	}

	mutex_enter(&os->os_obj_lock);
	for (;;) {
		object = os->os_obj_next;
		/*
		 * Each time we polish off an L1 bp worth of dnodes (2^12
		 * objects), move to another L1 bp that's still
		 * reasonably sparse (at most 1/4 full). Look from the
		 * beginning at most once per txg. If we still can't
		 * allocate from that L1 block, search for an empty L0
		 * block, which will quickly skip to the end of the
		 * metadnode if no nearby L0 blocks are empty. This
		 * fallback avoids a pathology where full dnode blocks
		 * containing large dnodes appear sparse because they
		 * have a low blk_fill, leading to many failed
		 * allocation attempts. In the long term a better
		 * mechanism to search for sparse metadnode regions,
		 * such as spacemaps, could be implemented.
		 *
		 * os_rescan_dnodes is set during txg sync if enough objects
		 * have been freed since the previous rescan to justify
		 * backfilling again.
		 *
		 * Note that dmu_traverse depends on the behavior that we use
		 * multiple blocks of the dnode object before going back to
		 * reuse objects. Any change to this algorithm should preserve
		 * that property or find another solution to the issues
		 * described in traverse_visitbp.
		 */
		if (P2PHASE(object, L1_dnode_count) == 0) {
			uint64_t offset;
			uint64_t blkfill;
			int minlvl;
			int error;
			if (os->os_rescan_dnodes) {
				offset = 0;
				os->os_rescan_dnodes = B_FALSE;
			} else {
				offset = object << DNODE_SHIFT;
			}
			blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
			minlvl = restarted ? 1 : 2;
			restarted = B_TRUE;
			error = dnode_next_offset(DMU_META_DNODE(os),
			    DNODE_FIND_HOLE, &offset, minlvl, blkfill, 0);
			if (error == 0)
				object = offset >> DNODE_SHIFT;
		}
		os->os_obj_next = object + dn_slots;

		/*
		 * XXX We should check for an i/o error here and return
		 * up to our caller.  Actually we should pre-read it in
		 * dmu_tx_assign(), but there is currently no mechanism
		 * to do so.
		 */
		(void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
		    FTAG, &dn);
		if (dn)
			break;

		if (dmu_object_next(os, &object, B_TRUE, 0) == 0)
			os->os_obj_next = object;
		else
			/*
			 * Skip to next known valid starting point for a dnode.
			 */
			os->os_obj_next = P2ROUNDUP(object + 1,
			    DNODES_PER_BLOCK);
	}

	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
	dnode_rele(dn, FTAG);

	mutex_exit(&os->os_obj_lock);

	dmu_tx_add_new_object(tx, os, object);
	return (object);
}

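/*
 * Allocate a specific, caller-chosen object number with the default dnode
 * size.  Returns 0 on success, or an error if the object is not free
 * (see dmu_object_claim_dnsize() below).
 */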
int
dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_claim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, 0, tx));
}

int
dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;
	ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
	ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);

	if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
	dnode_rele(dn, FTAG);

	dmu_tx_add_new_object(tx, os, object);
	return (0);
}

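/*
 * Reallocate an already-allocated object, giving it the specified type,
 * block size, and bonus buffer while keeping the same object number.
 * The plain variant uses the default dnode size.
 */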
int
dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_reclaim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, 0, tx));
}

int
dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, int dnodesize,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (object == DMU_META_DNODE_OBJECT)
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, dn_slots, tx);

	dnode_rele(dn, FTAG);
	return (err);
}

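/*
 * Free an allocated object: release all of its data blocks and then free
 * the dnode itself, making the object number available for reuse.
 */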
int
dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	ASSERT(object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	ASSERT(dn->dn_type != DMU_OT_NONE);
	dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
	dnode_free(dn, tx);
	dnode_rele(dn, FTAG);

	return (0);
}

/*
 * Return (in *objectp) the next object which is allocated (or a hole)
 * after *object, taking into account only objects that may have been modified
 * after the specified txg.
 */
int
dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
{
	uint64_t offset;
	dmu_object_info_t doi;
	struct dsl_dataset *ds = os->os_dsl_dataset;
	int dnodesize;
	int error;

	/*
	 * Avoid expensive dnode hold if this dataset doesn't use large dnodes.
	 */
	if (ds && ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE]) {
		error = dmu_object_info(os, *objectp, &doi);
		if (error && !(error == EINVAL && *objectp == 0))
			return (SET_ERROR(error));
		else
			dnodesize = doi.doi_dnodesize;
	} else {
		dnodesize = DNODE_MIN_SIZE;
	}

	if (*objectp == 0)
		offset = 1 << DNODE_SHIFT;
	else
		offset = (*objectp << DNODE_SHIFT) + dnodesize;

	error = dnode_next_offset(DMU_META_DNODE(os),
	    (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);

	*objectp = offset >> DNODE_SHIFT;

	return (error);
}

/*
 * Turn this object from old_type into DMU_OTN_ZAP_METADATA, and bump the
 * refcount on SPA_FEATURE_EXTENSIBLE_DATASET.
 *
 * Only for use from syncing context, on MOS objects.
 */
void
dmu_object_zapify(objset_t *mos, uint64_t object, dmu_object_type_t old_type,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	if (dn->dn_type == DMU_OTN_ZAP_METADATA) {
		dnode_rele(dn, FTAG);
		return;
	}
	ASSERT3U(dn->dn_type, ==, old_type);
	ASSERT0(dn->dn_maxblkid);
	dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
	    DMU_OTN_ZAP_METADATA;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);

	mzap_create_impl(mos, object, 0, 0, tx);

	spa_feature_incr(dmu_objset_spa(mos),
	    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
}

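/*
 * Free an object that may have been zapified by dmu_object_zapify().  If it
 * was, drop the SPA_FEATURE_EXTENSIBLE_DATASET refcount taken at zapify time
 * before freeing the object.  Syncing context only.
 */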
void
dmu_object_free_zapified(objset_t *mos, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	dmu_object_type_t t;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	t = dn->dn_type;
	dnode_rele(dn, FTAG);

	if (t == DMU_OTN_ZAP_METADATA) {
		spa_feature_decr(dmu_objset_spa(mos),
		    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
	}

	VERIFY0(dmu_object_free(mos, object, tx));
}

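/* Export these interfaces when built as a Linux kernel module (with SPL). */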
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_object_alloc);
EXPORT_SYMBOL(dmu_object_alloc_dnsize);
EXPORT_SYMBOL(dmu_object_claim);
EXPORT_SYMBOL(dmu_object_claim_dnsize);
EXPORT_SYMBOL(dmu_object_reclaim);
EXPORT_SYMBOL(dmu_object_reclaim_dnsize);
EXPORT_SYMBOL(dmu_object_free);
EXPORT_SYMBOL(dmu_object_next);
EXPORT_SYMBOL(dmu_object_zapify);
EXPORT_SYMBOL(dmu_object_free_zapified);
#endif