 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */

#include <sys/dbuf.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/dsl_dataset.h>

/*
 * Each of the concurrent object allocators will grab
 * 2^dmu_object_alloc_chunk_shift dnode slots at a time.  The default is to
 * grab 128 slots, which is 4 blocks worth.  This was experimentally
 * determined to be the lowest value that eliminates the measurable effect
 * of lock contention from this code path.
 */
uint_t dmu_object_alloc_chunk_shift = 7;
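
/*
 * Common allocation path backing the dmu_object_alloc*() variants.  Object
 * numbers are handed out from a per-CPU cursor (os_obj_next_percpu); when a
 * cursor exhausts its chunk, a new chunk is claimed from the global cursor
 * (os_obj_next_chunk) under os_obj_lock.  Returns the new object number.
 * If allocated_dnode is non-NULL, the new dnode is returned there still held
 * with the caller's tag, and the caller must eventually dnode_rele() it.
 */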
static uint64_t
dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dnode_t **allocated_dnode, const void *tag, dmu_tx_t *tx)
{
	uint64_t object;
	uint64_t L1_dnode_count = DNODES_PER_BLOCK <<
	    (DMU_META_DNODE(os)->dn_indblkshift - SPA_BLKPTRSHIFT);
	dnode_t *dn = NULL;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	boolean_t restarted = B_FALSE;
	uint64_t *cpuobj = NULL;
	uint_t dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
	int error;

	cpuobj = &os->os_obj_next_percpu[CPU_SEQID_UNSTABLE %
	    os->os_obj_next_percpu_len];

	if (dn_slots == 0) {
		dn_slots = DNODE_MIN_SLOTS;
	} else {
		ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
		ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
	}
72 * The "chunk" of dnodes that is assigned to a CPU-specific
73 * allocator needs to be at least one block's worth, to avoid
74 * lock contention on the dbuf. It can be at most one L1 block's
75 * worth, so that the "rescan after polishing off a L1's worth"
76 * logic below will be sure to kick in.
78 if (dnodes_per_chunk
< DNODES_PER_BLOCK
)
79 dnodes_per_chunk
= DNODES_PER_BLOCK
;
80 if (dnodes_per_chunk
> L1_dnode_count
)
81 dnodes_per_chunk
= L1_dnode_count
;

	/*
	 * The caller requested the dnode be returned as a performance
	 * optimization in order to avoid releasing the hold only to
	 * immediately reacquire it.  Since the caller is responsible
	 * for releasing the hold they must provide the tag.
	 */
	if (allocated_dnode != NULL) {
		ASSERT3P(tag, !=, NULL);
	} else {
		ASSERT3P(tag, ==, NULL);
		tag = FTAG;
	}

	object = *cpuobj;
	for (;;) {
		/*
		 * If we finished a chunk of dnodes, get a new one from
		 * the global allocator.
		 */
		if ((P2PHASE(object, dnodes_per_chunk) == 0) ||
		    (P2PHASE(object + dn_slots - 1, dnodes_per_chunk) <
		    dn_slots)) {
			DNODE_STAT_BUMP(dnode_alloc_next_chunk);
			mutex_enter(&os->os_obj_lock);
			ASSERT0(P2PHASE(os->os_obj_next_chunk,
			    dnodes_per_chunk));
			object = os->os_obj_next_chunk;

			/*
			 * Each time we polish off a L1 bp worth of dnodes
			 * (2^12 objects), move to another L1 bp that's
			 * still reasonably sparse (at most 1/4 full).  Look
			 * from the beginning at most once per txg.  If we
			 * still can't allocate from that L1 block, search
			 * for an empty L0 block, which will quickly skip
			 * to the end of the metadnode if no nearby L0
			 * blocks are empty.  This fallback avoids a
			 * pathology where full dnode blocks containing
			 * large dnodes appear sparse because they have a
			 * low blk_fill, leading to many failed allocation
			 * attempts.  In the long term a better mechanism to
			 * search for sparse metadnode regions, such as
			 * spacemaps, could be implemented.
			 *
			 * os_rescan_dnodes is set during txg sync if enough
			 * objects have been freed since the previous
			 * rescan to justify backfilling again.
			 *
			 * Note that dmu_traverse depends on the behavior
			 * that we use multiple blocks of the dnode object
			 * before going back to reuse objects.  Any change
			 * to this algorithm should preserve that property
			 * or find another solution to the issues described
			 * in traverse_visitbp.
			 */
			if (P2PHASE(object, L1_dnode_count) == 0) {
				uint64_t offset;
				uint64_t blkfill;
				int minlvl;
				if (os->os_rescan_dnodes) {
					offset = 0;
					os->os_rescan_dnodes = B_FALSE;
				} else {
					offset = object << DNODE_SHIFT;
				}
				blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
				minlvl = restarted ? 1 : 2;
				restarted = B_TRUE;
				error = dnode_next_offset(DMU_META_DNODE(os),
				    DNODE_FIND_HOLE, &offset, minlvl,
				    blkfill, 0);
				if (error == 0) {
					object = offset >> DNODE_SHIFT;
				}
			}
			/*
			 * Note: if "restarted", we may find a L0 that
			 * is not suitably aligned.
			 */
			os->os_obj_next_chunk =
			    P2ALIGN(object, dnodes_per_chunk) +
			    dnodes_per_chunk;
			(void) atomic_swap_64(cpuobj, object);
			mutex_exit(&os->os_obj_lock);
		}

		/*
		 * The value of (*cpuobj) before adding dn_slots is the object
		 * ID assigned to us.  The value afterwards is the object ID
		 * assigned to whoever wants to do an allocation next.
		 */
		object = atomic_add_64_nv(cpuobj, dn_slots) - dn_slots;

		/*
		 * XXX We should check for an i/o error here and return
		 * up to our caller.  Actually we should pre-read it in
		 * dmu_tx_assign(), but there is currently no mechanism
		 * to do so.
		 */
		error = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
		    dn_slots, tag, &dn);
		if (error == 0) {
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
			/*
			 * Another thread could have allocated it; check
			 * again now that we have the struct lock.
			 */
			if (dn->dn_type == DMU_OT_NONE) {
				dnode_allocate(dn, ot, blocksize,
				    indirect_blockshift, bonustype,
				    bonuslen, dn_slots, tx);
				rw_exit(&dn->dn_struct_rwlock);
				dmu_tx_add_new_object(tx, dn);

				/*
				 * Caller requested the allocated dnode be
				 * returned and is responsible for the hold.
				 */
				if (allocated_dnode != NULL)
					*allocated_dnode = dn;
				else
					dnode_rele(dn, tag);

				return (object);
			}
			rw_exit(&dn->dn_struct_rwlock);
			dnode_rele(dn, tag);
			DNODE_STAT_BUMP(dnode_alloc_race);
		}

		/*
		 * Skip to next known valid starting point on error.  This
		 * is the start of the next block of dnodes.
		 */
		if (dmu_object_next(os, &object, B_TRUE, 0) != 0) {
			object = P2ROUNDUP(object + 1, DNODES_PER_BLOCK);
			DNODE_STAT_BUMP(dnode_alloc_next_block);
		}
		(void) atomic_swap_64(cpuobj, object);
	}
}
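
/*
 * The dmu_object_alloc*() variants below are thin wrappers around
 * dmu_object_alloc_impl(); they differ only in whether the indirect block
 * shift, dnode size, and a hold on the new dnode are exposed to the caller.
 */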
uint64_t
dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, 0, NULL, NULL, tx);
}

uint64_t
dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    dmu_tx_t *tx)
{
	return dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
	    bonustype, bonuslen, 0, NULL, NULL, tx);
}

uint64_t
dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, dnodesize, NULL, NULL, tx));
}

/*
 * Allocate a new object and return a pointer to the newly allocated dnode
 * via the allocated_dnode argument.  The returned dnode will be held and
 * the caller is responsible for releasing the hold by calling dnode_rele().
 */
uint64_t
dmu_object_alloc_hold(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dnode_t **allocated_dnode, const void *tag, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
	    bonustype, bonuslen, dnodesize, allocated_dnode, tag, tx));
}
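
/*
 * Allocate a specific, caller-chosen object number rather than having the
 * allocator pick one.  The requested dnode slots must currently be free.
 */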
int
dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_claim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, 0, tx));
}

int
dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;
	ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
	ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);

	if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
	    FTAG, &dn);
	if (err != 0)
		return (err);

	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
	dmu_tx_add_new_object(tx, dn);

	dnode_rele(dn, FTAG);

	return (0);
}
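
/*
 * Reset an existing object in place: its object number is kept, but the
 * type, block size, bonus type/length, and dnode size are reinitialized
 * via dnode_reallocate().
 */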
int
dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_reclaim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, DNODE_MIN_SIZE, B_FALSE, tx));
}

int
dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, int dnodesize,
    boolean_t keep_spill, dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;

	if (object == DMU_META_DNODE_OBJECT)
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err != 0)
		return (err);

	dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, dn_slots,
	    keep_spill, tx);

	dnode_rele(dn, FTAG);
	return (err);
}
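
/*
 * Remove the spill block from an object, if it has one.
 */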
int
dmu_object_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err != 0)
		return (err);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		dbuf_rm_spill(dn, tx);
		dnode_rm_spill(dn, tx);
	}
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
	return (err);
}
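
/*
 * Mark an object to be freed: all of its data is freed here and the dnode
 * itself is reclaimed in syncing context.
 */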
int
dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	ASSERT(object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err != 0)
		return (err);

	ASSERT(dn->dn_type != DMU_OT_NONE);
	/*
	 * If we don't create this free range, we'll leak indirect blocks when
	 * we get to freeing the dnode in syncing context.
	 */
	dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
	dnode_free(dn, tx);
	dnode_rele(dn, FTAG);

	return (0);
}

/*
 * Return (in *objectp) the next object which is allocated (or a hole)
 * after *object, taking into account only objects that may have been modified
 * after the specified txg.
 */
int
dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
{
	uint64_t offset;
	uint64_t start_obj;
	struct dsl_dataset *ds = os->os_dsl_dataset;
	int error;

	if (*objectp == 0) {
		start_obj = 1;
	} else if (ds && dsl_dataset_feature_is_active(ds,
	    SPA_FEATURE_LARGE_DNODE)) {
		uint64_t i = *objectp + 1;
		uint64_t last_obj = *objectp | (DNODES_PER_BLOCK - 1);
		dmu_object_info_t doi;

		/*
		 * Scan through the remaining meta dnode block.  The contents
		 * of each slot in the block are known so it can be quickly
		 * checked.  If the block is exhausted without a match then
		 * hand off to dnode_next_offset() for further scanning.
		 */
		while (i <= last_obj) {
			if (i == 0)
				return (SET_ERROR(ESRCH));
			error = dmu_object_info(os, i, &doi);
			if (error == ENOENT) {
				if (hole) {
					*objectp = i;
					return (0);
				} else {
					i++;
				}
			} else if (error == EEXIST) {
				i++;
			} else if (error == 0) {
				if (hole) {
					i += doi.doi_dnodesize >> DNODE_SHIFT;
				} else {
					*objectp = i;
					return (0);
				}
			} else {
				return (error);
			}
		}

		start_obj = i;
	} else {
		start_obj = *objectp + 1;
	}

	offset = start_obj << DNODE_SHIFT;

	error = dnode_next_offset(DMU_META_DNODE(os),
	    (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);

	*objectp = offset >> DNODE_SHIFT;

	return (error);
}

/*
 * Turn this object from old_type into DMU_OTN_ZAP_METADATA, and bump the
 * refcount on SPA_FEATURE_EXTENSIBLE_DATASET.
 *
 * Only for use from syncing context, on MOS objects.
 */
void
dmu_object_zapify(objset_t *mos, uint64_t object, dmu_object_type_t old_type,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	if (dn->dn_type == DMU_OTN_ZAP_METADATA) {
		dnode_rele(dn, FTAG);
		return;
	}
	ASSERT3U(dn->dn_type, ==, old_type);
	ASSERT0(dn->dn_maxblkid);

	/*
	 * We must initialize the ZAP data before changing the type,
	 * so that concurrent calls to *_is_zapified() can determine if
	 * the object has been completely zapified by checking the type.
	 */
	mzap_create_impl(dn, 0, 0, tx);

	dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
	    DMU_OTN_ZAP_METADATA;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);

	spa_feature_incr(dmu_objset_spa(mos),
	    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
}
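
/*
 * Free a MOS object, first dropping the SPA_FEATURE_EXTENSIBLE_DATASET
 * refcount taken by dmu_object_zapify() if the object had been zapified.
 * Only for use from syncing context.
 */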
void
dmu_object_free_zapified(objset_t *mos, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	dmu_object_type_t t;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	t = dn->dn_type;
	dnode_rele(dn, FTAG);

	if (t == DMU_OTN_ZAP_METADATA) {
		spa_feature_decr(dmu_objset_spa(mos),
		    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
	}
	VERIFY0(dmu_object_free(mos, object, tx));
}

EXPORT_SYMBOL(dmu_object_alloc);
EXPORT_SYMBOL(dmu_object_alloc_ibs);
EXPORT_SYMBOL(dmu_object_alloc_dnsize);
EXPORT_SYMBOL(dmu_object_alloc_hold);
EXPORT_SYMBOL(dmu_object_claim);
EXPORT_SYMBOL(dmu_object_claim_dnsize);
EXPORT_SYMBOL(dmu_object_reclaim);
EXPORT_SYMBOL(dmu_object_reclaim_dnsize);
EXPORT_SYMBOL(dmu_object_rm_spill);
EXPORT_SYMBOL(dmu_object_free);
EXPORT_SYMBOL(dmu_object_next);
EXPORT_SYMBOL(dmu_object_zapify);
EXPORT_SYMBOL(dmu_object_free_zapified);

ZFS_MODULE_PARAM(zfs, , dmu_object_alloc_chunk_shift, UINT, ZMOD_RW,
	"CPU-specific allocator grabs 2^N objects at once");