/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2023 Alexander Stetsenko <alex.stetsenko@gmail.com>
 * Copyright (c) 2023, Klara Inc.
 */
/*
 * This file contains the top half of the zfs directory structure
 * implementation.  The bottom half is in zap_leaf.c.
 *
 * The zdir is an extendable hash data structure.  There is a table of
 * pointers to buckets (zap_t->zd_data->zd_leafs).  The buckets are
 * each a constant size and hold a variable number of directory entries.
 * The buckets (aka "leaf nodes") are implemented in zap_leaf.c.
 *
 * The pointer table holds a power of 2 number of pointers
 * (1<<zap_t->zd_data->zd_prefix_len).  The bucket pointed to
 * by the pointer at index i in the table holds entries whose hash value
 * has a zd_prefix_len-bit prefix of i.
 */
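/*
 * For example, with a zd_prefix_len of 10 the pointer table has 1<<10
 * pointers, and an entry whose 64-bit hash begins with the 10-bit prefix i
 * lives in the bucket that table slot i points to.  Several adjacent slots
 * may point to the same bucket when that bucket covers a shorter prefix
 * than the table.  (Illustrative numbers only; the shift grows on demand.)
 */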
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#include <sys/fs/zfs.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
/*
 * If zap_iterate_prefetch is set, we will prefetch the entire ZAP object
 * (all leaf blocks) when we start iterating over it.
 *
 * For zap_cursor_init(), the callers all intend to iterate through all the
 * entries.  There are a few cases where an error (typically i/o error) could
 * cause it to bail out early.
 *
 * For zap_cursor_init_serialized(), there are callers that do the iteration
 * outside of ZFS.  Typically they would iterate over everything, but we
 * don't have control of that.  E.g. zfs_ioc_snapshot_list_next(),
 * zcp_snapshots_iter(), and other iterators over things in the MOS - these
 * are called by /sbin/zfs and channel programs.  The other example is
 * zfs_readdir() which iterates over directory entries for the getdents()
 * syscall.  /sbin/ls iterates to the end (unless it receives a signal), but
 * userland doesn't have to.
 *
 * Given that the ZAP entries aren't returned in a specific order, the only
 * legitimate use cases for partial iteration would be:
 *
 * 1. Pagination: e.g. you only want to display 100 entries at a time, so you
 *    get the first 100 and then wait for the user to hit "next page", which
 *    they may never do.
 *
 * 2. You want to know if there are more than X entries, without relying on
 *    the zfs-specific implementation of the directory's st_size (which is
 *    the number of entries).
 */
static int zap_iterate_prefetch = B_TRUE;

/*
 * Enable ZAP shrinking. When enabled, empty sibling leaf blocks will be
 * collapsed into a single block.
 */
int zap_shrink_enabled = B_TRUE;

int fzap_default_block_shift = 14; /* 16k blocksize */
static uint64_t zap_allocate_blocks(zap_t *zap, int nblocks);
static int zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx);
void
fzap_byteswap(void *vbuf, size_t size)
{
	uint64_t block_type = *(uint64_t *)vbuf;

	if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF))
		zap_leaf_byteswap(vbuf, size);
	else {
		/* it's a ptrtbl block */
		byteswap_uint64_array(vbuf, size);
	}
}
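/*
 * Note: the first 64-bit word of a leaf block is ZBT_LEAF, so the check
 * above recognizes leaves whether or not the buffer has already been
 * byteswapped; anything else (header or ptrtbl blocks) is safely treated
 * as a plain array of uint64_t values.
 */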
void
fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
{
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	zap->zap_ismicro = FALSE;

	zap->zap_dbu.dbu_evict_func_sync = zap_evict_sync;
	zap->zap_dbu.dbu_evict_func_async = NULL;

	mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, MUTEX_DEFAULT, 0);
	zap->zap_f.zap_block_shift = highbit64(zap->zap_dbuf->db_size) - 1;

	zap_phys_t *zp = zap_f_phys(zap);
	/*
	 * explicitly zero it since it might be coming from an
	 * initialized microzap
	 */
	memset(zap->zap_dbuf->db_data, 0, zap->zap_dbuf->db_size);
	zp->zap_block_type = ZBT_HEADER;
	zp->zap_magic = ZAP_MAGIC;

	zp->zap_ptrtbl.zt_shift = ZAP_EMBEDDED_PTRTBL_SHIFT(zap);

	zp->zap_freeblk = 2;		/* block 1 will be the first leaf */
	zp->zap_num_leafs = 1;
	zp->zap_num_entries = 0;
	zp->zap_salt = zap->zap_salt;
	zp->zap_normflags = zap->zap_normflags;
	zp->zap_flags = flags;

	/* block 1 will be the first leaf */
	for (int i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++)
		ZAP_EMBEDDED_PTRTBL_ENT(zap, i) = 1;

	/*
	 * set up block 1 - the first leaf
	 */
	dmu_buf_t *db;
	VERIFY0(dmu_buf_hold_by_dnode(zap->zap_dnode,
	    1<<FZAP_BLOCK_SHIFT(zap), FTAG, &db, DMU_READ_NO_PREFETCH));
	dmu_buf_will_dirty(db, tx);

	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
	l->l_dbuf = db;

	zap_leaf_init(l, zp->zap_normflags != 0);

	kmem_free(l, sizeof (zap_leaf_t));
	dmu_buf_rele(db, FTAG);
}
static int
zap_tryupgradedir(zap_t *zap, dmu_tx_t *tx)
{
	if (RW_WRITE_HELD(&zap->zap_rwlock))
		return (1);
	if (rw_tryupgrade(&zap->zap_rwlock)) {
		dmu_buf_will_dirty(zap->zap_dbuf, tx);
		return (1);
	}
	return (0);
}
/*
 * Generic routines for dealing with the pointer & cookie tables.
 */

static int
zap_table_grow(zap_t *zap, zap_table_phys_t *tbl,
    void (*transfer_func)(const uint64_t *src, uint64_t *dst, int n),
    dmu_tx_t *tx)
{
	uint64_t newblk;
	int bs = FZAP_BLOCK_SHIFT(zap);
	int hepb = 1<<(bs-4);
	/* hepb = half the number of entries in a block */

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	ASSERT(tbl->zt_blk != 0);
	ASSERT(tbl->zt_numblks > 0);

	if (tbl->zt_nextblk != 0) {
		newblk = tbl->zt_nextblk;
	} else {
		newblk = zap_allocate_blocks(zap, tbl->zt_numblks * 2);
		tbl->zt_nextblk = newblk;
		ASSERT0(tbl->zt_blks_copied);
		dmu_prefetch_by_dnode(zap->zap_dnode, 0,
		    tbl->zt_blk << bs, tbl->zt_numblks << bs,
		    ZIO_PRIORITY_SYNC_READ);
	}

	/*
	 * Copy the ptrtbl from the old to new location.
	 */

	uint64_t b = tbl->zt_blks_copied;
	dmu_buf_t *db_old;
	int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (tbl->zt_blk + b) << bs, FTAG, &db_old, DMU_READ_NO_PREFETCH);
	if (err != 0)
		return (err);

	/* first half of entries in old[b] go to new[2*b+0] */
	dmu_buf_t *db_new;
	VERIFY0(dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (newblk + 2*b+0) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
	dmu_buf_will_dirty(db_new, tx);
	transfer_func(db_old->db_data, db_new->db_data, hepb);
	dmu_buf_rele(db_new, FTAG);

	/* second half of entries in old[b] go to new[2*b+1] */
	VERIFY0(dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (newblk + 2*b+1) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
	dmu_buf_will_dirty(db_new, tx);
	transfer_func((uint64_t *)db_old->db_data + hepb,
	    db_new->db_data, hepb);
	dmu_buf_rele(db_new, FTAG);

	dmu_buf_rele(db_old, FTAG);

	tbl->zt_blks_copied++;

	dprintf("copied block %llu of %llu\n",
	    (u_longlong_t)tbl->zt_blks_copied,
	    (u_longlong_t)tbl->zt_numblks);

	if (tbl->zt_blks_copied == tbl->zt_numblks) {
		(void) dmu_free_range(zap->zap_objset, zap->zap_object,
		    tbl->zt_blk << bs, tbl->zt_numblks << bs, tx);

		tbl->zt_blk = newblk;
		tbl->zt_numblks *= 2;
		tbl->zt_shift++;
		tbl->zt_nextblk = 0;
		tbl->zt_blks_copied = 0;

		dprintf("finished; numblocks now %llu (%uk entries)\n",
		    (u_longlong_t)tbl->zt_numblks, 1<<(tbl->zt_shift-10));
	}

	return (0);
}
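/*
 * zap_table_grow() is incremental: each call copies a single block of the
 * old table into two blocks of the new, twice-as-large table, recording
 * progress in zt_blks_copied and zt_nextblk.  Only once every old block has
 * been copied does the table switch over to the new location and the old
 * blocks get freed, so a partially grown table remains fully usable in the
 * meantime (see how zt_nextblk is handled in zap_table_load/store below).
 */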
static int
zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val,
    dmu_tx_t *tx)
{
	int bs = FZAP_BLOCK_SHIFT(zap);

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	ASSERT(tbl->zt_blk != 0);

	dprintf("storing %llx at index %llx\n", (u_longlong_t)val,
	    (u_longlong_t)idx);

	uint64_t blk = idx >> (bs-3);
	uint64_t off = idx & ((1<<(bs-3))-1);

	dmu_buf_t *db;
	int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
	if (err != 0)
		return (err);
	dmu_buf_will_dirty(db, tx);

	if (tbl->zt_nextblk != 0) {
		uint64_t idx2 = idx * 2;
		uint64_t blk2 = idx2 >> (bs-3);
		uint64_t off2 = idx2 & ((1<<(bs-3))-1);
		dmu_buf_t *db2;

		err = dmu_buf_hold_by_dnode(zap->zap_dnode,
		    (tbl->zt_nextblk + blk2) << bs, FTAG, &db2,
		    DMU_READ_NO_PREFETCH);
		if (err != 0) {
			dmu_buf_rele(db, FTAG);
			return (err);
		}
		dmu_buf_will_dirty(db2, tx);
		((uint64_t *)db2->db_data)[off2] = val;
		((uint64_t *)db2->db_data)[off2+1] = val;
		dmu_buf_rele(db2, FTAG);
	}

	((uint64_t *)db->db_data)[off] = val;
	dmu_buf_rele(db, FTAG);

	return (0);
}
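/*
 * While a grow is in progress (zt_nextblk != 0), a store must update both
 * copies: index idx in the old table and the two indexes (2*idx, 2*idx+1)
 * that it expands to in the new table, which is what the db2 writes above
 * do.  Otherwise the value could be lost when the table switches over.
 */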
static int
zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp)
{
	int bs = FZAP_BLOCK_SHIFT(zap);

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	uint64_t blk = idx >> (bs-3);
	uint64_t off = idx & ((1<<(bs-3))-1);

	dmu_buf_t *db;
	int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
	    (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
	if (err != 0)
		return (err);
	*valp = ((uint64_t *)db->db_data)[off];
	dmu_buf_rele(db, FTAG);

	if (tbl->zt_nextblk != 0) {
		/*
		 * read the nextblk for the sake of i/o error checking,
		 * so that zap_table_load() will catch errors for
		 * zap_table_store.
		 */
		blk = (idx*2) >> (bs-3);

		err = dmu_buf_hold_by_dnode(zap->zap_dnode,
		    (tbl->zt_nextblk + blk) << bs, FTAG, &db,
		    DMU_READ_NO_PREFETCH);
		if (err == 0)
			dmu_buf_rele(db, FTAG);
	}
	return (err);
}
/*
 * Routines for growing the ptrtbl.
 */

static void
zap_ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n)
{
	for (int i = 0; i < n; i++) {
		uint64_t lb = src[i];
		dst[2 * i + 0] = lb;
		dst[2 * i + 1] = lb;
	}
}
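/*
 * The transfer function simply doubles each pointer: old entry i becomes
 * new entries 2*i and 2*i+1, e.g. a four-entry table {A, B, C, D} expands
 * to {A, A, B, B, C, C, D, D}.  Every leaf keeps the same block pointer;
 * only the number of table slots referring to it doubles.
 */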
static int
zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx)
{
	/*
	 * The pointer table should never use more hash bits than we
	 * have (otherwise we'd be using useless zero bits to index it).
	 * If we are within 2 bits of running out, stop growing, since
	 * this is already an aberrant condition.
	 */
	if (zap_f_phys(zap)->zap_ptrtbl.zt_shift >= zap_hashbits(zap) - 2)
		return (SET_ERROR(ENOSPC));

	if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
		/*
		 * We are outgrowing the "embedded" ptrtbl (the one
		 * stored in the header block).  Give it its own entire
		 * block, which will double the size of the ptrtbl.
		 */
		ASSERT3U(zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==,
		    ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
		ASSERT0(zap_f_phys(zap)->zap_ptrtbl.zt_blk);

		uint64_t newblk = zap_allocate_blocks(zap, 1);
		dmu_buf_t *db_new;
		int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
		    newblk << FZAP_BLOCK_SHIFT(zap), FTAG, &db_new,
		    DMU_READ_NO_PREFETCH);
		if (err != 0)
			return (err);
		dmu_buf_will_dirty(db_new, tx);
		zap_ptrtbl_transfer(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
		    db_new->db_data, 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
		dmu_buf_rele(db_new, FTAG);

		zap_f_phys(zap)->zap_ptrtbl.zt_blk = newblk;
		zap_f_phys(zap)->zap_ptrtbl.zt_numblks = 1;
		zap_f_phys(zap)->zap_ptrtbl.zt_shift++;

		ASSERT3U(1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==,
		    zap_f_phys(zap)->zap_ptrtbl.zt_numblks <<
		    (FZAP_BLOCK_SHIFT(zap)-3));

		return (0);
	} else {
		return (zap_table_grow(zap, &zap_f_phys(zap)->zap_ptrtbl,
		    zap_ptrtbl_transfer, tx));
	}
}
static void
zap_increment_num_entries(zap_t *zap, int delta, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(zap->zap_dbuf, tx);
	mutex_enter(&zap->zap_f.zap_num_entries_mtx);
	ASSERT(delta > 0 || zap_f_phys(zap)->zap_num_entries >= -delta);
	zap_f_phys(zap)->zap_num_entries += delta;
	mutex_exit(&zap->zap_f.zap_num_entries_mtx);
}
static uint64_t
zap_allocate_blocks(zap_t *zap, int nblocks)
{
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	uint64_t newblk = zap_f_phys(zap)->zap_freeblk;
	zap_f_phys(zap)->zap_freeblk += nblocks;
	return (newblk);
}
static void
zap_leaf_evict_sync(void *dbu)
{
	zap_leaf_t *l = dbu;

	rw_destroy(&l->l_rwlock);
	kmem_free(l, sizeof (zap_leaf_t));
}
static zap_leaf_t *
zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
{
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	uint64_t blkid = zap_allocate_blocks(zap, 1);
	dmu_buf_t *db = NULL;

	VERIFY0(dmu_buf_hold_by_dnode(zap->zap_dnode,
	    blkid << FZAP_BLOCK_SHIFT(zap), NULL, &db,
	    DMU_READ_NO_PREFETCH));

	/*
	 * Create the leaf structure and stash it on the dbuf. If the zap was
	 * recently shrunk or truncated, the dbuf might have been sitting in
	 * the cache waiting to be evicted, and so still have the old leaf
	 * attached to it. If so, just reuse it.
	 */
	zap_leaf_t *l = dmu_buf_get_user(db);
	if (l == NULL) {
		l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
		l->l_blkid = blkid;
		l->l_dbuf = db;
		rw_init(&l->l_rwlock, NULL, RW_NOLOCKDEP, NULL);
		dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL,
		    &l->l_dbuf);
		dmu_buf_set_user(l->l_dbuf, &l->l_dbu);
	} else {
		ASSERT3U(l->l_blkid, ==, blkid);
		ASSERT3P(l->l_dbuf, ==, db);
	}

	rw_enter(&l->l_rwlock, RW_WRITER);
	dmu_buf_will_dirty(l->l_dbuf, tx);

	zap_leaf_init(l, zap->zap_normflags != 0);

	zap_f_phys(zap)->zap_num_leafs++;

	return (l);
}
void
fzap_count(zap_t *zap, uint64_t *count)
{
	ASSERT(!zap->zap_ismicro);
	mutex_enter(&zap->zap_f.zap_num_entries_mtx); /* unnecessary */
	*count = zap_f_phys(zap)->zap_num_entries;
	mutex_exit(&zap->zap_f.zap_num_entries_mtx);
}
/*
 * Routines for obtaining zap_leaf_t's
 */

void
zap_put_leaf(zap_leaf_t *l)
{
	rw_exit(&l->l_rwlock);
	dmu_buf_rele(l->l_dbuf, NULL);
}
static zap_leaf_t *
zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
{
	zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
	rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
	rw_enter(&l->l_rwlock, RW_WRITER);
	l->l_blkid = blkid;
	l->l_bs = highbit64(db->db_size) - 1;
	l->l_dbuf = db;

	dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf);
	zap_leaf_t *winner = dmu_buf_set_user(db, &l->l_dbu);

	rw_exit(&l->l_rwlock);
	if (winner != NULL) {
		/* someone else set it first */
		zap_leaf_evict_sync(&l->l_dbu);
		l = winner;
	}

	/*
	 * lhr_pad was previously used for the next leaf in the leaf
	 * chain.  There should be no chained leafs (as we have removed
	 * support for them).
	 */
	ASSERT0(zap_leaf_phys(l)->l_hdr.lh_pad1);

	/*
	 * There should be more hash entries than there can be
	 * chunks to put in the hash table
	 */
	ASSERT3U(ZAP_LEAF_HASH_NUMENTRIES(l), >, ZAP_LEAF_NUMCHUNKS(l) / 3);

	/* The chunks should begin at the end of the hash table */
	ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==, (zap_leaf_chunk_t *)
	    &zap_leaf_phys(l)->l_hash[ZAP_LEAF_HASH_NUMENTRIES(l)]);

	/* The chunks should end at the end of the block */
	ASSERT3U((uintptr_t)&ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)) -
	    (uintptr_t)zap_leaf_phys(l), ==, l->l_dbuf->db_size);

	return (l);
}
static int
zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
    zap_leaf_t **lp)
{
	dmu_buf_t *db;

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	/*
	 * If system crashed just after dmu_free_long_range in zfs_rmnode, we
	 * would be left with an empty xattr dir in delete queue. blkid=0
	 * would be passed in when doing zfs_purgedir. If that's the case we
	 * should just return immediately. The underlying objects should
	 * already be freed, so this should be perfectly fine.
	 */
	if (blkid == 0)
		return (SET_ERROR(ENOENT));

	int bs = FZAP_BLOCK_SHIFT(zap);
	int err = dmu_buf_hold_by_dnode(zap->zap_dnode,
	    blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH);
	if (err != 0)
		return (err);

	ASSERT3U(db->db_object, ==, zap->zap_object);
	ASSERT3U(db->db_offset, ==, blkid << bs);
	ASSERT3U(db->db_size, ==, 1 << bs);

	zap_leaf_t *l = dmu_buf_get_user(db);

	if (l == NULL)
		l = zap_open_leaf(blkid, db);

	rw_enter(&l->l_rwlock, lt);
	/*
	 * Must lock before dirtying, otherwise zap_leaf_phys(l) could change,
	 * causing ASSERT below to fail.
	 */
	if (lt == RW_WRITER)
		dmu_buf_will_dirty(db, tx);
	ASSERT3U(l->l_blkid, ==, blkid);
	ASSERT3P(l->l_dbuf, ==, db);
	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_block_type, ==, ZBT_LEAF);
	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);

	*lp = l;
	return (0);
}
static int
zap_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t *valp)
{
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
		ASSERT3U(idx, <,
		    (1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift));
		*valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx);
		return (0);
	} else {
		return (zap_table_load(zap, &zap_f_phys(zap)->zap_ptrtbl,
		    idx, valp));
	}
}
static int
zap_set_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t blk, dmu_tx_t *tx)
{
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	if (zap_f_phys(zap)->zap_ptrtbl.zt_blk == 0) {
		ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) = blk;
		return (0);
	} else {
		return (zap_table_store(zap, &zap_f_phys(zap)->zap_ptrtbl,
		    idx, blk, tx));
	}
}
static int
zap_set_idx_range_to_blk(zap_t *zap, uint64_t idx, uint64_t nptrs, uint64_t blk,
    dmu_tx_t *tx)
{
	int bs = FZAP_BLOCK_SHIFT(zap);
	int epb = bs >> 3; /* entries per block */
	int err = 0;

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	/*
	 * Check for i/o errors
	 */
	for (int i = 0; i < nptrs; i += epb) {
		uint64_t blk;
		err = zap_idx_to_blk(zap, idx + i, &blk);
		if (err != 0)
			return (err);
	}

	for (int i = 0; i < nptrs; i++) {
		err = zap_set_idx_to_blk(zap, idx + i, blk, tx);
		ASSERT0(err); /* we checked for i/o errors above */
		if (err != 0)
			break;
	}

	return (err);
}
#define	ZAP_PREFIX_HASH(pref, pref_len)	((pref) << (64 - (pref_len)))

/*
 * Each leaf has a single range of entries (block pointers) in the ZAP ptrtbl.
 * If two leaves are siblings, their ranges are adjacent and contain the same
 * number of entries. In order to find out if a leaf has a sibling, we need to
 * check the range corresponding to the sibling leaf. There is no need to check
 * all entries in the range, we only need to check the first and the last one.
 */
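/*
 * For example, with zt_shift of 5 a leaf with prefix 0b101 and prefix_len 3
 * owns the four consecutive ptrtbl entries at indexes 0b10100..0b10111; its
 * sibling (prefix 0b100) owns 0b10000..0b10011.  If the first and last
 * entries of the sibling's range hold the same block number, the whole range
 * does, and that block is the sibling's blkid; otherwise there is no
 * collapsible sibling and 0 is returned.  (Illustrative values only.)
 */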
static uint64_t
check_sibling_ptrtbl_range(zap_t *zap, uint64_t prefix, uint64_t prefix_len)
{
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	uint64_t h = ZAP_PREFIX_HASH(prefix, prefix_len);
	uint64_t idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	uint64_t pref_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift - prefix_len;
	uint64_t nptrs = (1 << pref_diff);
	uint64_t first;
	uint64_t last;

	ASSERT3U(idx+nptrs, <=, (1UL << zap_f_phys(zap)->zap_ptrtbl.zt_shift));

	if (zap_idx_to_blk(zap, idx, &first) != 0)
		return (0);

	if (zap_idx_to_blk(zap, idx + nptrs - 1, &last) != 0)
		return (0);

	if (first != last)
		return (0);
	return (first);
}
static int
zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp)
{
	uint64_t blk;

	ASSERT(zap->zap_dbuf == NULL ||
	    zap_f_phys(zap) == zap->zap_dbuf->db_data);

	/* Reality check for corrupt zap objects (leaf or header). */
	if ((zap_f_phys(zap)->zap_block_type != ZBT_LEAF &&
	    zap_f_phys(zap)->zap_block_type != ZBT_HEADER) ||
	    zap_f_phys(zap)->zap_magic != ZAP_MAGIC) {
		return (SET_ERROR(EIO));
	}

	uint64_t idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	int err = zap_idx_to_blk(zap, idx, &blk);
	if (err != 0)
		return (err);
	err = zap_get_leaf_byblk(zap, blk, tx, lt, lp);

	ASSERT(err ||
	    ZAP_HASH_IDX(h, zap_leaf_phys(*lp)->l_hdr.lh_prefix_len) ==
	    zap_leaf_phys(*lp)->l_hdr.lh_prefix);
	return (err);
}
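/*
 * To summarize the lookup path: the top zt_shift bits of the entry's hash
 * index the pointer table, the table yields a leaf block id, and
 * zap_get_leaf_byblk() reads and locks that leaf.  The trailing ASSERT
 * checks that the leaf we found really covers the hash's prefix.
 */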
static int
zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l,
    const void *tag, dmu_tx_t *tx, zap_leaf_t **lp)
{
	zap_t *zap = zn->zn_zap;
	uint64_t hash = zn->zn_hash;
	int err;
	int old_prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len;

	ASSERT3U(old_prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
	    zap_leaf_phys(l)->l_hdr.lh_prefix);

	if (zap_tryupgradedir(zap, tx) == 0 ||
	    old_prefix_len == zap_f_phys(zap)->zap_ptrtbl.zt_shift) {
		/* We failed to upgrade, or need to grow the pointer table */
		objset_t *os = zap->zap_objset;
		uint64_t object = zap->zap_object;

		zap_put_leaf(l);
		*lp = l = NULL;
		zap_unlockdir(zap, tag);
		err = zap_lockdir(os, object, tx, RW_WRITER,
		    FALSE, FALSE, tag, &zn->zn_zap);
		zap = zn->zn_zap;
		if (err != 0)
			return (err);
		ASSERT(!zap->zap_ismicro);

		while (old_prefix_len ==
		    zap_f_phys(zap)->zap_ptrtbl.zt_shift) {
			err = zap_grow_ptrtbl(zap, tx);
			if (err != 0)
				return (err);
		}

		err = zap_deref_leaf(zap, hash, tx, RW_WRITER, &l);
		if (err != 0)
			return (err);

		if (zap_leaf_phys(l)->l_hdr.lh_prefix_len != old_prefix_len) {
			/* it split while our locks were down */
			*lp = l;
			return (0);
		}
	}
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	ASSERT3U(old_prefix_len, <, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
	    zap_leaf_phys(l)->l_hdr.lh_prefix);

	int prefix_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
	    (old_prefix_len + 1);
	uint64_t sibling =
	    (ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff;

	/* check for i/o errors before doing zap_leaf_split */
	for (int i = 0; i < (1ULL << prefix_diff); i++) {
		uint64_t blk;
		err = zap_idx_to_blk(zap, sibling + i, &blk);
		if (err != 0)
			return (err);
		ASSERT3U(blk, ==, l->l_blkid);
	}

	zap_leaf_t *nl = zap_create_leaf(zap, tx);
	zap_leaf_split(l, nl, zap->zap_normflags != 0);

	/* set sibling pointers */
	for (int i = 0; i < (1ULL << prefix_diff); i++) {
		err = zap_set_idx_to_blk(zap, sibling + i, nl->l_blkid, tx);
		ASSERT0(err); /* we checked for i/o errors above */
	}

	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_prefix_len, >, 0);

	if (hash & (1ULL << (64 - zap_leaf_phys(l)->l_hdr.lh_prefix_len))) {
		/* we want the sibling */
		zap_put_leaf(l);
		*lp = nl;
	} else {
		zap_put_leaf(nl);
		*lp = l;
	}

	return (0);
}
static void
zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_leaf_t *l,
    const void *tag, dmu_tx_t *tx)
{
	zap_t *zap = zn->zn_zap;
	int shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
	int leaffull = (zap_leaf_phys(l)->l_hdr.lh_prefix_len == shift &&
	    zap_leaf_phys(l)->l_hdr.lh_nfree < ZAP_LEAF_LOW_WATER);

	zap_put_leaf(l);

	if (leaffull || zap_f_phys(zap)->zap_ptrtbl.zt_nextblk) {
		/*
		 * We are in the middle of growing the pointer table, or
		 * this leaf will soon make us grow it.
		 */
		if (zap_tryupgradedir(zap, tx) == 0) {
			objset_t *os = zap->zap_objset;
			uint64_t zapobj = zap->zap_object;

			zap_unlockdir(zap, tag);
			int err = zap_lockdir(os, zapobj, tx,
			    RW_WRITER, FALSE, FALSE, tag, &zn->zn_zap);
			zap = zn->zn_zap;
			if (err != 0)
				return;
		}

		/* could have finished growing while our locks were down */
		if (zap_f_phys(zap)->zap_ptrtbl.zt_shift == shift)
			(void) zap_grow_ptrtbl(zap, tx);
	}
}
static int
fzap_checkname(zap_name_t *zn)
{
	uint32_t maxnamelen = zn->zn_normbuf_len;
	uint64_t len = (uint64_t)zn->zn_key_orig_numints * zn->zn_key_intlen;
	/* Only allow directory zap to have longname */
	if (len > maxnamelen ||
	    (len > ZAP_MAXNAMELEN &&
	    zn->zn_zap->zap_dnode->dn_type != DMU_OT_DIRECTORY_CONTENTS))
		return (SET_ERROR(ENAMETOOLONG));

	return (0);
}
static int
fzap_checksize(uint64_t integer_size, uint64_t num_integers)
{
	/* Only integer sizes supported by C */
	switch (integer_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return (SET_ERROR(EINVAL));
	}

	if (integer_size * num_integers > ZAP_MAXVALUELEN)
		return (SET_ERROR(E2BIG));

	return (0);
}
static int
fzap_check(zap_name_t *zn, uint64_t integer_size, uint64_t num_integers)
{
	int err = fzap_checkname(zn);
	if (err != 0)
		return (err);
	return (fzap_checksize(integer_size, num_integers));
}
/*
 * Routines for manipulating attributes.
 */
int
fzap_lookup(zap_name_t *zn,
    uint64_t integer_size, uint64_t num_integers, void *buf,
    char *realname, int rn_len, boolean_t *ncp)
{
	zap_leaf_t *l;
	zap_entry_handle_t zeh;

	int err = fzap_checkname(zn);
	if (err != 0)
		return (err);

	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
	if (err != 0)
		return (err);
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err == 0) {
		if ((err = fzap_checksize(integer_size, num_integers)) != 0) {
			zap_put_leaf(l);
			return (err);
		}

		err = zap_entry_read(&zeh, integer_size, num_integers, buf);
		(void) zap_entry_read_name(zn->zn_zap, &zeh, rn_len, realname);
		if (ncp) {
			*ncp = zap_entry_normalization_conflict(&zeh,
			    zn, NULL, zn->zn_zap);
		}
	}

	zap_put_leaf(l);
	return (err);
}
static int
fzap_add_cd(zap_name_t *zn,
    uint64_t integer_size, uint64_t num_integers,
    const void *val, uint32_t cd, const void *tag, dmu_tx_t *tx)
{
	zap_leaf_t *l;
	int err;
	zap_entry_handle_t zeh;
	zap_t *zap = zn->zn_zap;

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	ASSERT(!zap->zap_ismicro);
	ASSERT(fzap_check(zn, integer_size, num_integers) == 0);

	err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
	if (err != 0)
		return (err);
retry:
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err == 0) {
		err = SET_ERROR(EEXIST);
		goto out;
	}
	if (err != ENOENT)
		goto out;

	err = zap_entry_create(l, zn, cd,
	    integer_size, num_integers, val, &zeh);

	if (err == 0) {
		zap_increment_num_entries(zap, 1, tx);
	} else if (err == EAGAIN) {
		err = zap_expand_leaf(zn, l, tag, tx, &l);
		zap = zn->zn_zap;	/* zap_expand_leaf() may change zap */
		if (err == 0)
			goto retry;
	}

out:
	if (l != NULL)
		zap_put_leaf_maybe_grow_ptrtbl(zn, l, tag, tx);
	return (err);
}
int
fzap_add(zap_name_t *zn,
    uint64_t integer_size, uint64_t num_integers,
    const void *val, const void *tag, dmu_tx_t *tx)
{
	int err = fzap_check(zn, integer_size, num_integers);
	if (err != 0)
		return (err);

	return (fzap_add_cd(zn, integer_size, num_integers,
	    val, ZAP_NEED_CD, tag, tx));
}
int
fzap_update(zap_name_t *zn,
    int integer_size, uint64_t num_integers, const void *val,
    const void *tag, dmu_tx_t *tx)
{
	zap_leaf_t *l;
	int err;
	boolean_t create;
	zap_entry_handle_t zeh;
	zap_t *zap = zn->zn_zap;

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	err = fzap_check(zn, integer_size, num_integers);
	if (err != 0)
		return (err);

	err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
	if (err != 0)
		return (err);
retry:
	err = zap_leaf_lookup(l, zn, &zeh);
	create = (err == ENOENT);
	ASSERT(err == 0 || err == ENOENT);

	if (create) {
		err = zap_entry_create(l, zn, ZAP_NEED_CD,
		    integer_size, num_integers, val, &zeh);
		if (err == 0)
			zap_increment_num_entries(zap, 1, tx);
	} else {
		err = zap_entry_update(&zeh, integer_size, num_integers, val);
	}

	if (err == EAGAIN) {
		err = zap_expand_leaf(zn, l, tag, tx, &l);
		zap = zn->zn_zap;	/* zap_expand_leaf() may change zap */
		if (err == 0)
			goto retry;
	}

	if (l != NULL)
		zap_put_leaf_maybe_grow_ptrtbl(zn, l, tag, tx);
	return (err);
}
int
fzap_length(zap_name_t *zn,
    uint64_t *integer_size, uint64_t *num_integers)
{
	zap_leaf_t *l;
	int err;
	zap_entry_handle_t zeh;

	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
	if (err != 0)
		return (err);
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err != 0)
		goto out;

	if (integer_size != NULL)
		*integer_size = zeh.zeh_integer_size;
	if (num_integers != NULL)
		*num_integers = zeh.zeh_num_integers;
out:
	zap_put_leaf(l);
	return (err);
}
int
fzap_remove(zap_name_t *zn, dmu_tx_t *tx)
{
	zap_leaf_t *l;
	int err;
	zap_entry_handle_t zeh;

	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, tx, RW_WRITER, &l);
	if (err != 0)
		return (err);
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err == 0) {
		zap_entry_remove(&zeh);
		zap_increment_num_entries(zn->zn_zap, -1, tx);

		if (zap_leaf_phys(l)->l_hdr.lh_nentries == 0 &&
		    zap_shrink_enabled)
			return (zap_shrink(zn, l, tx));
	}
	zap_put_leaf(l);
	return (err);
}
void
fzap_prefetch(zap_name_t *zn)
{
	uint64_t blk;
	zap_t *zap = zn->zn_zap;

	uint64_t idx = ZAP_HASH_IDX(zn->zn_hash,
	    zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	if (zap_idx_to_blk(zap, idx, &blk) != 0)
		return;
	int bs = FZAP_BLOCK_SHIFT(zap);
	dmu_prefetch_by_dnode(zap->zap_dnode, 0, blk << bs, 1 << bs,
	    ZIO_PRIORITY_SYNC_READ);
}
/*
 * Helper functions for consumers.
 */

uint64_t
zap_create_link(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
    const char *name, dmu_tx_t *tx)
{
	return (zap_create_link_dnsize(os, ot, parent_obj, name, 0, tx));
}
uint64_t
zap_create_link_dnsize(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
    const char *name, int dnodesize, dmu_tx_t *tx)
{
	uint64_t new_obj;

	new_obj = zap_create_dnsize(os, ot, DMU_OT_NONE, 0, dnodesize, tx);
	VERIFY(new_obj != 0);
	VERIFY0(zap_add(os, parent_obj, name, sizeof (uint64_t), 1, &new_obj,
	    tx));

	return (new_obj);
}
int
zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask,
    char *name, uint64_t namelen)
{
	zap_cursor_t zc;
	int err;

	if (mask == 0)
		mask = -1ULL;

	zap_attribute_t *za = zap_attribute_long_alloc();
	for (zap_cursor_init(&zc, os, zapobj);
	    (err = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((za->za_first_integer & mask) == (value & mask)) {
			if (strlcpy(name, za->za_name, namelen) >= namelen)
				err = SET_ERROR(ENAMETOOLONG);
			break;
		}
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	return (err);
}
int
zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx)
{
	zap_cursor_t zc;
	int err = 0;

	zap_attribute_t *za = zap_attribute_long_alloc();
	for (zap_cursor_init(&zc, os, fromobj);
	    zap_cursor_retrieve(&zc, za) == 0;
	    (void) zap_cursor_advance(&zc)) {
		if (za->za_integer_length != 8 || za->za_num_integers != 1) {
			err = SET_ERROR(EINVAL);
			break;
		}
		err = zap_add(os, intoobj, za->za_name,
		    8, 1, &za->za_first_integer, tx);
		if (err != 0)
			break;
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	return (err);
}
int
zap_join_key(objset_t *os, uint64_t fromobj, uint64_t intoobj,
    uint64_t value, dmu_tx_t *tx)
{
	zap_cursor_t zc;
	int err = 0;

	zap_attribute_t *za = zap_attribute_long_alloc();
	for (zap_cursor_init(&zc, os, fromobj);
	    zap_cursor_retrieve(&zc, za) == 0;
	    (void) zap_cursor_advance(&zc)) {
		if (za->za_integer_length != 8 || za->za_num_integers != 1) {
			err = SET_ERROR(EINVAL);
			break;
		}
		err = zap_add(os, intoobj, za->za_name,
		    8, 1, &value, tx);
		if (err != 0)
			break;
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	return (err);
}
int
zap_join_increment(objset_t *os, uint64_t fromobj, uint64_t intoobj,
    dmu_tx_t *tx)
{
	zap_cursor_t zc;
	int err = 0;

	zap_attribute_t *za = zap_attribute_long_alloc();
	for (zap_cursor_init(&zc, os, fromobj);
	    zap_cursor_retrieve(&zc, za) == 0;
	    (void) zap_cursor_advance(&zc)) {
		uint64_t delta = 0;

		if (za->za_integer_length != 8 || za->za_num_integers != 1) {
			err = SET_ERROR(EINVAL);
			break;
		}

		err = zap_lookup(os, intoobj, za->za_name, 8, 1, &delta);
		if (err != 0 && err != ENOENT)
			break;
		delta += za->za_first_integer;
		err = zap_update(os, intoobj, za->za_name, 8, 1, &delta, tx);
		if (err != 0)
			break;
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	return (err);
}
int
zap_add_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
	return (zap_add(os, obj, name, 8, 1, &value, tx));
}
int
zap_remove_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
	return (zap_remove(os, obj, name, tx));
}
int
zap_lookup_int(objset_t *os, uint64_t obj, uint64_t value)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
	return (zap_lookup(os, obj, name, 8, 1, &value));
}
int
zap_add_int_key(objset_t *os, uint64_t obj,
    uint64_t key, uint64_t value, dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
	return (zap_add(os, obj, name, 8, 1, &value, tx));
}
int
zap_update_int_key(objset_t *os, uint64_t obj,
    uint64_t key, uint64_t value, dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
	return (zap_update(os, obj, name, 8, 1, &value, tx));
}
int
zap_lookup_int_key(objset_t *os, uint64_t obj, uint64_t key, uint64_t *valuep)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
	return (zap_lookup(os, obj, name, 8, 1, valuep));
}
int
zap_increment(objset_t *os, uint64_t obj, const char *name, int64_t delta,
    dmu_tx_t *tx)
{
	uint64_t value = 0;

	int err = zap_lookup(os, obj, name, 8, 1, &value);
	if (err != 0 && err != ENOENT)
		return (err);
	value += delta;
	if (value == 0)
		err = zap_remove(os, obj, name, tx);
	else
		err = zap_update(os, obj, name, 8, 1, &value, tx);
	return (err);
}
int
zap_increment_int(objset_t *os, uint64_t obj, uint64_t key, int64_t delta,
    dmu_tx_t *tx)
{
	char name[20];

	(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
	return (zap_increment(os, obj, name, delta, tx));
}
/*
 * Routines for iterating over the attributes.
 */

int
fzap_cursor_retrieve(zap_t *zap, zap_cursor_t *zc, zap_attribute_t *za)
{
	int err;
	zap_entry_handle_t zeh;
	zap_leaf_t *l;

	/* retrieve the next entry at or after zc_hash/zc_cd */
	/* if no entry, return ENOENT */

	/*
	 * If we are reading from the beginning, we're almost certain to
	 * iterate over the entire ZAP object.  If there are multiple leaf
	 * blocks (freeblk > 2), prefetch the whole object (up to
	 * dmu_prefetch_max bytes), so that we read the leaf blocks
	 * concurrently. (Unless noprefetch was requested via
	 * zap_cursor_init_noprefetch()).
	 */
	if (zc->zc_hash == 0 && zap_iterate_prefetch &&
	    zc->zc_prefetch && zap_f_phys(zap)->zap_freeblk > 2) {
		dmu_prefetch_by_dnode(zap->zap_dnode, 0, 0,
		    zap_f_phys(zap)->zap_freeblk << FZAP_BLOCK_SHIFT(zap),
		    ZIO_PRIORITY_ASYNC_READ);
	}

	if (zc->zc_leaf) {
		rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);

		/*
		 * The leaf was either shrunk or split.
		 */
		if ((zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_block_type == 0) ||
		    (ZAP_HASH_IDX(zc->zc_hash,
		    zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_prefix_len) !=
		    zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_prefix)) {
			zap_put_leaf(zc->zc_leaf);
			zc->zc_leaf = NULL;
		}
	}

again:
	if (zc->zc_leaf == NULL) {
		err = zap_deref_leaf(zap, zc->zc_hash, NULL, RW_READER,
		    &zc->zc_leaf);
		if (err != 0)
			return (err);
	}
	l = zc->zc_leaf;

	err = zap_leaf_lookup_closest(l, zc->zc_hash, zc->zc_cd, &zeh);

	if (err == ENOENT) {
		if (zap_leaf_phys(l)->l_hdr.lh_prefix_len == 0) {
			zc->zc_hash = -1ULL;
			zc->zc_cd = 0;
		} else {
			uint64_t nocare = (1ULL <<
			    (64 - zap_leaf_phys(l)->l_hdr.lh_prefix_len)) - 1;

			zc->zc_hash = (zc->zc_hash & ~nocare) + nocare + 1;
			zc->zc_cd = 0;

			if (zc->zc_hash == 0) {
				zc->zc_hash = -1ULL;
			} else {
				zap_put_leaf(zc->zc_leaf);
				zc->zc_leaf = NULL;
				goto again;
			}
		}
	}

	if (err == 0) {
		zc->zc_hash = zeh.zeh_hash;
		zc->zc_cd = zeh.zeh_cd;
		za->za_integer_length = zeh.zeh_integer_size;
		za->za_num_integers = zeh.zeh_num_integers;
		if (zeh.zeh_num_integers == 0) {
			za->za_first_integer = 0;
		} else {
			err = zap_entry_read(&zeh, 8, 1, &za->za_first_integer);
			ASSERT(err == 0 || err == EOVERFLOW);
		}
		err = zap_entry_read_name(zap, &zeh,
		    za->za_name_len, za->za_name);
		ASSERT(err == 0);

		za->za_normalization_conflict =
		    zap_entry_normalization_conflict(&zeh,
		    NULL, za->za_name, zap);
	}
	rw_exit(&zc->zc_leaf->l_rwlock);
	return (err);
}
static void
zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, zap_stats_t *zs)
{
	uint64_t lastblk = 0;

	/*
	 * NB: if a leaf has more pointers than an entire ptrtbl block
	 * can hold, then it'll be accounted for more than once, since
	 * we won't have lastblk.
	 */
	for (int i = 0; i < len; i++) {
		zap_leaf_t *l;

		if (tbl[i] == lastblk)
			continue;
		lastblk = tbl[i];

		int err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l);
		if (err == 0) {
			zap_leaf_stats(zap, l, zs);
			zap_put_leaf(l);
		}
	}
}
void
fzap_get_stats(zap_t *zap, zap_stats_t *zs)
{
	int bs = FZAP_BLOCK_SHIFT(zap);
	zs->zs_blocksize = 1ULL << bs;

	/*
	 * Set zap_phys_t fields
	 */
	zs->zs_num_leafs = zap_f_phys(zap)->zap_num_leafs;
	zs->zs_num_entries = zap_f_phys(zap)->zap_num_entries;
	zs->zs_num_blocks = zap_f_phys(zap)->zap_freeblk;
	zs->zs_block_type = zap_f_phys(zap)->zap_block_type;
	zs->zs_magic = zap_f_phys(zap)->zap_magic;
	zs->zs_salt = zap_f_phys(zap)->zap_salt;

	/*
	 * Set zap_ptrtbl fields
	 */
	zs->zs_ptrtbl_len = 1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift;
	zs->zs_ptrtbl_nextblk = zap_f_phys(zap)->zap_ptrtbl.zt_nextblk;
	zs->zs_ptrtbl_blks_copied =
	    zap_f_phys(zap)->zap_ptrtbl.zt_blks_copied;
	zs->zs_ptrtbl_zt_blk = zap_f_phys(zap)->zap_ptrtbl.zt_blk;
	zs->zs_ptrtbl_zt_numblks = zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
	zs->zs_ptrtbl_zt_shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;

	if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
		/* the ptrtbl is entirely in the header block. */
		zap_stats_ptrtbl(zap, &ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
		    1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap), zs);
	} else {
		dmu_prefetch_by_dnode(zap->zap_dnode, 0,
		    zap_f_phys(zap)->zap_ptrtbl.zt_blk << bs,
		    zap_f_phys(zap)->zap_ptrtbl.zt_numblks << bs,
		    ZIO_PRIORITY_SYNC_READ);

		for (int b = 0; b < zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
		    b++) {
			dmu_buf_t *db;
			int err;

			err = dmu_buf_hold_by_dnode(zap->zap_dnode,
			    (zap_f_phys(zap)->zap_ptrtbl.zt_blk + b) << bs,
			    FTAG, &db, DMU_READ_NO_PREFETCH);
			if (err == 0) {
				zap_stats_ptrtbl(zap, db->db_data,
				    1<<(bs-3), zs);
				dmu_buf_rele(db, FTAG);
			}
		}
	}
}
/*
 * Find last allocated block and update freeblk.
 */
static void
zap_trunc(zap_t *zap)
{
	uint64_t nentries;
	uint64_t lastblk;

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	if (zap_f_phys(zap)->zap_ptrtbl.zt_blk > 0) {
		/* External ptrtbl */
		nentries = (1 << zap_f_phys(zap)->zap_ptrtbl.zt_shift);
		lastblk = zap_f_phys(zap)->zap_ptrtbl.zt_blk +
		    zap_f_phys(zap)->zap_ptrtbl.zt_numblks - 1;
	} else {
		/* Embedded ptrtbl */
		nentries = (1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
		lastblk = 0;
	}

	for (uint64_t idx = 0; idx < nentries; idx++) {
		uint64_t blk;
		if (zap_idx_to_blk(zap, idx, &blk) != 0)
			return;
		if (blk > lastblk)
			lastblk = blk;
	}

	ASSERT3U(lastblk, <, zap_f_phys(zap)->zap_freeblk);

	zap_f_phys(zap)->zap_freeblk = lastblk + 1;
}
/*
 * ZAP shrinking algorithm.
 *
 * We shrink a ZAP recursively, removing empty leaves. We can remove an empty
 * leaf only if it has a sibling. Sibling leaves have the same prefix length
 * and their prefixes differ only by the least significant (sibling) bit. We
 * require both siblings to be empty. This eliminates the need to rehash the
 * non-empty remaining leaf. When we have removed one of two empty siblings,
 * we set the ptrtbl entries of the removed leaf to point to the remaining
 * leaf. The prefix length of the remaining leaf is decremented. As a result,
 * it has a new prefix and it might have a new sibling. So, we repeat the
 * process.
 *
 * Steps:
 * 1. Check if a sibling leaf (sl) exists and it is empty.
 * 2. Release the leaf (l) if it has the sibling bit (slbit) equal to 1.
 * 3. Release the sibling (sl) to dereference it again with a WRITER lock.
 * 4. Upgrade the zapdir lock to WRITER (once).
 * 5. Dereference the released leaves again.
 * 6. If needed, recheck whether both leaves are still siblings and empty.
 * 7. Set the ptrtbl pointers of the removed leaf (slbit 1) to point to the
 *    blkid of the remaining leaf (slbit 0).
 * 8. Free the disk block of the removed leaf (dmu_free_range).
 * 9. Decrement prefix_len of the remaining leaf.
 * 10. Repeat the steps.
 */
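/*
 * For example, suppose leaves A (prefix 0b1101) and B (prefix 0b1100) are
 * both empty and zt_shift is 6: A owns ptrtbl entries 0b110100..0b110111
 * and B owns 0b110000..0b110011.  The collapse repoints A's four entries at
 * B's block, frees A's block, and B becomes the single leaf with prefix
 * 0b110 and prefix_len 3, after which the test repeats with B's new sibling
 * (prefix 0b111).  (Illustrative values only.)
 */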
static int
zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
{
	zap_t *zap = zn->zn_zap;
	int64_t zt_shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
	uint64_t hash = zn->zn_hash;
	uint64_t prefix = zap_leaf_phys(l)->l_hdr.lh_prefix;
	uint64_t prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len;
	boolean_t trunc = B_FALSE;
	int err = 0;

	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);
	ASSERT3U(prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	ASSERT3U(ZAP_HASH_IDX(hash, prefix_len), ==, prefix);

	boolean_t writer = B_FALSE;

	/*
	 * To avoid deadlock always deref leaves in the same order -
	 * sibling 0 first, then sibling 1.
	 */
	while (prefix_len) {
		zap_leaf_t *sl;
		int64_t prefix_diff = zt_shift - prefix_len;
		uint64_t sl_prefix = prefix ^ 1;
		uint64_t sl_hash = ZAP_PREFIX_HASH(sl_prefix, prefix_len);
		int slbit = prefix & 1;

		ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);

		/*
		 * Check if there is a sibling by reading ptrtbl ptrs.
		 */
		if (check_sibling_ptrtbl_range(zap, sl_prefix, prefix_len) == 0)
			break;

		/*
		 * If the leaf we hold is sibling 1, unlock it - we haven't
		 * yet dereferenced sibling 0.
		 */
		if (slbit == 1) {
			zap_put_leaf(l);
			l = NULL;
		}

		/*
		 * Dereference sibling leaf and check if it is empty.
		 */
		if ((err = zap_deref_leaf(zap, sl_hash, tx, RW_READER,
		    &sl)) != 0)
			break;

		ASSERT3U(ZAP_HASH_IDX(sl_hash, prefix_len), ==, sl_prefix);

		/*
		 * Check if we have a sibling and it is empty.
		 */
		if (zap_leaf_phys(sl)->l_hdr.lh_prefix_len != prefix_len ||
		    zap_leaf_phys(sl)->l_hdr.lh_nentries != 0) {
			zap_put_leaf(sl);
			break;
		}

		zap_put_leaf(sl);

		/*
		 * If there are two empty siblings, we have work to do, so
		 * we need to lock the ZAP ptrtbl as WRITER.
		 */
		if (!writer && (writer = zap_tryupgradedir(zap, tx)) == 0) {
			/* We failed to upgrade */
			if (l != NULL) {
				zap_put_leaf(l);
				l = NULL;
			}

			/*
			 * Usually, the right way to upgrade from a READER lock
			 * to a WRITER lock is to call zap_unlockdir() and
			 * zap_lockdir(), but we do not have a tag. Instead,
			 * we do it in a more sophisticated way.
			 */
			rw_exit(&zap->zap_rwlock);
			rw_enter(&zap->zap_rwlock, RW_WRITER);
			dmu_buf_will_dirty(zap->zap_dbuf, tx);

			zt_shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
			writer = B_TRUE;
		}

		/*
		 * Here we have a WRITER lock for the ptrtbl.
		 * Now, we need a WRITER lock for both sibling leaves.
		 * Also, we have to recheck if the leaves are still siblings
		 * and still empty.
		 */
		if (l == NULL) {
			/* sibling 0 */
			if ((err = zap_deref_leaf(zap, (slbit ? sl_hash : hash),
			    tx, RW_WRITER, &l)) != 0)
				break;

			/*
			 * The leaf isn't empty anymore or
			 * it was shrunk/split while our locks were down.
			 */
			if (zap_leaf_phys(l)->l_hdr.lh_nentries != 0 ||
			    zap_leaf_phys(l)->l_hdr.lh_prefix_len != prefix_len)
				break;
		}

		/* sibling 1 */
		if ((err = zap_deref_leaf(zap, (slbit ? hash : sl_hash), tx,
		    RW_WRITER, &sl)) != 0)
			break;

		/*
		 * The leaf isn't empty anymore or
		 * it was shrunk/split while our locks were down.
		 */
		if (zap_leaf_phys(sl)->l_hdr.lh_nentries != 0 ||
		    zap_leaf_phys(sl)->l_hdr.lh_prefix_len != prefix_len) {
			zap_put_leaf(sl);
			break;
		}

		/* If we have gotten here, we have a leaf to collapse */
		uint64_t idx = (slbit ? prefix : sl_prefix) << prefix_diff;
		uint64_t nptrs = (1ULL << prefix_diff);
		uint64_t sl_blkid = sl->l_blkid;

		/*
		 * Set the ptrtbl entries to point to the sibling 0 blkid
		 */
		if ((err = zap_set_idx_range_to_blk(zap, idx, nptrs, l->l_blkid,
		    tx)) != 0) {
			zap_put_leaf(sl);
			break;
		}

		/*
		 * Free sibling 1 disk block.
		 */
		int bs = FZAP_BLOCK_SHIFT(zap);
		if (sl_blkid == zap_f_phys(zap)->zap_freeblk - 1)
			trunc = B_TRUE;

		(void) dmu_free_range(zap->zap_objset, zap->zap_object,
		    sl_blkid << bs, 1 << bs, tx);
		zap_put_leaf(sl);

		zap_f_phys(zap)->zap_num_leafs--;

		/*
		 * Update prefix and prefix_len.
		 */
		zap_leaf_phys(l)->l_hdr.lh_prefix >>= 1;
		zap_leaf_phys(l)->l_hdr.lh_prefix_len--;

		prefix = zap_leaf_phys(l)->l_hdr.lh_prefix;
		prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len;
	}

	if (trunc)
		zap_trunc(zap);

	if (l != NULL)
		zap_put_leaf(l);

	return (err);
}
ZFS_MODULE_PARAM(zfs, , zap_iterate_prefetch, INT, ZMOD_RW,
	"When iterating ZAP object, prefetch it");

ZFS_MODULE_PARAM(zfs, , zap_shrink_enabled, INT, ZMOD_RW,
	"Enable ZAP shrinking");