BRT: More optimizations after per-vdev splitting
[zfs.git] / module / zfs / brt.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2020, 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/brt.h>
#include <sys/brt_impl.h>
#include <sys/ddt.h>
#include <sys/bitmap.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/kstat.h>
#include <sys/wmsum.h>

/*
 * Block Cloning design.
 *
 * Block Cloning allows one to manually clone a file (or a subset of its
 * blocks) into another (or the same) file by just creating additional
 * references to the data blocks without copying the data itself. Those
 * references are kept in the Block Reference Tables (BRTs).
 *
 * In many ways this is similar to the existing deduplication, but there are
 * some important differences:
 *
 * - Deduplication is automatic and Block Cloning is not - one has to use a
 *   dedicated system call(s) to clone the given file/blocks.
 * - Deduplication keeps all data blocks in its table, even those referenced
 *   just once. Block Cloning creates an entry in its tables only when there
 *   are at least two references to the given data block. If the block was
 *   never explicitly cloned or the second to last reference was dropped,
 *   there will be neither space nor performance overhead.
 * - Deduplication needs data to work - one needs to pass real data to the
 *   write(2) syscall, so the hash can be calculated. Block Cloning doesn't
 *   require data, just block pointers to the data, so it is extremely fast,
 *   as we pay neither the cost of reading the data nor the cost of writing
 *   the data - we operate exclusively on metadata.
 * - If the D (dedup) bit is not set in the block pointer, it means that
 *   the block is not in the dedup table (DDT) and we won't consult the DDT
 *   when we need to free the block. Block Cloning must be consulted on every
 *   free, because we cannot modify the source BP (e.g. by setting something
 *   similar to the D bit), thus we have no hint if the block is in the
 *   Block Reference Table (BRT), so we need to look into the BRT. There is
 *   an optimization in place that allows us to eliminate the majority of
 *   BRT lookups, described below in the "Minimizing free penalty" section.
 * - The BRT entry is much smaller than the DDT entry - for BRT we only store
 *   a 64bit offset and a 64bit reference counter.
 * - Dedup keys are cryptographic hashes, so two blocks that are close to
 *   each other on disk are most likely in totally different parts of the
 *   DDT. The BRT entry keys are offsets into a single top-level VDEV, so
 *   data blocks from one file should have BRT entries close to each other.
 * - Scrub will only do a single pass over a block that is referenced
 *   multiple times in the DDT. Unfortunately this is not currently (if at
 *   all) possible with Block Cloning, so a block referenced multiple times
 *   will be scrubbed multiple times. The new, sorted scrub should be able
 *   to eliminate duplicated reads given enough memory.
 * - Deduplication requires a cryptographically strong hash as a checksum or
 *   additional data verification. Block Cloning works with any checksum
 *   algorithm or even with checksumming disabled.
 *
 * As mentioned above, the BRT entries are much smaller than the DDT entries.
 * To uniquely identify a block we just need its vdev id and offset. We also
 * need to maintain a reference counter. The vdev id will often repeat, as
 * there is a small number of top-level VDEVs and a large number of blocks
 * stored in each VDEV. We take advantage of that to reduce the BRT entry
 * size further by maintaining one BRT for each top-level VDEV, so we can
 * then have only offset and counter as the BRT entry.
 *
 * Minimizing free penalty.
 *
 * Block Cloning allows creating additional references to any existing
 * block. When we free a block there is no hint in the block pointer whether
 * the block was cloned or not, so on each free we have to check if there is
 * a corresponding entry in the BRT or not. If there is, we need to decrease
 * the reference counter. Doing a BRT lookup on every free can potentially
 * be expensive by requiring additional I/Os if the BRT doesn't fit into
 * memory. This is the main problem with deduplication, so we've learned our
 * lesson and try not to repeat the same mistake here. How do we do that? We
 * divide each top-level VDEV into 16MB regions. For each region we maintain
 * a counter that is a sum of all the BRT entries that have offsets within
 * the region. This creates the entries count array of 16bit numbers for
 * each top-level VDEV. The entries count array is always kept in memory and
 * updated on disk in the same transaction group as the BRT updates to keep
 * everything in sync. We can keep the array in memory, because it is very
 * small. With 16MB regions and a 1TB VDEV the array requires only 128kB of
 * memory (we may decide to decrease the region size even further in the
 * future). Now, when we want to free a block, we first consult the array.
 * If the counter for the whole region is zero, there is no need to look for
 * the BRT entry, as there isn't one for sure. If the counter for the region
 * is greater than zero, only then we will do a BRT lookup, and if an entry
 * is found we will decrease the reference counter in the BRT entry and in
 * the entry counters array.
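 *
 * To make the arithmetic concrete (an illustrative sketch, not the literal
 * code): a 1TB VDEV divided into 16MB regions gives 65536 regions, and at
 * 2 bytes per counter that is the 128kB mentioned above. The check on the
 * free path then reduces to roughly:
 *
 *	idx = offset / rangesize;            (rangesize is 16MB by default)
 *	if (entcount[idx] == 0)
 *		free the block immediately   (certainly not in the BRT)
 *	else
 *		do the (possibly expensive) BRT ZAP lookup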
 *
 * The entry counters array is small, but can potentially be larger for very
 * large VDEVs or smaller regions. In this case we don't want to rewrite the
 * entire array on every change. We then divide the array into 32kB blocks
 * and keep a bitmap of dirty blocks within a transaction group. When we
 * sync the transaction group we can update only the parts of the entry
 * counters array that were modified. Note: Keeping track of the dirty parts
 * of the entry counters array is implemented, but updating only parts of
 * the array on disk is not yet implemented - for now we will update the
 * entire array if there was any change.
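 *
 * Continuing the example above (for illustration only): the 128kB array of
 * a 1TB VDEV splits into four 32kB blocks, so its per-txg dirty bitmap
 * needs just four bits.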
 *
 * The implementation tries to be economic: if the BRT is not used, or is no
 * longer used, there will be no entries in the MOS and no additional memory
 * used (e.g. the entry counters array is only allocated if needed).
 *
 * Interaction between Deduplication and Block Cloning.
 *
 * If both functionalities are in use, we could end up with a block that is
 * referenced multiple times in both DDT and BRT. When we free one of the
 * references we couldn't tell where it belongs, so we would have to decide
 * which table takes precedence: do we first clear DDT references or BRT
 * references? To avoid this dilemma BRT cooperates with DDT - if a given
 * block is being cloned using BRT and the BP has the D (dedup) bit set,
 * BRT will look up the DDT entry instead and increase the counter there.
 * No BRT entry will be created for a block which has the D (dedup) bit set.
 * BRT may be more efficient for manual deduplication, but if the block is
 * already in the DDT, then creating an additional BRT entry would be less
 * efficient. This clever idea was proposed by Allan Jude.
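 *
 * Schematically, what happens for each pending clone when it is applied (a
 * simplified sketch of the logic in brt_pending_apply(), not the literal
 * code):
 *
 *	if (BP_GET_DEDUP(bp))
 *		ddt_addref(spa, bp)          (the reference lives in the DDT)
 *	else
 *		create or bump the BRT entry for (vdev, offset)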
 *
 * Block Cloning across datasets.
 *
 * Block Cloning is not limited to cloning blocks within the same dataset.
 * It is possible (and very useful) to clone blocks between different
 * datasets. One use case is recovering files from snapshots. By cloning the
 * files into the dataset we need no additional storage. Without Block
 * Cloning we would need additional space for those files.
 * Another interesting use case is moving the files between datasets
 * (copying the file content to the new dataset and removing the source
 * file). In that case Block Cloning will only be used briefly, because the
 * BRT entries will be removed when the source is removed.
 * Block Cloning across encrypted datasets is supported as long as both
 * datasets share the same master key (e.g. snapshots and clones).
 *
 * Block Cloning flow through ZFS layers.
 *
 * Note: Block Cloning can be used both for cloning file system blocks and
 * ZVOL blocks. As of this writing no interface is implemented that allows
 * for block cloning within a ZVOL.
 * FreeBSD and Linux provide the copy_file_range(2) system call and we will
 * use it for block cloning.
 *
 *	ssize_t
 *	copy_file_range(int infd, off_t *inoffp, int outfd, off_t *outoffp,
 *	    size_t len, unsigned int flags);
 *
 * Even though offsets and length represent bytes, they have to be
 * block-aligned or we will return an error so the upper layer can fall
 * back to the generic mechanism that will just copy the data.
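 *
 * For example (an illustrative sketch added here, not code from this
 * file), a userspace caller cloning the first megabyte of a file on a
 * dataset with 128kB records might do:
 *
 *	off_t inoff = 0, outoff = 0;
 *	ssize_t n = copy_file_range(infd, &inoff, outfd, &outoff,
 *	    1024 * 1024, 0);
 *
 * Both offsets and the length are multiples of the 128kB block size, so
 * the request can be cloned; otherwise the data would be copied instead.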
 * Using copy_file_range(2) will call the OS-independent zfs_clone_range()
 * function. This function was implemented based on zfs_write(), but instead
 * of writing the given data we first read block pointers using the new
 * dmu_read_l0_bps() function from the source file. Once we have BPs from
 * the source file we call the dmu_brt_clone() function on the destination
 * file. This function allocates BPs for us. We iterate over all source BPs.
 * If the given BP is a hole or an embedded block, we just copy the BP
 * as-is. If it points to real data we place this BP on a BRT pending list
 * using the brt_pending_add() function.
 *
 * We use this pending list to keep track of all BPs that got new references
 * within this transaction group.
 *
 * Some special cases to consider and how we address them:
 * - The block we want to clone may have been created within the same
 *   transaction group that we are trying to clone. Such a block has no BP
 *   allocated yet, so it cannot be immediately cloned. We return EAGAIN.
 * - The block we want to clone may have been modified within the same
 *   transaction group. We return EAGAIN.
 * - A block may be cloned multiple times during one transaction group
 *   (that's why the pending list is actually a tree and not an append-only
 *   list - this way we can figure out faster if this block is cloned for
 *   the first time in this txg or a consecutive time).
 * - A block may be cloned and freed within the same transaction group
 *   (see dbuf_undirty()).
 * - A block may be cloned and within the same transaction group the clone
 *   can be cloned again (see dmu_read_l0_bps()).
 * - A file might have been deleted, but the caller still has a file
 *   descriptor open to this file and clones it.
 *
 * When we free a block we have an additional step in the ZIO pipeline where
 * we call the zio_brt_free() function. We then call the brt_entry_decref()
 * that loads the corresponding BRT entry (if one exists) and decreases the
 * reference counter. If this is not the last reference we will stop the ZIO
 * pipeline here. If this is the last reference or the block is not in the
 * BRT, we continue the pipeline and free the block as usual.
 *
 * At the beginning of spa_sync(), where there can be no more block cloning
 * but before issuing frees, we call brt_pending_apply(). This function
 * applies all the new clones to the BRT table - we load BRT entries and
 * update reference counters. To sync new BRT entries to disk, we use the
 * brt_sync() function. This function will sync all dirty per-top-level-vdev
 * BRTs, the entry counters arrays, etc.
 *
 * Block Cloning and ZIL.
 *
 * Every clone operation is divided into chunks (similar to write) and each
 * chunk is cloned in a separate transaction. The chunk size is determined
 * by how many BPs we can fit into a single ZIL entry.
 * Replaying a clone operation is different from a regular clone operation,
 * as when we log clone operations we cannot use the source object - it may
 * reside on a different dataset, so we log the BPs we want to clone.
 * The ZIL is replayed when we mount the given dataset, not when the pool is
 * imported. Taking this into account it is possible that the pool is
 * imported without mounting datasets and the source dataset is destroyed
 * before the destination dataset is mounted and its ZIL replayed.
 * To address this situation we leverage the zil_claim() mechanism where ZFS
 * will parse all the ZILs on pool import. When we come across TX_CLONE_RANGE
 * entries, we will bump the reference counters for their BPs in the BRT.
 * Then on mount and ZIL replay we will bump the reference counters once
 * more, while the first references are dropped during ZIL destroy by
 * zil_free_clone_range(). It is possible that after zil_claim() we never
 * mount the destination, so we never replay its ZIL and just destroy it.
 * In this case the only taken references will be dropped by
 * zil_free_clone_range(), since the cloning is not going to ever take
 * place.
 */
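
/*
 * An illustrative timeline of the reference counting described above,
 * assuming a single cloned block logged as TX_CLONE_RANGE (worked example,
 * not from the original comment):
 *
 *	zil_claim() on import:      count = 1
 *	ZIL replay on mount:        count = 2
 *	zil_free_clone_range():     count = 1  (claim reference dropped)
 *
 * If the destination dataset is never mounted, replay never happens and
 * the ZIL destroy leaves the count at zero, as if the clone never took
 * place.
 */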

static kmem_cache_t *brt_entry_cache;

/*
 * Enable/disable prefetching of BRT entries that we are going to modify.
 */
static int brt_zap_prefetch = 1;

#ifdef ZFS_DEBUG
#define	BRT_DEBUG(...)	do {						\
	if ((zfs_flags & ZFS_DEBUG_BRT) != 0) {				\
		__dprintf(B_TRUE, __FILE__, __func__, __LINE__, __VA_ARGS__); \
	}								\
} while (0)
#else
#define	BRT_DEBUG(...)	do { } while (0)
#endif

static int brt_zap_default_bs = 12;
static int brt_zap_default_ibs = 12;

static kstat_t *brt_ksp;

typedef struct brt_stats {
	kstat_named_t brt_addref_entry_not_on_disk;
	kstat_named_t brt_addref_entry_on_disk;
	kstat_named_t brt_decref_entry_in_memory;
	kstat_named_t brt_decref_entry_loaded_from_disk;
	kstat_named_t brt_decref_entry_not_in_memory;
	kstat_named_t brt_decref_entry_read_lost_race;
	kstat_named_t brt_decref_entry_still_referenced;
	kstat_named_t brt_decref_free_data_later;
	kstat_named_t brt_decref_free_data_now;
	kstat_named_t brt_decref_no_entry;
} brt_stats_t;

static brt_stats_t brt_stats = {
	{ "addref_entry_not_on_disk",		KSTAT_DATA_UINT64 },
	{ "addref_entry_on_disk",		KSTAT_DATA_UINT64 },
	{ "decref_entry_in_memory",		KSTAT_DATA_UINT64 },
	{ "decref_entry_loaded_from_disk",	KSTAT_DATA_UINT64 },
	{ "decref_entry_not_in_memory",		KSTAT_DATA_UINT64 },
	{ "decref_entry_read_lost_race",	KSTAT_DATA_UINT64 },
	{ "decref_entry_still_referenced",	KSTAT_DATA_UINT64 },
	{ "decref_free_data_later",		KSTAT_DATA_UINT64 },
	{ "decref_free_data_now",		KSTAT_DATA_UINT64 },
	{ "decref_no_entry",			KSTAT_DATA_UINT64 }
};

struct {
	wmsum_t brt_addref_entry_not_on_disk;
	wmsum_t brt_addref_entry_on_disk;
	wmsum_t brt_decref_entry_in_memory;
	wmsum_t brt_decref_entry_loaded_from_disk;
	wmsum_t brt_decref_entry_not_in_memory;
	wmsum_t brt_decref_entry_read_lost_race;
	wmsum_t brt_decref_entry_still_referenced;
	wmsum_t brt_decref_free_data_later;
	wmsum_t brt_decref_free_data_now;
	wmsum_t brt_decref_no_entry;
} brt_sums;

#define	BRTSTAT_BUMP(stat)	wmsum_add(&brt_sums.stat, 1)

static int brt_entry_compare(const void *x1, const void *x2);
static void brt_vdevs_expand(spa_t *spa, uint64_t nvdevs);

static void
brt_rlock(spa_t *spa)
{
	rw_enter(&spa->spa_brt_lock, RW_READER);
}

static void
brt_wlock(spa_t *spa)
{
	rw_enter(&spa->spa_brt_lock, RW_WRITER);
}

static void
brt_unlock(spa_t *spa)
{
	rw_exit(&spa->spa_brt_lock);
}
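
/*
 * The in-memory entcount array is kept in the byte order it had on disk;
 * if the on-disk array was written by a host of the opposite endianness,
 * bv_need_byteswap is set and each 16-bit counter is swapped on access by
 * the helpers below.
 */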
static uint16_t
brt_vdev_entcount_get(const brt_vdev_t *brtvd, uint64_t idx)
{
	ASSERT3U(idx, <, brtvd->bv_size);

	if (unlikely(brtvd->bv_need_byteswap)) {
		return (BSWAP_16(brtvd->bv_entcount[idx]));
	} else {
		return (brtvd->bv_entcount[idx]);
	}
}

static void
brt_vdev_entcount_set(brt_vdev_t *brtvd, uint64_t idx, uint16_t entcnt)
{
	ASSERT3U(idx, <, brtvd->bv_size);

	if (unlikely(brtvd->bv_need_byteswap)) {
		brtvd->bv_entcount[idx] = BSWAP_16(entcnt);
	} else {
		brtvd->bv_entcount[idx] = entcnt;
	}
}

static void
brt_vdev_entcount_inc(brt_vdev_t *brtvd, uint64_t idx)
{
	uint16_t entcnt;

	ASSERT3U(idx, <, brtvd->bv_size);

	entcnt = brt_vdev_entcount_get(brtvd, idx);
	ASSERT(entcnt < UINT16_MAX);

	brt_vdev_entcount_set(brtvd, idx, entcnt + 1);
}

static void
brt_vdev_entcount_dec(brt_vdev_t *brtvd, uint64_t idx)
{
	uint16_t entcnt;

	ASSERT3U(idx, <, brtvd->bv_size);

	entcnt = brt_vdev_entcount_get(brtvd, idx);
	ASSERT(entcnt > 0);

	brt_vdev_entcount_set(brtvd, idx, entcnt - 1);
}

#ifdef ZFS_DEBUG
static void
brt_vdev_dump(brt_vdev_t *brtvd)
{
	uint64_t idx;

	uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size);
	zfs_dbgmsg("  BRT vdevid=%llu meta_dirty=%d entcount_dirty=%d "
	    "size=%llu totalcount=%llu nblocks=%llu bitmapsize=%zu",
	    (u_longlong_t)brtvd->bv_vdevid,
	    brtvd->bv_meta_dirty, brtvd->bv_entcount_dirty,
	    (u_longlong_t)brtvd->bv_size,
	    (u_longlong_t)brtvd->bv_totalcount,
	    (u_longlong_t)nblocks,
	    (size_t)BT_SIZEOFMAP(nblocks));
	if (brtvd->bv_totalcount > 0) {
		zfs_dbgmsg("    entcounts:");
		for (idx = 0; idx < brtvd->bv_size; idx++) {
			uint16_t entcnt = brt_vdev_entcount_get(brtvd, idx);
			if (entcnt > 0) {
				zfs_dbgmsg("      [%04llu] %hu",
				    (u_longlong_t)idx, entcnt);
			}
		}
	}
	if (brtvd->bv_entcount_dirty) {
		char *bitmap;

		bitmap = kmem_alloc(nblocks + 1, KM_SLEEP);
		for (idx = 0; idx < nblocks; idx++) {
			bitmap[idx] =
			    BT_TEST(brtvd->bv_bitmap, idx) ? 'x' : '.';
		}
		bitmap[idx] = '\0';
		zfs_dbgmsg("  dirty: %s", bitmap);
		kmem_free(bitmap, nblocks + 1);
	}
}
#endif
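
/*
 * Return the per-vdev BRT structure for the given vdev id.  With alloc set,
 * a lookup past the end of the current array upgrades to the write lock
 * and expands it; note the recheck of spa_brt_nvdevs after the lock is
 * reacquired, since another thread may have expanded the array meanwhile.
 */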
static brt_vdev_t *
brt_vdev(spa_t *spa, uint64_t vdevid, boolean_t alloc)
{
	brt_vdev_t *brtvd = NULL;

	brt_rlock(spa);
	if (vdevid < spa->spa_brt_nvdevs) {
		brtvd = spa->spa_brt_vdevs[vdevid];
	} else if (alloc) {
		/* New VDEV was added. */
		brt_unlock(spa);
		brt_wlock(spa);
		if (vdevid >= spa->spa_brt_nvdevs)
			brt_vdevs_expand(spa, vdevid + 1);
		brtvd = spa->spa_brt_vdevs[vdevid];
	}
	brt_unlock(spa);
	return (brtvd);
}

static void
brt_vdev_create(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx)
{
	char name[64];

	ASSERT(brtvd->bv_initiated);
	ASSERT0(brtvd->bv_mos_brtvdev);
	ASSERT0(brtvd->bv_mos_entries);

	uint64_t mos_entries = zap_create_flags(spa->spa_meta_objset, 0,
	    ZAP_FLAG_HASH64 | ZAP_FLAG_UINT64_KEY, DMU_OTN_ZAP_METADATA,
	    brt_zap_default_bs, brt_zap_default_ibs, DMU_OT_NONE, 0, tx);
	VERIFY(mos_entries != 0);
	VERIFY0(dnode_hold(spa->spa_meta_objset, mos_entries, brtvd,
	    &brtvd->bv_mos_entries_dnode));
	rw_enter(&brtvd->bv_mos_entries_lock, RW_WRITER);
	brtvd->bv_mos_entries = mos_entries;
	rw_exit(&brtvd->bv_mos_entries_lock);
	BRT_DEBUG("MOS entries created, object=%llu",
	    (u_longlong_t)brtvd->bv_mos_entries);

	/*
	 * We allocate a DMU buffer to store the bv_entcount[] array.
	 * We will keep the array size (bv_size) and the cumulative count
	 * for all bv_entcount[]s (bv_totalcount) in the bonus buffer.
	 */
	brtvd->bv_mos_brtvdev = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OTN_UINT64_METADATA, BRT_BLOCKSIZE,
	    DMU_OTN_UINT64_METADATA, sizeof (brt_vdev_phys_t), tx);
	VERIFY(brtvd->bv_mos_brtvdev != 0);
	BRT_DEBUG("MOS BRT VDEV created, object=%llu",
	    (u_longlong_t)brtvd->bv_mos_brtvdev);

	snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX,
	    (u_longlong_t)brtvd->bv_vdevid);
	VERIFY0(zap_add(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, name,
	    sizeof (uint64_t), 1, &brtvd->bv_mos_brtvdev, tx));
	BRT_DEBUG("Pool directory object created, object=%s", name);

	spa_feature_incr(spa, SPA_FEATURE_BLOCK_CLONING, tx);
}

static void
brt_vdev_realloc(spa_t *spa, brt_vdev_t *brtvd)
{
	vdev_t *vd;
	uint16_t *entcount;
	ulong_t *bitmap;
	uint64_t nblocks, onblocks, size;

	ASSERT(RW_WRITE_HELD(&brtvd->bv_lock));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	vd = vdev_lookup_top(spa, brtvd->bv_vdevid);
	size = (vdev_get_min_asize(vd) - 1) / spa->spa_brt_rangesize + 1;
	spa_config_exit(spa, SCL_VDEV, FTAG);

	entcount = vmem_zalloc(sizeof (entcount[0]) * size, KM_SLEEP);
	nblocks = BRT_RANGESIZE_TO_NBLOCKS(size);
	bitmap = kmem_zalloc(BT_SIZEOFMAP(nblocks), KM_SLEEP);

	if (!brtvd->bv_initiated) {
		ASSERT0(brtvd->bv_size);
		ASSERT0P(brtvd->bv_entcount);
		ASSERT0P(brtvd->bv_bitmap);
	} else {
		ASSERT(brtvd->bv_size > 0);
		ASSERT(brtvd->bv_entcount != NULL);
		ASSERT(brtvd->bv_bitmap != NULL);
		/*
		 * TODO: Allow vdev shrinking. We only need to implement
		 * shrinking the on-disk BRT VDEV object.
		 * dmu_free_range(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
		 *     offset, size, tx);
		 */
		ASSERT3U(brtvd->bv_size, <=, size);

		memcpy(entcount, brtvd->bv_entcount,
		    sizeof (entcount[0]) * MIN(size, brtvd->bv_size));
		vmem_free(brtvd->bv_entcount,
		    sizeof (entcount[0]) * brtvd->bv_size);
		onblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size);
		memcpy(bitmap, brtvd->bv_bitmap, MIN(BT_SIZEOFMAP(nblocks),
		    BT_SIZEOFMAP(onblocks)));
		kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(onblocks));
	}

	brtvd->bv_size = size;
	brtvd->bv_entcount = entcount;
	brtvd->bv_bitmap = bitmap;
	if (!brtvd->bv_initiated) {
		brtvd->bv_need_byteswap = FALSE;
		brtvd->bv_initiated = TRUE;
		BRT_DEBUG("BRT VDEV %llu initiated.",
		    (u_longlong_t)brtvd->bv_vdevid);
	}
}

static int
brt_vdev_load(spa_t *spa, brt_vdev_t *brtvd)
{
	dmu_buf_t *db;
	brt_vdev_phys_t *bvphys;
	int error;

	ASSERT(!brtvd->bv_initiated);
	ASSERT(brtvd->bv_mos_brtvdev != 0);

	error = dmu_bonus_hold(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
	    FTAG, &db);
	if (error != 0)
		return (error);

	bvphys = db->db_data;
	if (spa->spa_brt_rangesize == 0) {
		spa->spa_brt_rangesize = bvphys->bvp_rangesize;
	} else {
		ASSERT3U(spa->spa_brt_rangesize, ==, bvphys->bvp_rangesize);
	}

	brt_vdev_realloc(spa, brtvd);

	/* TODO: We don't support VDEV shrinking. */
	ASSERT3U(bvphys->bvp_size, <=, brtvd->bv_size);

	/*
	 * If the VDEV grew, we will leave new bv_entcount[] entries zeroed
	 * out.
	 */
	error = dmu_read(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 0,
	    MIN(brtvd->bv_size, bvphys->bvp_size) * sizeof (uint16_t),
	    brtvd->bv_entcount, DMU_READ_NO_PREFETCH);
	if (error != 0) {
		/* Release the bonus buffer hold on the error path too. */
		dmu_buf_rele(db, FTAG);
		return (error);
	}

	ASSERT(bvphys->bvp_mos_entries != 0);
	VERIFY0(dnode_hold(spa->spa_meta_objset, bvphys->bvp_mos_entries,
	    brtvd, &brtvd->bv_mos_entries_dnode));
	rw_enter(&brtvd->bv_mos_entries_lock, RW_WRITER);
	brtvd->bv_mos_entries = bvphys->bvp_mos_entries;
	rw_exit(&brtvd->bv_mos_entries_lock);
	brtvd->bv_need_byteswap =
	    (bvphys->bvp_byteorder != BRT_NATIVE_BYTEORDER);
	brtvd->bv_totalcount = bvphys->bvp_totalcount;
	brtvd->bv_usedspace = bvphys->bvp_usedspace;
	brtvd->bv_savedspace = bvphys->bvp_savedspace;

	dmu_buf_rele(db, FTAG);

	BRT_DEBUG("BRT VDEV %llu loaded: mos_brtvdev=%llu, mos_entries=%llu",
	    (u_longlong_t)brtvd->bv_vdevid,
	    (u_longlong_t)brtvd->bv_mos_brtvdev,
	    (u_longlong_t)brtvd->bv_mos_entries);
	return (0);
}

static void
brt_vdev_dealloc(brt_vdev_t *brtvd)
{
	ASSERT(RW_WRITE_HELD(&brtvd->bv_lock));
	ASSERT(brtvd->bv_initiated);
	ASSERT0(avl_numnodes(&brtvd->bv_tree));

	vmem_free(brtvd->bv_entcount, sizeof (uint16_t) * brtvd->bv_size);
	brtvd->bv_entcount = NULL;
	uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size);
	kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(nblocks));
	brtvd->bv_bitmap = NULL;

	brtvd->bv_size = 0;

	brtvd->bv_initiated = FALSE;
	BRT_DEBUG("BRT VDEV %llu deallocated.", (u_longlong_t)brtvd->bv_vdevid);
}

static void
brt_vdev_destroy(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx)
{
	char name[64];
	uint64_t count;

	ASSERT(brtvd->bv_initiated);
	ASSERT(brtvd->bv_mos_brtvdev != 0);
	ASSERT(brtvd->bv_mos_entries != 0);
	ASSERT0(brtvd->bv_totalcount);
	ASSERT0(brtvd->bv_usedspace);
	ASSERT0(brtvd->bv_savedspace);

	uint64_t mos_entries = brtvd->bv_mos_entries;
	rw_enter(&brtvd->bv_mos_entries_lock, RW_WRITER);
	brtvd->bv_mos_entries = 0;
	rw_exit(&brtvd->bv_mos_entries_lock);
	dnode_rele(brtvd->bv_mos_entries_dnode, brtvd);
	brtvd->bv_mos_entries_dnode = NULL;
	ASSERT0(zap_count(spa->spa_meta_objset, mos_entries, &count));
	ASSERT0(count);
	VERIFY0(zap_destroy(spa->spa_meta_objset, mos_entries, tx));
	BRT_DEBUG("MOS entries destroyed, object=%llu",
	    (u_longlong_t)mos_entries);

	VERIFY0(dmu_object_free(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
	    tx));
	BRT_DEBUG("MOS BRT VDEV destroyed, object=%llu",
	    (u_longlong_t)brtvd->bv_mos_brtvdev);
	brtvd->bv_mos_brtvdev = 0;

	snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX,
	    (u_longlong_t)brtvd->bv_vdevid);
	VERIFY0(zap_remove(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    name, tx));
	BRT_DEBUG("Pool directory object removed, object=%s", name);

	brtvd->bv_meta_dirty = FALSE;

	rw_enter(&brtvd->bv_lock, RW_WRITER);
	brt_vdev_dealloc(brtvd);
	rw_exit(&brtvd->bv_lock);

	spa_feature_decr(spa, SPA_FEATURE_BLOCK_CLONING, tx);
}

static void
brt_vdevs_expand(spa_t *spa, uint64_t nvdevs)
{
	brt_vdev_t **vdevs;

	ASSERT(RW_WRITE_HELD(&spa->spa_brt_lock));
	ASSERT3U(nvdevs, >=, spa->spa_brt_nvdevs);

	if (nvdevs == spa->spa_brt_nvdevs)
		return;

	vdevs = kmem_zalloc(sizeof (*spa->spa_brt_vdevs) * nvdevs, KM_SLEEP);
	if (spa->spa_brt_nvdevs > 0) {
		ASSERT(spa->spa_brt_vdevs != NULL);

		memcpy(vdevs, spa->spa_brt_vdevs,
		    sizeof (*spa->spa_brt_vdevs) * spa->spa_brt_nvdevs);
		kmem_free(spa->spa_brt_vdevs,
		    sizeof (*spa->spa_brt_vdevs) * spa->spa_brt_nvdevs);
	}
	spa->spa_brt_vdevs = vdevs;

	for (uint64_t vdevid = spa->spa_brt_nvdevs; vdevid < nvdevs;
	    vdevid++) {
		brt_vdev_t *brtvd = kmem_zalloc(sizeof (*brtvd), KM_SLEEP);
		rw_init(&brtvd->bv_lock, NULL, RW_DEFAULT, NULL);
		brtvd->bv_vdevid = vdevid;
		brtvd->bv_initiated = FALSE;
		rw_init(&brtvd->bv_mos_entries_lock, NULL, RW_DEFAULT, NULL);
		avl_create(&brtvd->bv_tree, brt_entry_compare,
		    sizeof (brt_entry_t), offsetof(brt_entry_t, bre_node));
		for (int i = 0; i < TXG_SIZE; i++) {
			avl_create(&brtvd->bv_pending_tree[i],
			    brt_entry_compare, sizeof (brt_entry_t),
			    offsetof(brt_entry_t, bre_node));
		}
		mutex_init(&brtvd->bv_pending_lock, NULL, MUTEX_DEFAULT, NULL);
		spa->spa_brt_vdevs[vdevid] = brtvd;
	}

	BRT_DEBUG("BRT VDEVs expanded from %llu to %llu.",
	    (u_longlong_t)spa->spa_brt_nvdevs, (u_longlong_t)nvdevs);
	spa->spa_brt_nvdevs = nvdevs;
}

static boolean_t
brt_vdev_lookup(spa_t *spa, brt_vdev_t *brtvd, uint64_t offset)
{
	uint64_t idx = offset / spa->spa_brt_rangesize;
	if (idx < brtvd->bv_size) {
		/* VDEV wasn't expanded. */
		return (brt_vdev_entcount_get(brtvd, idx) > 0);
	}
	return (FALSE);
}

static void
brt_vdev_addref(spa_t *spa, brt_vdev_t *brtvd, const brt_entry_t *bre,
    uint64_t dsize, uint64_t count)
{
	uint64_t idx;

	ASSERT(brtvd->bv_initiated);

	brtvd->bv_savedspace += dsize * count;
	brtvd->bv_meta_dirty = TRUE;

	if (bre->bre_count > 0)
		return;

	brtvd->bv_usedspace += dsize;

	idx = BRE_OFFSET(bre) / spa->spa_brt_rangesize;
	if (idx >= brtvd->bv_size) {
		/* VDEV has been expanded. */
		rw_enter(&brtvd->bv_lock, RW_WRITER);
		brt_vdev_realloc(spa, brtvd);
		rw_exit(&brtvd->bv_lock);
	}

	ASSERT3U(idx, <, brtvd->bv_size);

	brtvd->bv_totalcount++;
	brt_vdev_entcount_inc(brtvd, idx);
	brtvd->bv_entcount_dirty = TRUE;
	idx = idx / BRT_BLOCKSIZE / 8;
	BT_SET(brtvd->bv_bitmap, idx);
}

static void
brt_vdev_decref(spa_t *spa, brt_vdev_t *brtvd, const brt_entry_t *bre,
    uint64_t dsize)
{
	uint64_t idx;

	ASSERT(RW_WRITE_HELD(&brtvd->bv_lock));
	ASSERT(brtvd->bv_initiated);

	brtvd->bv_savedspace -= dsize;
	brtvd->bv_meta_dirty = TRUE;

	if (bre->bre_count > 0)
		return;

	brtvd->bv_usedspace -= dsize;

	idx = BRE_OFFSET(bre) / spa->spa_brt_rangesize;
	ASSERT3U(idx, <, brtvd->bv_size);

	ASSERT(brtvd->bv_totalcount > 0);
	brtvd->bv_totalcount--;
	brt_vdev_entcount_dec(brtvd, idx);
	brtvd->bv_entcount_dirty = TRUE;
	idx = idx / BRT_BLOCKSIZE / 8;
	BT_SET(brtvd->bv_bitmap, idx);
}

static void
brt_vdev_sync(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx)
{
	dmu_buf_t *db;
	brt_vdev_phys_t *bvphys;

	ASSERT(brtvd->bv_meta_dirty);
	ASSERT(brtvd->bv_mos_brtvdev != 0);
	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
	    FTAG, &db));

	if (brtvd->bv_entcount_dirty) {
		/*
		 * TODO: Walk brtvd->bv_bitmap and write only the dirty
		 * blocks.
		 */
		dmu_write(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 0,
		    brtvd->bv_size * sizeof (brtvd->bv_entcount[0]),
		    brtvd->bv_entcount, tx);
		uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size);
		memset(brtvd->bv_bitmap, 0, BT_SIZEOFMAP(nblocks));
		brtvd->bv_entcount_dirty = FALSE;
	}

	dmu_buf_will_dirty(db, tx);
	bvphys = db->db_data;
	bvphys->bvp_mos_entries = brtvd->bv_mos_entries;
	bvphys->bvp_size = brtvd->bv_size;
	if (brtvd->bv_need_byteswap) {
		bvphys->bvp_byteorder = BRT_NON_NATIVE_BYTEORDER;
	} else {
		bvphys->bvp_byteorder = BRT_NATIVE_BYTEORDER;
	}
	bvphys->bvp_totalcount = brtvd->bv_totalcount;
	bvphys->bvp_rangesize = spa->spa_brt_rangesize;
	bvphys->bvp_usedspace = brtvd->bv_usedspace;
	bvphys->bvp_savedspace = brtvd->bv_savedspace;
	dmu_buf_rele(db, FTAG);

	brtvd->bv_meta_dirty = FALSE;
}

static void
brt_vdevs_free(spa_t *spa)
{
	if (spa->spa_brt_vdevs == NULL)
		return;
	for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
		brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
		rw_enter(&brtvd->bv_lock, RW_WRITER);
		if (brtvd->bv_initiated)
			brt_vdev_dealloc(brtvd);
		rw_exit(&brtvd->bv_lock);
		rw_destroy(&brtvd->bv_lock);
		if (brtvd->bv_mos_entries != 0)
			dnode_rele(brtvd->bv_mos_entries_dnode, brtvd);
		rw_destroy(&brtvd->bv_mos_entries_lock);
		avl_destroy(&brtvd->bv_tree);
		for (int i = 0; i < TXG_SIZE; i++)
			avl_destroy(&brtvd->bv_pending_tree[i]);
		mutex_destroy(&brtvd->bv_pending_lock);
		kmem_free(brtvd, sizeof (*brtvd));
	}
	kmem_free(spa->spa_brt_vdevs, sizeof (*spa->spa_brt_vdevs) *
	    spa->spa_brt_nvdevs);
}

static void
brt_entry_fill(const blkptr_t *bp, brt_entry_t *bre, uint64_t *vdevidp)
{
	bre->bre_bp = *bp;
	bre->bre_count = 0;
	bre->bre_pcount = 0;

	*vdevidp = DVA_GET_VDEV(&bp->blk_dva[0]);
}

static int
brt_entry_lookup(brt_vdev_t *brtvd, brt_entry_t *bre)
{
	uint64_t off = BRE_OFFSET(bre);

	return (zap_lookup_uint64_by_dnode(brtvd->bv_mos_entries_dnode,
	    &off, BRT_KEY_WORDS, 1, sizeof (bre->bre_count),
	    &bre->bre_count));
}

/*
 * Return TRUE if we _can_ have a BRT entry for this bp. It might be a false
 * positive, but gives us a quick answer if we should look into the BRT,
 * which may require reads and thus will be more expensive.
 */
boolean_t
brt_maybe_exists(spa_t *spa, const blkptr_t *bp)
{
	if (spa->spa_brt_nvdevs == 0)
		return (B_FALSE);

	uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[0]);
	brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE);
	if (brtvd == NULL || !brtvd->bv_initiated)
		return (B_FALSE);

	/*
	 * We don't need locks here, since the bv_entcount pointer must be
	 * stable at this point, and we don't care about false positive
	 * races here, while a false negative should be impossible, since
	 * all brt_vdev_addref() calls have already completed by this point.
	 */
	uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[0]);
	return (brt_vdev_lookup(spa, brtvd, off));
}
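
/*
 * For illustration, a caller on the free path can use this as a cheap
 * filter (a sketch of the intent described in the design comment above,
 * not code from the ZIO pipeline):
 *
 *	if (!brt_maybe_exists(spa, bp))
 *		free the block immediately - certainly not cloned
 *	else
 *		go through zio_brt_free() / brt_entry_decref()
 */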
uint64_t
brt_get_dspace(spa_t *spa)
{
	if (spa->spa_brt_nvdevs == 0)
		return (0);

	brt_rlock(spa);
	uint64_t s = 0;
	for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++)
		s += spa->spa_brt_vdevs[vdevid]->bv_savedspace;
	brt_unlock(spa);
	return (s);
}

uint64_t
brt_get_used(spa_t *spa)
{
	if (spa->spa_brt_nvdevs == 0)
		return (0);

	brt_rlock(spa);
	uint64_t s = 0;
	for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++)
		s += spa->spa_brt_vdevs[vdevid]->bv_usedspace;
	brt_unlock(spa);
	return (s);
}

uint64_t
brt_get_saved(spa_t *spa)
{
	return (brt_get_dspace(spa));
}

uint64_t
brt_get_ratio(spa_t *spa)
{
	uint64_t used = brt_get_used(spa);
	if (used == 0)
		return (100);
	return ((used + brt_get_saved(spa)) * 100 / used);
}
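
/*
 * Worked example (added for illustration): with 1GB of unique cloned data
 * on disk (used) and 3GB of clone references pointing at it (saved), the
 * ratio is (1GB + 3GB) * 100 / 1GB = 400, i.e. a 4.00x cloning ratio.
 */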

static int
brt_kstats_update(kstat_t *ksp, int rw)
{
	brt_stats_t *bs = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	bs->brt_addref_entry_not_on_disk.value.ui64 =
	    wmsum_value(&brt_sums.brt_addref_entry_not_on_disk);
	bs->brt_addref_entry_on_disk.value.ui64 =
	    wmsum_value(&brt_sums.brt_addref_entry_on_disk);
	bs->brt_decref_entry_in_memory.value.ui64 =
	    wmsum_value(&brt_sums.brt_decref_entry_in_memory);
	bs->brt_decref_entry_loaded_from_disk.value.ui64 =
	    wmsum_value(&brt_sums.brt_decref_entry_loaded_from_disk);
	bs->brt_decref_entry_not_in_memory.value.ui64 =
	    wmsum_value(&brt_sums.brt_decref_entry_not_in_memory);
	bs->brt_decref_entry_read_lost_race.value.ui64 =
	    wmsum_value(&brt_sums.brt_decref_entry_read_lost_race);
	bs->brt_decref_entry_still_referenced.value.ui64 =
	    wmsum_value(&brt_sums.brt_decref_entry_still_referenced);
	bs->brt_decref_free_data_later.value.ui64 =
	    wmsum_value(&brt_sums.brt_decref_free_data_later);
	bs->brt_decref_free_data_now.value.ui64 =
	    wmsum_value(&brt_sums.brt_decref_free_data_now);
	bs->brt_decref_no_entry.value.ui64 =
	    wmsum_value(&brt_sums.brt_decref_no_entry);

	return (0);
}

static void
brt_stat_init(void)
{
	wmsum_init(&brt_sums.brt_addref_entry_not_on_disk, 0);
	wmsum_init(&brt_sums.brt_addref_entry_on_disk, 0);
	wmsum_init(&brt_sums.brt_decref_entry_in_memory, 0);
	wmsum_init(&brt_sums.brt_decref_entry_loaded_from_disk, 0);
	wmsum_init(&brt_sums.brt_decref_entry_not_in_memory, 0);
	wmsum_init(&brt_sums.brt_decref_entry_read_lost_race, 0);
	wmsum_init(&brt_sums.brt_decref_entry_still_referenced, 0);
	wmsum_init(&brt_sums.brt_decref_free_data_later, 0);
	wmsum_init(&brt_sums.brt_decref_free_data_now, 0);
	wmsum_init(&brt_sums.brt_decref_no_entry, 0);

	brt_ksp = kstat_create("zfs", 0, "brtstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (brt_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (brt_ksp != NULL) {
		brt_ksp->ks_data = &brt_stats;
		brt_ksp->ks_update = brt_kstats_update;
		kstat_install(brt_ksp);
	}
}

static void
brt_stat_fini(void)
{
	if (brt_ksp != NULL) {
		kstat_delete(brt_ksp);
		brt_ksp = NULL;
	}

	wmsum_fini(&brt_sums.brt_addref_entry_not_on_disk);
	wmsum_fini(&brt_sums.brt_addref_entry_on_disk);
	wmsum_fini(&brt_sums.brt_decref_entry_in_memory);
	wmsum_fini(&brt_sums.brt_decref_entry_loaded_from_disk);
	wmsum_fini(&brt_sums.brt_decref_entry_not_in_memory);
	wmsum_fini(&brt_sums.brt_decref_entry_read_lost_race);
	wmsum_fini(&brt_sums.brt_decref_entry_still_referenced);
	wmsum_fini(&brt_sums.brt_decref_free_data_later);
	wmsum_fini(&brt_sums.brt_decref_free_data_now);
	wmsum_fini(&brt_sums.brt_decref_no_entry);
}

void
brt_init(void)
{
	brt_entry_cache = kmem_cache_create("brt_entry_cache",
	    sizeof (brt_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	brt_stat_init();
}

void
brt_fini(void)
{
	brt_stat_fini();

	kmem_cache_destroy(brt_entry_cache);
}

/* Return TRUE if block should be freed immediately. */
boolean_t
brt_entry_decref(spa_t *spa, const blkptr_t *bp)
{
	brt_entry_t *bre, *racebre;
	brt_entry_t bre_search;
	avl_index_t where;
	uint64_t vdevid;
	int error;

	brt_entry_fill(bp, &bre_search, &vdevid);

	brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE);
	ASSERT(brtvd != NULL);

	rw_enter(&brtvd->bv_lock, RW_WRITER);
	ASSERT(brtvd->bv_initiated);
	bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
	if (bre != NULL) {
		BRTSTAT_BUMP(brt_decref_entry_in_memory);
		goto out;
	} else {
		BRTSTAT_BUMP(brt_decref_entry_not_in_memory);
	}
	rw_exit(&brtvd->bv_lock);

	error = brt_entry_lookup(brtvd, &bre_search);
	/* bre_search now contains the correct bre_count. */
	if (error == ENOENT) {
		BRTSTAT_BUMP(brt_decref_no_entry);
		return (B_TRUE);
	}
	ASSERT0(error);

	rw_enter(&brtvd->bv_lock, RW_WRITER);
	racebre = avl_find(&brtvd->bv_tree, &bre_search, &where);
	if (racebre != NULL) {
		/* The entry was added when the lock was dropped. */
		BRTSTAT_BUMP(brt_decref_entry_read_lost_race);
		bre = racebre;
		goto out;
	}

	BRTSTAT_BUMP(brt_decref_entry_loaded_from_disk);
	bre = kmem_cache_alloc(brt_entry_cache, KM_SLEEP);
	bre->bre_bp = bre_search.bre_bp;
	bre->bre_count = bre_search.bre_count;
	bre->bre_pcount = 0;
	avl_insert(&brtvd->bv_tree, bre, where);

out:
	if (bre->bre_count == 0) {
		rw_exit(&brtvd->bv_lock);
		BRTSTAT_BUMP(brt_decref_free_data_now);
		return (B_TRUE);
	}

	bre->bre_pcount--;
	ASSERT(bre->bre_count > 0);
	bre->bre_count--;
	if (bre->bre_count == 0)
		BRTSTAT_BUMP(brt_decref_free_data_later);
	else
		BRTSTAT_BUMP(brt_decref_entry_still_referenced);
	brt_vdev_decref(spa, brtvd, bre, bp_get_dsize_sync(spa, bp));

	rw_exit(&brtvd->bv_lock);

	return (B_FALSE);
}

uint64_t
brt_entry_get_refcount(spa_t *spa, const blkptr_t *bp)
{
	brt_entry_t bre_search, *bre;
	uint64_t vdevid, refcnt;
	int error;

	brt_entry_fill(bp, &bre_search, &vdevid);

	brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE);
	ASSERT(brtvd != NULL);

	rw_enter(&brtvd->bv_lock, RW_READER);
	ASSERT(brtvd->bv_initiated);
	bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
	if (bre == NULL) {
		rw_exit(&brtvd->bv_lock);
		error = brt_entry_lookup(brtvd, &bre_search);
		if (error == ENOENT) {
			refcnt = 0;
		} else {
			ASSERT0(error);
			refcnt = bre_search.bre_count;
		}
	} else {
		refcnt = bre->bre_count;
		rw_exit(&brtvd->bv_lock);
	}

	return (refcnt);
}

static void
brt_prefetch(brt_vdev_t *brtvd, const blkptr_t *bp)
{
	if (!brt_zap_prefetch || brtvd->bv_mos_entries == 0)
		return;

	uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[0]);
	rw_enter(&brtvd->bv_mos_entries_lock, RW_READER);
	if (brtvd->bv_mos_entries != 0) {
		(void) zap_prefetch_uint64_by_dnode(brtvd->bv_mos_entries_dnode,
		    &off, BRT_KEY_WORDS);
	}
	rw_exit(&brtvd->bv_mos_entries_lock);
}

static int
brt_entry_compare(const void *x1, const void *x2)
{
	const brt_entry_t *bre1 = x1, *bre2 = x2;
	const blkptr_t *bp1 = &bre1->bre_bp, *bp2 = &bre2->bre_bp;

	return (TREE_CMP(DVA_GET_OFFSET(&bp1->blk_dva[0]),
	    DVA_GET_OFFSET(&bp2->blk_dva[0])));
}

void
brt_pending_add(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx)
{
	brt_entry_t *bre, *newbre;
	avl_index_t where;
	uint64_t txg;

	txg = dmu_tx_get_txg(tx);
	ASSERT3U(txg, !=, 0);

	uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[0]);
	brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_TRUE);
	avl_tree_t *pending_tree = &brtvd->bv_pending_tree[txg & TXG_MASK];

	newbre = kmem_cache_alloc(brt_entry_cache, KM_SLEEP);
	newbre->bre_bp = *bp;
	newbre->bre_count = 0;
	newbre->bre_pcount = 1;

	mutex_enter(&brtvd->bv_pending_lock);
	bre = avl_find(pending_tree, newbre, &where);
	if (bre == NULL) {
		avl_insert(pending_tree, newbre, where);
		newbre = NULL;
	} else {
		bre->bre_pcount++;
	}
	mutex_exit(&brtvd->bv_pending_lock);

	if (newbre != NULL) {
		ASSERT(bre != NULL);
		ASSERT(bre != newbre);
		kmem_cache_free(brt_entry_cache, newbre);
	} else {
		ASSERT0P(bre);

		/* Prefetch BRT entry for the syncing context. */
		brt_prefetch(brtvd, bp);
	}
}
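
/*
 * Drop a pending reference, e.g. when a block that was cloned earlier in
 * this transaction group is freed again before it syncs (see the
 * dbuf_undirty() case in the design comment above).
 */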
void
brt_pending_remove(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx)
{
	brt_entry_t *bre, bre_search;
	uint64_t txg;

	txg = dmu_tx_get_txg(tx);
	ASSERT3U(txg, !=, 0);

	uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[0]);
	brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE);
	ASSERT(brtvd != NULL);
	avl_tree_t *pending_tree = &brtvd->bv_pending_tree[txg & TXG_MASK];

	bre_search.bre_bp = *bp;

	mutex_enter(&brtvd->bv_pending_lock);
	bre = avl_find(pending_tree, &bre_search, NULL);
	ASSERT(bre != NULL);
	ASSERT(bre->bre_pcount > 0);
	bre->bre_pcount--;
	if (bre->bre_pcount == 0)
		avl_remove(pending_tree, bre);
	else
		bre = NULL;
	mutex_exit(&brtvd->bv_pending_lock);

	if (bre != NULL)
		kmem_cache_free(brt_entry_cache, bre);
}

static void
brt_pending_apply_vdev(spa_t *spa, brt_vdev_t *brtvd, uint64_t txg)
{
	brt_entry_t *bre, *nbre;

	/*
	 * We are in syncing context, so no other bv_pending_tree accesses
	 * are possible for this TXG. So we don't need bv_pending_lock.
	 */
	ASSERT(avl_is_empty(&brtvd->bv_tree));
	avl_swap(&brtvd->bv_tree, &brtvd->bv_pending_tree[txg & TXG_MASK]);

	for (bre = avl_first(&brtvd->bv_tree); bre != NULL; bre = nbre) {
		nbre = AVL_NEXT(&brtvd->bv_tree, bre);

		/*
		 * If the block has the DEDUP bit set, it means that it
		 * already exists in the DEDUP table, so we can just use
		 * that instead of creating a new entry in the BRT.
		 */
		if (BP_GET_DEDUP(&bre->bre_bp)) {
			while (bre->bre_pcount > 0) {
				if (!ddt_addref(spa, &bre->bre_bp))
					break;
				bre->bre_pcount--;
			}
			if (bre->bre_pcount == 0) {
				avl_remove(&brtvd->bv_tree, bre);
				kmem_cache_free(brt_entry_cache, bre);
				continue;
			}
		}

		/*
		 * Unless we know that the block is definitely not in the
		 * ZAP, try to get its reference count from there.
		 */
		uint64_t off = BRE_OFFSET(bre);
		if (brtvd->bv_mos_entries != 0 &&
		    brt_vdev_lookup(spa, brtvd, off)) {
			int error = zap_lookup_uint64_by_dnode(
			    brtvd->bv_mos_entries_dnode, &off,
			    BRT_KEY_WORDS, 1, sizeof (bre->bre_count),
			    &bre->bre_count);
			if (error == 0) {
				BRTSTAT_BUMP(brt_addref_entry_on_disk);
			} else {
				ASSERT3U(error, ==, ENOENT);
				BRTSTAT_BUMP(brt_addref_entry_not_on_disk);
			}
		}
	}

	/*
	 * If all the cloned blocks we had were handled by the DDT, we don't
	 * need to initiate the vdev.
	 */
	if (avl_is_empty(&brtvd->bv_tree))
		return;

	if (!brtvd->bv_initiated) {
		rw_enter(&brtvd->bv_lock, RW_WRITER);
		brt_vdev_realloc(spa, brtvd);
		rw_exit(&brtvd->bv_lock);
	}

	/*
	 * Convert pending references into proper ones. This has to be a
	 * separate loop, since entcount modifications would cause false
	 * positives for brt_vdev_lookup() on following iterations.
	 */
	for (bre = avl_first(&brtvd->bv_tree); bre != NULL;
	    bre = AVL_NEXT(&brtvd->bv_tree, bre)) {
		brt_vdev_addref(spa, brtvd, bre,
		    bp_get_dsize(spa, &bre->bre_bp), bre->bre_pcount);
		bre->bre_count += bre->bre_pcount;
	}
}

void
brt_pending_apply(spa_t *spa, uint64_t txg)
{
	brt_rlock(spa);
	for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
		brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
		brt_unlock(spa);

		brt_pending_apply_vdev(spa, brtvd, txg);

		brt_rlock(spa);
	}
	brt_unlock(spa);
}

static void
brt_sync_entry(dnode_t *dn, brt_entry_t *bre, dmu_tx_t *tx)
{
	uint64_t off = BRE_OFFSET(bre);

	if (bre->bre_pcount == 0) {
		/* The net change is zero, nothing to do in the ZAP. */
	} else if (bre->bre_count == 0) {
		int error = zap_remove_uint64_by_dnode(dn, &off,
		    BRT_KEY_WORDS, tx);
		VERIFY(error == 0 || error == ENOENT);
	} else {
		VERIFY0(zap_update_uint64_by_dnode(dn, &off,
		    BRT_KEY_WORDS, 1, sizeof (bre->bre_count),
		    &bre->bre_count, tx));
	}
}

static void
brt_sync_table(spa_t *spa, dmu_tx_t *tx)
{
	brt_entry_t *bre;

	brt_rlock(spa);
	for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
		brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
		brt_unlock(spa);

		if (!brtvd->bv_meta_dirty) {
			ASSERT(!brtvd->bv_entcount_dirty);
			ASSERT0(avl_numnodes(&brtvd->bv_tree));
			brt_rlock(spa);
			continue;
		}

		ASSERT(!brtvd->bv_entcount_dirty ||
		    avl_numnodes(&brtvd->bv_tree) != 0);

		if (brtvd->bv_mos_brtvdev == 0)
			brt_vdev_create(spa, brtvd, tx);

		void *c = NULL;
		while ((bre = avl_destroy_nodes(&brtvd->bv_tree, &c)) !=
		    NULL) {
			brt_sync_entry(brtvd->bv_mos_entries_dnode, bre, tx);
			kmem_cache_free(brt_entry_cache, bre);
		}

#ifdef ZFS_DEBUG
		if (zfs_flags & ZFS_DEBUG_BRT)
			brt_vdev_dump(brtvd);
#endif
		if (brtvd->bv_totalcount == 0)
			brt_vdev_destroy(spa, brtvd, tx);
		else
			brt_vdev_sync(spa, brtvd, tx);
		brt_rlock(spa);
	}
	brt_unlock(spa);
}

void
brt_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	uint64_t vdevid;

	ASSERT3U(spa_syncing_txg(spa), ==, txg);

	brt_rlock(spa);
	for (vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
		if (spa->spa_brt_vdevs[vdevid]->bv_meta_dirty)
			break;
	}
	if (vdevid >= spa->spa_brt_nvdevs) {
		brt_unlock(spa);
		return;
	}
	brt_unlock(spa);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
	brt_sync_table(spa, tx);
	dmu_tx_commit(tx);
}

static void
brt_alloc(spa_t *spa)
{
	rw_init(&spa->spa_brt_lock, NULL, RW_DEFAULT, NULL);
	spa->spa_brt_vdevs = NULL;
	spa->spa_brt_nvdevs = 0;
	spa->spa_brt_rangesize = 0;
}

void
brt_create(spa_t *spa)
{
	brt_alloc(spa);
	spa->spa_brt_rangesize = BRT_RANGESIZE;
}

int
brt_load(spa_t *spa)
{
	int error = 0;

	brt_alloc(spa);
	brt_wlock(spa);
	for (uint64_t vdevid = 0; vdevid < spa->spa_root_vdev->vdev_children;
	    vdevid++) {
		char name[64];
		uint64_t mos_brtvdev;

		/* Look if this vdev had active block cloning. */
		snprintf(name, sizeof (name), "%s%llu",
		    BRT_OBJECT_VDEV_PREFIX, (u_longlong_t)vdevid);
		error = zap_lookup(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, name, sizeof (uint64_t), 1,
		    &mos_brtvdev);
		if (error == ENOENT) {
			error = 0;
			continue;
		}
		if (error != 0)
			break;

		/* If it did, then allocate them all and load this one. */
		brt_vdevs_expand(spa, spa->spa_root_vdev->vdev_children);
		brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
		rw_enter(&brtvd->bv_lock, RW_WRITER);
		brtvd->bv_mos_brtvdev = mos_brtvdev;
		error = brt_vdev_load(spa, brtvd);
		rw_exit(&brtvd->bv_lock);
		if (error != 0)
			break;
	}

	if (spa->spa_brt_rangesize == 0)
		spa->spa_brt_rangesize = BRT_RANGESIZE;
	brt_unlock(spa);
	return (error);
}

void
brt_unload(spa_t *spa)
{
	if (spa->spa_brt_rangesize == 0)
		return;
	brt_vdevs_free(spa);
	rw_destroy(&spa->spa_brt_lock);
	spa->spa_brt_rangesize = 0;
}

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_brt, , brt_zap_prefetch, INT, ZMOD_RW,
	"Enable prefetching of BRT ZAP entries");
ZFS_MODULE_PARAM(zfs_brt, , brt_zap_default_bs, UINT, ZMOD_RW,
	"BRT ZAP leaf blockshift");
ZFS_MODULE_PARAM(zfs_brt, , brt_zap_default_ibs, UINT, ZMOD_RW,
	"BRT ZAP indirect blockshift");
/* END CSTYLED */