/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/zfeature.h>
/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;
/*
 * Override the default indirect block size of 128K, instead use 16K for
 * spacemaps (2^14 bytes).  This dramatically reduces write inflation since
 * appending to a spacemap typically has to write one data block (4KB) and one
 * or two indirect blocks (16K-32K, rather than 128K).
 */
int space_map_ibs = 14;
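
/*
 * Helper predicates: classify a raw space map word by its prefix bits
 * (SM_PREFIX_DECODE) as a debug entry, a single-word entry, or the first
 * word of a two-word (SM2) entry.
 */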
boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}
/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry. Stop after reading 'end' bytes of the space map.
 */
int
space_map_iterate(space_map_t *sm, uint64_t end, sm_cb_t callback, void *arg)
{
	uint64_t blksz = sm->sm_blksz;

	ASSERT3U(blksz, !=, 0);
	ASSERT3U(end, <=, space_map_length(sm));
	ASSERT0(P2PHASE(end, sizeof (uint64_t)));

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, end,
	    ZIO_PRIORITY_SYNC_READ);

	int error = 0;
	uint64_t txg = 0, sync_pass = 0;
	for (uint64_t block_base = 0; block_base < end && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(end - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e)) {
				/*
				 * Debug entries are only needed to record the
				 * current TXG and sync pass if available.
				 *
				 * Note though that sometimes there can be
				 * debug entries that are used as padding
				 * at the end of space map blocks in-order
				 * to not split a double-word entry in the
				 * middle between two blocks. These entries
				 * have their TXG field set to 0 and we
				 * skip them without recording the TXG.
				 * [see comment in space_map_write_seg()]
				 */
				uint64_t e_txg = SM_DEBUG_TXG_DECODE(e);
				if (e_txg != 0) {
					txg = e_txg;
					sync_pass = SM_DEBUG_SYNCPASS_DECODE(e);
				} else {
					ASSERT0(SM_DEBUG_SYNCPASS_DECODE(e));
				}
				continue;
			}

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move on to the second word */
				block_cursor++;
				e = *block_cursor;
				VERIFY3P(block_cursor, <=, block_end);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run,
			    .sme_txg = txg,
			    .sme_sync_pass = sync_pass
			};
			error = callback(&sme, arg);
		}
		dmu_buf_rele(db, FTAG);
	}

	return (error);
}
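
/*
 * Illustrative sketch (not from the original source): a minimal
 * space_map_iterate() callback that counts FREE segments. The helper
 * name is hypothetical.
 *
 *	static int
 *	count_free_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		uint64_t *nfree = arg;
 *		if (sme->sme_type == SM_FREE)
 *			(*nfree)++;
 *		return (0);
 *	}
 *
 *	uint64_t nfree = 0;
 *	VERIFY0(space_map_iterate(sm, space_map_length(sm),
 *	    count_free_cb, &nfree));
 */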
/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to the block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
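/*
 * Illustrative example of the resulting layout: if the last block holds
 * the words [A, B1, B2, C], where A and C are one-word entries and
 * (B1, B2) is a two-word entry, the reversed buffer is [C, B1, B2, A];
 * entry order is reversed but the two words of a double-word entry keep
 * their on-disk order.
 */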
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_length - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}
/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us where to start between each step. We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy its entries incrementally starting from
	 * the end after applying the callback to each of them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, and so on.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry we
	 *    decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_length -= sizeof (uint64_t);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				e = buf[i];

				ASSERT3P(i, <=, nwords);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_length -= words * sizeof (uint64_t);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(space_map_allocated(sm));
	}

	zio_buf_free(buf, bufsz);
	return (error);
}
typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
	}

	return (0);
}
/*
 * Load the spacemap into the rangetree, like space_map_load. But only
 * read the first 'length' bytes of the spacemap.
 */
int
space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t length)
{
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));

	if (maptype == SM_FREE)
		range_tree_add(rt, sm->sm_start, sm->sm_size);

	smla.smla_sm = sm;
	smla.smla_rt = rt;
	smla.smla_type = maptype;
	int err = space_map_iterate(sm, length,
	    space_map_load_callback, &smla);

	if (err != 0)
		range_tree_vacate(rt, NULL, NULL);

	return (err);
}
/*
 * Load the space map disk into the specified range tree. Segments of maptype
 * are added to the range tree, other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	return (space_map_load_length(sm, rt, maptype, space_map_length(sm)));
}
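
/*
 * Illustrative sketch (not from the original source; assumes the
 * range_tree_create() signature from range_tree.h): load all FREE
 * segments of a space map into a fresh range tree.
 *
 *	range_tree_t *rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
 *	VERIFY0(space_map_load(sm, rt, SM_FREE));
 */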
void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	memset(sm->sm_phys->smp_histogram, 0,
	    sizeof (sm->sm_phys->smp_histogram));
}
boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}
void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to 5 * 2^4 (16).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_length,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_length += sizeof (dentry);
}
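
/*
 * The intro debug entry written above is the counterpart of the
 * debug-entry handling in space_map_iterate(): a nonzero TXG marks a
 * real intro entry, while a TXG of 0 marks block padding (see
 * space_map_write_seg() below).
 */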
/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, uint64_t rstart, uint64_t rend,
    maptype_t maptype, uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp,
    const void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * if this is a single word entry, ensure that no vdev was
	 * specified
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rend - rstart) >> sm->sm_shift;
	uint64_t start = (rstart - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rstart, >=, sm->sm_start);
	ASSERT3U(rstart, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rend - rstart, <=, sm->sm_size);
	ASSERT3U(rend, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_length;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_length += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}
		sm->sm_phys->smp_length += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
}
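
/*
 * Illustrative example of the padding in space_map_write_seg(): a 4K
 * block holds 512 words. If 511 are in use and the next entry needs two
 * words, word 512 is filled with an all-zero debug entry and the
 * two-word entry starts at the beginning of the next block, so it is
 * never split across blocks.
 */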
/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef ZFS_DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_length;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_length;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	zfs_btree_t *t = &rt->rt_root;
	zfs_btree_index_t where;
	for (range_seg_t *rs = zfs_btree_first(t, &where); rs != NULL;
	    rs = zfs_btree_next(t, &where, &where)) {
		uint64_t offset = (rs_get_start(rs, rt) - sm->sm_start) >>
		    sm->sm_shift;
		uint64_t length = (rs_get_end(rs, rt) -
		    rs_get_start(rs, rt)) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *     or the vdev_id is set (meaning not equal to
		 *     SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    random_in_range(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs_get_start(rs, rt), rs_get_end(rs,
		    rt), maptype, vdev_id, words, &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef ZFS_DEBUG
	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be equal or less
	 * than whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_length);
#endif
}
/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(sm->sm_os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = zfs_btree_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, zfs_btree_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}
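
/*
 * Typical usage (illustrative sketch; the actual call sites are in
 * metaslab.c, and the tree names here are hypothetical):
 *
 *	space_map_write(sm, alloctree, SM_ALLOC, SM_NO_VDEVID, tx);
 *	space_map_write(sm, freetree, SM_FREE, SM_NO_VDEVID, tx);
 */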
static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}
int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_alloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;
	sm->sm_blksz = 0;
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}
void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}
void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %px, reallocating "
		    "object[%llu]: old bonus %llu, old blocksz %u",
		    (u_longlong_t)dmu_tx_get_txg(tx), spa_name(spa), sm,
		    (u_longlong_t)sm->sm_object,
		    (u_longlong_t)doi.doi_bonus_size,
		    doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		memset(sm->sm_phys->smp_histogram, 0,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_length = 0;
	sm->sm_phys->smp_alloc = 0;
}
uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}
void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}
void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}
/*
 * Given a range tree, it makes a worst-case estimate of how much
 * space the tree's segments would take if they were written to
 * the given space map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * rt_histogram[i] bucket of a range tree represents the number
	 * of entries in [2^i, (2^(i+1))-1] of that range_tree. Given
	 * that, we want to divide the buckets into groups: Buckets that
	 * can be represented using a single-word entry, ones that can
	 * be represented with a double-word entry, and ones that can
	 * only be represented with multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets. The information below assumes
	 * two-word entries enabled, but it can easily be applied when
	 * the feature is not enabled]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent]. This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * buckets higher than that are represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry 2^(SM_RUN_BITS + shift) is
	 * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}
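
/*
 * Worked example of the bucket-group math above, following the comment
 * in space_map_estimate_optimal_size(): if the highest bucket
 * representable by a single two-word entry is X = SM2_RUN_BITS + shift - 1,
 * then a segment in bucket X+3 is charged 2^3 = 8 two-word entries,
 * i.e. 8 * 2 * sizeof (uint64_t) = 128 bytes.
 */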
uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

int64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_alloc : 0);
}

uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_length : 0);
}

uint64_t
space_map_nblocks(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	return (DIV_ROUND_UP(space_map_length(sm), sm->sm_blksz));
}