Stop ganging due to past vdev write errors
[zfs.git] / module / zfs / metaslab.c
blob cf853a42de07b73e90950b57e5d22d9cab67f721
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2017, Intel Corporation.
29 #include <sys/zfs_context.h>
30 #include <sys/dmu.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/space_map.h>
33 #include <sys/metaslab_impl.h>
34 #include <sys/vdev_impl.h>
35 #include <sys/vdev_draid.h>
36 #include <sys/zio.h>
37 #include <sys/spa_impl.h>
38 #include <sys/zfeature.h>
39 #include <sys/vdev_indirect_mapping.h>
40 #include <sys/zap.h>
41 #include <sys/btree.h>
43 #define WITH_DF_BLOCK_ALLOCATOR
45 #define GANG_ALLOCATION(flags) \
46 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
49 * Metaslab granularity, in bytes. This is roughly similar to what would be
50 * referred to as the "stripe size" in traditional RAID arrays. In normal
51 * operation, we will try to write this amount of data to each disk before
52 * moving on to the next top-level vdev.
54 static unsigned long metaslab_aliquot = 1024 * 1024;
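/*
 * Illustrative sketch (hypothetical helper, not part of this file's logic):
 * the aliquot is later scaled by the number of data disks when a group is
 * activated, mirroring the mg_aliquot computation in
 * metaslab_group_activate() below. "ndisks" and "nparity" are plain
 * parameters here, standing in for the vdev accessors.
 */
static inline unsigned long
example_group_aliquot(unsigned long aliquot, int ndisks, int nparity)
{
	int ndata = ndisks - nparity;

	return (aliquot * (ndata > 1 ? ndata : 1));
}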
57 * For testing, make some blocks above a certain size be gang blocks.
59 unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
62 * In pools where the log space map feature is not enabled we touch
63 * multiple metaslabs (and their respective space maps) with each
64 * transaction group. Thus, we benefit from having a small space map
65 * block size since it allows us to issue more I/O operations scattered
66 * around the disk. So a sane default for the space map block size
67 * is 8~16K.
69 int zfs_metaslab_sm_blksz_no_log = (1 << 14);
72 * When the log space map feature is enabled, we accumulate a lot of
73 * changes per metaslab that are flushed once in a while so we benefit
74 * from a bigger block size like 128K for the metaslab space maps.
76 int zfs_metaslab_sm_blksz_with_log = (1 << 17);
79 * The in-core space map representation is more compact than its on-disk form.
80 * The zfs_condense_pct determines how much more compact the in-core
81 * space map representation must be before we compact it on-disk.
82 * Values should be greater than or equal to 100.
84 uint_t zfs_condense_pct = 200;
87 * Condensing a metaslab is not guaranteed to actually reduce the amount of
88 * space used on disk. In particular, a space map uses data in increments of
89 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
90 * same number of blocks after condensing. Since the goal of condensing is to
91 * reduce the number of IOPs required to read the space map, we only want to
92 * condense when we can be sure we will reduce the number of blocks used by the
93 * space map. Unfortunately, we cannot precisely compute whether or not this is
94 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
95 * we apply the following heuristic: do not condense a spacemap unless the
96 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
97 * blocks.
99 static const int zfs_metaslab_condense_block_threshold = 4;
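/*
 * Illustrative sketch (hypothetical helper, one plausible reading of the two
 * gates described above; the real decision is made in
 * metaslab_should_condense() and considers additional state): condense only
 * when the on-disk object spans more than the block threshold worth of
 * space map records and is at least condense_pct percent of the estimated
 * optimal (condensed) size.
 */
static inline int
example_worth_condensing(uint64_t object_size, uint64_t optimal_size,
    uint64_t ashift, uint64_t sm_blksz, uint64_t condense_pct,
    uint64_t block_threshold)
{
	uint64_t record_size = (1ULL << ashift) > sm_blksz ?
	    (1ULL << ashift) : sm_blksz;

	if (object_size <= block_threshold * record_size)
		return (0);
	return (object_size >= optimal_size * condense_pct / 100);
}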
102 * The zfs_mg_noalloc_threshold defines which metaslab groups should
103 * be eligible for allocation. The value is defined as a percentage of
104 * free space. Metaslab groups that have more free space than
105 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
106 * a metaslab group's free space is less than or equal to the
107 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
108 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
109 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
110 * groups are allowed to accept allocations. Gang blocks are always
111 * eligible to allocate on any metaslab group. The default value of 0 means
112 * no metaslab group will be excluded based on this criterion.
114 static uint_t zfs_mg_noalloc_threshold = 0;
117 * Metaslab groups are considered eligible for allocations if their
118 * fragmentation metric (measured as a percentage) is less than or
119 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
120 * exceeds this threshold then it will be skipped unless all metaslab
121 * groups within the metaslab class have also crossed this threshold.
123 * This tunable was introduced to avoid edge cases where we continue
124 * allocating from very fragmented disks in our pool while other, less
125 * fragmented disks exist. On the other hand, if all disks in the
126 * pool are uniformly approaching the threshold, the threshold can
127 * become a speed bump in performance, where we keep switching the
128 * disks that we allocate from (e.g. we allocate some segments from
129 * disk A, pushing it past the threshold, while freeing segments from
130 * disk B brings its fragmentation back below the threshold).
132 * Empirically, we've seen that our vdev selection for allocations is
133 * good enough that fragmentation increases uniformly across all vdevs
134 * the majority of the time. Thus we set the threshold percentage high
135 * enough to avoid hitting the speed bump on pools that are being pushed
136 * to the edge.
138 static uint_t zfs_mg_fragmentation_threshold = 95;
141 * Allow metaslabs to keep their active state as long as their fragmentation
142 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
143 * active metaslab that exceeds this threshold will no longer keep its active
144 * status allowing better metaslabs to be selected.
146 static uint_t zfs_metaslab_fragmentation_threshold = 70;
149 * When set will load all metaslabs when pool is first opened.
151 int metaslab_debug_load = B_FALSE;
154 * When set will prevent metaslabs from being unloaded.
156 static int metaslab_debug_unload = B_FALSE;
159 * Minimum size which forces the dynamic allocator to change
160 * its allocation strategy. Once the space map cannot satisfy
161 * an allocation of this size, it switches to using a more
162 * aggressive strategy (i.e. search by size rather than offset).
164 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
167 * The minimum free space, in percent, which must be available
168 * in a space map to continue allocations in a first-fit fashion.
169 * Once the space map's free space drops below this level we dynamically
170 * switch to using best-fit allocations.
172 uint_t metaslab_df_free_pct = 4;
175 * Maximum distance to search forward from the last offset. Without this
176 * limit, fragmented pools can see >100,000 iterations and
177 * metaslab_block_picker() becomes the performance limiting factor on
178 * high-performance storage.
180 * With the default setting of 16MB, we typically see less than 500
181 * iterations, even with very fragmented, ashift=9 pools. The maximum number
182 * of iterations possible is:
183 * metaslab_df_max_search / (2 * (1<<ashift))
184 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
185 * 2048 (with ashift=12).
187 static uint_t metaslab_df_max_search = 16 * 1024 * 1024;
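/*
 * Worked example of the iteration bound quoted above (hypothetical helper):
 * presumably each free segment and the allocated gap that separates it from
 * the next are each at least 1 << ashift bytes, so the bound is
 * metaslab_df_max_search / (2 * (1 << ashift)).
 */
static inline uint64_t
example_df_max_iterations(uint64_t max_search, uint64_t ashift)
{
	/* 16MB, ashift=9  -> 16777216 / 1024 = 16384 iterations */
	/* 16MB, ashift=12 -> 16777216 / 8192 =  2048 iterations */
	return (max_search / (2 * (1ULL << ashift)));
}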
190 * Forces the metaslab_block_picker function to search for at least this many
191 * segments forwards until giving up on finding a segment that the allocation
192 * will fit into.
194 static const uint32_t metaslab_min_search_count = 100;
197 * If we are not searching forward (due to metaslab_df_max_search,
198 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
199 * controls what segment is used. If it is set, we will use the largest free
200 * segment. If it is not set, we will use a segment of exactly the requested
201 * size (or larger).
203 static int metaslab_df_use_largest_segment = B_FALSE;
206 * Percentage of all cpus that can be used by the metaslab taskq.
208 int metaslab_load_pct = 50;
211 * These tunables control how long a metaslab will remain loaded after the
212 * last allocation from it. A metaslab can't be unloaded until at least
213 * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
214 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
215 * unloaded sooner. These settings are intended to be generous -- to keep
216 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
218 static uint_t metaslab_unload_delay = 32;
219 static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
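/*
 * Illustrative sketch (hypothetical helper) of how the two delays combine;
 * the actual check lives in metaslab_class_evict_old() below. A metaslab is
 * only an eviction candidate once both the TXG delay and the wall-clock
 * delay have expired.
 */
static inline int
example_unload_delay_expired(uint64_t txg, uint64_t selected_txg,
    uint64_t now_ns, uint64_t selected_time_ns)
{
	return (txg > selected_txg + metaslab_unload_delay &&
	    now_ns > selected_time_ns +
	    (uint64_t)metaslab_unload_delay_ms * 1000000ULL);
}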
222 * Max number of metaslabs per group to preload.
224 uint_t metaslab_preload_limit = 10;
227 * Enable/disable preloading of metaslabs.
229 static int metaslab_preload_enabled = B_TRUE;
232 * Enable/disable fragmentation weighting on metaslabs.
234 static int metaslab_fragmentation_factor_enabled = B_TRUE;
237 * Enable/disable lba weighting (i.e. outer tracks are given preference).
239 static int metaslab_lba_weighting_enabled = B_TRUE;
242 * Enable/disable metaslab group biasing.
244 static int metaslab_bias_enabled = B_TRUE;
247 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
249 static const boolean_t zfs_remap_blkptr_enable = B_TRUE;
252 * Enable/disable segment-based metaslab selection.
254 static int zfs_metaslab_segment_weight_enabled = B_TRUE;
257 * When using segment-based metaslab selection, we will continue
258 * allocating from the active metaslab until we have exhausted
259 * zfs_metaslab_switch_threshold of its buckets.
261 static int zfs_metaslab_switch_threshold = 2;
264 * Internal switch to enable/disable the metaslab allocation tracing
265 * facility.
267 static const boolean_t metaslab_trace_enabled = B_FALSE;
270 * Maximum entries that the metaslab allocation tracing facility will keep
271 * in a given list when running in non-debug mode. We limit the number
272 * of entries in non-debug mode to prevent us from using up too much memory.
273 * The limit should be sufficiently large that we don't expect any allocation
274 * to ever exceed this value. In debug mode, the system will panic if this
275 * limit is ever reached, allowing for further investigation.
277 static const uint64_t metaslab_trace_max_entries = 5000;
280 * Maximum number of metaslabs per group that can be disabled
281 * simultaneously.
283 static const int max_disabled_ms = 3;
286 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
287 * To avoid 64-bit overflow, don't set above UINT32_MAX.
289 static unsigned long zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
292 * Maximum percentage of memory to use on storing loaded metaslabs. If loading
293 * a metaslab would take it over this percentage, the oldest selected metaslab
294 * is automatically unloaded.
296 static uint_t zfs_metaslab_mem_limit = 25;
299 * Force the per-metaslab range trees to use 64-bit integers to store
300 * segments. Used for debugging purposes.
302 static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;
305 * By default we only store segments over a certain size in the size-sorted
306 * metaslab trees (ms_allocatable_by_size and
307 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
308 * improves load and unload times at the cost of causing us to use slightly
309 * larger segments than we would otherwise in some cases.
311 static const uint32_t metaslab_by_size_min_shift = 14;
314 * If not set, we will first try normal allocation. If that fails then
315 * we will do a gang allocation. If that fails then we will do a "try hard"
316 * gang allocation. If that fails then we will have a multi-layer gang
317 * block.
319 * If set, we will first try normal allocation. If that fails then
320 * we will do a "try hard" allocation. If that fails we will do a gang
321 * allocation. If that fails we will do a "try hard" gang allocation. If
322 * that fails then we will have a multi-layer gang block.
324 static int zfs_metaslab_try_hard_before_gang = B_FALSE;
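/*
 * Illustrative summary of the two fallback orders described above
 * (hypothetical tables, not used by the allocator itself).
 */
typedef enum {
	EXAMPLE_STEP_NORMAL,
	EXAMPLE_STEP_TRY_HARD,
	EXAMPLE_STEP_GANG,
	EXAMPLE_STEP_TRY_HARD_GANG,
	EXAMPLE_STEP_MULTILAYER_GANG
} example_alloc_step_t;

static const example_alloc_step_t example_order_gang_first[] = {
	EXAMPLE_STEP_NORMAL, EXAMPLE_STEP_GANG,
	EXAMPLE_STEP_TRY_HARD_GANG, EXAMPLE_STEP_MULTILAYER_GANG
};

static const example_alloc_step_t example_order_try_hard_first[] = {
	EXAMPLE_STEP_NORMAL, EXAMPLE_STEP_TRY_HARD, EXAMPLE_STEP_GANG,
	EXAMPLE_STEP_TRY_HARD_GANG, EXAMPLE_STEP_MULTILAYER_GANG
};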
327 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
328 * metaslabs. This improves performance, especially when there are many
329 * metaslabs per vdev and the allocation can't actually be satisfied (so we
330 * would otherwise iterate all the metaslabs). If there is a metaslab with a
331 * worse weight but it can actually satisfy the allocation, we won't find it
332 * until trying hard. This may happen if the worse metaslab is not loaded
333 * (and the true weight is better than we have calculated), or due to weight
334 * bucketization. E.g. we are looking for a 60K segment, and the best
335 * metaslabs all have free segments in the 32-63K bucket, but the best
336 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
337 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
338 * bucket, and therefore a lower weight).
340 static uint_t zfs_metaslab_find_max_tries = 100;
342 static uint64_t metaslab_weight(metaslab_t *, boolean_t);
343 static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
344 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
345 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
347 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
348 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
349 static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
350 static unsigned int metaslab_idx_func(multilist_t *, void *);
351 static void metaslab_evict(metaslab_t *, uint64_t);
352 static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
353 kmem_cache_t *metaslab_alloc_trace_cache;
355 typedef struct metaslab_stats {
356 kstat_named_t metaslabstat_trace_over_limit;
357 kstat_named_t metaslabstat_reload_tree;
358 kstat_named_t metaslabstat_too_many_tries;
359 kstat_named_t metaslabstat_try_hard;
360 } metaslab_stats_t;
362 static metaslab_stats_t metaslab_stats = {
363 { "trace_over_limit", KSTAT_DATA_UINT64 },
364 { "reload_tree", KSTAT_DATA_UINT64 },
365 { "too_many_tries", KSTAT_DATA_UINT64 },
366 { "try_hard", KSTAT_DATA_UINT64 },
369 #define METASLABSTAT_BUMP(stat) \
370 atomic_inc_64(&metaslab_stats.stat.value.ui64);
373 static kstat_t *metaslab_ksp;
375 void
376 metaslab_stat_init(void)
378 ASSERT(metaslab_alloc_trace_cache == NULL);
379 metaslab_alloc_trace_cache = kmem_cache_create(
380 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
381 0, NULL, NULL, NULL, NULL, NULL, 0);
382 metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
383 "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
384 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
385 if (metaslab_ksp != NULL) {
386 metaslab_ksp->ks_data = &metaslab_stats;
387 kstat_install(metaslab_ksp);
391 void
392 metaslab_stat_fini(void)
394 if (metaslab_ksp != NULL) {
395 kstat_delete(metaslab_ksp);
396 metaslab_ksp = NULL;
399 kmem_cache_destroy(metaslab_alloc_trace_cache);
400 metaslab_alloc_trace_cache = NULL;
404 * ==========================================================================
405 * Metaslab classes
406 * ==========================================================================
408 metaslab_class_t *
409 metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
411 metaslab_class_t *mc;
413 mc = kmem_zalloc(offsetof(metaslab_class_t,
414 mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
416 mc->mc_spa = spa;
417 mc->mc_ops = ops;
418 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
419 multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
420 offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
421 for (int i = 0; i < spa->spa_alloc_count; i++) {
422 metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
423 mca->mca_rotor = NULL;
424 zfs_refcount_create_tracked(&mca->mca_alloc_slots);
427 return (mc);
430 void
431 metaslab_class_destroy(metaslab_class_t *mc)
433 spa_t *spa = mc->mc_spa;
435 ASSERT(mc->mc_alloc == 0);
436 ASSERT(mc->mc_deferred == 0);
437 ASSERT(mc->mc_space == 0);
438 ASSERT(mc->mc_dspace == 0);
440 for (int i = 0; i < spa->spa_alloc_count; i++) {
441 metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
442 ASSERT(mca->mca_rotor == NULL);
443 zfs_refcount_destroy(&mca->mca_alloc_slots);
445 mutex_destroy(&mc->mc_lock);
446 multilist_destroy(&mc->mc_metaslab_txg_list);
447 kmem_free(mc, offsetof(metaslab_class_t,
448 mc_allocator[spa->spa_alloc_count]));
452 metaslab_class_validate(metaslab_class_t *mc)
454 metaslab_group_t *mg;
455 vdev_t *vd;
458 * Must hold one of the spa_config locks.
460 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
461 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
463 if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
464 return (0);
466 do {
467 vd = mg->mg_vd;
468 ASSERT(vd->vdev_mg != NULL);
469 ASSERT3P(vd->vdev_top, ==, vd);
470 ASSERT3P(mg->mg_class, ==, mc);
471 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
472 } while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
474 return (0);
477 static void
478 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
479 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
481 atomic_add_64(&mc->mc_alloc, alloc_delta);
482 atomic_add_64(&mc->mc_deferred, defer_delta);
483 atomic_add_64(&mc->mc_space, space_delta);
484 atomic_add_64(&mc->mc_dspace, dspace_delta);
487 uint64_t
488 metaslab_class_get_alloc(metaslab_class_t *mc)
490 return (mc->mc_alloc);
493 uint64_t
494 metaslab_class_get_deferred(metaslab_class_t *mc)
496 return (mc->mc_deferred);
499 uint64_t
500 metaslab_class_get_space(metaslab_class_t *mc)
502 return (mc->mc_space);
505 uint64_t
506 metaslab_class_get_dspace(metaslab_class_t *mc)
508 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
511 void
512 metaslab_class_histogram_verify(metaslab_class_t *mc)
514 spa_t *spa = mc->mc_spa;
515 vdev_t *rvd = spa->spa_root_vdev;
516 uint64_t *mc_hist;
517 int i;
519 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
520 return;
522 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
523 KM_SLEEP);
525 mutex_enter(&mc->mc_lock);
526 for (int c = 0; c < rvd->vdev_children; c++) {
527 vdev_t *tvd = rvd->vdev_child[c];
528 metaslab_group_t *mg = vdev_get_mg(tvd, mc);
531 * Skip any holes, uninitialized top-levels, or
532 * vdevs that are not in this metaslab class.
534 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
535 mg->mg_class != mc) {
536 continue;
539 IMPLY(mg == mg->mg_vd->vdev_log_mg,
540 mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
542 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
543 mc_hist[i] += mg->mg_histogram[i];
546 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
547 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
550 mutex_exit(&mc->mc_lock);
551 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
555 * Calculate the metaslab class's fragmentation metric. The metric
556 * is weighted based on the space contribution of each metaslab group.
557 * The return value will be a number between 0 and 100 (inclusive), or
558 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
559 * zfs_frag_table for more information about the metric.
561 uint64_t
562 metaslab_class_fragmentation(metaslab_class_t *mc)
564 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
565 uint64_t fragmentation = 0;
567 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
569 for (int c = 0; c < rvd->vdev_children; c++) {
570 vdev_t *tvd = rvd->vdev_child[c];
571 metaslab_group_t *mg = tvd->vdev_mg;
574 * Skip any holes, uninitialized top-levels,
575 * or vdevs that are not in this metaslab class.
577 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
578 mg->mg_class != mc) {
579 continue;
583 * If a metaslab group does not contain a fragmentation
584 * metric then just bail out.
586 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
587 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
588 return (ZFS_FRAG_INVALID);
592 * Determine how much this metaslab_group is contributing
593 * to the overall pool fragmentation metric.
595 fragmentation += mg->mg_fragmentation *
596 metaslab_group_get_space(mg);
598 fragmentation /= metaslab_class_get_space(mc);
600 ASSERT3U(fragmentation, <=, 100);
601 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
602 return (fragmentation);
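/*
 * Worked example of the space-weighted average computed above (hypothetical
 * helper): a 1T group at 10% fragmentation and a 3T group at 50% yield
 * (10 * 1 + 50 * 3) / 4 = 40%.
 */
static inline uint64_t
example_weighted_fragmentation(const uint64_t *frag, const uint64_t *space,
    int ngroups)
{
	uint64_t weighted = 0, total = 0;

	for (int i = 0; i < ngroups; i++) {
		weighted += frag[i] * space[i];
		total += space[i];
	}
	return (total == 0 ? 0 : weighted / total);
}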
606 * Calculate the amount of expandable space that is available in
607 * this metaslab class. If a device is expanded then its expandable
608 * space will be the amount of allocatable space that is currently not
609 * part of this metaslab class.
611 uint64_t
612 metaslab_class_expandable_space(metaslab_class_t *mc)
614 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
615 uint64_t space = 0;
617 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
618 for (int c = 0; c < rvd->vdev_children; c++) {
619 vdev_t *tvd = rvd->vdev_child[c];
620 metaslab_group_t *mg = tvd->vdev_mg;
622 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
623 mg->mg_class != mc) {
624 continue;
628 * Calculate if we have enough space to add additional
629 * metaslabs. We report the expandable space in terms
630 * of the metaslab size since that's the unit of expansion.
632 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
633 1ULL << tvd->vdev_ms_shift);
635 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
636 return (space);
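/*
 * Illustrative sketch (hypothetical helper) of the per-vdev term summed
 * above: expandable bytes are rounded down to whole metaslabs, which is
 * what P2ALIGN(x, 1ULL << ms_shift) accomplishes.
 */
static inline uint64_t
example_expandable_bytes(uint64_t max_asize, uint64_t asize,
    uint64_t ms_shift)
{
	uint64_t ms_size = 1ULL << ms_shift;

	return (((max_asize - asize) / ms_size) * ms_size);
}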
639 void
640 metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
642 multilist_t *ml = &mc->mc_metaslab_txg_list;
643 for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
644 multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
645 metaslab_t *msp = multilist_sublist_head(mls);
646 multilist_sublist_unlock(mls);
647 while (msp != NULL) {
648 mutex_enter(&msp->ms_lock);
651 * If the metaslab has been removed from the list
652 * (which could happen if we were at the memory limit
653 * and it was evicted during this loop), then we can't
654 * proceed and we should restart the sublist.
656 if (!multilist_link_active(&msp->ms_class_txg_node)) {
657 mutex_exit(&msp->ms_lock);
658 i--;
659 break;
661 mls = multilist_sublist_lock(ml, i);
662 metaslab_t *next_msp = multilist_sublist_next(mls, msp);
663 multilist_sublist_unlock(mls);
664 if (txg >
665 msp->ms_selected_txg + metaslab_unload_delay &&
666 gethrtime() > msp->ms_selected_time +
667 (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
668 metaslab_evict(msp, txg);
669 } else {
671 * Once we've hit a metaslab selected too
672 * recently to evict, we're done evicting for
673 * now.
675 mutex_exit(&msp->ms_lock);
676 break;
678 mutex_exit(&msp->ms_lock);
679 msp = next_msp;
684 static int
685 metaslab_compare(const void *x1, const void *x2)
687 const metaslab_t *m1 = (const metaslab_t *)x1;
688 const metaslab_t *m2 = (const metaslab_t *)x2;
690 int sort1 = 0;
691 int sort2 = 0;
692 if (m1->ms_allocator != -1 && m1->ms_primary)
693 sort1 = 1;
694 else if (m1->ms_allocator != -1 && !m1->ms_primary)
695 sort1 = 2;
696 if (m2->ms_allocator != -1 && m2->ms_primary)
697 sort2 = 1;
698 else if (m2->ms_allocator != -1 && !m2->ms_primary)
699 sort2 = 2;
702 * Sort inactive metaslabs first, then primaries, then secondaries. When
703 * selecting a metaslab to allocate from, an allocator first tries its
704 * primary, then secondary active metaslab. If it doesn't have active
705 * metaslabs, or can't allocate from them, it searches for an inactive
706 * metaslab to activate. If it can't find a suitable one, it will steal
707 * a primary or secondary metaslab from another allocator.
709 if (sort1 < sort2)
710 return (-1);
711 if (sort1 > sort2)
712 return (1);
714 int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
715 if (likely(cmp))
716 return (cmp);
718 IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
720 return (TREE_CMP(m1->ms_start, m2->ms_start));
724 * ==========================================================================
725 * Metaslab groups
726 * ==========================================================================
729 * Update the allocatable flag and the metaslab group's capacity.
730 * The allocatable flag is set to true if the group's free capacity
731 * is above the zfs_mg_noalloc_threshold and its fragmentation is not
732 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
733 * transitions from allocatable to non-allocatable or vice versa then the
734 * metaslab group's class is updated to reflect the transition.
736 static void
737 metaslab_group_alloc_update(metaslab_group_t *mg)
739 vdev_t *vd = mg->mg_vd;
740 metaslab_class_t *mc = mg->mg_class;
741 vdev_stat_t *vs = &vd->vdev_stat;
742 boolean_t was_allocatable;
743 boolean_t was_initialized;
745 ASSERT(vd == vd->vdev_top);
746 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
747 SCL_ALLOC);
749 mutex_enter(&mg->mg_lock);
750 was_allocatable = mg->mg_allocatable;
751 was_initialized = mg->mg_initialized;
753 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
754 (vs->vs_space + 1);
756 mutex_enter(&mc->mc_lock);
759 * If the metaslab group was just added then it won't
760 * have any space until we finish syncing out this txg.
761 * At that point we will consider it initialized and available
762 * for allocations. We also don't consider non-activated
763 * metaslab groups (e.g. vdevs that are in the middle of being removed)
764 * to be initialized, because they can't be used for allocation.
766 mg->mg_initialized = metaslab_group_initialized(mg);
767 if (!was_initialized && mg->mg_initialized) {
768 mc->mc_groups++;
769 } else if (was_initialized && !mg->mg_initialized) {
770 ASSERT3U(mc->mc_groups, >, 0);
771 mc->mc_groups--;
773 if (mg->mg_initialized)
774 mg->mg_no_free_space = B_FALSE;
777 * A metaslab group is considered allocatable if it has plenty
778 * of free space or is not heavily fragmented. We only take
779 * fragmentation into account if the metaslab group has a valid
780 * fragmentation metric (i.e. a value between 0 and 100).
782 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
783 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
784 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
785 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
788 * The mc_alloc_groups maintains a count of the number of
789 * groups in this metaslab class that are still above the
790 * zfs_mg_noalloc_threshold. This is used by the allocating
791 * threads to determine if they should avoid allocations to
792 * a given group. The allocator will avoid allocations to a group
793 * if that group has reached or is below the zfs_mg_noalloc_threshold
794 * and there are still other groups that are above the threshold.
795 * When a group transitions from allocatable to non-allocatable or
796 * vice versa we update the metaslab class to reflect that change.
797 * When the mc_alloc_groups value drops to 0 that means that all
798 * groups have reached the zfs_mg_noalloc_threshold making all groups
799 * eligible for allocations. This effectively means that all devices
800 * are balanced again.
802 if (was_allocatable && !mg->mg_allocatable)
803 mc->mc_alloc_groups--;
804 else if (!was_allocatable && mg->mg_allocatable)
805 mc->mc_alloc_groups++;
806 mutex_exit(&mc->mc_lock);
808 mutex_exit(&mg->mg_lock);
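/*
 * Illustrative sketch (hypothetical helper) of the mg_free_capacity
 * computation above: a group with 95% of its space allocated reports a
 * free capacity of roughly 5%, which would disqualify it under a nonzero
 * zfs_mg_noalloc_threshold such as 10.
 */
static inline uint64_t
example_free_capacity_pct(uint64_t space, uint64_t alloc)
{
	return (((space - alloc) * 100) / (space + 1));
}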
812 metaslab_sort_by_flushed(const void *va, const void *vb)
814 const metaslab_t *a = va;
815 const metaslab_t *b = vb;
817 int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
818 if (likely(cmp))
819 return (cmp);
821 uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
822 uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
823 cmp = TREE_CMP(a_vdev_id, b_vdev_id);
824 if (cmp)
825 return (cmp);
827 return (TREE_CMP(a->ms_id, b->ms_id));
830 metaslab_group_t *
831 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
833 metaslab_group_t *mg;
835 mg = kmem_zalloc(offsetof(metaslab_group_t,
836 mg_allocator[allocators]), KM_SLEEP);
837 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
838 mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
839 cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
840 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
841 sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
842 mg->mg_vd = vd;
843 mg->mg_class = mc;
844 mg->mg_activation_count = 0;
845 mg->mg_initialized = B_FALSE;
846 mg->mg_no_free_space = B_TRUE;
847 mg->mg_allocators = allocators;
849 for (int i = 0; i < allocators; i++) {
850 metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
851 zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
854 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
855 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
857 return (mg);
860 void
861 metaslab_group_destroy(metaslab_group_t *mg)
863 ASSERT(mg->mg_prev == NULL);
864 ASSERT(mg->mg_next == NULL);
866 * We may have gone below zero with the activation count
867 * either because we never activated in the first place or
868 * because we're done, and possibly removing the vdev.
870 ASSERT(mg->mg_activation_count <= 0);
872 taskq_destroy(mg->mg_taskq);
873 avl_destroy(&mg->mg_metaslab_tree);
874 mutex_destroy(&mg->mg_lock);
875 mutex_destroy(&mg->mg_ms_disabled_lock);
876 cv_destroy(&mg->mg_ms_disabled_cv);
878 for (int i = 0; i < mg->mg_allocators; i++) {
879 metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
880 zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
882 kmem_free(mg, offsetof(metaslab_group_t,
883 mg_allocator[mg->mg_allocators]));
886 void
887 metaslab_group_activate(metaslab_group_t *mg)
889 metaslab_class_t *mc = mg->mg_class;
890 spa_t *spa = mc->mc_spa;
891 metaslab_group_t *mgprev, *mgnext;
893 ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
895 ASSERT(mg->mg_prev == NULL);
896 ASSERT(mg->mg_next == NULL);
897 ASSERT(mg->mg_activation_count <= 0);
899 if (++mg->mg_activation_count <= 0)
900 return;
902 mg->mg_aliquot = metaslab_aliquot * MAX(1,
903 vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
904 metaslab_group_alloc_update(mg);
906 if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
907 mg->mg_prev = mg;
908 mg->mg_next = mg;
909 } else {
910 mgnext = mgprev->mg_next;
911 mg->mg_prev = mgprev;
912 mg->mg_next = mgnext;
913 mgprev->mg_next = mg;
914 mgnext->mg_prev = mg;
916 for (int i = 0; i < spa->spa_alloc_count; i++) {
917 mc->mc_allocator[i].mca_rotor = mg;
918 mg = mg->mg_next;
923 * Passivate a metaslab group and remove it from the allocation rotor.
924 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
925 * a metaslab group. This function will momentarily drop spa_config_locks
926 * that are lower than the SCL_ALLOC lock (see comment below).
928 void
929 metaslab_group_passivate(metaslab_group_t *mg)
931 metaslab_class_t *mc = mg->mg_class;
932 spa_t *spa = mc->mc_spa;
933 metaslab_group_t *mgprev, *mgnext;
934 int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
936 ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
937 (SCL_ALLOC | SCL_ZIO));
939 if (--mg->mg_activation_count != 0) {
940 for (int i = 0; i < spa->spa_alloc_count; i++)
941 ASSERT(mc->mc_allocator[i].mca_rotor != mg);
942 ASSERT(mg->mg_prev == NULL);
943 ASSERT(mg->mg_next == NULL);
944 ASSERT(mg->mg_activation_count < 0);
945 return;
949 * The spa_config_lock is an array of rwlocks, ordered as
950 * follows (from highest to lowest):
951 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
952 * SCL_ZIO > SCL_FREE > SCL_VDEV
953 * (For more information about the spa_config_lock see spa_misc.c)
954 * The higher the lock, the broader its coverage. When we passivate
955 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
956 * config locks. However, the metaslab group's taskq might be trying
957 * to preload metaslabs so we must drop the SCL_ZIO lock and any
958 * lower locks to allow the I/O to complete. At a minimum,
959 * we continue to hold the SCL_ALLOC lock, which prevents any future
960 * allocations from taking place and any changes to the vdev tree.
962 spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
963 taskq_wait_outstanding(mg->mg_taskq, 0);
964 spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
965 metaslab_group_alloc_update(mg);
966 for (int i = 0; i < mg->mg_allocators; i++) {
967 metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
968 metaslab_t *msp = mga->mga_primary;
969 if (msp != NULL) {
970 mutex_enter(&msp->ms_lock);
971 metaslab_passivate(msp,
972 metaslab_weight_from_range_tree(msp));
973 mutex_exit(&msp->ms_lock);
975 msp = mga->mga_secondary;
976 if (msp != NULL) {
977 mutex_enter(&msp->ms_lock);
978 metaslab_passivate(msp,
979 metaslab_weight_from_range_tree(msp));
980 mutex_exit(&msp->ms_lock);
984 mgprev = mg->mg_prev;
985 mgnext = mg->mg_next;
987 if (mg == mgnext) {
988 mgnext = NULL;
989 } else {
990 mgprev->mg_next = mgnext;
991 mgnext->mg_prev = mgprev;
993 for (int i = 0; i < spa->spa_alloc_count; i++) {
994 if (mc->mc_allocator[i].mca_rotor == mg)
995 mc->mc_allocator[i].mca_rotor = mgnext;
998 mg->mg_prev = NULL;
999 mg->mg_next = NULL;
1002 boolean_t
1003 metaslab_group_initialized(metaslab_group_t *mg)
1005 vdev_t *vd = mg->mg_vd;
1006 vdev_stat_t *vs = &vd->vdev_stat;
1008 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
1011 uint64_t
1012 metaslab_group_get_space(metaslab_group_t *mg)
1015 * Note that the number of nodes in mg_metaslab_tree may be one less
1016 * than vdev_ms_count, due to the embedded log metaslab.
1018 mutex_enter(&mg->mg_lock);
1019 uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
1020 mutex_exit(&mg->mg_lock);
1021 return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
1024 void
1025 metaslab_group_histogram_verify(metaslab_group_t *mg)
1027 uint64_t *mg_hist;
1028 avl_tree_t *t = &mg->mg_metaslab_tree;
1029 uint64_t ashift = mg->mg_vd->vdev_ashift;
1031 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
1032 return;
1034 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
1035 KM_SLEEP);
1037 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
1038 SPACE_MAP_HISTOGRAM_SIZE + ashift);
1040 mutex_enter(&mg->mg_lock);
1041 for (metaslab_t *msp = avl_first(t);
1042 msp != NULL; msp = AVL_NEXT(t, msp)) {
1043 VERIFY3P(msp->ms_group, ==, mg);
1044 /* skip if not active */
1045 if (msp->ms_sm == NULL)
1046 continue;
1048 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1049 mg_hist[i + ashift] +=
1050 msp->ms_sm->sm_phys->smp_histogram[i];
1054 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
1055 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
1057 mutex_exit(&mg->mg_lock);
1059 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
1062 static void
1063 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
1065 metaslab_class_t *mc = mg->mg_class;
1066 uint64_t ashift = mg->mg_vd->vdev_ashift;
1068 ASSERT(MUTEX_HELD(&msp->ms_lock));
1069 if (msp->ms_sm == NULL)
1070 return;
1072 mutex_enter(&mg->mg_lock);
1073 mutex_enter(&mc->mc_lock);
1074 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1075 IMPLY(mg == mg->mg_vd->vdev_log_mg,
1076 mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1077 mg->mg_histogram[i + ashift] +=
1078 msp->ms_sm->sm_phys->smp_histogram[i];
1079 mc->mc_histogram[i + ashift] +=
1080 msp->ms_sm->sm_phys->smp_histogram[i];
1082 mutex_exit(&mc->mc_lock);
1083 mutex_exit(&mg->mg_lock);
1086 void
1087 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
1089 metaslab_class_t *mc = mg->mg_class;
1090 uint64_t ashift = mg->mg_vd->vdev_ashift;
1092 ASSERT(MUTEX_HELD(&msp->ms_lock));
1093 if (msp->ms_sm == NULL)
1094 return;
1096 mutex_enter(&mg->mg_lock);
1097 mutex_enter(&mc->mc_lock);
1098 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1099 ASSERT3U(mg->mg_histogram[i + ashift], >=,
1100 msp->ms_sm->sm_phys->smp_histogram[i]);
1101 ASSERT3U(mc->mc_histogram[i + ashift], >=,
1102 msp->ms_sm->sm_phys->smp_histogram[i]);
1103 IMPLY(mg == mg->mg_vd->vdev_log_mg,
1104 mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1106 mg->mg_histogram[i + ashift] -=
1107 msp->ms_sm->sm_phys->smp_histogram[i];
1108 mc->mc_histogram[i + ashift] -=
1109 msp->ms_sm->sm_phys->smp_histogram[i];
1111 mutex_exit(&mc->mc_lock);
1112 mutex_exit(&mg->mg_lock);
1115 static void
1116 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
1118 ASSERT(msp->ms_group == NULL);
1119 mutex_enter(&mg->mg_lock);
1120 msp->ms_group = mg;
1121 msp->ms_weight = 0;
1122 avl_add(&mg->mg_metaslab_tree, msp);
1123 mutex_exit(&mg->mg_lock);
1125 mutex_enter(&msp->ms_lock);
1126 metaslab_group_histogram_add(mg, msp);
1127 mutex_exit(&msp->ms_lock);
1130 static void
1131 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
1133 mutex_enter(&msp->ms_lock);
1134 metaslab_group_histogram_remove(mg, msp);
1135 mutex_exit(&msp->ms_lock);
1137 mutex_enter(&mg->mg_lock);
1138 ASSERT(msp->ms_group == mg);
1139 avl_remove(&mg->mg_metaslab_tree, msp);
1141 metaslab_class_t *mc = msp->ms_group->mg_class;
1142 multilist_sublist_t *mls =
1143 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
1144 if (multilist_link_active(&msp->ms_class_txg_node))
1145 multilist_sublist_remove(mls, msp);
1146 multilist_sublist_unlock(mls);
1148 msp->ms_group = NULL;
1149 mutex_exit(&mg->mg_lock);
1152 static void
1153 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1155 ASSERT(MUTEX_HELD(&msp->ms_lock));
1156 ASSERT(MUTEX_HELD(&mg->mg_lock));
1157 ASSERT(msp->ms_group == mg);
1159 avl_remove(&mg->mg_metaslab_tree, msp);
1160 msp->ms_weight = weight;
1161 avl_add(&mg->mg_metaslab_tree, msp);
1165 static void
1166 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1169 * Although in principle the weight can be any value, in
1170 * practice we do not use values in the range [1, 511].
1172 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
1173 ASSERT(MUTEX_HELD(&msp->ms_lock));
1175 mutex_enter(&mg->mg_lock);
1176 metaslab_group_sort_impl(mg, msp, weight);
1177 mutex_exit(&mg->mg_lock);
1181 * Calculate the fragmentation for a given metaslab group. We can use
1182 * a simple average here since all metaslabs within the group must have
1183 * the same size. The return value will be a value between 0 and 100
1184 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in
1185 * this group have a fragmentation metric.
1187 uint64_t
1188 metaslab_group_fragmentation(metaslab_group_t *mg)
1190 vdev_t *vd = mg->mg_vd;
1191 uint64_t fragmentation = 0;
1192 uint64_t valid_ms = 0;
1194 for (int m = 0; m < vd->vdev_ms_count; m++) {
1195 metaslab_t *msp = vd->vdev_ms[m];
1197 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1198 continue;
1199 if (msp->ms_group != mg)
1200 continue;
1202 valid_ms++;
1203 fragmentation += msp->ms_fragmentation;
1206 if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
1207 return (ZFS_FRAG_INVALID);
1209 fragmentation /= valid_ms;
1210 ASSERT3U(fragmentation, <=, 100);
1211 return (fragmentation);
1215 * Determine if a given metaslab group should skip allocations. A metaslab
1216 * group should avoid allocations if its free capacity is less than the
1217 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1218 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1219 * that can still handle allocations. If the allocation throttle is enabled
1220 * then we skip allocations to devices that have reached their maximum
1221 * allocation queue depth unless the selected metaslab group is the only
1222 * eligible group remaining.
1224 static boolean_t
1225 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1226 uint64_t psize, int allocator, int d)
1228 spa_t *spa = mg->mg_vd->vdev_spa;
1229 metaslab_class_t *mc = mg->mg_class;
1232 * We can only consider skipping this metaslab group if it's
1233 * in the normal, special, or dedup metaslab class and there are
1234 * other metaslab groups to select from. Otherwise, we always consider
1235 * it eligible for allocations.
1237 if ((mc != spa_normal_class(spa) &&
1238 mc != spa_special_class(spa) &&
1239 mc != spa_dedup_class(spa)) ||
1240 mc->mc_groups <= 1)
1241 return (B_TRUE);
1244 * If the metaslab group's mg_allocatable flag is set (see comments
1245 * in metaslab_group_alloc_update() for more information) and
1246 * the allocation throttle is disabled then allow allocations to this
1247 * device. However, if the allocation throttle is enabled then
1248 * check if we have reached our allocation limit (mga_alloc_queue_depth)
1249 * to determine if we should allow allocations to this metaslab group.
1250 * If all metaslab groups are no longer considered allocatable
1251 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1252 * gang block size then we allow allocations on this metaslab group
1253 * regardless of the mg_allocatable or throttle settings.
1255 if (mg->mg_allocatable) {
1256 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
1257 int64_t qdepth;
1258 uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
1260 if (!mc->mc_alloc_throttle_enabled)
1261 return (B_TRUE);
1264 * If this metaslab group does not have any free space, then
1265 * there is no point in looking further.
1267 if (mg->mg_no_free_space)
1268 return (B_FALSE);
1271 * Relax allocation throttling for ditto blocks. Due to
1272 * random imbalances in allocation, copies tend to be pushed
1273 * to whichever vdev looks a bit better at the moment.
1275 qmax = qmax * (4 + d) / 4;
1277 qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
1280 * If this metaslab group is below its qmax or it's
1281 * the only allocatable metaslab group, then attempt
1282 * to allocate from it.
1284 if (qdepth < qmax || mc->mc_alloc_groups == 1)
1285 return (B_TRUE);
1286 ASSERT3U(mc->mc_alloc_groups, >, 1);
1289 * Since this metaslab group is at or over its qmax, we
1290 * need to determine if there are metaslab groups after this
1291 * one that might be able to handle this allocation. This is
1292 * racy since we can't hold the locks for all metaslab
1293 * groups at the same time when we make this check.
1295 for (metaslab_group_t *mgp = mg->mg_next;
1296 mgp != rotor; mgp = mgp->mg_next) {
1297 metaslab_group_allocator_t *mgap =
1298 &mgp->mg_allocator[allocator];
1299 qmax = mgap->mga_cur_max_alloc_queue_depth;
1300 qmax = qmax * (4 + d) / 4;
1301 qdepth =
1302 zfs_refcount_count(&mgap->mga_alloc_queue_depth);
1305 * If there is another metaslab group that
1306 * might be able to handle the allocation, then
1307 * we return false so that we skip this group.
1309 if (qdepth < qmax && !mgp->mg_no_free_space)
1310 return (B_FALSE);
1314 * We didn't find another group to handle the allocation
1315 * so we can't skip this metaslab group even though
1316 * we are at or over our qmax.
1318 return (B_TRUE);
1320 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1321 return (B_TRUE);
1323 return (B_FALSE);
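/*
 * Illustrative sketch (hypothetical helper) of the ditto-block queue-depth
 * relaxation used above: qmax is scaled by (4 + d) / 4, so the second and
 * third DVAs of a block (d = 1, 2) tolerate 25% and 50% deeper queues.
 */
static inline uint64_t
example_relaxed_qmax(uint64_t qmax, int d)
{
	return (qmax * (4 + d) / 4);
}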
1327 * ==========================================================================
1328 * Range tree callbacks
1329 * ==========================================================================
1333 * Comparison function for the private size-ordered tree using 32-bit
1334 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1336 static int
1337 metaslab_rangesize32_compare(const void *x1, const void *x2)
1339 const range_seg32_t *r1 = x1;
1340 const range_seg32_t *r2 = x2;
1342 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1343 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1345 int cmp = TREE_CMP(rs_size1, rs_size2);
1346 if (likely(cmp))
1347 return (cmp);
1349 return (TREE_CMP(r1->rs_start, r2->rs_start));
1353 * Comparison function for the private size-ordered tree using 64-bit
1354 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1356 static int
1357 metaslab_rangesize64_compare(const void *x1, const void *x2)
1359 const range_seg64_t *r1 = x1;
1360 const range_seg64_t *r2 = x2;
1362 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1363 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1365 int cmp = TREE_CMP(rs_size1, rs_size2);
1366 if (likely(cmp))
1367 return (cmp);
1369 return (TREE_CMP(r1->rs_start, r2->rs_start));
1371 typedef struct metaslab_rt_arg {
1372 zfs_btree_t *mra_bt;
1373 uint32_t mra_floor_shift;
1374 } metaslab_rt_arg_t;
1376 struct mssa_arg {
1377 range_tree_t *rt;
1378 metaslab_rt_arg_t *mra;
1381 static void
1382 metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
1384 struct mssa_arg *mssap = arg;
1385 range_tree_t *rt = mssap->rt;
1386 metaslab_rt_arg_t *mrap = mssap->mra;
1387 range_seg_max_t seg = {0};
1388 rs_set_start(&seg, rt, start);
1389 rs_set_end(&seg, rt, start + size);
1390 metaslab_rt_add(rt, &seg, mrap);
1393 static void
1394 metaslab_size_tree_full_load(range_tree_t *rt)
1396 metaslab_rt_arg_t *mrap = rt->rt_arg;
1397 METASLABSTAT_BUMP(metaslabstat_reload_tree);
1398 ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
1399 mrap->mra_floor_shift = 0;
1400 struct mssa_arg arg = {0};
1401 arg.rt = rt;
1402 arg.mra = mrap;
1403 range_tree_walk(rt, metaslab_size_sorted_add, &arg);
1407 * Create any block allocator specific components. The current allocators
1408 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1410 static void
1411 metaslab_rt_create(range_tree_t *rt, void *arg)
1413 metaslab_rt_arg_t *mrap = arg;
1414 zfs_btree_t *size_tree = mrap->mra_bt;
1416 size_t size;
1417 int (*compare) (const void *, const void *);
1418 switch (rt->rt_type) {
1419 case RANGE_SEG32:
1420 size = sizeof (range_seg32_t);
1421 compare = metaslab_rangesize32_compare;
1422 break;
1423 case RANGE_SEG64:
1424 size = sizeof (range_seg64_t);
1425 compare = metaslab_rangesize64_compare;
1426 break;
1427 default:
1428 panic("Invalid range seg type %d", rt->rt_type);
1430 zfs_btree_create(size_tree, compare, size);
1431 mrap->mra_floor_shift = metaslab_by_size_min_shift;
1434 static void
1435 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1437 (void) rt;
1438 metaslab_rt_arg_t *mrap = arg;
1439 zfs_btree_t *size_tree = mrap->mra_bt;
1441 zfs_btree_destroy(size_tree);
1442 kmem_free(mrap, sizeof (*mrap));
1445 static void
1446 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1448 metaslab_rt_arg_t *mrap = arg;
1449 zfs_btree_t *size_tree = mrap->mra_bt;
1451 if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
1452 (1ULL << mrap->mra_floor_shift))
1453 return;
1455 zfs_btree_add(size_tree, rs);
1458 static void
1459 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1461 metaslab_rt_arg_t *mrap = arg;
1462 zfs_btree_t *size_tree = mrap->mra_bt;
1464 if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
1465 mrap->mra_floor_shift))
1466 return;
1468 zfs_btree_remove(size_tree, rs);
1471 static void
1472 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1474 metaslab_rt_arg_t *mrap = arg;
1475 zfs_btree_t *size_tree = mrap->mra_bt;
1476 zfs_btree_clear(size_tree);
1477 zfs_btree_destroy(size_tree);
1479 metaslab_rt_create(rt, arg);
1482 static const range_tree_ops_t metaslab_rt_ops = {
1483 .rtop_create = metaslab_rt_create,
1484 .rtop_destroy = metaslab_rt_destroy,
1485 .rtop_add = metaslab_rt_add,
1486 .rtop_remove = metaslab_rt_remove,
1487 .rtop_vacate = metaslab_rt_vacate
1491 * ==========================================================================
1492 * Common allocator routines
1493 * ==========================================================================
1497 * Return the maximum contiguous segment within the metaslab.
1499 uint64_t
1500 metaslab_largest_allocatable(metaslab_t *msp)
1502 zfs_btree_t *t = &msp->ms_allocatable_by_size;
1503 range_seg_t *rs;
1505 if (t == NULL)
1506 return (0);
1507 if (zfs_btree_numnodes(t) == 0)
1508 metaslab_size_tree_full_load(msp->ms_allocatable);
1510 rs = zfs_btree_last(t, NULL);
1511 if (rs == NULL)
1512 return (0);
1514 return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
1515 msp->ms_allocatable));
1519 * Return the maximum contiguous segment within the unflushed frees of this
1520 * metaslab.
1522 static uint64_t
1523 metaslab_largest_unflushed_free(metaslab_t *msp)
1525 ASSERT(MUTEX_HELD(&msp->ms_lock));
1527 if (msp->ms_unflushed_frees == NULL)
1528 return (0);
1530 if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
1531 metaslab_size_tree_full_load(msp->ms_unflushed_frees);
1532 range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
1533 NULL);
1534 if (rs == NULL)
1535 return (0);
1538 * When a range is freed from the metaslab, that range is added to
1539 * both the unflushed frees and the deferred frees. While the block
1540 * will eventually be usable, if the metaslab were loaded the range
1541 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
1542 * txgs had passed. As a result, when attempting to estimate an upper
1543 * bound for the largest currently-usable free segment in the
1544 * metaslab, we need to not consider any ranges currently in the defer
1545 * trees. This algorithm approximates the largest available chunk in
1546 * the largest range in the unflushed_frees tree by taking the first
1547 * chunk. While this may be a poor estimate, it should only remain so
1548 * briefly and should eventually self-correct as frees are no longer
1549 * deferred. Similar logic applies to the ms_freed tree. See
1550 * metaslab_load() for more details.
1552 * There are two primary sources of inaccuracy in this estimate. Both
1553 * are tolerated for performance reasons. The first source is that we
1554 * only check the largest segment for overlaps. Smaller segments may
1555 * have more favorable overlaps with the other trees, resulting in
1556 * larger usable chunks. Second, we only look at the first chunk in
1557 * the largest segment; there may be other usable chunks in the
1558 * largest segment, but we ignore them.
1560 uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
1561 uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
1562 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1563 uint64_t start = 0;
1564 uint64_t size = 0;
1565 boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
1566 rsize, &start, &size);
1567 if (found) {
1568 if (rstart == start)
1569 return (0);
1570 rsize = start - rstart;
1574 uint64_t start = 0;
1575 uint64_t size = 0;
1576 boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
1577 rsize, &start, &size);
1578 if (found)
1579 rsize = start - rstart;
1581 return (rsize);
1584 static range_seg_t *
1585 metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
1586 uint64_t size, zfs_btree_index_t *where)
1588 range_seg_t *rs;
1589 range_seg_max_t rsearch;
1591 rs_set_start(&rsearch, rt, start);
1592 rs_set_end(&rsearch, rt, start + size);
1594 rs = zfs_btree_find(t, &rsearch, where);
1595 if (rs == NULL) {
1596 rs = zfs_btree_next(t, where, where);
1599 return (rs);
1602 #if defined(WITH_DF_BLOCK_ALLOCATOR) || \
1603 defined(WITH_CF_BLOCK_ALLOCATOR)
1606 * This is a helper function that can be used by the allocator to find a
1607 * suitable block to allocate. This will search the specified B-tree looking
1608 * for a block that matches the specified criteria.
1610 static uint64_t
1611 metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
1612 uint64_t max_search)
1614 if (*cursor == 0)
1615 *cursor = rt->rt_start;
1616 zfs_btree_t *bt = &rt->rt_root;
1617 zfs_btree_index_t where;
1618 range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
1619 uint64_t first_found;
1620 int count_searched = 0;
1622 if (rs != NULL)
1623 first_found = rs_get_start(rs, rt);
1625 while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
1626 max_search || count_searched < metaslab_min_search_count)) {
1627 uint64_t offset = rs_get_start(rs, rt);
1628 if (offset + size <= rs_get_end(rs, rt)) {
1629 *cursor = offset + size;
1630 return (offset);
1632 rs = zfs_btree_next(bt, &where, &where);
1633 count_searched++;
1636 *cursor = 0;
1637 return (-1ULL);
1639 #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
1641 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1643 * ==========================================================================
1644 * Dynamic Fit (df) block allocator
1646 * Search for a free chunk of at least this size, starting from the last
1647 * offset (for this alignment of block) looking for up to
1648 * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
1649 * found within 16MB, then return a free chunk of exactly the requested size (or
1650 * larger).
1652 * If it seems like searching from the last offset will be unproductive, skip
1653 * that and just return a free chunk of exactly the requested size (or larger).
1654 * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
1655 * mechanism is probably not very useful and may be removed in the future.
1657 * The behavior when not searching can be changed to return the largest free
1658 * chunk, instead of a free chunk of exactly the requested size, by setting
1659 * metaslab_df_use_largest_segment.
1660 * ==========================================================================
1662 static uint64_t
1663 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1666 * Find the largest power of 2 block size that evenly divides the
1667 * requested size. This is used to try to allocate blocks with similar
1668 * alignment from the same area of the metaslab (i.e. same cursor
1669 * bucket), although it does not guarantee that allocations of other
1670 * sizes won't end up in the same region.
1672 uint64_t align = size & -size;
1673 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1674 range_tree_t *rt = msp->ms_allocatable;
1675 uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1676 uint64_t offset;
1678 ASSERT(MUTEX_HELD(&msp->ms_lock));
1681 * If we're running low on space, find a segment based on size,
1682 * rather than iterating based on offset.
1684 if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
1685 free_pct < metaslab_df_free_pct) {
1686 offset = -1;
1687 } else {
1688 offset = metaslab_block_picker(rt,
1689 cursor, size, metaslab_df_max_search);
1692 if (offset == -1) {
1693 range_seg_t *rs;
1694 if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
1695 metaslab_size_tree_full_load(msp->ms_allocatable);
1697 if (metaslab_df_use_largest_segment) {
1698 /* use largest free segment */
1699 rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
1700 } else {
1701 zfs_btree_index_t where;
1702 /* use segment of this size, or next largest */
1703 rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1704 rt, msp->ms_start, size, &where);
1706 if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
1707 rt)) {
1708 offset = rs_get_start(rs, rt);
1709 *cursor = offset + size;
1713 return (offset);
1716 const metaslab_ops_t zfs_metaslab_ops = {
1717 metaslab_df_alloc
1719 #endif /* WITH_DF_BLOCK_ALLOCATOR */
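/*
 * Illustrative sketch (hypothetical helper) of the cursor-bucket selection
 * in metaslab_df_alloc() above: "size & -size" isolates the largest power
 * of two that evenly divides the size, and highbit64() turns it into an
 * index. E.g. a 24K (0x6000) request aligns to 8K and selects ms_lbas[13].
 */
static inline int
example_df_cursor_index(uint64_t size)
{
	uint64_t align = size & -size;	/* largest power-of-2 divisor */
	int bit = 0;

	while (align >>= 1)		/* open-coded highbit64(align) - 1 */
		bit++;
	return (bit);
}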
1721 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1723 * ==========================================================================
1724 * Cursor fit block allocator -
1725 * Select the largest region in the metaslab, set the cursor to the beginning
1726 * of the range and the cursor_end to the end of the range. As allocations
1727 * are made advance the cursor. Continue allocating from the cursor until
1728 * the range is exhausted and then find a new range.
1729 * ==========================================================================
1731 static uint64_t
1732 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1734 range_tree_t *rt = msp->ms_allocatable;
1735 zfs_btree_t *t = &msp->ms_allocatable_by_size;
1736 uint64_t *cursor = &msp->ms_lbas[0];
1737 uint64_t *cursor_end = &msp->ms_lbas[1];
1738 uint64_t offset = 0;
1740 ASSERT(MUTEX_HELD(&msp->ms_lock));
1742 ASSERT3U(*cursor_end, >=, *cursor);
1744 if ((*cursor + size) > *cursor_end) {
1745 range_seg_t *rs;
1747 if (zfs_btree_numnodes(t) == 0)
1748 metaslab_size_tree_full_load(msp->ms_allocatable);
1749 rs = zfs_btree_last(t, NULL);
1750 if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
1751 size)
1752 return (-1ULL);
1754 *cursor = rs_get_start(rs, rt);
1755 *cursor_end = rs_get_end(rs, rt);
1758 offset = *cursor;
1759 *cursor += size;
1761 return (offset);
1764 const metaslab_ops_t zfs_metaslab_ops = {
1765 metaslab_cf_alloc
1767 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1769 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1771 * ==========================================================================
1772 * New dynamic fit allocator -
1773 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1774 * contiguous blocks. If no region is found then just use the largest segment
1775 * that remains.
1776 * ==========================================================================
1780 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1781 * to request from the allocator.
1783 uint64_t metaslab_ndf_clump_shift = 4;
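/*
 * Example of the fallback search below (hypothetical sizes): for a 16K
 * request, hbit = highbit64(16K) = 15, so when the cursor position does
 * not fit, the size-sorted tree is searched for a segment of up to
 * MIN(max_size, 1ULL << (15 + metaslab_ndf_clump_shift)) = MIN(max_size, 512K),
 * i.e. room for a clump of allocations rather than just this one.
 */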
1785 static uint64_t
1786 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1788 zfs_btree_t *t = &msp->ms_allocatable->rt_root;
1789 range_tree_t *rt = msp->ms_allocatable;
1790 zfs_btree_index_t where;
1791 range_seg_t *rs;
1792 range_seg_max_t rsearch;
1793 uint64_t hbit = highbit64(size);
1794 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1795 uint64_t max_size = metaslab_largest_allocatable(msp);
1797 ASSERT(MUTEX_HELD(&msp->ms_lock));
1799 if (max_size < size)
1800 return (-1ULL);
1802 rs_set_start(&rsearch, rt, *cursor);
1803 rs_set_end(&rsearch, rt, *cursor + size);
1805 rs = zfs_btree_find(t, &rsearch, &where);
1806 if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
1807 t = &msp->ms_allocatable_by_size;
1809 rs_set_start(&rsearch, rt, 0);
1810 rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
1811 metaslab_ndf_clump_shift)));
1813 rs = zfs_btree_find(t, &rsearch, &where);
1814 if (rs == NULL)
1815 rs = zfs_btree_next(t, &where, &where);
1816 ASSERT(rs != NULL);
1819 if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
1820 *cursor = rs_get_start(rs, rt) + size;
1821 return (rs_get_start(rs, rt));
1823 return (-1ULL);
1826 const metaslab_ops_t zfs_metaslab_ops = {
1827 metaslab_ndf_alloc
1829 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1833 * ==========================================================================
1834 * Metaslabs
1835 * ==========================================================================
1839 * Wait for any in-progress metaslab loads to complete.
1841 static void
1842 metaslab_load_wait(metaslab_t *msp)
1844 ASSERT(MUTEX_HELD(&msp->ms_lock));
1846 while (msp->ms_loading) {
1847 ASSERT(!msp->ms_loaded);
1848 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1853 * Wait for any in-progress flushing to complete.
1855 static void
1856 metaslab_flush_wait(metaslab_t *msp)
1858 ASSERT(MUTEX_HELD(&msp->ms_lock));
1860 while (msp->ms_flushing)
1861 cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
1864 static unsigned int
1865 metaslab_idx_func(multilist_t *ml, void *arg)
1867 metaslab_t *msp = arg;
1870 * ms_id values are allocated sequentially, so a full 64-bit
1871 * division would be a waste of time; limit it to 32 bits.
1873 return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
1876 uint64_t
1877 metaslab_allocated_space(metaslab_t *msp)
1879 return (msp->ms_allocated_space);
1883 * Verify that the space accounting on disk matches the in-core range_trees.
1885 static void
1886 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
1888 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1889 uint64_t allocating = 0;
1890 uint64_t sm_free_space, msp_free_space;
1892 ASSERT(MUTEX_HELD(&msp->ms_lock));
1893 ASSERT(!msp->ms_condensing);
1895 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1896 return;
1899 * We can only verify the metaslab space when we're called
1900 * from syncing context with a loaded metaslab that has an
1901 * allocated space map. Calling this in non-syncing context
1902 * does not provide a consistent view of the metaslab since
1903 * we're performing allocations in the future.
1905 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1906 !msp->ms_loaded)
1907 return;
1910 * Even though the smp_alloc field can get negative,
1911 * when it comes to a metaslab's space map, that should
1912 * never be the case.
1914 ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
1916 ASSERT3U(space_map_allocated(msp->ms_sm), >=,
1917 range_tree_space(msp->ms_unflushed_frees));
1919 ASSERT3U(metaslab_allocated_space(msp), ==,
1920 space_map_allocated(msp->ms_sm) +
1921 range_tree_space(msp->ms_unflushed_allocs) -
1922 range_tree_space(msp->ms_unflushed_frees));
1924 sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
1927 * Account for future allocations since we would have
1928 * already deducted that space from the ms_allocatable.
1930 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
1931 allocating +=
1932 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
1934 ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
1935 msp->ms_allocating_total);
1937 ASSERT3U(msp->ms_deferspace, ==,
1938 range_tree_space(msp->ms_defer[0]) +
1939 range_tree_space(msp->ms_defer[1]));
1941 msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
1942 msp->ms_deferspace + range_tree_space(msp->ms_freed);
1944 VERIFY3U(sm_free_space, ==, msp_free_space);
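/*
 * Worked example of the identity verified above, with made-up numbers:
 * ms_size = 1024M, space_map_allocated = 300M, unflushed_allocs = 20M,
 * unflushed_frees = 10M gives metaslab_allocated_space = 310M and
 * sm_free_space = 1024M - 310M = 714M. The in-core side must account
 * for the same 714M, e.g. ms_allocatable = 700M, allocating = 4M,
 * ms_deferspace = 6M and ms_freed = 4M.
 */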
1947 static void
1948 metaslab_aux_histograms_clear(metaslab_t *msp)
1951 * Auxiliary histograms are only cleared when resetting them,
1952 * which can only happen while the metaslab is loaded.
1954 ASSERT(msp->ms_loaded);
1956 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
1957 for (int t = 0; t < TXG_DEFER_SIZE; t++)
1958 memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
1961 static void
1962 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
1963 range_tree_t *rt)
1966 * This is modeled after space_map_histogram_add(), so refer to that
1967 * function for implementation details. We want this to work like
1968 * the space map histogram, and not the range tree histogram, as we
1969 * are essentially constructing a delta that will be later subtracted
1970 * from the space map histogram.
1972 int idx = 0;
1973 for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1974 ASSERT3U(i, >=, idx + shift);
1975 histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
1977 if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
1978 ASSERT3U(idx + shift, ==, i);
1979 idx++;
1980 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
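/*
 * Example of the folding above, assuming sm_shift = 9 and a space map
 * histogram of SPACE_MAP_HISTOGRAM_SIZE = 32 buckets: a segment counted
 * in rt_histogram[12] (4K-8K) lands in histogram[12 - 9] = histogram[3]
 * with no scaling, while once idx is pinned at the last bucket (31) a
 * segment in rt_histogram[41] contributes 1 << (41 - 31 - 9) = 2 units
 * to histogram[31], mirroring space_map_histogram_add().
 */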
1986 * Called at every sync pass that the metaslab gets synced.
1988 * The reason is that we want our auxiliary histograms to be updated
1989 * whenever the metaslab's space map histogram is updated. This way
1990 * we stay consistent on which parts of the metaslab space map's
1991 * histogram are currently not available for allocations (e.g. because
1992 * they are in the defer, freed, and freeing trees).
1994 static void
1995 metaslab_aux_histograms_update(metaslab_t *msp)
1997 space_map_t *sm = msp->ms_sm;
1998 ASSERT(sm != NULL);
2001 * This is similar to the metaslab's space map histogram updates
2002 * that take place in metaslab_sync(). The only difference is that
2003 * we only care about segments that haven't made it into the
2004 * ms_allocatable tree yet.
2006 if (msp->ms_loaded) {
2007 metaslab_aux_histograms_clear(msp);
2009 metaslab_aux_histogram_add(msp->ms_synchist,
2010 sm->sm_shift, msp->ms_freed);
2012 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2013 metaslab_aux_histogram_add(msp->ms_deferhist[t],
2014 sm->sm_shift, msp->ms_defer[t]);
2018 metaslab_aux_histogram_add(msp->ms_synchist,
2019 sm->sm_shift, msp->ms_freeing);
2023 * Called every time we are done syncing (writing to) the metaslab,
2024 * i.e. at the end of each sync pass.
2025 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
2027 static void
2028 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
2030 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2031 space_map_t *sm = msp->ms_sm;
2033 if (sm == NULL) {
2035 * We came here from metaslab_init() when creating/opening a
2036 * pool, looking at a metaslab that hasn't had any allocations
2037 * yet.
2039 return;
2043 * This is similar to the actions that we take for the ms_freed
2044 * and ms_defer trees in metaslab_sync_done().
2046 uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
2047 if (defer_allowed) {
2048 memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
2049 sizeof (msp->ms_synchist));
2050 } else {
2051 memset(msp->ms_deferhist[hist_index], 0,
2052 sizeof (msp->ms_deferhist[hist_index]));
2054 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2058 * Ensure that the metaslab's weight and fragmentation are consistent
2059 * with the contents of the histogram (either the range tree's histogram
2060 * or the space map's depending whether the metaslab is loaded).
2062 static void
2063 metaslab_verify_weight_and_frag(metaslab_t *msp)
2065 ASSERT(MUTEX_HELD(&msp->ms_lock));
2067 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2068 return;
2071 * We can end up here from vdev_remove_complete(), in which case we
2072 * cannot do these assertions because we hold spa config locks and
2073 * thus we are not allowed to read from the DMU.
2075 * We check if the metaslab group has been removed; if so, we return
2076 * immediately, as that means we got here from the aforementioned
2077 * code path.
2079 if (msp->ms_group == NULL)
2080 return;
2083 * Devices being removed always return a weight of 0 and leave
2084 * fragmentation and ms_max_size as is - there is nothing for
2085 * us to verify here.
2087 vdev_t *vd = msp->ms_group->mg_vd;
2088 if (vd->vdev_removing)
2089 return;
2092 * If the metaslab is dirty it probably means that we've done
2093 * some allocations or frees that have changed our histograms
2094 * and thus the weight.
2096 for (int t = 0; t < TXG_SIZE; t++) {
2097 if (txg_list_member(&vd->vdev_ms_list, msp, t))
2098 return;
2102 * This verification checks that our in-memory state is consistent
2103 * with what's on disk. If the pool is read-only then there aren't
2104 * any changes and we just have the initially-loaded state.
2106 if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
2107 return;
2109 /* some extra verification for the in-core tree, when possible */
2110 if (msp->ms_loaded) {
2111 range_tree_stat_verify(msp->ms_allocatable);
2112 VERIFY(space_map_histogram_verify(msp->ms_sm,
2113 msp->ms_allocatable));
2116 uint64_t weight = msp->ms_weight;
2117 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2118 boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
2119 uint64_t frag = msp->ms_fragmentation;
2120 uint64_t max_segsize = msp->ms_max_size;
2122 msp->ms_weight = 0;
2123 msp->ms_fragmentation = 0;
2126 * This function is used for verification purposes and thus should
2127 * not introduce any side-effects/mutations on the system's state.
2129 * Regardless of whether metaslab_weight() thinks this metaslab
2130 * should be active or not, we want to ensure that the actual weight
2131 * (and therefore the value of ms_weight) would be the same if it
2132 * was to be recalculated at this point.
2134 * In addition we set the nodirty flag so metaslab_weight() does
2135 * not dirty the metaslab for future TXGs (e.g. when trying to
2136 * force condensing to upgrade the metaslab spacemaps).
2138 msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
2140 VERIFY3U(max_segsize, ==, msp->ms_max_size);
2143 * If the weight type changed then there is no point in doing
2144 * verification. Revert fields to their original values.
2146 if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
2147 (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
2148 msp->ms_fragmentation = frag;
2149 msp->ms_weight = weight;
2150 return;
2153 VERIFY3U(msp->ms_fragmentation, ==, frag);
2154 VERIFY3U(msp->ms_weight, ==, weight);
2158 * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
2159 * this class that was used longest ago, and attempt to unload it. To prevent
2160 * performance degradation we don't want to spend too much time in this loop,
2161 * and we expect that most of the time this operation will
2162 * succeed. Between that and the normal unloading processing during txg sync,
2163 * we expect this to keep the metaslab memory usage under control.
2165 static void
2166 metaslab_potentially_evict(metaslab_class_t *mc)
2168 #ifdef _KERNEL
2169 uint64_t allmem = arc_all_memory();
2170 uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2171 uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
2172 uint_t tries = 0;
2173 for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
2174 tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
2175 tries++) {
2176 unsigned int idx = multilist_get_random_index(
2177 &mc->mc_metaslab_txg_list);
2178 multilist_sublist_t *mls =
2179 multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx);
2180 metaslab_t *msp = multilist_sublist_head(mls);
2181 multilist_sublist_unlock(mls);
2182 while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
2183 inuse * size) {
2184 VERIFY3P(mls, ==, multilist_sublist_lock(
2185 &mc->mc_metaslab_txg_list, idx));
2186 ASSERT3U(idx, ==,
2187 metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
2189 if (!multilist_link_active(&msp->ms_class_txg_node)) {
2190 multilist_sublist_unlock(mls);
2191 break;
2193 metaslab_t *next_msp = multilist_sublist_next(mls, msp);
2194 multilist_sublist_unlock(mls);
2196 * If the metaslab is currently loading there are two
2197 * cases. If it's the metaslab we're evicting, we
2198 * can't continue on or we'll panic when we attempt to
2199 * recursively lock the mutex. If it's another
2200 * metaslab that's loading, it can be safely skipped,
2201 * since we know it's very new and therefore not a
2202 * good eviction candidate. We check later once the
2203 * lock is held that the metaslab is fully loaded
2204 * before actually unloading it.
2206 if (msp->ms_loading) {
2207 msp = next_msp;
2208 inuse =
2209 spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2210 continue;
2213 * We can't unload metaslabs with no spacemap because
2214 * they're not ready to be unloaded yet. We can't
2215 * unload metaslabs with outstanding allocations
2216 * because doing so could cause the metaslab's weight
2217 * to decrease while it's unloaded, which violates an
2218 * invariant that we use to prevent unnecessary
2219 * loading. We also don't unload metaslabs that are
2220 * currently active because they are high-weight
2221 * metaslabs that are likely to be used in the near
2222 * future.
2224 mutex_enter(&msp->ms_lock);
2225 if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
2226 msp->ms_allocating_total == 0) {
2227 metaslab_unload(msp);
2229 mutex_exit(&msp->ms_lock);
2230 msp = next_msp;
2231 inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2234 #else
2235 (void) mc, (void) zfs_metaslab_mem_limit;
2236 #endif
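/*
 * Example of the trigger above with made-up numbers: with 16G of memory
 * (allmem) and zfs_metaslab_mem_limit at, say, 25, eviction attempts
 * continue while the btree leaf cache footprint (inuse * size) exceeds
 * 16G * 25 / 100 = 4G, but never for more than twice the number of
 * sublists in mc_metaslab_txg_list iterations.
 */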
2239 static int
2240 metaslab_load_impl(metaslab_t *msp)
2242 int error = 0;
2244 ASSERT(MUTEX_HELD(&msp->ms_lock));
2245 ASSERT(msp->ms_loading);
2246 ASSERT(!msp->ms_condensing);
2249 * We temporarily drop the lock to unblock other operations while we
2250 * are reading the space map. Therefore, metaslab_sync() and
2251 * metaslab_sync_done() can run at the same time as we do.
2253 * If we are using the log space maps, metaslab_sync() can't write to
2254 * the metaslab's space map while we are loading as we only write to
2255 * it when we are flushing the metaslab, and that can't happen while
2256 * we are loading it.
2258 * If we are not using log space maps though, metaslab_sync() can
2259 * append to the space map while we are loading. Therefore we load
2260 * only entries that existed when we started the load. Additionally,
2261 * metaslab_sync_done() has to wait for the load to complete because
2262 * there are potential races like metaslab_load() loading parts of the
2263 * space map that are currently being appended by metaslab_sync(). If
2264 * we didn't, the ms_allocatable would have entries that
2265 * metaslab_sync_done() would try to re-add later.
2267 * That's why before dropping the lock we remember the synced length
2268 * of the metaslab and read up to that point of the space map,
2269 * ignoring entries appended by metaslab_sync() that happen after we
2270 * drop the lock.
2272 uint64_t length = msp->ms_synced_length;
2273 mutex_exit(&msp->ms_lock);
2275 hrtime_t load_start = gethrtime();
2276 metaslab_rt_arg_t *mrap;
2277 if (msp->ms_allocatable->rt_arg == NULL) {
2278 mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2279 } else {
2280 mrap = msp->ms_allocatable->rt_arg;
2281 msp->ms_allocatable->rt_ops = NULL;
2282 msp->ms_allocatable->rt_arg = NULL;
2284 mrap->mra_bt = &msp->ms_allocatable_by_size;
2285 mrap->mra_floor_shift = metaslab_by_size_min_shift;
2287 if (msp->ms_sm != NULL) {
2288 error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
2289 SM_FREE, length);
2291 /* Now, populate the size-sorted tree. */
2292 metaslab_rt_create(msp->ms_allocatable, mrap);
2293 msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2294 msp->ms_allocatable->rt_arg = mrap;
2296 struct mssa_arg arg = {0};
2297 arg.rt = msp->ms_allocatable;
2298 arg.mra = mrap;
2299 range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
2300 &arg);
2301 } else {
2303 * Add the size-sorted tree first, since we don't need to load
2304 * the metaslab from the spacemap.
2306 metaslab_rt_create(msp->ms_allocatable, mrap);
2307 msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2308 msp->ms_allocatable->rt_arg = mrap;
2310 * The space map has not been allocated yet, so treat
2311 * all the space in the metaslab as free and add it to the
2312 * ms_allocatable tree.
2314 range_tree_add(msp->ms_allocatable,
2315 msp->ms_start, msp->ms_size);
2317 if (msp->ms_new) {
2319 * If the ms_sm doesn't exist, this means that this
2320 * metaslab hasn't gone through metaslab_sync() and
2321 * thus has never been dirtied. So we shouldn't
2322 * expect any unflushed allocs or frees from previous
2323 * TXGs.
2325 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
2326 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
2331 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
2332 * changing the ms_sm (or log_sm) and the metaslab's range trees
2333 * while we are about to use them and populate the ms_allocatable.
2334 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2335 * hold the ms_lock while writing the ms_checkpointing tree to disk.
2337 mutex_enter(&msp->ms_sync_lock);
2338 mutex_enter(&msp->ms_lock);
2340 ASSERT(!msp->ms_condensing);
2341 ASSERT(!msp->ms_flushing);
2343 if (error != 0) {
2344 mutex_exit(&msp->ms_sync_lock);
2345 return (error);
2348 ASSERT3P(msp->ms_group, !=, NULL);
2349 msp->ms_loaded = B_TRUE;
2352 * Apply all the unflushed changes to ms_allocatable right
2353 * away so any manipulations we do below have a clear view
2354 * of what is allocated and what is free.
2356 range_tree_walk(msp->ms_unflushed_allocs,
2357 range_tree_remove, msp->ms_allocatable);
2358 range_tree_walk(msp->ms_unflushed_frees,
2359 range_tree_add, msp->ms_allocatable);
2361 ASSERT3P(msp->ms_group, !=, NULL);
2362 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2363 if (spa_syncing_log_sm(spa) != NULL) {
2364 ASSERT(spa_feature_is_enabled(spa,
2365 SPA_FEATURE_LOG_SPACEMAP));
2368 * If we use a log space map we add all the segments
2369 * that are in ms_unflushed_frees so they are available
2370 * for allocation.
2372 * ms_allocatable needs to contain all free segments
2373 * that are ready for allocations (thus not segments
2374 * from ms_freeing, ms_freed, and the ms_defer trees).
2375 * But if we grab the lock in this code path at a sync
2376 * pass later than 1, then it also contains the
2377 * segments of ms_freed (they were added to it earlier
2378 * in this path through ms_unflushed_frees). So we
2379 * need to remove all the segments that exist in
2380 * ms_freed from ms_allocatable as they will be added
2381 * later in metaslab_sync_done().
2383 * When there's no log space map, the ms_allocatable
2384 * correctly doesn't contain any segments that exist
2385 * in ms_freed [see ms_synced_length].
2387 range_tree_walk(msp->ms_freed,
2388 range_tree_remove, msp->ms_allocatable);
2392 * If we are not using the log space map, ms_allocatable
2393 * contains the segments that exist in the ms_defer trees
2394 * [see ms_synced_length]. Thus we need to remove them
2395 * from ms_allocatable as they will be added again in
2396 * metaslab_sync_done().
2398 * If we are using the log space map, ms_allocatable still
2399 * contains the segments that exist in the ms_defer trees.
2400 * Not because it read them through the ms_sm though. But
2401 * because these segments are part of ms_unflushed_frees
2402 * whose segments we add to ms_allocatable earlier in this
2403 * code path.
2405 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2406 range_tree_walk(msp->ms_defer[t],
2407 range_tree_remove, msp->ms_allocatable);
2411 * Call metaslab_recalculate_weight_and_sort() now that the
2412 * metaslab is loaded so we get the metaslab's real weight.
2414 * Unless this metaslab was created with older software and
2415 * has not yet been converted to use segment-based weight, we
2416 * expect the new weight to be better or equal to the weight
2417 * that the metaslab had while it was not loaded. This is
2418 * because the old weight does not take into account the
2419 * consolidation of adjacent segments between TXGs. [see
2420 * comment for ms_synchist and ms_deferhist[] for more info]
2422 uint64_t weight = msp->ms_weight;
2423 uint64_t max_size = msp->ms_max_size;
2424 metaslab_recalculate_weight_and_sort(msp);
2425 if (!WEIGHT_IS_SPACEBASED(weight))
2426 ASSERT3U(weight, <=, msp->ms_weight);
2427 msp->ms_max_size = metaslab_largest_allocatable(msp);
2428 ASSERT3U(max_size, <=, msp->ms_max_size);
2429 hrtime_t load_end = gethrtime();
2430 msp->ms_load_time = load_end;
2431 zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2432 "ms_id %llu, smp_length %llu, "
2433 "unflushed_allocs %llu, unflushed_frees %llu, "
2434 "freed %llu, defer %llu + %llu, unloaded time %llu ms, "
2435 "loading_time %lld ms, ms_max_size %llu, "
2436 "max size error %lld, "
2437 "old_weight %llx, new_weight %llx",
2438 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2439 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2440 (u_longlong_t)msp->ms_id,
2441 (u_longlong_t)space_map_length(msp->ms_sm),
2442 (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
2443 (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
2444 (u_longlong_t)range_tree_space(msp->ms_freed),
2445 (u_longlong_t)range_tree_space(msp->ms_defer[0]),
2446 (u_longlong_t)range_tree_space(msp->ms_defer[1]),
2447 (longlong_t)((load_start - msp->ms_unload_time) / 1000000),
2448 (longlong_t)((load_end - load_start) / 1000000),
2449 (u_longlong_t)msp->ms_max_size,
2450 (u_longlong_t)msp->ms_max_size - max_size,
2451 (u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
2453 metaslab_verify_space(msp, spa_syncing_txg(spa));
2454 mutex_exit(&msp->ms_sync_lock);
2455 return (0);
2459 metaslab_load(metaslab_t *msp)
2461 ASSERT(MUTEX_HELD(&msp->ms_lock));
2464 * There may be another thread loading the same metaslab; if that's
2465 * the case, just wait until the other thread is done and return.
2467 metaslab_load_wait(msp);
2468 if (msp->ms_loaded)
2469 return (0);
2470 VERIFY(!msp->ms_loading);
2471 ASSERT(!msp->ms_condensing);
2474 * We set the loading flag BEFORE potentially dropping the lock to
2475 * wait for an ongoing flush (see ms_flushing below). This way other
2476 * threads know that there is already a thread that is loading this
2477 * metaslab.
2479 msp->ms_loading = B_TRUE;
2482 * Wait for any in-progress flushing to finish as we drop the ms_lock
2483 * both here (during space_map_load()) and in metaslab_flush() (when
2484 * we flush our changes to the ms_sm).
2486 if (msp->ms_flushing)
2487 metaslab_flush_wait(msp);
2490 * In case we were waiting for the metaslab to be flushed (and
2491 * temporarily dropped the ms_lock), make sure that no one else
2492 * loaded the metaslab in the meantime.
2494 ASSERT(!msp->ms_loaded);
2497 * If we're loading a metaslab in the normal class, consider evicting
2498 * another one to keep our memory usage under the limit defined by the
2499 * zfs_metaslab_mem_limit tunable.
2501 if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
2502 msp->ms_group->mg_class) {
2503 metaslab_potentially_evict(msp->ms_group->mg_class);
2506 int error = metaslab_load_impl(msp);
2508 ASSERT(MUTEX_HELD(&msp->ms_lock));
2509 msp->ms_loading = B_FALSE;
2510 cv_broadcast(&msp->ms_load_cv);
2512 return (error);
2515 void
2516 metaslab_unload(metaslab_t *msp)
2518 ASSERT(MUTEX_HELD(&msp->ms_lock));
2521 * This can happen if a metaslab is selected for eviction (in
2522 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2523 * metaslab_class_evict_old).
2525 if (!msp->ms_loaded)
2526 return;
2528 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
2529 msp->ms_loaded = B_FALSE;
2530 msp->ms_unload_time = gethrtime();
2532 msp->ms_activation_weight = 0;
2533 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
2535 if (msp->ms_group != NULL) {
2536 metaslab_class_t *mc = msp->ms_group->mg_class;
2537 multilist_sublist_t *mls =
2538 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2539 if (multilist_link_active(&msp->ms_class_txg_node))
2540 multilist_sublist_remove(mls, msp);
2541 multilist_sublist_unlock(mls);
2543 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2544 zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2545 "ms_id %llu, weight %llx, "
2546 "selected txg %llu (%llu ms ago), alloc_txg %llu, "
2547 "loaded %llu ms ago, max_size %llu",
2548 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2549 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2550 (u_longlong_t)msp->ms_id,
2551 (u_longlong_t)msp->ms_weight,
2552 (u_longlong_t)msp->ms_selected_txg,
2553 (u_longlong_t)(msp->ms_unload_time -
2554 msp->ms_selected_time) / 1000 / 1000,
2555 (u_longlong_t)msp->ms_alloc_txg,
2556 (u_longlong_t)(msp->ms_unload_time -
2557 msp->ms_load_time) / 1000 / 1000,
2558 (u_longlong_t)msp->ms_max_size);
2562 * We explicitly recalculate the metaslab's weight based on its space
2563 * map (as it is now not loaded). We want unloaded metaslabs to always
2564 * have their weights calculated from the space map histograms, while
2565 * loaded ones have it calculated from their in-core range tree
2566 * [see metaslab_load()]. This way, the weight reflects the information
2567 * available in-core, whether it is loaded or not.
2569 * If ms_group is NULL, we came here from metaslab_fini(),
2570 * at which point it doesn't make sense for us to do the recalculation
2571 * and the sorting.
2573 if (msp->ms_group != NULL)
2574 metaslab_recalculate_weight_and_sort(msp);
2578 * We want to optimize the memory use of the per-metaslab range
2579 * trees. To do this, we store the segments in the range trees in
2580 * units of sectors, zero-indexing from the start of the metaslab. If
2581 * the vdev_ms_shift minus the vdev_ashift is less than 32, we can store
2582 * the ranges using two uint32_ts, rather than two uint64_ts.
2584 range_seg_type_t
2585 metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
2586 uint64_t *start, uint64_t *shift)
2588 if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
2589 !zfs_metaslab_force_large_segs) {
2590 *shift = vdev->vdev_ashift;
2591 *start = msp->ms_start;
2592 return (RANGE_SEG32);
2593 } else {
2594 *shift = 0;
2595 *start = 0;
2596 return (RANGE_SEG64);
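/*
 * Example (hypothetical geometry): a vdev with vdev_ms_shift = 34 (16G
 * metaslabs) and vdev_ashift = 12 has 34 - 12 = 22 < 32, so its
 * metaslabs use RANGE_SEG32 with start = ms_start and shift = 12, i.e.
 * offsets are stored as 32-bit sector counts relative to the metaslab,
 * unless zfs_metaslab_force_large_segs forces RANGE_SEG64.
 */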
2600 void
2601 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
2603 ASSERT(MUTEX_HELD(&msp->ms_lock));
2604 metaslab_class_t *mc = msp->ms_group->mg_class;
2605 multilist_sublist_t *mls =
2606 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2607 if (multilist_link_active(&msp->ms_class_txg_node))
2608 multilist_sublist_remove(mls, msp);
2609 msp->ms_selected_txg = txg;
2610 msp->ms_selected_time = gethrtime();
2611 multilist_sublist_insert_tail(mls, msp);
2612 multilist_sublist_unlock(mls);
2615 void
2616 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
2617 int64_t defer_delta, int64_t space_delta)
2619 vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
2621 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
2622 ASSERT(vd->vdev_ms_count != 0);
2624 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
2625 vdev_deflated_space(vd, space_delta));
2629 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
2630 uint64_t txg, metaslab_t **msp)
2632 vdev_t *vd = mg->mg_vd;
2633 spa_t *spa = vd->vdev_spa;
2634 objset_t *mos = spa->spa_meta_objset;
2635 metaslab_t *ms;
2636 int error;
2638 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2639 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2640 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2641 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2642 cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2643 multilist_link_init(&ms->ms_class_txg_node);
2645 ms->ms_id = id;
2646 ms->ms_start = id << vd->vdev_ms_shift;
2647 ms->ms_size = 1ULL << vd->vdev_ms_shift;
2648 ms->ms_allocator = -1;
2649 ms->ms_new = B_TRUE;
2651 vdev_ops_t *ops = vd->vdev_ops;
2652 if (ops->vdev_op_metaslab_init != NULL)
2653 ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
2656 * We only open space map objects that already exist. All others
2657 * will be opened when we finally allocate an object for it. For
2658 * readonly pools there is no need to open the space map object.
2660 * Note:
2661 * When called from vdev_expand(), we can't call into the DMU as
2662 * we are holding the spa_config_lock as a writer and we would
2663 * deadlock [see relevant comment in vdev_metaslab_init()]. In
2664 * that case, the object parameter is zero though, so we won't
2665 * call into the DMU.
2667 if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
2668 !spa->spa_read_spacemaps)) {
2669 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2670 ms->ms_size, vd->vdev_ashift);
2672 if (error != 0) {
2673 kmem_free(ms, sizeof (metaslab_t));
2674 return (error);
2677 ASSERT(ms->ms_sm != NULL);
2678 ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2681 uint64_t shift, start;
2682 range_seg_type_t type =
2683 metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
2685 ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
2686 for (int t = 0; t < TXG_SIZE; t++) {
2687 ms->ms_allocating[t] = range_tree_create(NULL, type,
2688 NULL, start, shift);
2690 ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
2691 ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
2692 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2693 ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
2694 start, shift);
2696 ms->ms_checkpointing =
2697 range_tree_create(NULL, type, NULL, start, shift);
2698 ms->ms_unflushed_allocs =
2699 range_tree_create(NULL, type, NULL, start, shift);
2701 metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2702 mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
2703 mrap->mra_floor_shift = metaslab_by_size_min_shift;
2704 ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
2705 type, mrap, start, shift);
2707 ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
2709 metaslab_group_add(mg, ms);
2710 metaslab_set_fragmentation(ms, B_FALSE);
2713 * If we're opening an existing pool (txg == 0) or creating
2714 * a new one (txg == TXG_INITIAL), all space is available now.
2715 * If we're adding space to an existing pool, the new space
2716 * does not become available until after this txg has synced.
2717 * The metaslab's weight will also be initialized when we sync
2718 * out this txg. This ensures that we don't attempt to allocate
2719 * from it before we have initialized it completely.
2721 if (txg <= TXG_INITIAL) {
2722 metaslab_sync_done(ms, 0);
2723 metaslab_space_update(vd, mg->mg_class,
2724 metaslab_allocated_space(ms), 0, 0);
2727 if (txg != 0) {
2728 vdev_dirty(vd, 0, NULL, txg);
2729 vdev_dirty(vd, VDD_METASLAB, ms, txg);
2732 *msp = ms;
2734 return (0);
2737 static void
2738 metaslab_fini_flush_data(metaslab_t *msp)
2740 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2742 if (metaslab_unflushed_txg(msp) == 0) {
2743 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
2744 ==, NULL);
2745 return;
2747 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2749 mutex_enter(&spa->spa_flushed_ms_lock);
2750 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2751 mutex_exit(&spa->spa_flushed_ms_lock);
2753 spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2754 spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
2755 metaslab_unflushed_dirty(msp));
2758 uint64_t
2759 metaslab_unflushed_changes_memused(metaslab_t *ms)
2761 return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
2762 range_tree_numsegs(ms->ms_unflushed_frees)) *
2763 ms->ms_unflushed_allocs->rt_root.bt_elem_size);
2766 void
2767 metaslab_fini(metaslab_t *msp)
2769 metaslab_group_t *mg = msp->ms_group;
2770 vdev_t *vd = mg->mg_vd;
2771 spa_t *spa = vd->vdev_spa;
2773 metaslab_fini_flush_data(msp);
2775 metaslab_group_remove(mg, msp);
2777 mutex_enter(&msp->ms_lock);
2778 VERIFY(msp->ms_group == NULL);
2781 * If this metaslab hasn't been through metaslab_sync_done() yet its
2782 * space hasn't been accounted for in its vdev and doesn't need to be
2783 * subtracted.
2785 if (!msp->ms_new) {
2786 metaslab_space_update(vd, mg->mg_class,
2787 -metaslab_allocated_space(msp), 0, -msp->ms_size);
2790 space_map_close(msp->ms_sm);
2791 msp->ms_sm = NULL;
2793 metaslab_unload(msp);
2795 range_tree_destroy(msp->ms_allocatable);
2796 range_tree_destroy(msp->ms_freeing);
2797 range_tree_destroy(msp->ms_freed);
2799 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2800 metaslab_unflushed_changes_memused(msp));
2801 spa->spa_unflushed_stats.sus_memused -=
2802 metaslab_unflushed_changes_memused(msp);
2803 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2804 range_tree_destroy(msp->ms_unflushed_allocs);
2805 range_tree_destroy(msp->ms_checkpointing);
2806 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2807 range_tree_destroy(msp->ms_unflushed_frees);
2809 for (int t = 0; t < TXG_SIZE; t++) {
2810 range_tree_destroy(msp->ms_allocating[t]);
2812 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2813 range_tree_destroy(msp->ms_defer[t]);
2815 ASSERT0(msp->ms_deferspace);
2817 for (int t = 0; t < TXG_SIZE; t++)
2818 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2820 range_tree_vacate(msp->ms_trim, NULL, NULL);
2821 range_tree_destroy(msp->ms_trim);
2823 mutex_exit(&msp->ms_lock);
2824 cv_destroy(&msp->ms_load_cv);
2825 cv_destroy(&msp->ms_flush_cv);
2826 mutex_destroy(&msp->ms_lock);
2827 mutex_destroy(&msp->ms_sync_lock);
2828 ASSERT3U(msp->ms_allocator, ==, -1);
2830 kmem_free(msp, sizeof (metaslab_t));
2833 #define FRAGMENTATION_TABLE_SIZE 17
2836 * This table defines a segment size based fragmentation metric that will
2837 * allow each metaslab to derive its own fragmentation value. This is done
2838 * by calculating the space in each bucket of the spacemap histogram and
2839 * multiplying that by the fragmentation metric in this table. Doing
2840 * this for all buckets and dividing it by the total amount of free
2841 * space in this metaslab (i.e. the total free space in all buckets) gives
2842 * us the fragmentation metric. This means that a high fragmentation metric
2843 * equates to most of the free space being comprised of small segments.
2844 * Conversely, if the metric is low, then most of the free space is in
2845 * large segments. A 10% change in fragmentation equates to approximately
2846 * double the number of segments.
2848 * This table defines 0% fragmented space using 16MB segments. Testing has
2849 * shown that segments that are greater than or equal to 16MB do not suffer
2850 * from drastic performance problems. Using this value, we derive the rest
2851 * of the table. Since the fragmentation value is never stored on disk, it
2852 * is possible to change these calculations in the future.
2854 static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
2855 100, /* 512B */
2856 100, /* 1K */
2857 98, /* 2K */
2858 95, /* 4K */
2859 90, /* 8K */
2860 80, /* 16K */
2861 70, /* 32K */
2862 60, /* 64K */
2863 50, /* 128K */
2864 40, /* 256K */
2865 30, /* 512K */
2866 20, /* 1M */
2867 15, /* 2M */
2868 10, /* 4M */
2869 5, /* 8M */
2870 0 /* 16M */
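/*
 * Worked example of the metric, assuming sm_shift == SPA_MINBLOCKSHIFT
 * (so the table index equals the histogram index): a metaslab whose free
 * space is 512K of 8K segments (bucket 4, factor 90) plus a single 512K
 * segment (bucket 10, factor 30) has
 * fragmentation = (512K * 90 + 512K * 30) / 1024K = 60.
 */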
2874 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2875 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2876 * been upgraded and does not support this metric. Otherwise, the return
2877 * value should be in the range [0, 100].
2879 static void
2880 metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
2882 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2883 uint64_t fragmentation = 0;
2884 uint64_t total = 0;
2885 boolean_t feature_enabled = spa_feature_is_enabled(spa,
2886 SPA_FEATURE_SPACEMAP_HISTOGRAM);
2888 if (!feature_enabled) {
2889 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2890 return;
2894 * A null space map means that the entire metaslab is free
2895 * and thus is not fragmented.
2897 if (msp->ms_sm == NULL) {
2898 msp->ms_fragmentation = 0;
2899 return;
2903 * If this metaslab's space map has not been upgraded, flag it
2904 * so that we upgrade next time we encounter it.
2906 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2907 uint64_t txg = spa_syncing_txg(spa);
2908 vdev_t *vd = msp->ms_group->mg_vd;
2911 * If we've reached the final dirty txg, then we must
2912 * be shutting down the pool. We don't want to dirty
2913 * any data past this point so skip setting the condense
2914 * flag. We can retry this action the next time the pool
2915 * is imported. We also skip marking this metaslab for
2916 * condensing if the caller has explicitly set nodirty.
2918 if (!nodirty &&
2919 spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2920 msp->ms_condense_wanted = B_TRUE;
2921 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2922 zfs_dbgmsg("txg %llu, requesting force condense: "
2923 "ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
2924 (u_longlong_t)msp->ms_id,
2925 (u_longlong_t)vd->vdev_id);
2927 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2928 return;
2931 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2932 uint64_t space = 0;
2933 uint8_t shift = msp->ms_sm->sm_shift;
2935 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
2936 FRAGMENTATION_TABLE_SIZE - 1);
2938 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
2939 continue;
2941 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
2942 total += space;
2944 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
2945 fragmentation += space * zfs_frag_table[idx];
2948 if (total > 0)
2949 fragmentation /= total;
2950 ASSERT3U(fragmentation, <=, 100);
2952 msp->ms_fragmentation = fragmentation;
2956 * Compute a weight -- a selection preference value -- for the given metaslab.
2957 * This is based on the amount of free space, the level of fragmentation,
2958 * the LBA range, and whether the metaslab is loaded.
2960 static uint64_t
2961 metaslab_space_weight(metaslab_t *msp)
2963 metaslab_group_t *mg = msp->ms_group;
2964 vdev_t *vd = mg->mg_vd;
2965 uint64_t weight, space;
2967 ASSERT(MUTEX_HELD(&msp->ms_lock));
2970 * The baseline weight is the metaslab's free space.
2972 space = msp->ms_size - metaslab_allocated_space(msp);
2974 if (metaslab_fragmentation_factor_enabled &&
2975 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
2977 * Use the fragmentation information to inversely scale
2978 * down the baseline weight. We need to ensure that we
2979 * don't exclude this metaslab completely when it's 100%
2980 * fragmented. To avoid this we reduce the fragmented value
2981 * by 1.
2983 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
2986 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
2987 * this metaslab again. The fragmentation metric may have
2988 * decreased the space to something smaller than
2989 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
2990 * so that we can consume any remaining space.
2992 if (space > 0 && space < SPA_MINBLOCKSIZE)
2993 space = SPA_MINBLOCKSIZE;
2995 weight = space;
2998 * Modern disks have uniform bit density and constant angular velocity.
2999 * Therefore, the outer recording zones are faster (higher bandwidth)
3000 * than the inner zones by the ratio of outer to inner track diameter,
3001 * which is typically around 2:1. We account for this by assigning
3002 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
3003 * In effect, this means that we'll select the metaslab with the most
3004 * free bandwidth rather than simply the one with the most free space.
3006 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
3007 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
3008 ASSERT(weight >= space && weight <= 2 * space);
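/*
 * Example with made-up numbers: on a 200-metaslab vdev, ms_id 0 keeps
 * weight = 2 * space, ms_id 100 gets 2 * space - (100 * space) / 200 =
 * 1.5 * space, and ms_id 199 gets roughly 1.005 * space, biasing
 * allocations toward the outer (faster) region of the disk.
 */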
3012 * If this metaslab is one we're actively using, adjust its
3013 * weight to make it preferable to any inactive metaslab so
3014 * we'll polish it off. If the fragmentation on this metaslab
3015 * has exceeded our threshold, then don't mark it active.
3017 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
3018 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
3019 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
3022 WEIGHT_SET_SPACEBASED(weight);
3023 return (weight);
3027 * Return the weight of the specified metaslab, according to the segment-based
3028 * weighting algorithm. The metaslab must be loaded. This function can
3029 * be called within a sync pass since it relies only on the metaslab's
3030 * range tree which is always accurate when the metaslab is loaded.
3032 static uint64_t
3033 metaslab_weight_from_range_tree(metaslab_t *msp)
3035 uint64_t weight = 0;
3036 uint32_t segments = 0;
3038 ASSERT(msp->ms_loaded);
3040 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
3041 i--) {
3042 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
3043 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3045 segments <<= 1;
3046 segments += msp->ms_allocatable->rt_histogram[i];
3049 * The range tree provides more precision than the space map
3050 * and must be downgraded so that all values fit within the
3051 * space map's histogram. This allows us to compare loaded
3052 * vs. unloaded metaslabs to determine which metaslab is
3053 * considered "best".
3055 if (i > max_idx)
3056 continue;
3058 if (segments != 0) {
3059 WEIGHT_SET_COUNT(weight, segments);
3060 WEIGHT_SET_INDEX(weight, i);
3061 WEIGHT_SET_ACTIVE(weight, 0);
3062 break;
3065 return (weight);
3069 * Calculate the weight based on the on-disk histogram. Should be applied
3070 * only to unloaded metaslabs (i.e. no incoming allocations) in order to
3071 * give results consistent with the on-disk state.
3073 static uint64_t
3074 metaslab_weight_from_spacemap(metaslab_t *msp)
3076 space_map_t *sm = msp->ms_sm;
3077 ASSERT(!msp->ms_loaded);
3078 ASSERT(sm != NULL);
3079 ASSERT3U(space_map_object(sm), !=, 0);
3080 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3083 * Create a joint histogram from all the segments that have made
3084 * it to the metaslab's space map histogram, that are not yet
3085 * available for allocation because they are still in the freeing
3086 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3087 * these segments from the space map's histogram to get a more
3088 * accurate weight.
3090 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
3091 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
3092 deferspace_histogram[i] += msp->ms_synchist[i];
3093 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3094 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3095 deferspace_histogram[i] += msp->ms_deferhist[t][i];
3099 uint64_t weight = 0;
3100 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
3101 ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
3102 deferspace_histogram[i]);
3103 uint64_t count =
3104 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
3105 if (count != 0) {
3106 WEIGHT_SET_COUNT(weight, count);
3107 WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
3108 WEIGHT_SET_ACTIVE(weight, 0);
3109 break;
3112 return (weight);
3116 * Compute a segment-based weight for the specified metaslab. The weight
3117 * is determined by the highest bucket in the histogram. The information
3118 * for the highest bucket is encoded into the weight value.
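/*
 * Rough example of the encoding (see the WEIGHT_SET_* macros in
 * metaslab_impl.h): a metaslab whose largest free segments sit in
 * histogram bucket 24 (16M-32M) with 3 such segments gets
 * WEIGHT_SET_INDEX(weight, 24) and WEIGHT_SET_COUNT(weight, 3). Because
 * the index occupies higher-order bits than the count, weights with a
 * larger index always sort above weights with a smaller one.
 */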
3120 static uint64_t
3121 metaslab_segment_weight(metaslab_t *msp)
3123 metaslab_group_t *mg = msp->ms_group;
3124 uint64_t weight = 0;
3125 uint8_t shift = mg->mg_vd->vdev_ashift;
3127 ASSERT(MUTEX_HELD(&msp->ms_lock));
3130 * The metaslab is completely free.
3132 if (metaslab_allocated_space(msp) == 0) {
3133 int idx = highbit64(msp->ms_size) - 1;
3134 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3136 if (idx < max_idx) {
3137 WEIGHT_SET_COUNT(weight, 1ULL);
3138 WEIGHT_SET_INDEX(weight, idx);
3139 } else {
3140 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
3141 WEIGHT_SET_INDEX(weight, max_idx);
3143 WEIGHT_SET_ACTIVE(weight, 0);
3144 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
3145 return (weight);
3148 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3151 * If the metaslab is fully allocated then just make the weight 0.
3153 if (metaslab_allocated_space(msp) == msp->ms_size)
3154 return (0);
3156 * If the metaslab is already loaded, then use the range tree to
3157 * determine the weight. Otherwise, we rely on the space map information
3158 * to generate the weight.
3160 if (msp->ms_loaded) {
3161 weight = metaslab_weight_from_range_tree(msp);
3162 } else {
3163 weight = metaslab_weight_from_spacemap(msp);
3167 * If the metaslab was active the last time we calculated its weight
3168 * then keep it active. We want to consume the entire region that
3169 * is associated with this weight.
3171 if (msp->ms_activation_weight != 0 && weight != 0)
3172 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
3173 return (weight);
3177 * Determine if we should attempt to allocate from this metaslab. If the
3178 * metaslab is loaded, then we can determine if the desired allocation
3179 * can be satisfied by looking at the size of the maximum free segment
3180 * on that metaslab. Otherwise, we make our decision based on the metaslab's
3181 * weight. For segment-based weighting we can determine the maximum
3182 * allocation based on the index encoded in its value. For space-based
3183 * weights we rely on the entire weight (excluding the weight-type bit).
3185 static boolean_t
3186 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
3189 * If the metaslab is loaded, ms_max_size is definitive and we can use
3190 * the fast check. If it's not, the ms_max_size is a lower bound (once
3191 * set), and we should use the fast check as long as we're not in
3192 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec
3193 * seconds since the metaslab was unloaded.
3195 if (msp->ms_loaded ||
3196 (msp->ms_max_size != 0 && !try_hard && gethrtime() <
3197 msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
3198 return (msp->ms_max_size >= asize);
3200 boolean_t should_allocate;
3201 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3203 * The metaslab segment weight indicates segments in the
3204 * range [2^i, 2^(i+1)), where i is the index in the weight.
3205 * Since the asize might be in the middle of the range, we
3206 * should attempt the allocation if asize < 2^(i+1).
3208 should_allocate = (asize <
3209 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
3210 } else {
3211 should_allocate = (asize <=
3212 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
3215 return (should_allocate);
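/*
 * Example of the two rules above: a segment-based weight with index 20
 * (largest free segments in [1M, 2M)) lets us attempt any asize below
 * 2M, while a space-based weight representing 3M of adjusted free space
 * allows attempts up to an asize of 3M.
 */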
3218 static uint64_t
3219 metaslab_weight(metaslab_t *msp, boolean_t nodirty)
3221 vdev_t *vd = msp->ms_group->mg_vd;
3222 spa_t *spa = vd->vdev_spa;
3223 uint64_t weight;
3225 ASSERT(MUTEX_HELD(&msp->ms_lock));
3227 metaslab_set_fragmentation(msp, nodirty);
3230 * Update the maximum size. If the metaslab is loaded, this will
3231 * ensure that we get an accurate maximum size if newly freed space
3232 * has been added back into the free tree. If the metaslab is
3233 * unloaded, we check if there's a larger free segment in the
3234 * unflushed frees. This is a lower bound on the largest allocatable
3235 * segment size. Coalescing of adjacent entries may reveal larger
3236 * allocatable segments, but we aren't aware of those until loading
3237 * the space map into a range tree.
3239 if (msp->ms_loaded) {
3240 msp->ms_max_size = metaslab_largest_allocatable(msp);
3241 } else {
3242 msp->ms_max_size = MAX(msp->ms_max_size,
3243 metaslab_largest_unflushed_free(msp));
3247 * Segment-based weighting requires space map histogram support.
3249 if (zfs_metaslab_segment_weight_enabled &&
3250 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
3251 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
3252 sizeof (space_map_phys_t))) {
3253 weight = metaslab_segment_weight(msp);
3254 } else {
3255 weight = metaslab_space_weight(msp);
3257 return (weight);
3260 void
3261 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
3263 ASSERT(MUTEX_HELD(&msp->ms_lock));
3265 /* note: we preserve the mask (e.g. indication of primary, etc..) */
3266 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3267 metaslab_group_sort(msp->ms_group, msp,
3268 metaslab_weight(msp, B_FALSE) | was_active);
3271 static int
3272 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3273 int allocator, uint64_t activation_weight)
3275 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3276 ASSERT(MUTEX_HELD(&msp->ms_lock));
3279 * If we're activating for the claim code, we don't want to actually
3280 * set the metaslab up for a specific allocator.
3282 if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3283 ASSERT0(msp->ms_activation_weight);
3284 msp->ms_activation_weight = msp->ms_weight;
3285 metaslab_group_sort(mg, msp, msp->ms_weight |
3286 activation_weight);
3287 return (0);
3290 metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3291 &mga->mga_primary : &mga->mga_secondary);
3293 mutex_enter(&mg->mg_lock);
3294 if (*mspp != NULL) {
3295 mutex_exit(&mg->mg_lock);
3296 return (EEXIST);
3299 *mspp = msp;
3300 ASSERT3S(msp->ms_allocator, ==, -1);
3301 msp->ms_allocator = allocator;
3302 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3304 ASSERT0(msp->ms_activation_weight);
3305 msp->ms_activation_weight = msp->ms_weight;
3306 metaslab_group_sort_impl(mg, msp,
3307 msp->ms_weight | activation_weight);
3308 mutex_exit(&mg->mg_lock);
3310 return (0);
3313 static int
3314 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3316 ASSERT(MUTEX_HELD(&msp->ms_lock));
3319 * The current metaslab is already activated for us so there
3320 * is nothing to do. Being already activated, though, doesn't mean
3321 * that this metaslab is activated for our allocator or our
3322 * requested activation weight. The metaslab could have started
3323 * as an active one for our allocator but changed allocators
3324 * while we were waiting to grab its ms_lock or we stole it
3325 * [see find_valid_metaslab()]. This means that there is a
3326 * possibility of passivating a metaslab of another allocator
3327 * or from a different activation mask, from this thread.
3329 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3330 ASSERT(msp->ms_loaded);
3331 return (0);
3334 int error = metaslab_load(msp);
3335 if (error != 0) {
3336 metaslab_group_sort(msp->ms_group, msp, 0);
3337 return (error);
3341 * When entering metaslab_load() we may have dropped the
3342 * ms_lock because we were loading this metaslab, or we
3343 * were waiting for another thread to load it for us. In
3344 * that scenario, we recheck the weight of the metaslab
3345 * to see if it was activated by another thread.
3347 * If the metaslab was activated for another allocator or
3348 * it was activated with a different activation weight (e.g.
3349 * we wanted to make it a primary but it was activated as
3350 * secondary) we return error (EBUSY).
3352 * If the metaslab was activated for the same allocator
3353 * and requested activation mask, skip activating it.
3355 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3356 if (msp->ms_allocator != allocator)
3357 return (EBUSY);
3359 if ((msp->ms_weight & activation_weight) == 0)
3360 return (SET_ERROR(EBUSY));
3362 EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
3363 msp->ms_primary);
3364 return (0);
3368 * If the metaslab has literally 0 space, it will have weight 0. In
3369 * that case, don't bother activating it. This can happen if the
3370 * metaslab had space during find_valid_metaslab, but another thread
3371 * loaded it and used all that space while we were waiting to grab the
3372 * lock.
3374 if (msp->ms_weight == 0) {
3375 ASSERT0(range_tree_space(msp->ms_allocatable));
3376 return (SET_ERROR(ENOSPC));
3379 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
3380 allocator, activation_weight)) != 0) {
3381 return (error);
3384 ASSERT(msp->ms_loaded);
3385 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
3387 return (0);
3390 static void
3391 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3392 uint64_t weight)
3394 ASSERT(MUTEX_HELD(&msp->ms_lock));
3395 ASSERT(msp->ms_loaded);
3397 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
3398 metaslab_group_sort(mg, msp, weight);
3399 return;
3402 mutex_enter(&mg->mg_lock);
3403 ASSERT3P(msp->ms_group, ==, mg);
3404 ASSERT3S(0, <=, msp->ms_allocator);
3405 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
3407 metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
3408 if (msp->ms_primary) {
3409 ASSERT3P(mga->mga_primary, ==, msp);
3410 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
3411 mga->mga_primary = NULL;
3412 } else {
3413 ASSERT3P(mga->mga_secondary, ==, msp);
3414 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
3415 mga->mga_secondary = NULL;
3417 msp->ms_allocator = -1;
3418 metaslab_group_sort_impl(mg, msp, weight);
3419 mutex_exit(&mg->mg_lock);
3422 static void
3423 metaslab_passivate(metaslab_t *msp, uint64_t weight)
3425 uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
3428 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
3429 * this metaslab again. In that case, it had better be empty,
3430 * or we would be leaving space on the table.
3432 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3433 size >= SPA_MINBLOCKSIZE ||
3434 range_tree_space(msp->ms_allocatable) == 0);
3435 ASSERT0(weight & METASLAB_ACTIVE_MASK);
3437 ASSERT(msp->ms_activation_weight != 0);
3438 msp->ms_activation_weight = 0;
3439 metaslab_passivate_allocator(msp->ms_group, msp, weight);
3440 ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3444 * Segment-based metaslabs are activated once and remain active until
3445 * we either fail an allocation attempt (similar to space-based metaslabs)
3446 * or have exhausted the free space in zfs_metaslab_switch_threshold
3447 * buckets since the metaslab was activated. This function checks to see
3448 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3449 * metaslab and passivates it proactively. This will allow us to select a
3450 * metaslab with a larger contiguous region, if any, remaining within this
3451 * metaslab group. If we're in sync pass > 1, then we continue using this
3452 * metaslab so that we don't dirty more blocks and cause more sync passes.
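/*
 * For example, with zfs_metaslab_switch_threshold at, say, 2, a metaslab
 * activated while its largest free segments were in bucket 20 (1M-2M)
 * is passivated once the recomputed weight index drops to 18 or lower,
 * i.e. once its largest remaining segments are below 512K.
 */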
3454 static void
3455 metaslab_segment_may_passivate(metaslab_t *msp)
3457 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3459 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3460 return;
3463 * Since we are in the middle of a sync pass, the most accurate
3464 * information that is accessible to us is the in-core range tree
3465 * histogram; calculate the new weight based on that information.
3467 uint64_t weight = metaslab_weight_from_range_tree(msp);
3468 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
3469 int current_idx = WEIGHT_GET_INDEX(weight);
3471 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
3472 metaslab_passivate(msp, weight);
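/*
 * Illustrative sketch, not from the original source: the proactive
 * passivation above reduces to a comparison of weight-histogram bucket
 * indices. The hypothetical helper below (names invented here) restates
 * that check on plain integers.
 */
#if 0	/* illustrative only */
static boolean_t
example_should_switch(int activation_idx, int current_idx,
    int switch_threshold)
{
	/*
	 * e.g. activated at bucket 20 with a threshold of 2: keep using
	 * the metaslab until its largest free bucket drops to 18 or less.
	 */
	return (current_idx <= activation_idx - switch_threshold);
}
#endif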
3475 static void
3476 metaslab_preload(void *arg)
3478 metaslab_t *msp = arg;
3479 metaslab_class_t *mc = msp->ms_group->mg_class;
3480 spa_t *spa = mc->mc_spa;
3481 fstrans_cookie_t cookie = spl_fstrans_mark();
3483 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
3485 mutex_enter(&msp->ms_lock);
3486 (void) metaslab_load(msp);
3487 metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
3488 mutex_exit(&msp->ms_lock);
3489 spl_fstrans_unmark(cookie);
3492 static void
3493 metaslab_group_preload(metaslab_group_t *mg)
3495 spa_t *spa = mg->mg_vd->vdev_spa;
3496 metaslab_t *msp;
3497 avl_tree_t *t = &mg->mg_metaslab_tree;
3498 int m = 0;
3500 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
3501 taskq_wait_outstanding(mg->mg_taskq, 0);
3502 return;
3505 mutex_enter(&mg->mg_lock);
3508 * Load the next potential metaslabs
3510 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
3511 ASSERT3P(msp->ms_group, ==, mg);
3514 * We preload only the maximum number of metaslabs specified
3515 * by metaslab_preload_limit. If a metaslab is being forced
3516 * to condense then we preload it too. This will ensure
3517 * that force condensing happens in the next txg.
3519 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
3520 continue;
3523 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
3524 msp, TQ_SLEEP) != TASKQID_INVALID);
3526 mutex_exit(&mg->mg_lock);
3530 * Determine if the space map's on-disk footprint is past our tolerance for
3531 * inefficiency. We would like to use the following criteria to make our
3532 * decision:
3534 * 1. Do not condense if the size of the space map object would dramatically
3535 * increase as a result of writing out the free space range tree.
3537 * 2. Condense if the on-disk space map representation is at least
3538 * zfs_condense_pct/100 times the size of the optimal representation
3539 * (e.g. with zfs_condense_pct = 110, condense once the on-disk size is
 * at least 1.1x the optimal size).
3541 * 3. Do not condense if the on-disk size of the space map does not actually
3542 * decrease.
3544 * Unfortunately, we cannot compute the on-disk size of the space map in this
3545 * context because we cannot accurately compute the effects of compression, etc.
3546 * Instead, we apply the heuristic described in the block comment for
3547 * zfs_metaslab_condense_block_threshold - we only condense if the space used
3548 * is greater than a threshold number of blocks.
3550 static boolean_t
3551 metaslab_should_condense(metaslab_t *msp)
3553 space_map_t *sm = msp->ms_sm;
3554 vdev_t *vd = msp->ms_group->mg_vd;
3555 uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
3557 ASSERT(MUTEX_HELD(&msp->ms_lock));
3558 ASSERT(msp->ms_loaded);
3559 ASSERT(sm != NULL);
3560 ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3563 * We always condense metaslabs that are empty and metaslabs for
3564 * which a condense request has been made.
3566 if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
3567 msp->ms_condense_wanted)
3568 return (B_TRUE);
3570 uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3571 uint64_t object_size = space_map_length(sm);
3572 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3573 msp->ms_allocatable, SM_NO_VDEVID);
3575 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3576 object_size > zfs_metaslab_condense_block_threshold * record_size);
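/*
 * Illustrative sketch, not from the original source: a self-contained
 * restatement of the two thresholds checked above, on plain integers
 * (the helper name is invented here). With the default zfs_condense_pct
 * of 200, an on-disk size of 3MB versus an optimal size of 1MB condenses,
 * provided it also exceeds the absolute block threshold.
 */
#if 0	/* illustrative only */
static boolean_t
example_should_condense(uint64_t object_size, uint64_t optimal_size,
    uint64_t record_size, uint64_t condense_pct, uint64_t block_threshold)
{
	return (object_size >= (optimal_size * condense_pct / 100) &&
	    object_size > block_threshold * record_size);
}
#endif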
3580 * Condense the on-disk space map representation to its minimized form.
3581 * The minimized form consists of a small number of allocations followed
3582 * by the entries of the free range tree (ms_allocatable). The condensed
3583 * spacemap contains all the entries of previous TXGs (including those in
3584 * the pool-wide log spacemaps; thus this is effectively a superset of
3585 * metaslab_flush()), but this TXG's entries still need to be written.
3587 static void
3588 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3590 range_tree_t *condense_tree;
3591 space_map_t *sm = msp->ms_sm;
3592 uint64_t txg = dmu_tx_get_txg(tx);
3593 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3595 ASSERT(MUTEX_HELD(&msp->ms_lock));
3596 ASSERT(msp->ms_loaded);
3597 ASSERT(msp->ms_sm != NULL);
3600 * In order to condense the space map, we need to change it so it
3601 * only describes which segments are currently allocated and free.
3603 * All the current free space resides in the ms_allocatable, all
3604 * the ms_defer trees, and all the ms_allocating trees. We ignore
3605 * ms_freed because it is empty because we're in sync pass 1. We
3606 * ignore ms_freeing because these changes are not yet reflected
3607 * in the spacemap (they will be written later this txg).
3609 * So to truncate the space map to represent all the entries of
3610 * previous TXGs we do the following:
3612 * 1] We create a range tree (condense tree) that is 100% empty.
3613 * 2] We add to it all segments found in the ms_defer trees
3614 * as those segments are marked as free in the original space
3615 * map. We do the same with the ms_allocating trees for the same
3616 * reason. Adding these segments should be a relatively
3617 * inexpensive operation since we expect these trees to have a
3618 * small number of nodes.
3619 * 3] We vacate any unflushed allocs, since they are not frees we
3620 * need to add to the condense tree. Then we vacate any
3621 * unflushed frees as they should already be part of ms_allocatable.
3622 * 4] At this point, we would ideally like to add all segments
3623 * in the ms_allocatable tree to the condense tree. This way
3624 * we would write all the entries of the condense tree as the
3625 * condensed space map, which would only contain freed
3626 * segments with everything else assumed to be allocated.
3628 * Doing so can be prohibitively expensive as ms_allocatable can
3629 * be large, and therefore computationally expensive to add to
3630 * the condense_tree. Instead we first sync out an entry marking
3631 * everything as allocated, then the condense_tree and then the
3632 * ms_allocatable, in the condensed space map. While this is not
3633 * optimal, it is typically close to optimal and more importantly
3634 * much cheaper to compute.
3636 * 5] Finally, as both of the unflushed trees were written to our
3637 * new and condensed metaslab space map, we basically flushed
3638 * all the unflushed changes to disk, thus we call
3639 * metaslab_flush_update().
3641 ASSERT3U(spa_sync_pass(spa), ==, 1);
3642 ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3644 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
3645 "spa %s, smp size %llu, segments %llu, forcing condense=%s",
3646 (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
3647 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
3648 spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
3649 (u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
3650 msp->ms_condense_wanted ? "TRUE" : "FALSE");
3652 msp->ms_condense_wanted = B_FALSE;
3654 range_seg_type_t type;
3655 uint64_t shift, start;
3656 type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3657 &start, &shift);
3659 condense_tree = range_tree_create(NULL, type, NULL, start, shift);
3661 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3662 range_tree_walk(msp->ms_defer[t],
3663 range_tree_add, condense_tree);
3666 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3667 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3668 range_tree_add, condense_tree);
3671 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3672 metaslab_unflushed_changes_memused(msp));
3673 spa->spa_unflushed_stats.sus_memused -=
3674 metaslab_unflushed_changes_memused(msp);
3675 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3676 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3679 * We're about to drop the metaslab's lock thus allowing other
3680 * consumers to change its content. Set the metaslab's ms_condensing
3681 * flag to ensure that allocations on this metaslab do not occur
3682 * while we're in the middle of committing it to disk. This is only
3683 * critical for ms_allocatable as all other range trees use per TXG
3684 * views of their content.
3686 msp->ms_condensing = B_TRUE;
3688 mutex_exit(&msp->ms_lock);
3689 uint64_t object = space_map_object(msp->ms_sm);
3690 space_map_truncate(sm,
3691 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3692 zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3695 * space_map_truncate() may have reallocated the spacemap object.
3696 * If so, update the vdev_ms_array.
3698 if (space_map_object(msp->ms_sm) != object) {
3699 object = space_map_object(msp->ms_sm);
3700 dmu_write(spa->spa_meta_objset,
3701 msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3702 msp->ms_id, sizeof (uint64_t), &object, tx);
3706 * Note:
3707 * When the log space map feature is enabled, each space map will
3708 * always have ALLOCS followed by FREES for each sync pass. This is
3709 * typically true even when the log space map feature is disabled,
3710 * except in the case where a metaslab goes through metaslab_sync()
3711 * and gets condensed. In that case the metaslab's space map will have
3712 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3713 * followed by FREES (due to space_map_write() in metaslab_sync()) for
3714 * sync pass 1.
3716 range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
3717 shift);
3718 range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
3719 space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3720 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3721 space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
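/*
 * Illustrative example, not from the original source: for a 1GB
 * metaslab where ms_allocatable holds [0, 256M) and the condense tree
 * holds [512M, 516M), the three writes above produce a space map that
 * replays as ALLOC [0, 1G), FREE [0, 256M), FREE [512M, 516M) -- i.e.
 * everything is allocated except the union of ms_allocatable and the
 * condense tree.
 */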
3723 range_tree_vacate(condense_tree, NULL, NULL);
3724 range_tree_destroy(condense_tree);
3725 range_tree_vacate(tmp_tree, NULL, NULL);
3726 range_tree_destroy(tmp_tree);
3727 mutex_enter(&msp->ms_lock);
3729 msp->ms_condensing = B_FALSE;
3730 metaslab_flush_update(msp, tx);
3733 static void
3734 metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
3736 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3737 ASSERT(spa_syncing_log_sm(spa) != NULL);
3738 ASSERT(msp->ms_sm != NULL);
3739 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3740 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3742 mutex_enter(&spa->spa_flushed_ms_lock);
3743 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3744 metaslab_set_unflushed_dirty(msp, B_TRUE);
3745 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3746 mutex_exit(&spa->spa_flushed_ms_lock);
3748 spa_log_sm_increment_current_mscount(spa);
3749 spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
3752 void
3753 metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
3755 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3756 ASSERT(spa_syncing_log_sm(spa) != NULL);
3757 ASSERT(msp->ms_sm != NULL);
3758 ASSERT(metaslab_unflushed_txg(msp) != 0);
3759 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3760 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3761 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3763 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3765 /* update metaslab's position in our flushing tree */
3766 uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3767 boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
3768 mutex_enter(&spa->spa_flushed_ms_lock);
3769 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3770 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3771 metaslab_set_unflushed_dirty(msp, dirty);
3772 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3773 mutex_exit(&spa->spa_flushed_ms_lock);
3775 /* update metaslab counts of spa_log_sm_t nodes */
3776 spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
3777 spa_log_sm_increment_current_mscount(spa);
3779 /* update log space map summary */
3780 spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
3781 ms_prev_flushed_dirty);
3782 spa_log_summary_add_flushed_metaslab(spa, dirty);
3784 /* cleanup obsolete logs if any */
3785 spa_cleanup_old_sm_logs(spa, tx);
3789 * Called when the metaslab has been flushed (its own spacemap now reflects
3790 * all the contents of the pool-wide spacemap log). Updates the metaslab's
3791 * metadata and any pool-wide related log space map data (e.g. summary,
3792 * obsolete logs, etc..) to reflect that.
3794 static void
3795 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
3797 metaslab_group_t *mg = msp->ms_group;
3798 spa_t *spa = mg->mg_vd->vdev_spa;
3800 ASSERT(MUTEX_HELD(&msp->ms_lock));
3802 ASSERT3U(spa_sync_pass(spa), ==, 1);
3805 * Just because a metaslab got flushed, that doesn't mean that
3806 * it will pass through metaslab_sync_done(). Thus, make sure to
3807 * update ms_synced_length here in case it doesn't.
3809 msp->ms_synced_length = space_map_length(msp->ms_sm);
3812 * We may end up here from metaslab_condense() without the
3813 * feature being active. In that case this is a no-op.
3815 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
3816 metaslab_unflushed_txg(msp) == 0)
3817 return;
3819 metaslab_unflushed_bump(msp, tx, B_FALSE);
3822 boolean_t
3823 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3825 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3827 ASSERT(MUTEX_HELD(&msp->ms_lock));
3828 ASSERT3U(spa_sync_pass(spa), ==, 1);
3829 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3831 ASSERT(msp->ms_sm != NULL);
3832 ASSERT(metaslab_unflushed_txg(msp) != 0);
3833 ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3836 * There is nothing wrong with flushing the same metaslab twice, as
3837 * this codepath should work in that case. However, the current
3838 * flushing scheme makes sure to avoid this situation as we would be
3839 * making all these calls without having anything meaningful to write
3840 * to disk. We assert this behavior here.
3842 ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3845 * We can not flush while loading, because then we would
3846 * not load the ms_unflushed_{allocs,frees}.
3848 if (msp->ms_loading)
3849 return (B_FALSE);
3851 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3852 metaslab_verify_weight_and_frag(msp);
3855 * Metaslab condensing is effectively flushing. Therefore if the
3856 * metaslab can be condensed we can just condense it instead of
3857 * flushing it.
3859 * Note that metaslab_condense() does call metaslab_flush_update()
3860 * so we can just return immediately after condensing. We also
3861 * don't need to care about setting ms_flushing or broadcasting
3862 * ms_flush_cv, even if we temporarily drop the ms_lock in
3863 * metaslab_condense(), as the metaslab is already loaded.
3865 if (msp->ms_loaded && metaslab_should_condense(msp)) {
3866 metaslab_group_t *mg = msp->ms_group;
3869 * For all histogram operations below refer to the
3870 * comments of metaslab_sync() where we follow a
3871 * similar procedure.
3873 metaslab_group_histogram_verify(mg);
3874 metaslab_class_histogram_verify(mg->mg_class);
3875 metaslab_group_histogram_remove(mg, msp);
3877 metaslab_condense(msp, tx);
3879 space_map_histogram_clear(msp->ms_sm);
3880 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3881 ASSERT(range_tree_is_empty(msp->ms_freed));
3882 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3883 space_map_histogram_add(msp->ms_sm,
3884 msp->ms_defer[t], tx);
3886 metaslab_aux_histograms_update(msp);
3888 metaslab_group_histogram_add(mg, msp);
3889 metaslab_group_histogram_verify(mg);
3890 metaslab_class_histogram_verify(mg->mg_class);
3892 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3895 * Since we recreated the histogram (and potentially
3896 * the ms_sm too while condensing) ensure that the
3897 * weight is updated too because we are not guaranteed
3898 * that this metaslab is dirty and will go through
3899 * metaslab_sync_done().
3901 metaslab_recalculate_weight_and_sort(msp);
3902 return (B_TRUE);
3905 msp->ms_flushing = B_TRUE;
3906 uint64_t sm_len_before = space_map_length(msp->ms_sm);
3908 mutex_exit(&msp->ms_lock);
3909 space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
3910 SM_NO_VDEVID, tx);
3911 space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
3912 SM_NO_VDEVID, tx);
3913 mutex_enter(&msp->ms_lock);
3915 uint64_t sm_len_after = space_map_length(msp->ms_sm);
3916 if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
3917 zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
3918 "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
3919 "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
3920 spa_name(spa),
3921 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
3922 (u_longlong_t)msp->ms_id,
3923 (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
3924 (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
3925 (u_longlong_t)(sm_len_after - sm_len_before));
3928 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3929 metaslab_unflushed_changes_memused(msp));
3930 spa->spa_unflushed_stats.sus_memused -=
3931 metaslab_unflushed_changes_memused(msp);
3932 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3933 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3935 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3936 metaslab_verify_weight_and_frag(msp);
3938 metaslab_flush_update(msp, tx);
3940 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3941 metaslab_verify_weight_and_frag(msp);
3943 msp->ms_flushing = B_FALSE;
3944 cv_broadcast(&msp->ms_flush_cv);
3945 return (B_TRUE);
3949 * Write a metaslab to disk in the context of the specified transaction group.
3951 void
3952 metaslab_sync(metaslab_t *msp, uint64_t txg)
3954 metaslab_group_t *mg = msp->ms_group;
3955 vdev_t *vd = mg->mg_vd;
3956 spa_t *spa = vd->vdev_spa;
3957 objset_t *mos = spa_meta_objset(spa);
3958 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
3959 dmu_tx_t *tx;
3961 ASSERT(!vd->vdev_ishole);
3964 * This metaslab has just been added so there's no work to do now.
3966 if (msp->ms_new) {
3967 ASSERT0(range_tree_space(alloctree));
3968 ASSERT0(range_tree_space(msp->ms_freeing));
3969 ASSERT0(range_tree_space(msp->ms_freed));
3970 ASSERT0(range_tree_space(msp->ms_checkpointing));
3971 ASSERT0(range_tree_space(msp->ms_trim));
3972 return;
3976 * Normally, we don't want to process a metaslab if there are no
3977 * allocations or frees to perform. However, if the metaslab is being
3978 * forced to condense, it's loaded and we're not beyond the final
3979 * dirty txg, we need to let it through. Not condensing beyond the
3980 * final dirty txg prevents an issue where metaslabs that need to be
3981 * condensed but were loaded for other reasons could cause a panic
3982 * here. By only checking the txg in that branch of the conditional,
3983 * we preserve the utility of the VERIFY statements in all other
3984 * cases.
3986 if (range_tree_is_empty(alloctree) &&
3987 range_tree_is_empty(msp->ms_freeing) &&
3988 range_tree_is_empty(msp->ms_checkpointing) &&
3989 !(msp->ms_loaded && msp->ms_condense_wanted &&
3990 txg <= spa_final_dirty_txg(spa)))
3991 return;
3994 VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
3997 * The only state that can actually be changing concurrently
3998 * with metaslab_sync() is the metaslab's ms_allocatable. No
3999 * other thread can be modifying this txg's alloc, freeing,
4000 * freed, or space_map_phys_t. We drop ms_lock whenever we
4001 * could call into the DMU, because the DMU can call down to
4002 * us (e.g. via zio_free()) at any time.
4004 * The spa_vdev_remove_thread() can be reading metaslab state
4005 * concurrently, and it is locked out by the ms_sync_lock.
4006 * Note that the ms_lock is insufficient for this, because it
4007 * is dropped by space_map_write().
4009 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
4012 * Generate a log space map if one doesn't exist already.
4014 spa_generate_syncing_log_sm(spa, tx);
4016 if (msp->ms_sm == NULL) {
4017 uint64_t new_object = space_map_alloc(mos,
4018 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
4019 zfs_metaslab_sm_blksz_with_log :
4020 zfs_metaslab_sm_blksz_no_log, tx);
4021 VERIFY3U(new_object, !=, 0);
4023 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
4024 msp->ms_id, sizeof (uint64_t), &new_object, tx);
4026 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
4027 msp->ms_start, msp->ms_size, vd->vdev_ashift));
4028 ASSERT(msp->ms_sm != NULL);
4030 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
4031 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
4032 ASSERT0(metaslab_allocated_space(msp));
4035 if (!range_tree_is_empty(msp->ms_checkpointing) &&
4036 vd->vdev_checkpoint_sm == NULL) {
4037 ASSERT(spa_has_checkpoint(spa));
4039 uint64_t new_object = space_map_alloc(mos,
4040 zfs_vdev_standard_sm_blksz, tx);
4041 VERIFY3U(new_object, !=, 0);
4043 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
4044 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
4045 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4048 * We save the space map object as an entry in vdev_top_zap
4049 * so it can be retrieved when the pool is reopened after an
4050 * export or through zdb.
4052 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
4053 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
4054 sizeof (new_object), 1, &new_object, tx));
4057 mutex_enter(&msp->ms_sync_lock);
4058 mutex_enter(&msp->ms_lock);
4061 * Note: metaslab_condense() clears the space map's histogram.
4062 * Therefore we must verify and remove this histogram before
4063 * condensing.
4065 metaslab_group_histogram_verify(mg);
4066 metaslab_class_histogram_verify(mg->mg_class);
4067 metaslab_group_histogram_remove(mg, msp);
4069 if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
4070 metaslab_should_condense(msp))
4071 metaslab_condense(msp, tx);
4074 * We'll be going to disk to sync our space accounting, thus we
4075 * drop the ms_lock during that time so allocations coming from
4076 * open-context (ZIL) for future TXGs do not block.
4078 mutex_exit(&msp->ms_lock);
4079 space_map_t *log_sm = spa_syncing_log_sm(spa);
4080 if (log_sm != NULL) {
4081 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4082 if (metaslab_unflushed_txg(msp) == 0)
4083 metaslab_unflushed_add(msp, tx);
4084 else if (!metaslab_unflushed_dirty(msp))
4085 metaslab_unflushed_bump(msp, tx, B_TRUE);
4087 space_map_write(log_sm, alloctree, SM_ALLOC,
4088 vd->vdev_id, tx);
4089 space_map_write(log_sm, msp->ms_freeing, SM_FREE,
4090 vd->vdev_id, tx);
4091 mutex_enter(&msp->ms_lock);
4093 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4094 metaslab_unflushed_changes_memused(msp));
4095 spa->spa_unflushed_stats.sus_memused -=
4096 metaslab_unflushed_changes_memused(msp);
4097 range_tree_remove_xor_add(alloctree,
4098 msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
4099 range_tree_remove_xor_add(msp->ms_freeing,
4100 msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
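/*
 * Illustrative note, not from the original source and hedged on the
 * behavior of range_tree_remove_xor_add(): the two calls above fold
 * this txg's allocs and frees into the unflushed trees while letting
 * overlaps cancel. For example, if alloctree holds [0, 1M) while
 * ms_unflushed_frees already holds [0, 512K), the overlapping
 * [0, 512K) would be dropped from the unflushed frees and only the
 * remainder [512K, 1M) recorded in ms_unflushed_allocs.
 */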
4101 spa->spa_unflushed_stats.sus_memused +=
4102 metaslab_unflushed_changes_memused(msp);
4103 } else {
4104 ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4106 space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
4107 SM_NO_VDEVID, tx);
4108 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
4109 SM_NO_VDEVID, tx);
4110 mutex_enter(&msp->ms_lock);
4113 msp->ms_allocated_space += range_tree_space(alloctree);
4114 ASSERT3U(msp->ms_allocated_space, >=,
4115 range_tree_space(msp->ms_freeing));
4116 msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
4118 if (!range_tree_is_empty(msp->ms_checkpointing)) {
4119 ASSERT(spa_has_checkpoint(spa));
4120 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4123 * Since we are doing writes to disk and the ms_checkpointing
4124 * tree won't be changing during that time, we drop the
4125 * ms_lock while writing to the checkpoint space map, for the
4126 * same reason mentioned above.
4128 mutex_exit(&msp->ms_lock);
4129 space_map_write(vd->vdev_checkpoint_sm,
4130 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
4131 mutex_enter(&msp->ms_lock);
4133 spa->spa_checkpoint_info.sci_dspace +=
4134 range_tree_space(msp->ms_checkpointing);
4135 vd->vdev_stat.vs_checkpoint_space +=
4136 range_tree_space(msp->ms_checkpointing);
4137 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
4138 -space_map_allocated(vd->vdev_checkpoint_sm));
4140 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
4143 if (msp->ms_loaded) {
4145 * When the space map is loaded, we have an accurate
4146 * histogram in the range tree. This gives us an opportunity
4147 * to bring the space map's histogram up-to-date so we clear
4148 * it first before updating it.
4150 space_map_histogram_clear(msp->ms_sm);
4151 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4154 * Since we've cleared the histogram we need to add back
4155 * any free space that has already been processed, plus
4156 * any deferred space. This allows the on-disk histogram
4157 * to accurately reflect all free space even if some space
4158 * is not yet available for allocation (i.e. deferred).
4160 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4163 * Add back any deferred free space that has not been
4164 * added back into the in-core free tree yet. This will
4165 * ensure that we don't end up with a space map histogram
4166 * that is completely empty unless the metaslab is fully
4167 * allocated.
4169 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4170 space_map_histogram_add(msp->ms_sm,
4171 msp->ms_defer[t], tx);
4176 * Always add the free space from this sync pass to the space
4177 * map histogram. We want to make sure that the on-disk histogram
4178 * accounts for all free space. If the space map is not loaded,
4179 * then we will lose some accuracy but will correct it the next
4180 * time we load the space map.
4182 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4183 metaslab_aux_histograms_update(msp);
4185 metaslab_group_histogram_add(mg, msp);
4186 metaslab_group_histogram_verify(mg);
4187 metaslab_class_histogram_verify(mg->mg_class);
4190 * For sync pass 1, we avoid traversing this txg's free range tree
4191 * and instead will just swap the pointers for freeing and freed.
4192 * We can safely do this since the freed_tree is guaranteed to be
4193 * empty on the initial pass.
4195 * Keep in mind that even if we are currently using a log spacemap
4196 * we want current frees to end up in the ms_allocatable (but not
4197 * get appended to the ms_sm) so their ranges can be reused as usual.
4199 if (spa_sync_pass(spa) == 1) {
4200 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
4201 ASSERT0(msp->ms_allocated_this_txg);
4202 } else {
4203 range_tree_vacate(msp->ms_freeing,
4204 range_tree_add, msp->ms_freed);
4206 msp->ms_allocated_this_txg += range_tree_space(alloctree);
4207 range_tree_vacate(alloctree, NULL, NULL);
4209 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4210 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
4211 & TXG_MASK]));
4212 ASSERT0(range_tree_space(msp->ms_freeing));
4213 ASSERT0(range_tree_space(msp->ms_checkpointing));
4215 mutex_exit(&msp->ms_lock);
4218 * Verify that the space map object ID has been recorded in the
4219 * vdev_ms_array.
4221 uint64_t object;
4222 VERIFY0(dmu_read(mos, vd->vdev_ms_array,
4223 msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
4224 VERIFY3U(object, ==, space_map_object(msp->ms_sm));
4226 mutex_exit(&msp->ms_sync_lock);
4227 dmu_tx_commit(tx);
4230 static void
4231 metaslab_evict(metaslab_t *msp, uint64_t txg)
4233 if (!msp->ms_loaded || msp->ms_disabled != 0)
4234 return;
4236 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
4237 VERIFY0(range_tree_space(
4238 msp->ms_allocating[(txg + t) & TXG_MASK]));
4240 if (msp->ms_allocator != -1)
4241 metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
4243 if (!metaslab_debug_unload)
4244 metaslab_unload(msp);
4248 * Called after a transaction group has completely synced to mark
4249 * all of the metaslab's free space as usable.
4251 void
4252 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
4254 metaslab_group_t *mg = msp->ms_group;
4255 vdev_t *vd = mg->mg_vd;
4256 spa_t *spa = vd->vdev_spa;
4257 range_tree_t **defer_tree;
4258 int64_t alloc_delta, defer_delta;
4259 boolean_t defer_allowed = B_TRUE;
4261 ASSERT(!vd->vdev_ishole);
4263 mutex_enter(&msp->ms_lock);
4265 if (msp->ms_new) {
4266 /* this is a new metaslab, add its capacity to the vdev */
4267 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
4269 /* there should be no allocations nor frees at this point */
4270 VERIFY0(msp->ms_allocated_this_txg);
4271 VERIFY0(range_tree_space(msp->ms_freed));
4274 ASSERT0(range_tree_space(msp->ms_freeing));
4275 ASSERT0(range_tree_space(msp->ms_checkpointing));
4277 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
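/*
 * Illustrative note, not from the original source: the defer slots
 * rotate by txg, so with the usual TXG_DEFER_SIZE of 2 a segment freed
 * in txg N stays deferred until metaslab_sync_done() runs for txg N+2,
 * when the vacate below returns it to ms_allocatable.
 */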
4279 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4280 metaslab_class_get_alloc(spa_normal_class(spa));
4281 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
4282 defer_allowed = B_FALSE;
4285 defer_delta = 0;
4286 alloc_delta = msp->ms_allocated_this_txg -
4287 range_tree_space(msp->ms_freed);
4289 if (defer_allowed) {
4290 defer_delta = range_tree_space(msp->ms_freed) -
4291 range_tree_space(*defer_tree);
4292 } else {
4293 defer_delta -= range_tree_space(*defer_tree);
4295 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
4296 defer_delta, 0);
4298 if (spa_syncing_log_sm(spa) == NULL) {
4300 * If there's a metaslab_load() in progress and we don't have
4301 * a log space map, it means that we probably wrote to the
4302 * metaslab's space map. If this is the case, we need to
4303 * make sure that we wait for the load to complete so that we
4304 * have a consistent view at the in-core side of the metaslab.
4306 metaslab_load_wait(msp);
4307 } else {
4308 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4312 * When auto-trimming is enabled, free ranges which are added to
4313 * ms_allocatable are also added to ms_trim. The ms_trim tree is
4314 * periodically consumed by the vdev_autotrim_thread() which issues
4315 * trims for all ranges and then vacates the tree. The ms_trim tree
4316 * can be discarded at any time with the sole consequence of recent
4317 * frees not being trimmed.
4319 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4320 range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
4321 if (!defer_allowed) {
4322 range_tree_walk(msp->ms_freed, range_tree_add,
4323 msp->ms_trim);
4325 } else {
4326 range_tree_vacate(msp->ms_trim, NULL, NULL);
4330 * Move the frees from the defer_tree back to the free
4331 * range tree (if it's loaded). Swap the freed_tree and
4332 * the defer_tree -- this is safe to do because we've
4333 * just emptied out the defer_tree.
4335 range_tree_vacate(*defer_tree,
4336 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4337 if (defer_allowed) {
4338 range_tree_swap(&msp->ms_freed, defer_tree);
4339 } else {
4340 range_tree_vacate(msp->ms_freed,
4341 msp->ms_loaded ? range_tree_add : NULL,
4342 msp->ms_allocatable);
4345 msp->ms_synced_length = space_map_length(msp->ms_sm);
4347 msp->ms_deferspace += defer_delta;
4348 ASSERT3S(msp->ms_deferspace, >=, 0);
4349 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4350 if (msp->ms_deferspace != 0) {
4352 * Keep syncing this metaslab until all deferred frees
4353 * are back in circulation.
4355 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4357 metaslab_aux_histograms_update_done(msp, defer_allowed);
4359 if (msp->ms_new) {
4360 msp->ms_new = B_FALSE;
4361 mutex_enter(&mg->mg_lock);
4362 mg->mg_ms_ready++;
4363 mutex_exit(&mg->mg_lock);
4367 * Re-sort metaslab within its group now that we've adjusted
4368 * its allocatable space.
4370 metaslab_recalculate_weight_and_sort(msp);
4372 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4373 ASSERT0(range_tree_space(msp->ms_freeing));
4374 ASSERT0(range_tree_space(msp->ms_freed));
4375 ASSERT0(range_tree_space(msp->ms_checkpointing));
4376 msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4377 msp->ms_allocated_this_txg = 0;
4378 mutex_exit(&msp->ms_lock);
4381 void
4382 metaslab_sync_reassess(metaslab_group_t *mg)
4384 spa_t *spa = mg->mg_class->mc_spa;
4386 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4387 metaslab_group_alloc_update(mg);
4388 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4391 * Preload the next potential metaslabs but only on active
4392 * metaslab groups. We can get into a state where the metaslab
4393 * is no longer active since we dirty metaslabs as we remove
4394 * a device, thus potentially making the metaslab group eligible
4395 * for preloading.
4397 if (mg->mg_activation_count > 0) {
4398 metaslab_group_preload(mg);
4400 spa_config_exit(spa, SCL_ALLOC, FTAG);
4404 * When writing a ditto block (i.e. more than one DVA for a given BP) on
4405 * the same vdev as an existing DVA of this BP, try to allocate it
4406 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
4408 static boolean_t
4409 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4411 uint64_t dva_ms_id;
4413 if (DVA_GET_ASIZE(dva) == 0)
4414 return (B_TRUE);
4416 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4417 return (B_TRUE);
4419 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4421 return (msp->ms_id != dva_ms_id);
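/*
 * Illustrative sketch, not from the original source: the uniqueness
 * test above maps a DVA offset to a metaslab index by shifting. The
 * hypothetical helper below (name invented here) shows the arithmetic:
 * with vdev_ms_shift = 34 (16GB metaslabs), offsets 0x300000000 and
 * 0x500000000 land in metaslabs 0 and 1 respectively.
 */
#if 0	/* illustrative only */
static uint64_t
example_offset_to_ms_id(uint64_t offset, uint64_t ms_shift)
{
	return (offset >> ms_shift);
}
#endif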
4425 * ==========================================================================
4426 * Metaslab allocation tracing facility
4427 * ==========================================================================
4431 * Add an allocation trace element to the allocation tracing list.
4433 static void
4434 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4435 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4436 int allocator)
4438 metaslab_alloc_trace_t *mat;
4440 if (!metaslab_trace_enabled)
4441 return;
4444 * When the tracing list reaches its maximum we remove
4445 * the second element in the list before adding a new one.
4446 * By removing the second element we preserve the original
4447 * entry as a clue to what allocation steps have already been
4448 * performed.
4450 if (zal->zal_size == metaslab_trace_max_entries) {
4451 metaslab_alloc_trace_t *mat_next;
4452 #ifdef ZFS_DEBUG
4453 panic("too many entries in allocation list");
4454 #endif
4455 METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4456 zal->zal_size--;
4457 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4458 list_remove(&zal->zal_list, mat_next);
4459 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4462 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4463 list_link_init(&mat->mat_list_node);
4464 mat->mat_mg = mg;
4465 mat->mat_msp = msp;
4466 mat->mat_size = psize;
4467 mat->mat_dva_id = dva_id;
4468 mat->mat_offset = offset;
4469 mat->mat_weight = 0;
4470 mat->mat_allocator = allocator;
4472 if (msp != NULL)
4473 mat->mat_weight = msp->ms_weight;
4476 * The list is part of the zio so locking is not required. Only
4477 * a single thread will perform allocations for a given zio.
4479 list_insert_tail(&zal->zal_list, mat);
4480 zal->zal_size++;
4482 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
4485 void
4486 metaslab_trace_init(zio_alloc_list_t *zal)
4488 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
4489 offsetof(metaslab_alloc_trace_t, mat_list_node));
4490 zal->zal_size = 0;
4493 void
4494 metaslab_trace_fini(zio_alloc_list_t *zal)
4496 metaslab_alloc_trace_t *mat;
4498 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
4499 kmem_cache_free(metaslab_alloc_trace_cache, mat);
4500 list_destroy(&zal->zal_list);
4501 zal->zal_size = 0;
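/*
 * Illustrative sketch, not from the original source: a hypothetical
 * caller of the tracing facility above (the function and its arguments
 * are invented here). The real callers are the allocation paths later
 * in this file; this only shows the init/add/fini lifecycle.
 */
#if 0	/* illustrative only */
static void
example_trace_usage(metaslab_group_t *mg, metaslab_t *msp, uint64_t psize,
    int allocator)
{
	zio_alloc_list_t zal;

	metaslab_trace_init(&zal);
	/* record a failed attempt against DVA 0 of this block pointer */
	metaslab_trace_add(&zal, mg, msp, psize, 0, TRACE_TOO_SMALL, allocator);
	metaslab_trace_fini(&zal);
}
#endif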
4505 * ==========================================================================
4506 * Metaslab block operations
4507 * ==========================================================================
4510 static void
4511 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag,
4512 int flags, int allocator)
4514 if (!(flags & METASLAB_ASYNC_ALLOC) ||
4515 (flags & METASLAB_DONT_THROTTLE))
4516 return;
4518 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4519 if (!mg->mg_class->mc_alloc_throttle_enabled)
4520 return;
4522 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4523 (void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
4526 static void
4527 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
4529 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4530 metaslab_class_allocator_t *mca =
4531 &mg->mg_class->mc_allocator[allocator];
4532 uint64_t max = mg->mg_max_alloc_queue_depth;
4533 uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
4534 while (cur < max) {
4535 if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
4536 cur, cur + 1) == cur) {
4537 atomic_inc_64(&mca->mca_alloc_max_slots);
4538 return;
4540 cur = mga->mga_cur_max_alloc_queue_depth;
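/*
 * Illustrative sketch, not from the original source: the loop above
 * grows a counter toward a ceiling without taking a lock. The
 * hypothetical helper below (name invented here) shows the same
 * compare-and-swap pattern on a bare counter.
 */
#if 0	/* illustrative only */
static void
example_bounded_increment(volatile uint64_t *counter, uint64_t max)
{
	uint64_t cur = *counter;

	while (cur < max) {
		if (atomic_cas_64(counter, cur, cur + 1) == cur)
			return;		/* won the race */
		cur = *counter;		/* lost the race; re-read and retry */
	}
}
#endif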
4544 void
4545 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag,
4546 int flags, int allocator, boolean_t io_complete)
4548 if (!(flags & METASLAB_ASYNC_ALLOC) ||
4549 (flags & METASLAB_DONT_THROTTLE))
4550 return;
4552 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4553 if (!mg->mg_class->mc_alloc_throttle_enabled)
4554 return;
4556 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4557 (void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
4558 if (io_complete)
4559 metaslab_group_increment_qdepth(mg, allocator);
4562 void
4563 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag,
4564 int allocator)
4566 #ifdef ZFS_DEBUG
4567 const dva_t *dva = bp->blk_dva;
4568 int ndvas = BP_GET_NDVAS(bp);
4570 for (int d = 0; d < ndvas; d++) {
4571 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
4572 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4573 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4574 VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
4576 #endif
4579 static uint64_t
4580 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
4582 uint64_t start;
4583 range_tree_t *rt = msp->ms_allocatable;
4584 metaslab_class_t *mc = msp->ms_group->mg_class;
4586 ASSERT(MUTEX_HELD(&msp->ms_lock));
4587 VERIFY(!msp->ms_condensing);
4588 VERIFY0(msp->ms_disabled);
4590 start = mc->mc_ops->msop_alloc(msp, size);
4591 if (start != -1ULL) {
4592 metaslab_group_t *mg = msp->ms_group;
4593 vdev_t *vd = mg->mg_vd;
4595 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
4596 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4597 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
4598 range_tree_remove(rt, start, size);
4599 range_tree_clear(msp->ms_trim, start, size);
4601 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4602 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
4604 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
4605 msp->ms_allocating_total += size;
4607 /* Track the last successful allocation */
4608 msp->ms_alloc_txg = txg;
4609 metaslab_verify_space(msp, txg);
4613 * Now that we've attempted the allocation we need to update the
4614 * metaslab's maximum block size since it may have changed.
4616 msp->ms_max_size = metaslab_largest_allocatable(msp);
4617 return (start);
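/*
 * Illustrative sketch, not from the original source: the P2PHASE()
 * checks above assert that both the offset and the size are multiples
 * of the vdev's allocation unit (1 << ashift). The hypothetical helper
 * below (name invented here) spells out the same test: with ashift 12,
 * 0x3000 passes and 0x3200 does not.
 */
#if 0	/* illustrative only */
static boolean_t
example_is_ashift_aligned(uint64_t value, uint64_t ashift)
{
	return ((value & ((1ULL << ashift) - 1)) == 0);
}
#endif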
4621 * Find the metaslab with the highest weight that is less than what we've
4622 * already tried. In the common case, this means that we will examine each
4623 * metaslab at most once. Note that concurrent callers could reorder metaslabs
4624 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
4625 * activated by another thread, and we fail to allocate from the metaslab we
4626 * have selected, we may not try the newly-activated metaslab, and instead
4627 * activate another metaslab. This is not optimal, but generally does not cause
4628 * any problems (a possible exception being if every metaslab is completely full
4629 * except for the newly-activated metaslab which we fail to examine).
4631 static metaslab_t *
4632 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4633 dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
4634 boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
4635 boolean_t *was_active)
4637 avl_index_t idx;
4638 avl_tree_t *t = &mg->mg_metaslab_tree;
4639 metaslab_t *msp = avl_find(t, search, &idx);
4640 if (msp == NULL)
4641 msp = avl_nearest(t, idx, AVL_AFTER);
4643 uint_t tries = 0;
4644 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4645 int i;
4647 if (!try_hard && tries > zfs_metaslab_find_max_tries) {
4648 METASLABSTAT_BUMP(metaslabstat_too_many_tries);
4649 return (NULL);
4651 tries++;
4653 if (!metaslab_should_allocate(msp, asize, try_hard)) {
4654 metaslab_trace_add(zal, mg, msp, asize, d,
4655 TRACE_TOO_SMALL, allocator);
4656 continue;
4660 * If the selected metaslab is condensing or disabled,
4661 * skip it.
4663 if (msp->ms_condensing || msp->ms_disabled > 0)
4664 continue;
4666 *was_active = msp->ms_allocator != -1;
4668 * If we're activating as primary, this is our first allocation
4669 * from this disk, so we don't need to check how close we are.
4670 * If the metaslab under consideration was already active,
4671 * we're getting desperate enough to steal another allocator's
4672 * metaslab, so we still don't care about distances.
4674 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4675 break;
4677 for (i = 0; i < d; i++) {
4678 if (want_unique &&
4679 !metaslab_is_unique(msp, &dva[i]))
4680 break; /* try another metaslab */
4682 if (i == d)
4683 break;
4686 if (msp != NULL) {
4687 search->ms_weight = msp->ms_weight;
4688 search->ms_start = msp->ms_start + 1;
4689 search->ms_allocator = msp->ms_allocator;
4690 search->ms_primary = msp->ms_primary;
4692 return (msp);
4695 static void
4696 metaslab_active_mask_verify(metaslab_t *msp)
4698 ASSERT(MUTEX_HELD(&msp->ms_lock));
4700 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4701 return;
4703 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4704 return;
4706 if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4707 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4708 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4709 VERIFY3S(msp->ms_allocator, !=, -1);
4710 VERIFY(msp->ms_primary);
4711 return;
4714 if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4715 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4716 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4717 VERIFY3S(msp->ms_allocator, !=, -1);
4718 VERIFY(!msp->ms_primary);
4719 return;
4722 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4723 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4724 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4725 VERIFY3S(msp->ms_allocator, ==, -1);
4726 return;
4730 static uint64_t
4731 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
4732 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
4733 int allocator, boolean_t try_hard)
4735 metaslab_t *msp = NULL;
4736 uint64_t offset = -1ULL;
4738 uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
4739 for (int i = 0; i < d; i++) {
4740 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4741 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4742 activation_weight = METASLAB_WEIGHT_SECONDARY;
4743 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4744 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4745 activation_weight = METASLAB_WEIGHT_CLAIM;
4746 break;
4751 * If we don't have enough metaslabs active to fill the entire array, we
4752 * just use the 0th slot.
4754 if (mg->mg_ms_ready < mg->mg_allocators * 3)
4755 allocator = 0;
4756 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4758 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4760 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4761 search->ms_weight = UINT64_MAX;
4762 search->ms_start = 0;
4764 * At the end of the metaslab tree are the already-active metaslabs,
4765 * first the primaries, then the secondaries. When we resume searching
4766 * through the tree, we need to consider ms_allocator and ms_primary so
4767 * we start in the location right after where we left off, and don't
4768 * accidentally loop forever considering the same metaslabs.
4770 search->ms_allocator = -1;
4771 search->ms_primary = B_TRUE;
4772 for (;;) {
4773 boolean_t was_active = B_FALSE;
4775 mutex_enter(&mg->mg_lock);
4777 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4778 mga->mga_primary != NULL) {
4779 msp = mga->mga_primary;
4782 * Even though we don't hold the ms_lock for the
4783 * primary metaslab, those fields should not
4784 * change while we hold the mg_lock. Thus it is
4785 * safe to make assertions on them.
4787 ASSERT(msp->ms_primary);
4788 ASSERT3S(msp->ms_allocator, ==, allocator);
4789 ASSERT(msp->ms_loaded);
4791 was_active = B_TRUE;
4792 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4793 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4794 mga->mga_secondary != NULL) {
4795 msp = mga->mga_secondary;
4798 * See comment above about the similar assertions
4799 * for the primary metaslab.
4801 ASSERT(!msp->ms_primary);
4802 ASSERT3S(msp->ms_allocator, ==, allocator);
4803 ASSERT(msp->ms_loaded);
4805 was_active = B_TRUE;
4806 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4807 } else {
4808 msp = find_valid_metaslab(mg, activation_weight, dva, d,
4809 want_unique, asize, allocator, try_hard, zal,
4810 search, &was_active);
4813 mutex_exit(&mg->mg_lock);
4814 if (msp == NULL) {
4815 kmem_free(search, sizeof (*search));
4816 return (-1ULL);
4818 mutex_enter(&msp->ms_lock);
4820 metaslab_active_mask_verify(msp);
4823 * This code is disabled because of issues with
4824 * tracepoints in non-GPL kernel modules.
4826 #if 0
4827 DTRACE_PROBE3(ms__activation__attempt,
4828 metaslab_t *, msp, uint64_t, activation_weight,
4829 boolean_t, was_active);
4830 #endif
4833 * Ensure that the metaslab we have selected is still
4834 * capable of handling our request. It's possible that
4835 * another thread may have changed the weight while we
4836 * were blocked on the metaslab lock. We check the
4837 * active status first to see if we need to set_selected_txg on
4838 * a new metaslab.
4840 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4841 ASSERT3S(msp->ms_allocator, ==, -1);
4842 mutex_exit(&msp->ms_lock);
4843 continue;
4847 * If the metaslab was activated for another allocator
4848 * while we were waiting in the ms_lock above, or it's
4849 * a primary and we're seeking a secondary (or vice versa),
4850 * we go back and select a new metaslab.
4852 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4853 (msp->ms_allocator != -1) &&
4854 (msp->ms_allocator != allocator || ((activation_weight ==
4855 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4856 ASSERT(msp->ms_loaded);
4857 ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4858 msp->ms_allocator != -1);
4859 mutex_exit(&msp->ms_lock);
4860 continue;
4864 * This metaslab was used for claiming regions allocated
4865 * by the ZIL during pool import. Once these regions are
4866 * claimed we don't need to keep the CLAIM bit set
4867 * anymore. Passivate this metaslab to zero its activation
4868 * mask.
4870 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4871 activation_weight != METASLAB_WEIGHT_CLAIM) {
4872 ASSERT(msp->ms_loaded);
4873 ASSERT3S(msp->ms_allocator, ==, -1);
4874 metaslab_passivate(msp, msp->ms_weight &
4875 ~METASLAB_WEIGHT_CLAIM);
4876 mutex_exit(&msp->ms_lock);
4877 continue;
4880 metaslab_set_selected_txg(msp, txg);
4882 int activation_error =
4883 metaslab_activate(msp, allocator, activation_weight);
4884 metaslab_active_mask_verify(msp);
4887 * If the metaslab was activated by another thread for
4888 * another allocator or activation_weight (EBUSY), or it
4889 * failed because another metaslab was assigned as primary
4890 * for this allocator (EEXIST) we continue using this
4891 * metaslab for our allocation, rather than going on to a
4892 * worse metaslab (we waited for that metaslab to be loaded
4893 * after all).
4895 * If the activation failed due to an I/O error or ENOSPC we
4896 * skip to the next metaslab.
4898 boolean_t activated;
4899 if (activation_error == 0) {
4900 activated = B_TRUE;
4901 } else if (activation_error == EBUSY ||
4902 activation_error == EEXIST) {
4903 activated = B_FALSE;
4904 } else {
4905 mutex_exit(&msp->ms_lock);
4906 continue;
4908 ASSERT(msp->ms_loaded);
4911 * Now that we have the lock, recheck to see if we should
4912 * continue to use this metaslab for this allocation. The
4913 * metaslab is now loaded so metaslab_should_allocate()
4914 * can accurately determine if the allocation attempt should
4915 * proceed.
4917 if (!metaslab_should_allocate(msp, asize, try_hard)) {
4918 /* Passivate this metaslab and select a new one. */
4919 metaslab_trace_add(zal, mg, msp, asize, d,
4920 TRACE_TOO_SMALL, allocator);
4921 goto next;
4925 * If this metaslab is currently condensing then pick again
4926 * as we can't manipulate this metaslab until it's committed
4927 * to disk. If this metaslab is being initialized, we shouldn't
4928 * allocate from it since the allocated region might be
4929 * overwritten after allocation.
4931 if (msp->ms_condensing) {
4932 metaslab_trace_add(zal, mg, msp, asize, d,
4933 TRACE_CONDENSING, allocator);
4934 if (activated) {
4935 metaslab_passivate(msp, msp->ms_weight &
4936 ~METASLAB_ACTIVE_MASK);
4938 mutex_exit(&msp->ms_lock);
4939 continue;
4940 } else if (msp->ms_disabled > 0) {
4941 metaslab_trace_add(zal, mg, msp, asize, d,
4942 TRACE_DISABLED, allocator);
4943 if (activated) {
4944 metaslab_passivate(msp, msp->ms_weight &
4945 ~METASLAB_ACTIVE_MASK);
4947 mutex_exit(&msp->ms_lock);
4948 continue;
4951 offset = metaslab_block_alloc(msp, asize, txg);
4952 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
4954 if (offset != -1ULL) {
4955 /* Proactively passivate the metaslab, if needed */
4956 if (activated)
4957 metaslab_segment_may_passivate(msp);
4958 break;
4960 next:
4961 ASSERT(msp->ms_loaded);
4964 * This code is disabled because of issues with
4965 * tracepoints in non-GPL kernel modules.
4967 #if 0
4968 DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
4969 uint64_t, asize);
4970 #endif
4973 * We were unable to allocate from this metaslab so determine
4974 * a new weight for this metaslab. Now that we have loaded
4975 * the metaslab we can provide a better hint to the metaslab
4976 * selector.
4978 * For space-based metaslabs, we use the maximum block size.
4979 * This information is only available when the metaslab
4980 * is loaded and is more accurate than the generic free
4981 * space weight that was calculated by metaslab_weight().
4982 * This information allows us to quickly compare the maximum
4983 * available allocation in the metaslab to the allocation
4984 * size being requested.
4986 * For segment-based metaslabs, determine the new weight
4987 * based on the highest bucket in the range tree. We
4988 * explicitly use the loaded segment weight (i.e. the range
4989 * tree histogram) since it contains the space that is
4990 * currently available for allocation and is accurate
4991 * even within a sync pass.
4993 uint64_t weight;
4994 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
4995 weight = metaslab_largest_allocatable(msp);
4996 WEIGHT_SET_SPACEBASED(weight);
4997 } else {
4998 weight = metaslab_weight_from_range_tree(msp);
5001 if (activated) {
5002 metaslab_passivate(msp, weight);
5003 } else {
5005 * For the case where we use the metaslab that is
5006 * active for another allocator we want to make
5007 * sure that we retain the activation mask.
5009 * Note that we could attempt to use something like
5010 * metaslab_recalculate_weight_and_sort() that
5011 * retains the activation mask here. That function
5012 * uses metaslab_weight() to set the weight though
5013 * which is not as accurate as the calculations
5014 * above.
5016 weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
5017 metaslab_group_sort(mg, msp, weight);
5019 metaslab_active_mask_verify(msp);
5022 * We have just failed an allocation attempt, check
5023 * that metaslab_should_allocate() agrees. Otherwise,
5024 * we may end up in an infinite loop retrying the same
5025 * metaslab.
5027 ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
5029 mutex_exit(&msp->ms_lock);
5031 mutex_exit(&msp->ms_lock);
5032 kmem_free(search, sizeof (*search));
5033 return (offset);
5036 static uint64_t
5037 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
5038 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
5039 int allocator, boolean_t try_hard)
5041 uint64_t offset;
5042 ASSERT(mg->mg_initialized);
5044 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
5045 dva, d, allocator, try_hard);
5047 mutex_enter(&mg->mg_lock);
5048 if (offset == -1ULL) {
5049 mg->mg_failed_allocations++;
5050 metaslab_trace_add(zal, mg, NULL, asize, d,
5051 TRACE_GROUP_FAILURE, allocator);
5052 if (asize == SPA_GANGBLOCKSIZE) {
5054 * This metaslab group was unable to allocate
5055 * the minimum gang block size so it must be out of
5056 * space. We must notify the allocation throttle
5057 * to start skipping allocation attempts to this
5058 * metaslab group until more space becomes available.
5059 * Note: this failure cannot be caused by the
5060 * allocation throttle since the allocation throttle
5061 * is only responsible for skipping devices and
5062 * not failing block allocations.
5064 mg->mg_no_free_space = B_TRUE;
5067 mg->mg_allocations++;
5068 mutex_exit(&mg->mg_lock);
5069 return (offset);
5073 * Allocate a block for the specified i/o.
5076 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5077 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
5078 zio_alloc_list_t *zal, int allocator)
5080 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5081 metaslab_group_t *mg, *fast_mg, *rotor;
5082 vdev_t *vd;
5083 boolean_t try_hard = B_FALSE;
5085 ASSERT(!DVA_IS_VALID(&dva[d]));
5088 * For testing, make some blocks above a certain size be gang blocks.
5089 * This will result in more split blocks when using device removal,
5090 * and a large number of split blocks coupled with ztest-induced
5091 * damage can result in extremely long reconstruction times. This
5092 * will also test spilling from special to normal.
5094 if (psize >= metaslab_force_ganging && (random_in_range(100) < 3)) {
5095 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
5096 allocator);
5097 return (SET_ERROR(ENOSPC));
5101 * Start at the rotor and loop through all mgs until we find something.
5102 * Note that there's no locking on mca_rotor or mca_aliquot because
5103 * nothing actually breaks if we miss a few updates -- we just won't
5104 * allocate quite as evenly. It all balances out over time.
5106 * If we are doing ditto or log blocks, try to spread them across
5107 * consecutive vdevs. If we're forced to reuse a vdev before we've
5108 * allocated all of our ditto blocks, then try and spread them out on
5109 * that vdev as much as possible. If it turns out to not be possible,
5110 * gradually lower our standards until anything becomes acceptable.
5111 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
5112 * gives us hope of containing our fault domains to something we're
5113 * able to reason about. Otherwise, any two top-level vdev failures
5114 * will guarantee the loss of data. With consecutive allocation,
5115 * only two adjacent top-level vdev failures will result in data loss.
5117 * If we are doing gang blocks (hintdva is non-NULL), try to keep
5118 * ourselves on the same vdev as our gang block header. That
5119 * way, we can hope for locality in vdev_cache, plus it makes our
5120 * fault domains something tractable.
5122 if (hintdva) {
5123 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
5126 * It's possible the vdev we're using as the hint no
5127 * longer exists or its mg has been closed (e.g. by
5128 * device removal). Consult the rotor when
5129 * all else fails.
5131 if (vd != NULL && vd->vdev_mg != NULL) {
5132 mg = vdev_get_mg(vd, mc);
5134 if (flags & METASLAB_HINTBP_AVOID &&
5135 mg->mg_next != NULL)
5136 mg = mg->mg_next;
5137 } else {
5138 mg = mca->mca_rotor;
5140 } else if (d != 0) {
5141 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
5142 mg = vd->vdev_mg->mg_next;
5143 } else if (flags & METASLAB_FASTWRITE) {
5144 mg = fast_mg = mca->mca_rotor;
5146 do {
5147 if (fast_mg->mg_vd->vdev_pending_fastwrite <
5148 mg->mg_vd->vdev_pending_fastwrite)
5149 mg = fast_mg;
5150 } while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor);
5152 } else {
5153 ASSERT(mca->mca_rotor != NULL);
5154 mg = mca->mca_rotor;
5158 * If the hint put us into the wrong metaslab class, or into a
5159 * metaslab group that has been passivated, just follow the rotor.
5161 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
5162 mg = mca->mca_rotor;
5164 rotor = mg;
5165 top:
5166 do {
5167 boolean_t allocatable;
5169 ASSERT(mg->mg_activation_count == 1);
5170 vd = mg->mg_vd;
5173 * Don't allocate from faulted devices.
5175 if (try_hard) {
5176 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
5177 allocatable = vdev_allocatable(vd);
5178 spa_config_exit(spa, SCL_ZIO, FTAG);
5179 } else {
5180 allocatable = vdev_allocatable(vd);
5184 * Determine if the selected metaslab group is eligible
5185 * for allocations. If we're ganging then don't allow
5186 * this metaslab group to skip allocations since that would
5187 * inadvertently return ENOSPC and suspend the pool
5188 * even though space is still available.
5190 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
5191 allocatable = metaslab_group_allocatable(mg, rotor,
5192 psize, allocator, d);
5195 if (!allocatable) {
5196 metaslab_trace_add(zal, mg, NULL, psize, d,
5197 TRACE_NOT_ALLOCATABLE, allocator);
5198 goto next;
5201 ASSERT(mg->mg_initialized);
5204 * Avoid writing single-copy data to an unhealthy,
5205 * non-redundant vdev, unless we've already tried all
5206 * other vdevs.
5208 if (vd->vdev_state < VDEV_STATE_HEALTHY &&
5209 d == 0 && !try_hard && vd->vdev_children == 0) {
5210 metaslab_trace_add(zal, mg, NULL, psize, d,
5211 TRACE_VDEV_ERROR, allocator);
5212 goto next;
5215 ASSERT(mg->mg_class == mc);
5217 uint64_t asize = vdev_psize_to_asize(vd, psize);
5218 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
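/*
 * asize is the space this allocation actually consumes on the
 * vdev: vdev_psize_to_asize() rounds psize up to the vdev's
 * allocation granularity and, on raidz/draid, accounts for parity
 * and padding, hence the alignment assertion above.
 */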
5221 * If we don't need to try hard, then require that the
5222 * block be on a different metaslab from any other DVAs
5223 * in this BP (unique=true). If we are trying hard, then
5224 * allow any metaslab to be used (unique=false).
5226 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
5227 !try_hard, dva, d, allocator, try_hard);
5229 if (offset != -1ULL) {
5231 * If we've just selected this metaslab group,
5232 * figure out whether the corresponding vdev is
5233 * over- or under-used relative to the pool,
5234 * and set an allocation bias to even it out.
5236 * Bias is also used to compensate for unequally
5237 * sized vdevs so that space is allocated fairly.
5239 if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
5240 vdev_stat_t *vs = &vd->vdev_stat;
5241 int64_t vs_free = vs->vs_space - vs->vs_alloc;
5242 int64_t mc_free = mc->mc_space - mc->mc_alloc;
5243 int64_t ratio;
5246 * Calculate how much more or less we should
5247 * try to allocate from this device during
5248 * this iteration around the rotor.
5250 * This basically introduces a zero-centered
5251 * bias towards the devices with the most
5252 * free space, while compensating for vdev
5253 * size differences.
5255 * Examples:
5256 * vdev V1 = 16M/128M
5257 * vdev V2 = 16M/128M
5258 * ratio(V1) = 100% ratio(V2) = 100%
5260 * vdev V1 = 16M/128M
5261 * vdev V2 = 64M/128M
5262 * ratio(V1) = 127% ratio(V2) = 72%
5264 * vdev V1 = 16M/128M
5265 * vdev V2 = 64M/512M
5266 * ratio(V1) = 40% ratio(V2) = 160%
5268 ratio = (vs_free * mc->mc_alloc_groups * 100) /
5269 (mc_free + 1);
5270 mg->mg_bias = ((ratio - 100) *
5271 (int64_t)mg->mg_aliquot) / 100;
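/*
 * Worked example using the V1 = 16M/128M, V2 = 64M/512M case
 * above: with mg_aliquot = 1M (an illustrative value), V2's
 * ratio of 160% yields mg_bias = (160 - 100) * 1M / 100 ~= 614K,
 * so the rotor lingers on V2 for about 1.6M per pass, while V1's
 * 40% ratio shrinks its share to roughly 0.4M.
 */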
5272 } else if (!metaslab_bias_enabled) {
5273 mg->mg_bias = 0;
5276 if ((flags & METASLAB_FASTWRITE) ||
5277 atomic_add_64_nv(&mca->mca_aliquot, asize) >=
5278 mg->mg_aliquot + mg->mg_bias) {
5279 mca->mca_rotor = mg->mg_next;
5280 mca->mca_aliquot = 0;
5283 DVA_SET_VDEV(&dva[d], vd->vdev_id);
5284 DVA_SET_OFFSET(&dva[d], offset);
5285 DVA_SET_GANG(&dva[d],
5286 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
5287 DVA_SET_ASIZE(&dva[d], asize);
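/*
 * The DVA now encodes everything needed to locate this block
 * again: the top-level vdev id, the offset within that vdev, the
 * allocated size, and whether it refers to a gang header.
 */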
5289 if (flags & METASLAB_FASTWRITE) {
5290 atomic_add_64(&vd->vdev_pending_fastwrite,
5291 psize);
5294 return (0);
5296 next:
5297 mca->mca_rotor = mg->mg_next;
5298 mca->mca_aliquot = 0;
5299 } while ((mg = mg->mg_next) != rotor);
5302 * If we haven't tried hard, perhaps do so now.
5304 if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
5305 GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
5306 psize <= 1 << spa->spa_min_ashift)) {
5307 METASLABSTAT_BUMP(metaslabstat_try_hard);
5308 try_hard = B_TRUE;
5309 goto top;
5312 memset(&dva[d], 0, sizeof (dva_t));
5314 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
5315 return (SET_ERROR(ENOSPC));
5318 void
5319 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
5320 boolean_t checkpoint)
5322 metaslab_t *msp;
5323 spa_t *spa = vd->vdev_spa;
5325 ASSERT(vdev_is_concrete(vd));
5326 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5327 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5329 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5331 VERIFY(!msp->ms_condensing);
5332 VERIFY3U(offset, >=, msp->ms_start);
5333 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
5334 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5335 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
5337 metaslab_check_free_impl(vd, offset, asize);
5339 mutex_enter(&msp->ms_lock);
5340 if (range_tree_is_empty(msp->ms_freeing) &&
5341 range_tree_is_empty(msp->ms_checkpointing)) {
5342 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
5345 if (checkpoint) {
5346 ASSERT(spa_has_checkpoint(spa));
5347 range_tree_add(msp->ms_checkpointing, offset, asize);
5348 } else {
5349 range_tree_add(msp->ms_freeing, offset, asize);
5351 mutex_exit(&msp->ms_lock);
5354 void
5355 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5356 uint64_t size, void *arg)
5358 (void) inner_offset;
5359 boolean_t *checkpoint = arg;
5361 ASSERT3P(checkpoint, !=, NULL);
5363 if (vd->vdev_ops->vdev_op_remap != NULL)
5364 vdev_indirect_mark_obsolete(vd, offset, size);
5365 else
5366 metaslab_free_impl(vd, offset, size, *checkpoint);
5369 static void
5370 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
5371 boolean_t checkpoint)
5373 spa_t *spa = vd->vdev_spa;
5375 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5377 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
5378 return;
5380 if (spa->spa_vdev_removal != NULL &&
5381 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
5382 vdev_is_concrete(vd)) {
5384 * Note: we check if the vdev is concrete because when
5385 * we complete the removal, we first change the vdev to be
5386 * an indirect vdev (in open context), and then (in syncing
5387 * context) clear spa_vdev_removal.
5389 free_from_removing_vdev(vd, offset, size);
5390 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
5391 vdev_indirect_mark_obsolete(vd, offset, size);
5392 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5393 metaslab_free_impl_cb, &checkpoint);
5394 } else {
5395 metaslab_free_concrete(vd, offset, size, checkpoint);
5399 typedef struct remap_blkptr_cb_arg {
5400 blkptr_t *rbca_bp;
5401 spa_remap_cb_t rbca_cb;
5402 vdev_t *rbca_remap_vd;
5403 uint64_t rbca_remap_offset;
5404 void *rbca_cb_arg;
5405 } remap_blkptr_cb_arg_t;
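/*
 * rbca_bp is the block pointer being rewritten in place. If
 * rbca_cb is non-NULL it is invoked once per remapping step with
 * the previous (indirect) vdev and offset, which remap_blkptr_cb()
 * carries between calls in rbca_remap_vd and rbca_remap_offset.
 */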
5407 static void
5408 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5409 uint64_t size, void *arg)
5411 remap_blkptr_cb_arg_t *rbca = arg;
5412 blkptr_t *bp = rbca->rbca_bp;
5414 /* We can not remap split blocks. */
5415 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
5416 return;
5417 ASSERT0(inner_offset);
5419 if (rbca->rbca_cb != NULL) {
5421 * At this point we know that we are not handling split
5422 * blocks and we invoke the callback on the previous
5423 * vdev which must be indirect.
5425 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
5427 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
5428 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
5430 /* set up remap_blkptr_cb_arg for the next call */
5431 rbca->rbca_remap_vd = vd;
5432 rbca->rbca_remap_offset = offset;
5436 * The phys birth time is that of dva[0]. This ensures that we know
5437 * when each dva was written, so that resilver can determine which
5438 * blocks need to be scrubbed (i.e. those written during the time
5439 * the vdev was offline). It also ensures that the key used in
5440 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
5441 * we didn't change the phys_birth, a lookup in the ARC for a
5442 * remapped BP could find the data that was previously stored at
5443 * this vdev + offset.
5445 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
5446 DVA_GET_VDEV(&bp->blk_dva[0]));
5447 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
5448 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
5449 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
5451 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
5452 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
5456 * If the block pointer contains any indirect DVAs, modify them to refer to
5457 * concrete DVAs. Note that this will sometimes not be possible, leaving
5458 * the indirect DVA in place. This happens if the indirect DVA spans multiple
5459 * segments in the mapping (i.e. it is a "split block").
5461 * If the BP was remapped, calls the callback on the original dva (note the
5462 * callback can be called multiple times if the original indirect DVA refers
5463 * to another indirect DVA, etc).
5465 * Returns TRUE if the BP was remapped.
5467 boolean_t
5468 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
5470 remap_blkptr_cb_arg_t rbca;
5472 if (!zfs_remap_blkptr_enable)
5473 return (B_FALSE);
5475 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
5476 return (B_FALSE);
5479 * Dedup BP's can not be remapped, because ddt_phys_select() depends
5480 * on DVA[0] being the same in the BP as in the DDT (dedup table).
5482 if (BP_GET_DEDUP(bp))
5483 return (B_FALSE);
5486 * Gang blocks can not be remapped, because
5487 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
5488 * the BP used to read the gang block header (GBH) being the same
5489 * as the DVA[0] that we allocated for the GBH.
5491 if (BP_IS_GANG(bp))
5492 return (B_FALSE);
5495 * Embedded BP's have no DVA to remap.
5497 if (BP_GET_NDVAS(bp) < 1)
5498 return (B_FALSE);
5501 * Note: we only remap dva[0]. If we remapped other dvas, we
5502 * would no longer know what their phys birth txg is.
5504 dva_t *dva = &bp->blk_dva[0];
5506 uint64_t offset = DVA_GET_OFFSET(dva);
5507 uint64_t size = DVA_GET_ASIZE(dva);
5508 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
5510 if (vd->vdev_ops->vdev_op_remap == NULL)
5511 return (B_FALSE);
5513 rbca.rbca_bp = bp;
5514 rbca.rbca_cb = callback;
5515 rbca.rbca_remap_vd = vd;
5516 rbca.rbca_remap_offset = offset;
5517 rbca.rbca_cb_arg = arg;
5520 * remap_blkptr_cb() will be called in order for each level of
5521 * indirection, until a concrete vdev is reached or a split block is
5522 * encountered. rbca_remap_vd and rbca_remap_offset are updated within
5523 * the callback as we go from one indirect vdev to the next (either
5524 * concrete or indirect again) in that order.
5526 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
5528 /* Check if the DVA wasn't remapped because it is a split block */
5529 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
5530 return (B_FALSE);
5532 return (B_TRUE);
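/*
 * Illustrative call pattern (a sketch, not a specific call site):
 * a caller that only wants the BP rewritten can pass a NULL
 * callback, e.g.
 *
 *	if (spa_remap_blkptr(spa, bp, NULL, NULL))
 *		... bp->blk_dva[0] now refers to a concrete vdev ...
 */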
5536 * Undo the allocation of a DVA which happened in the given transaction group.
5538 void
5539 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5541 metaslab_t *msp;
5542 vdev_t *vd;
5543 uint64_t vdev = DVA_GET_VDEV(dva);
5544 uint64_t offset = DVA_GET_OFFSET(dva);
5545 uint64_t size = DVA_GET_ASIZE(dva);
5547 ASSERT(DVA_IS_VALID(dva));
5548 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5550 if (txg > spa_freeze_txg(spa))
5551 return;
5553 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5554 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5555 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
5556 (u_longlong_t)vdev, (u_longlong_t)offset,
5557 (u_longlong_t)size);
5558 return;
5561 ASSERT(!vd->vdev_removing);
5562 ASSERT(vdev_is_concrete(vd));
5563 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5564 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5566 if (DVA_GET_GANG(dva))
5567 size = vdev_gang_header_asize(vd);
5569 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5571 mutex_enter(&msp->ms_lock);
5572 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5573 offset, size);
5574 msp->ms_allocating_total -= size;
5576 VERIFY(!msp->ms_condensing);
5577 VERIFY3U(offset, >=, msp->ms_start);
5578 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5579 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
5580 msp->ms_size);
5581 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5582 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5583 range_tree_add(msp->ms_allocatable, offset, size);
5584 mutex_exit(&msp->ms_lock);
5588 * Free the block represented by the given DVA.
5590 void
5591 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
5593 uint64_t vdev = DVA_GET_VDEV(dva);
5594 uint64_t offset = DVA_GET_OFFSET(dva);
5595 uint64_t size = DVA_GET_ASIZE(dva);
5596 vdev_t *vd = vdev_lookup_top(spa, vdev);
5598 ASSERT(DVA_IS_VALID(dva));
5599 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5601 if (DVA_GET_GANG(dva)) {
5602 size = vdev_gang_header_asize(vd);
5605 metaslab_free_impl(vd, offset, size, checkpoint);
5609 * Reserve some allocation slots. The reservation system must be called
5610 * before we call into the allocator. If there aren't any available slots
5611 * then the I/O will be throttled until an I/O completes and its slots are
5612 * freed up. The function returns true if it was successful in placing
5613 * the reservation.
5615 boolean_t
5616 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
5617 zio_t *zio, int flags)
5619 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5620 uint64_t max = mca->mca_alloc_max_slots;
5622 ASSERT(mc->mc_alloc_throttle_enabled);
5623 if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
5624 zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
5626 * The potential race between _count() and _add() is covered
5627 * by the allocator lock in most cases, or irrelevant due to
5628 * GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others.
5629 * But even if we assume some other, non-existent scenario, the
5630 * worst that can happen is that a few more I/Os get to allocation
5631 * earlier, which is not a problem.
5633 * We reserve the slots individually so that we can unreserve
5634 * them individually when an I/O completes.
5636 for (int d = 0; d < slots; d++)
5637 zfs_refcount_add(&mca->mca_alloc_slots, zio);
5638 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
5639 return (B_TRUE);
5641 return (B_FALSE);
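/*
 * The reserve/unreserve calls are expected to pair up: a writer
 * (typically the ZIO pipeline) reserves its slots before calling
 * metaslab_alloc() and releases the same number of slots with
 * metaslab_class_throttle_unreserve() once the I/O completes.
 */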
5644 void
5645 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
5646 int allocator, zio_t *zio)
5648 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5650 ASSERT(mc->mc_alloc_throttle_enabled);
5651 for (int d = 0; d < slots; d++)
5652 zfs_refcount_remove(&mca->mca_alloc_slots, zio);
5655 static int
5656 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
5657 uint64_t txg)
5659 metaslab_t *msp;
5660 spa_t *spa = vd->vdev_spa;
5661 int error = 0;
5663 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
5664 return (SET_ERROR(ENXIO));
5666 ASSERT3P(vd->vdev_ms, !=, NULL);
5667 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5669 mutex_enter(&msp->ms_lock);
5671 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5672 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
5673 if (error == EBUSY) {
5674 ASSERT(msp->ms_loaded);
5675 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5676 error = 0;
5680 if (error == 0 &&
5681 !range_tree_contains(msp->ms_allocatable, offset, size))
5682 error = SET_ERROR(ENOENT);
5684 if (error || txg == 0) { /* txg == 0 indicates dry run */
5685 mutex_exit(&msp->ms_lock);
5686 return (error);
5689 VERIFY(!msp->ms_condensing);
5690 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5691 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5692 VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
5693 msp->ms_size);
5694 range_tree_remove(msp->ms_allocatable, offset, size);
5695 range_tree_clear(msp->ms_trim, offset, size);
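/*
 * Clearing the claimed range from ms_trim ensures that a pending
 * or future TRIM cannot discard space the intent log is using.
 */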
5697 if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
5698 metaslab_class_t *mc = msp->ms_group->mg_class;
5699 multilist_sublist_t *mls =
5700 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
5701 if (!multilist_link_active(&msp->ms_class_txg_node)) {
5702 msp->ms_selected_txg = txg;
5703 multilist_sublist_insert_head(mls, msp);
5705 multilist_sublist_unlock(mls);
5707 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5708 vdev_dirty(vd, VDD_METASLAB, msp, txg);
5709 range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5710 offset, size);
5711 msp->ms_allocating_total += size;
5714 mutex_exit(&msp->ms_lock);
5716 return (0);
5719 typedef struct metaslab_claim_cb_arg_t {
5720 uint64_t mcca_txg;
5721 int mcca_error;
5722 } metaslab_claim_cb_arg_t;
5724 static void
5725 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5726 uint64_t size, void *arg)
5728 (void) inner_offset;
5729 metaslab_claim_cb_arg_t *mcca_arg = arg;
5731 if (mcca_arg->mcca_error == 0) {
5732 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
5733 size, mcca_arg->mcca_txg);
5734 }
5735 }
5737 int
5738 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5740 if (vd->vdev_ops->vdev_op_remap != NULL) {
5741 metaslab_claim_cb_arg_t arg;
5744 * Only zdb(8) can claim on indirect vdevs. This is used
5745 * to detect leaks of mapped space (that are not accounted
5746 * for in the obsolete counts, spacemap, or bpobj).
5748 ASSERT(!spa_writeable(vd->vdev_spa));
5749 arg.mcca_error = 0;
5750 arg.mcca_txg = txg;
5752 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5753 metaslab_claim_impl_cb, &arg);
5755 if (arg.mcca_error == 0) {
5756 arg.mcca_error = metaslab_claim_concrete(vd,
5757 offset, size, txg);
5759 return (arg.mcca_error);
5760 } else {
5761 return (metaslab_claim_concrete(vd, offset, size, txg));
5766 * Intent log support: upon opening the pool after a crash, notify the SPA
5767 * of blocks that the intent log has allocated for immediate write, but
5768 * which are still considered free by the SPA because the last transaction
5769 * group didn't commit yet.
5771 static int
5772 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5774 uint64_t vdev = DVA_GET_VDEV(dva);
5775 uint64_t offset = DVA_GET_OFFSET(dva);
5776 uint64_t size = DVA_GET_ASIZE(dva);
5777 vdev_t *vd;
5779 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
5780 return (SET_ERROR(ENXIO));
5783 ASSERT(DVA_IS_VALID(dva));
5785 if (DVA_GET_GANG(dva))
5786 size = vdev_gang_header_asize(vd);
5788 return (metaslab_claim_impl(vd, offset, size, txg));
5789 }
5791 int
5792 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
5793 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
5794 zio_alloc_list_t *zal, zio_t *zio, int allocator)
5796 dva_t *dva = bp->blk_dva;
5797 dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
5798 int error = 0;
5800 ASSERT(bp->blk_birth == 0);
5801 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
5803 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5805 if (mc->mc_allocator[allocator].mca_rotor == NULL) {
5806 /* no vdevs in this class */
5807 spa_config_exit(spa, SCL_ALLOC, FTAG);
5808 return (SET_ERROR(ENOSPC));
5811 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
5812 ASSERT(BP_GET_NDVAS(bp) == 0);
5813 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
5814 ASSERT3P(zal, !=, NULL);
5816 for (int d = 0; d < ndvas; d++) {
5817 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
5818 txg, flags, zal, allocator);
5819 if (error != 0) {
5820 for (d--; d >= 0; d--) {
5821 metaslab_unalloc_dva(spa, &dva[d], txg);
5822 metaslab_group_alloc_decrement(spa,
5823 DVA_GET_VDEV(&dva[d]), zio, flags,
5824 allocator, B_FALSE);
5825 memset(&dva[d], 0, sizeof (dva_t));
5827 spa_config_exit(spa, SCL_ALLOC, FTAG);
5828 return (error);
5829 } else {
5831 * Update the metaslab group's queue depth
5832 * based on the newly allocated dva.
5834 metaslab_group_alloc_increment(spa,
5835 DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
5838 ASSERT(error == 0);
5839 ASSERT(BP_GET_NDVAS(bp) == ndvas);
5841 spa_config_exit(spa, SCL_ALLOC, FTAG);
5843 BP_SET_BIRTH(bp, txg, 0);
5845 return (0);
5848 void
5849 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5851 const dva_t *dva = bp->blk_dva;
5852 int ndvas = BP_GET_NDVAS(bp);
5854 ASSERT(!BP_IS_HOLE(bp));
5855 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
5858 * If we have a checkpoint for the pool we need to make sure that
5859 * the blocks that we free that are part of the checkpoint won't be
5860 * reused until the checkpoint is discarded or we revert to it.
5862 * The checkpoint flag is passed down the metaslab_free code path
5863 * and is set whenever we want to add a block to the checkpoint's
5864 * accounting. That is, we "checkpoint" blocks that existed at the
5865 * time the checkpoint was created and are therefore referenced by
5866 * the checkpointed uberblock.
5868 * Note that we don't checkpoint any blocks if the current
5869 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5870 * normally as they will be referenced by the checkpointed uberblock.
5872 boolean_t checkpoint = B_FALSE;
5873 if (bp->blk_birth <= spa->spa_checkpoint_txg &&
5874 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5876 * At this point, if the block is part of the checkpoint
5877 * there is no way it was created in the current txg.
5879 ASSERT(!now);
5880 ASSERT3U(spa_syncing_txg(spa), ==, txg);
5881 checkpoint = B_TRUE;
5884 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
5886 for (int d = 0; d < ndvas; d++) {
5887 if (now) {
5888 metaslab_unalloc_dva(spa, &dva[d], txg);
5889 } else {
5890 ASSERT3U(txg, ==, spa_syncing_txg(spa));
5891 metaslab_free_dva(spa, &dva[d], checkpoint);
5895 spa_config_exit(spa, SCL_FREE, FTAG);
5896 }
5898 int
5899 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
5901 const dva_t *dva = bp->blk_dva;
5902 int ndvas = BP_GET_NDVAS(bp);
5903 int error = 0;
5905 ASSERT(!BP_IS_HOLE(bp));
5907 if (txg != 0) {
5909 * First do a dry run to make sure all DVAs are claimable,
5910 * so we don't have to unwind from partial failures below.
5912 if ((error = metaslab_claim(spa, bp, 0)) != 0)
5913 return (error);
5916 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5918 for (int d = 0; d < ndvas; d++) {
5919 error = metaslab_claim_dva(spa, &dva[d], txg);
5920 if (error != 0)
5921 break;
5924 spa_config_exit(spa, SCL_ALLOC, FTAG);
5926 ASSERT(error == 0 || txg == 0);
5928 return (error);
5931 void
5932 metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
5934 const dva_t *dva = bp->blk_dva;
5935 int ndvas = BP_GET_NDVAS(bp);
5936 uint64_t psize = BP_GET_PSIZE(bp);
5937 int d;
5938 vdev_t *vd;
5940 ASSERT(!BP_IS_HOLE(bp));
5941 ASSERT(!BP_IS_EMBEDDED(bp));
5942 ASSERT(psize > 0);
5944 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5946 for (d = 0; d < ndvas; d++) {
5947 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
5948 continue;
5949 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
5952 spa_config_exit(spa, SCL_VDEV, FTAG);
5955 void
5956 metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
5958 const dva_t *dva = bp->blk_dva;
5959 int ndvas = BP_GET_NDVAS(bp);
5960 uint64_t psize = BP_GET_PSIZE(bp);
5961 int d;
5962 vdev_t *vd;
5964 ASSERT(!BP_IS_HOLE(bp));
5965 ASSERT(!BP_IS_EMBEDDED(bp));
5966 ASSERT(psize > 0);
5968 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5970 for (d = 0; d < ndvas; d++) {
5971 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
5972 continue;
5973 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
5974 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
5977 spa_config_exit(spa, SCL_VDEV, FTAG);
5980 static void
5981 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
5982 uint64_t size, void *arg)
5984 (void) inner, (void) arg;
5986 if (vd->vdev_ops == &vdev_indirect_ops)
5987 return;
5989 metaslab_check_free_impl(vd, offset, size);
5992 static void
5993 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
5995 metaslab_t *msp;
5996 spa_t *spa __maybe_unused = vd->vdev_spa;
5998 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
5999 return;
6001 if (vd->vdev_ops->vdev_op_remap != NULL) {
6002 vd->vdev_ops->vdev_op_remap(vd, offset, size,
6003 metaslab_check_free_impl_cb, NULL);
6004 return;
6007 ASSERT(vdev_is_concrete(vd));
6008 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
6009 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
6011 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
6013 mutex_enter(&msp->ms_lock);
6014 if (msp->ms_loaded) {
6015 range_tree_verify_not_present(msp->ms_allocatable,
6016 offset, size);
6020 * Check all segments that currently exist in the freeing pipeline.
6022 * It would intuitively make sense to also check the current allocating
6023 * tree since metaslab_unalloc_dva() exists for extents that are
6024 * allocated and freed in the same sync pass within the same txg.
6025 * Unfortunately there are places (e.g. the ZIL) where we allocate a
6026 * segment but then we free part of it within the same txg
6027 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the
6028 * current allocating tree.
6030 range_tree_verify_not_present(msp->ms_freeing, offset, size);
6031 range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
6032 range_tree_verify_not_present(msp->ms_freed, offset, size);
6033 for (int j = 0; j < TXG_DEFER_SIZE; j++)
6034 range_tree_verify_not_present(msp->ms_defer[j], offset, size);
6035 range_tree_verify_not_present(msp->ms_trim, offset, size);
6036 mutex_exit(&msp->ms_lock);
6039 void
6040 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
6042 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6043 return;
6045 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
6046 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
6047 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
6048 vdev_t *vd = vdev_lookup_top(spa, vdev);
6049 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
6050 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
6052 if (DVA_GET_GANG(&bp->blk_dva[i]))
6053 size = vdev_gang_header_asize(vd);
6055 ASSERT3P(vd, !=, NULL);
6057 metaslab_check_free_impl(vd, offset, size);
6059 spa_config_exit(spa, SCL_VDEV, FTAG);
6062 static void
6063 metaslab_group_disable_wait(metaslab_group_t *mg)
6065 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6066 while (mg->mg_disabled_updating) {
6067 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6071 static void
6072 metaslab_group_disabled_increment(metaslab_group_t *mg)
6074 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6075 ASSERT(mg->mg_disabled_updating);
6077 while (mg->mg_ms_disabled >= max_disabled_ms) {
6078 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6080 mg->mg_ms_disabled++;
6081 ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
6085 * Mark the metaslab as disabled to prevent any allocations on this metaslab.
6086 * We must also track how many metaslabs are currently disabled within a
6087 * metaslab group and limit them to prevent allocation failures from
6088 * occurring because all metaslabs are disabled.
6090 void
6091 metaslab_disable(metaslab_t *msp)
6093 ASSERT(!MUTEX_HELD(&msp->ms_lock));
6094 metaslab_group_t *mg = msp->ms_group;
6096 mutex_enter(&mg->mg_ms_disabled_lock);
6099 * To keep an accurate count of how many threads have disabled
6100 * a specific metaslab group, we only allow one thread to mark
6101 * the metaslab group at a time. This ensures that the value of
6102 * ms_disabled will be accurate when we decide to mark a metaslab
6103 * group as disabled. To do this we force all other threads
6104 * to wait until the metaslab group's mg_disabled_updating flag
6105 * is no longer set.
6107 metaslab_group_disable_wait(mg);
6108 mg->mg_disabled_updating = B_TRUE;
6109 if (msp->ms_disabled == 0) {
6110 metaslab_group_disabled_increment(mg);
6112 mutex_enter(&msp->ms_lock);
6113 msp->ms_disabled++;
6114 mutex_exit(&msp->ms_lock);
6116 mg->mg_disabled_updating = B_FALSE;
6117 cv_broadcast(&mg->mg_ms_disabled_cv);
6118 mutex_exit(&mg->mg_ms_disabled_lock);
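/*
 * Illustrative pairing (the real callers are, for example, the
 * initialize and TRIM threads):
 *
 *	metaslab_disable(msp);
 *	... modify unallocated space (zero fill, TRIM) ...
 *	metaslab_enable(msp, B_TRUE, B_FALSE);
 */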
6121 void
6122 metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6124 metaslab_group_t *mg = msp->ms_group;
6125 spa_t *spa = mg->mg_vd->vdev_spa;
6128 * Wait for the outstanding IO to be synced to prevent newly
6129 * allocated blocks from being overwritten. This is used by
6130 * initialize and TRIM, which modify unallocated space.
6132 if (sync)
6133 txg_wait_synced(spa_get_dsl(spa), 0);
6135 mutex_enter(&mg->mg_ms_disabled_lock);
6136 mutex_enter(&msp->ms_lock);
6137 if (--msp->ms_disabled == 0) {
6138 mg->mg_ms_disabled--;
6139 cv_broadcast(&mg->mg_ms_disabled_cv);
6140 if (unload)
6141 metaslab_unload(msp);
6143 mutex_exit(&msp->ms_lock);
6144 mutex_exit(&mg->mg_ms_disabled_lock);
6147 void
6148 metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
6150 ms->ms_unflushed_dirty = dirty;
6153 static void
6154 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
6156 vdev_t *vd = ms->ms_group->mg_vd;
6157 spa_t *spa = vd->vdev_spa;
6158 objset_t *mos = spa_meta_objset(spa);
6160 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
6162 metaslab_unflushed_phys_t entry = {
6163 .msp_unflushed_txg = metaslab_unflushed_txg(ms),
6165 uint64_t entry_size = sizeof (entry);
6166 uint64_t entry_offset = ms->ms_id * entry_size;
6168 uint64_t object = 0;
6169 int err = zap_lookup(mos, vd->vdev_top_zap,
6170 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6171 &object);
6172 if (err == ENOENT) {
6173 object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
6174 SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
6175 VERIFY0(zap_add(mos, vd->vdev_top_zap,
6176 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6177 &object, tx));
6178 } else {
6179 VERIFY0(err);
6182 dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
6183 &entry, tx);
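/*
 * Each metaslab owns a fixed-size slot in this per-vdev object at
 * byte offset ms_id * sizeof (metaslab_unflushed_phys_t), so the
 * record can simply be rewritten in place every time the metaslab
 * is flushed.
 */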
6186 void
6187 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
6189 ms->ms_unflushed_txg = txg;
6190 metaslab_update_ondisk_flush_data(ms, tx);
6193 boolean_t
6194 metaslab_unflushed_dirty(metaslab_t *ms)
6196 return (ms->ms_unflushed_dirty);
6199 uint64_t
6200 metaslab_unflushed_txg(metaslab_t *ms)
6202 return (ms->ms_unflushed_txg);
6205 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, ULONG, ZMOD_RW,
6206 "Allocation granularity (a.k.a. stripe size)");
6208 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
6209 "Load all metaslabs when pool is first opened");
6211 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
6212 "Prevent metaslabs from being unloaded");
6214 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
6215 "Preload potential metaslabs during reassessment");
6217 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
6218 "Delay in txgs after metaslab was last used before unloading");
6220 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
6221 "Delay in milliseconds after metaslab was last used before unloading");
6223 /* BEGIN CSTYLED */
6224 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
6225 "Percentage of metaslab group size that should be free to make it "
6226 "eligible for allocation");
6228 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
6229 "Percentage of metaslab group size that should be considered eligible "
6230 "for allocations unless all metaslab groups within the metaslab class "
6231 "have also crossed this threshold");
6233 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
6234 ZMOD_RW,
6235 "Use the fragmentation metric to prefer less fragmented metaslabs");
6236 /* END CSTYLED */
6238 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
6239 ZMOD_RW, "Fragmentation for metaslab to allow allocation");
6241 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
6242 "Prefer metaslabs with lower LBAs");
6244 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
6245 "Enable metaslab group biasing");
6247 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
6248 ZMOD_RW, "Enable segment-based metaslab selection");
6250 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
6251 "Segment-based metaslab selection maximum buckets before switching");
6253 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW,
6254 "Blocks larger than this size are forced to be gang blocks");
6256 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
6257 "Max distance (bytes) to search forward before using size tree");
6259 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
6260 "When looking in size tree, use largest segment instead of exact fit");
6262 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG,
6263 ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
6265 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
6266 "Percentage of memory that can be used to store metaslab range trees");
6268 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
6269 ZMOD_RW, "Try hard to allocate before ganging");
6271 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
6272 "Normally only consider this many of the best metaslabs in each vdev");