/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */
#include <sys/zfs_context.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
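/*
 * Illustrative example (not part of the original code): with the default
 * zfs_condense_pct of 200, a space map whose minimal in-core representation
 * would take 1MB is only condensed once its on-disk representation has grown
 * to at least 2MB (see metaslab_should_condense() below).
 */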
/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;
/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;
/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
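/*
 * Illustrative note (assumes TXG_SIZE is 4, as defined in the txg headers):
 * the default of TXG_SIZE * 2 keeps an idle metaslab loaded for up to 8 txgs
 * after its last allocation before it becomes eligible for unloading.
 */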
/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslab.
 */
boolean_t metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
boolean_t metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
boolean_t metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;
/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
boolean_t metaslab_trace_enabled = B_TRUE;
/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
uint64_t metaslab_trace_max_entries = 5000;
static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);

kmem_cache_t *metaslab_alloc_trace_cache;
202 * ==========================================================================
203 * Metaslab classes
204 * ==========================================================================
207 metaslab_class_create(spa_t
*spa
, metaslab_ops_t
*ops
)
209 metaslab_class_t
*mc
;
211 mc
= kmem_zalloc(sizeof (metaslab_class_t
), KM_SLEEP
);
216 mutex_init(&mc
->mc_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
217 refcount_create_tracked(&mc
->mc_alloc_slots
);
223 metaslab_class_destroy(metaslab_class_t
*mc
)
225 ASSERT(mc
->mc_rotor
== NULL
);
226 ASSERT(mc
->mc_alloc
== 0);
227 ASSERT(mc
->mc_deferred
== 0);
228 ASSERT(mc
->mc_space
== 0);
229 ASSERT(mc
->mc_dspace
== 0);
231 refcount_destroy(&mc
->mc_alloc_slots
);
232 mutex_destroy(&mc
->mc_lock
);
233 kmem_free(mc
, sizeof (metaslab_class_t
));
237 metaslab_class_validate(metaslab_class_t
*mc
)
239 metaslab_group_t
*mg
;
243 * Must hold one of the spa_config locks.
245 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_READER
) ||
246 spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_WRITER
));
248 if ((mg
= mc
->mc_rotor
) == NULL
)
253 ASSERT(vd
->vdev_mg
!= NULL
);
254 ASSERT3P(vd
->vdev_top
, ==, vd
);
255 ASSERT3P(mg
->mg_class
, ==, mc
);
256 ASSERT3P(vd
->vdev_ops
, !=, &vdev_hole_ops
);
257 } while ((mg
= mg
->mg_next
) != mc
->mc_rotor
);
263 metaslab_class_space_update(metaslab_class_t
*mc
, int64_t alloc_delta
,
264 int64_t defer_delta
, int64_t space_delta
, int64_t dspace_delta
)
266 atomic_add_64(&mc
->mc_alloc
, alloc_delta
);
267 atomic_add_64(&mc
->mc_deferred
, defer_delta
);
268 atomic_add_64(&mc
->mc_space
, space_delta
);
269 atomic_add_64(&mc
->mc_dspace
, dspace_delta
);
273 metaslab_class_get_alloc(metaslab_class_t
*mc
)
275 return (mc
->mc_alloc
);
279 metaslab_class_get_deferred(metaslab_class_t
*mc
)
281 return (mc
->mc_deferred
);
285 metaslab_class_get_space(metaslab_class_t
*mc
)
287 return (mc
->mc_space
);
291 metaslab_class_get_dspace(metaslab_class_t
*mc
)
293 return (spa_deflate(mc
->mc_spa
) ? mc
->mc_dspace
: mc
->mc_space
);
297 metaslab_class_histogram_verify(metaslab_class_t
*mc
)
299 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
303 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
306 mc_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
309 for (int c
= 0; c
< rvd
->vdev_children
; c
++) {
310 vdev_t
*tvd
= rvd
->vdev_child
[c
];
311 metaslab_group_t
*mg
= tvd
->vdev_mg
;
314 * Skip any holes, uninitialized top-levels, or
315 * vdevs that are not in this metaslab class.
317 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
318 mg
->mg_class
!= mc
) {
322 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
323 mc_hist
[i
] += mg
->mg_histogram
[i
];
326 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
327 VERIFY3U(mc_hist
[i
], ==, mc
->mc_histogram
[i
]);
329 kmem_free(mc_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
333 * Calculate the metaslab class's fragmentation metric. The metric
334 * is weighted based on the space contribution of each metaslab group.
335 * The return value will be a number between 0 and 100 (inclusive), or
336 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
337 * zfs_frag_table for more information about the metric.
340 metaslab_class_fragmentation(metaslab_class_t
*mc
)
342 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
343 uint64_t fragmentation
= 0;
345 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
347 for (int c
= 0; c
< rvd
->vdev_children
; c
++) {
348 vdev_t
*tvd
= rvd
->vdev_child
[c
];
349 metaslab_group_t
*mg
= tvd
->vdev_mg
;
352 * Skip any holes, uninitialized top-levels, or
353 * vdevs that are not in this metaslab class.
355 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
356 mg
->mg_class
!= mc
) {
361 * If a metaslab group does not contain a fragmentation
362 * metric then just bail out.
364 if (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
) {
365 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
366 return (ZFS_FRAG_INVALID
);
370 * Determine how much this metaslab_group is contributing
371 * to the overall pool fragmentation metric.
373 fragmentation
+= mg
->mg_fragmentation
*
374 metaslab_group_get_space(mg
);
376 fragmentation
/= metaslab_class_get_space(mc
);
378 ASSERT3U(fragmentation
, <=, 100);
379 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
380 return (fragmentation
);
384 * Calculate the amount of expandable space that is available in
385 * this metaslab class. If a device is expanded then its expandable
386 * space will be the amount of allocatable space that is currently not
387 * part of this metaslab class.
390 metaslab_class_expandable_space(metaslab_class_t
*mc
)
392 vdev_t
*rvd
= mc
->mc_spa
->spa_root_vdev
;
395 spa_config_enter(mc
->mc_spa
, SCL_VDEV
, FTAG
, RW_READER
);
396 for (int c
= 0; c
< rvd
->vdev_children
; c
++) {
398 vdev_t
*tvd
= rvd
->vdev_child
[c
];
399 metaslab_group_t
*mg
= tvd
->vdev_mg
;
401 if (tvd
->vdev_ishole
|| tvd
->vdev_ms_shift
== 0 ||
402 mg
->mg_class
!= mc
) {
407 * Calculate if we have enough space to add additional
408 * metaslabs. We report the expandable space in terms
409 * of the metaslab size since that's the unit of expansion.
410 * Adjust by efi system partition size.
412 tspace
= tvd
->vdev_max_asize
- tvd
->vdev_asize
;
413 if (tspace
> mc
->mc_spa
->spa_bootsize
) {
414 tspace
-= mc
->mc_spa
->spa_bootsize
;
416 space
+= P2ALIGN(tspace
, 1ULL << tvd
->vdev_ms_shift
);
418 spa_config_exit(mc
->mc_spa
, SCL_VDEV
, FTAG
);
423 metaslab_compare(const void *x1
, const void *x2
)
425 const metaslab_t
*m1
= x1
;
426 const metaslab_t
*m2
= x2
;
428 if (m1
->ms_weight
< m2
->ms_weight
)
430 if (m1
->ms_weight
> m2
->ms_weight
)
434 * If the weights are identical, use the offset to force uniqueness.
436 if (m1
->ms_start
< m2
->ms_start
)
438 if (m1
->ms_start
> m2
->ms_start
)
441 ASSERT3P(m1
, ==, m2
);
447 * Verify that the space accounting on disk matches the in-core range_trees.
450 metaslab_verify_space(metaslab_t
*msp
, uint64_t txg
)
452 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
453 uint64_t allocated
= 0;
454 uint64_t sm_free_space
, msp_free_space
;
456 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
458 if ((zfs_flags
& ZFS_DEBUG_METASLAB_VERIFY
) == 0)
462 * We can only verify the metaslab space when we're called
463 * from syncing context with a loaded metaslab that has an allocated
464 * space map. Calling this in non-syncing context does not
465 * provide a consistent view of the metaslab since we're performing
466 * allocations in the future.
468 if (txg
!= spa_syncing_txg(spa
) || msp
->ms_sm
== NULL
||
472 sm_free_space
= msp
->ms_size
- space_map_allocated(msp
->ms_sm
) -
473 space_map_alloc_delta(msp
->ms_sm
);
476 * Account for future allocations since we would have already
477 * deducted that space from the ms_freetree.
479 for (int t
= 0; t
< TXG_CONCURRENT_STATES
; t
++) {
481 range_tree_space(msp
->ms_alloctree
[(txg
+ t
) & TXG_MASK
]);
484 msp_free_space
= range_tree_space(msp
->ms_tree
) + allocated
+
485 msp
->ms_deferspace
+ range_tree_space(msp
->ms_freedtree
);
487 VERIFY3U(sm_free_space
, ==, msp_free_space
);
491 * ==========================================================================
492 * Metaslab groups
493 * ==========================================================================
496 * Update the allocatable flag and the metaslab group's capacity.
497 * The allocatable flag is cleared if the capacity falls below
498 * the zfs_mg_noalloc_threshold or the fragmentation value exceeds
499 * zfs_mg_fragmentation_threshold. If a metaslab group
500 * transitions from allocatable to non-allocatable or vice versa then the
501 * metaslab group's class is updated to reflect the transition.
504 metaslab_group_alloc_update(metaslab_group_t
*mg
)
506 vdev_t
*vd
= mg
->mg_vd
;
507 metaslab_class_t
*mc
= mg
->mg_class
;
508 vdev_stat_t
*vs
= &vd
->vdev_stat
;
509 boolean_t was_allocatable
;
510 boolean_t was_initialized
;
512 ASSERT(vd
== vd
->vdev_top
);
514 mutex_enter(&mg
->mg_lock
);
515 was_allocatable
= mg
->mg_allocatable
;
516 was_initialized
= mg
->mg_initialized
;
518 mg
->mg_free_capacity
= ((vs
->vs_space
- vs
->vs_alloc
) * 100) /
521 mutex_enter(&mc
->mc_lock
);
524 * If the metaslab group was just added then it won't
525 * have any space until we finish syncing out this txg.
526 * At that point we will consider it initialized and available
527 * for allocations. We also don't consider non-activated
528 * metaslab groups (e.g. vdevs that are in the middle of being removed)
529 * to be initialized, because they can't be used for allocation.
531 mg
->mg_initialized
= metaslab_group_initialized(mg
);
532 if (!was_initialized
&& mg
->mg_initialized
) {
534 } else if (was_initialized
&& !mg
->mg_initialized
) {
535 ASSERT3U(mc
->mc_groups
, >, 0);
538 if (mg
->mg_initialized
)
539 mg
->mg_no_free_space
= B_FALSE
;
542 * A metaslab group is considered allocatable if it has plenty
543 * of free space or is not heavily fragmented. We only take
544 * fragmentation into account if the metaslab group has a valid
545 * fragmentation metric (i.e. a value between 0 and 100).
547 mg
->mg_allocatable
= (mg
->mg_activation_count
> 0 &&
548 mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
&&
549 (mg
->mg_fragmentation
== ZFS_FRAG_INVALID
||
550 mg
->mg_fragmentation
<= zfs_mg_fragmentation_threshold
));
553 * The mc_alloc_groups maintains a count of the number of
554 * groups in this metaslab class that are still above the
555 * zfs_mg_noalloc_threshold. This is used by the allocating
556 * threads to determine if they should avoid allocations to
557 * a given group. The allocator will avoid allocations to a group
558 * if that group has reached or is below the zfs_mg_noalloc_threshold
559 * and there are still other groups that are above the threshold.
560 * When a group transitions from allocatable to non-allocatable or
561 * vice versa we update the metaslab class to reflect that change.
562 * When the mc_alloc_groups value drops to 0 that means that all
563 * groups have reached the zfs_mg_noalloc_threshold making all groups
564 * eligible for allocations. This effectively means that all devices
565 * are balanced again.
567 if (was_allocatable
&& !mg
->mg_allocatable
)
568 mc
->mc_alloc_groups
--;
569 else if (!was_allocatable
&& mg
->mg_allocatable
)
570 mc
->mc_alloc_groups
++;
571 mutex_exit(&mc
->mc_lock
);
573 mutex_exit(&mg
->mg_lock
);
577 metaslab_group_create(metaslab_class_t
*mc
, vdev_t
*vd
)
579 metaslab_group_t
*mg
;
581 mg
= kmem_zalloc(sizeof (metaslab_group_t
), KM_SLEEP
);
582 mutex_init(&mg
->mg_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
583 avl_create(&mg
->mg_metaslab_tree
, metaslab_compare
,
584 sizeof (metaslab_t
), offsetof(struct metaslab
, ms_group_node
));
587 mg
->mg_activation_count
= 0;
588 mg
->mg_initialized
= B_FALSE
;
589 mg
->mg_no_free_space
= B_TRUE
;
590 refcount_create_tracked(&mg
->mg_alloc_queue_depth
);
592 mg
->mg_taskq
= taskq_create("metaslab_group_taskq", metaslab_load_pct
,
593 minclsyspri
, 10, INT_MAX
, TASKQ_THREADS_CPU_PCT
);
599 metaslab_group_destroy(metaslab_group_t
*mg
)
601 ASSERT(mg
->mg_prev
== NULL
);
602 ASSERT(mg
->mg_next
== NULL
);
604 * We may have gone below zero with the activation count
605 * either because we never activated in the first place or
606 * because we're done, and possibly removing the vdev.
608 ASSERT(mg
->mg_activation_count
<= 0);
610 taskq_destroy(mg
->mg_taskq
);
611 avl_destroy(&mg
->mg_metaslab_tree
);
612 mutex_destroy(&mg
->mg_lock
);
613 refcount_destroy(&mg
->mg_alloc_queue_depth
);
614 kmem_free(mg
, sizeof (metaslab_group_t
));
618 metaslab_group_activate(metaslab_group_t
*mg
)
620 metaslab_class_t
*mc
= mg
->mg_class
;
621 metaslab_group_t
*mgprev
, *mgnext
;
623 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
625 ASSERT(mc
->mc_rotor
!= mg
);
626 ASSERT(mg
->mg_prev
== NULL
);
627 ASSERT(mg
->mg_next
== NULL
);
628 ASSERT(mg
->mg_activation_count
<= 0);
630 if (++mg
->mg_activation_count
<= 0)
633 mg
->mg_aliquot
= metaslab_aliquot
* MAX(1, mg
->mg_vd
->vdev_children
);
634 metaslab_group_alloc_update(mg
);
636 if ((mgprev
= mc
->mc_rotor
) == NULL
) {
640 mgnext
= mgprev
->mg_next
;
641 mg
->mg_prev
= mgprev
;
642 mg
->mg_next
= mgnext
;
643 mgprev
->mg_next
= mg
;
644 mgnext
->mg_prev
= mg
;
650 metaslab_group_passivate(metaslab_group_t
*mg
)
652 metaslab_class_t
*mc
= mg
->mg_class
;
653 metaslab_group_t
*mgprev
, *mgnext
;
655 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
657 if (--mg
->mg_activation_count
!= 0) {
658 ASSERT(mc
->mc_rotor
!= mg
);
659 ASSERT(mg
->mg_prev
== NULL
);
660 ASSERT(mg
->mg_next
== NULL
);
661 ASSERT(mg
->mg_activation_count
< 0);
665 taskq_wait(mg
->mg_taskq
);
666 metaslab_group_alloc_update(mg
);
668 mgprev
= mg
->mg_prev
;
669 mgnext
= mg
->mg_next
;
674 mc
->mc_rotor
= mgnext
;
675 mgprev
->mg_next
= mgnext
;
676 mgnext
->mg_prev
= mgprev
;
684 metaslab_group_initialized(metaslab_group_t
*mg
)
686 vdev_t
*vd
= mg
->mg_vd
;
687 vdev_stat_t
*vs
= &vd
->vdev_stat
;
689 return (vs
->vs_space
!= 0 && mg
->mg_activation_count
> 0);
693 metaslab_group_get_space(metaslab_group_t
*mg
)
695 return ((1ULL << mg
->mg_vd
->vdev_ms_shift
) * mg
->mg_vd
->vdev_ms_count
);
699 metaslab_group_histogram_verify(metaslab_group_t
*mg
)
702 vdev_t
*vd
= mg
->mg_vd
;
703 uint64_t ashift
= vd
->vdev_ashift
;
706 if ((zfs_flags
& ZFS_DEBUG_HISTOGRAM_VERIFY
) == 0)
709 mg_hist
= kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
,
712 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE
, >=,
713 SPACE_MAP_HISTOGRAM_SIZE
+ ashift
);
715 for (int m
= 0; m
< vd
->vdev_ms_count
; m
++) {
716 metaslab_t
*msp
= vd
->vdev_ms
[m
];
718 if (msp
->ms_sm
== NULL
)
721 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++)
722 mg_hist
[i
+ ashift
] +=
723 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
726 for (i
= 0; i
< RANGE_TREE_HISTOGRAM_SIZE
; i
++)
727 VERIFY3U(mg_hist
[i
], ==, mg
->mg_histogram
[i
]);
729 kmem_free(mg_hist
, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE
);
733 metaslab_group_histogram_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
735 metaslab_class_t
*mc
= mg
->mg_class
;
736 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
738 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
739 if (msp
->ms_sm
== NULL
)
742 mutex_enter(&mg
->mg_lock
);
743 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
744 mg
->mg_histogram
[i
+ ashift
] +=
745 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
746 mc
->mc_histogram
[i
+ ashift
] +=
747 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
749 mutex_exit(&mg
->mg_lock
);
753 metaslab_group_histogram_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
755 metaslab_class_t
*mc
= mg
->mg_class
;
756 uint64_t ashift
= mg
->mg_vd
->vdev_ashift
;
758 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
759 if (msp
->ms_sm
== NULL
)
762 mutex_enter(&mg
->mg_lock
);
763 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
764 ASSERT3U(mg
->mg_histogram
[i
+ ashift
], >=,
765 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
766 ASSERT3U(mc
->mc_histogram
[i
+ ashift
], >=,
767 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
769 mg
->mg_histogram
[i
+ ashift
] -=
770 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
771 mc
->mc_histogram
[i
+ ashift
] -=
772 msp
->ms_sm
->sm_phys
->smp_histogram
[i
];
774 mutex_exit(&mg
->mg_lock
);
778 metaslab_group_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
780 ASSERT(msp
->ms_group
== NULL
);
781 mutex_enter(&mg
->mg_lock
);
784 avl_add(&mg
->mg_metaslab_tree
, msp
);
785 mutex_exit(&mg
->mg_lock
);
787 mutex_enter(&msp
->ms_lock
);
788 metaslab_group_histogram_add(mg
, msp
);
789 mutex_exit(&msp
->ms_lock
);
793 metaslab_group_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
795 mutex_enter(&msp
->ms_lock
);
796 metaslab_group_histogram_remove(mg
, msp
);
797 mutex_exit(&msp
->ms_lock
);
799 mutex_enter(&mg
->mg_lock
);
800 ASSERT(msp
->ms_group
== mg
);
801 avl_remove(&mg
->mg_metaslab_tree
, msp
);
802 msp
->ms_group
= NULL
;
803 mutex_exit(&mg
->mg_lock
);
807 metaslab_group_sort(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
810 * Although in principle the weight can be any value, in
811 * practice we do not use values in the range [1, 511].
813 ASSERT(weight
>= SPA_MINBLOCKSIZE
|| weight
== 0);
814 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
816 mutex_enter(&mg
->mg_lock
);
817 ASSERT(msp
->ms_group
== mg
);
818 avl_remove(&mg
->mg_metaslab_tree
, msp
);
819 msp
->ms_weight
= weight
;
820 avl_add(&mg
->mg_metaslab_tree
, msp
);
821 mutex_exit(&mg
->mg_lock
);
825 * Calculate the fragmentation for a given metaslab group. We can use
826 * a simple average here since all metaslabs within the group must have
827 * the same size. The return value will be a value between 0 and 100
828 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
829 * group have a fragmentation metric.
832 metaslab_group_fragmentation(metaslab_group_t
*mg
)
834 vdev_t
*vd
= mg
->mg_vd
;
835 uint64_t fragmentation
= 0;
836 uint64_t valid_ms
= 0;
838 for (int m
= 0; m
< vd
->vdev_ms_count
; m
++) {
839 metaslab_t
*msp
= vd
->vdev_ms
[m
];
841 if (msp
->ms_fragmentation
== ZFS_FRAG_INVALID
)
845 fragmentation
+= msp
->ms_fragmentation
;
848 if (valid_ms
<= vd
->vdev_ms_count
/ 2)
849 return (ZFS_FRAG_INVALID
);
851 fragmentation
/= valid_ms
;
852 ASSERT3U(fragmentation
, <=, 100);
853 return (fragmentation
);
857 * Determine if a given metaslab group should skip allocations. A metaslab
858 * group should avoid allocations if its free capacity is less than the
859 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
860 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
861 * that can still handle allocations. If the allocation throttle is enabled
862 * then we skip allocations to devices that have reached their maximum
863 * allocation queue depth unless the selected metaslab group is the only
864 * eligible group remaining.
867 metaslab_group_allocatable(metaslab_group_t
*mg
, metaslab_group_t
*rotor
,
870 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
871 metaslab_class_t
*mc
= mg
->mg_class
;
874 * We can only consider skipping this metaslab group if it's
875 * in the normal metaslab class and there are other metaslab
876 * groups to select from. Otherwise, we always consider it eligible for allocations.
879 if (mc
!= spa_normal_class(spa
) || mc
->mc_groups
<= 1)
883 * If the metaslab group's mg_allocatable flag is set (see comments
884 * in metaslab_group_alloc_update() for more information) and
885 * the allocation throttle is disabled then allow allocations to this
886 * device. However, if the allocation throttle is enabled then
887 * check if we have reached our allocation limit (mg_alloc_queue_depth)
888 * to determine if we should allow allocations to this metaslab group.
889 * If all metaslab groups are no longer considered allocatable
890 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
891 * gang block size then we allow allocations on this metaslab group
892 * regardless of the mg_allocatable or throttle settings.
894 if (mg
->mg_allocatable
) {
895 metaslab_group_t
*mgp
;
897 uint64_t qmax
= mg
->mg_max_alloc_queue_depth
;
899 if (!mc
->mc_alloc_throttle_enabled
)
903 * If this metaslab group does not have any free space, then
904 * there is no point in looking further.
906 if (mg
->mg_no_free_space
)
909 qdepth
= refcount_count(&mg
->mg_alloc_queue_depth
);
912 * If this metaslab group is below its qmax or it's
913 * the only allocatable metaslab group, then attempt
914 * to allocate from it.
916 if (qdepth
< qmax
|| mc
->mc_alloc_groups
== 1)
918 ASSERT3U(mc
->mc_alloc_groups
, >, 1);
921 * Since this metaslab group is at or over its qmax, we
922 * need to determine if there are metaslab groups after this
923 * one that might be able to handle this allocation. This is
924 * racy since we can't hold the locks for all metaslab
925 * groups at the same time when we make this check.
927 for (mgp
= mg
->mg_next
; mgp
!= rotor
; mgp
= mgp
->mg_next
) {
928 qmax
= mgp
->mg_max_alloc_queue_depth
;
930 qdepth
= refcount_count(&mgp
->mg_alloc_queue_depth
);
933 * If there is another metaslab group that
934 * might be able to handle the allocation, then
935 * we return false so that we skip this group.
937 if (qdepth
< qmax
&& !mgp
->mg_no_free_space
)
942 * We didn't find another group to handle the allocation
943 * so we can't skip this metaslab group even though
944 * we are at or over our qmax.
948 } else if (mc
->mc_alloc_groups
== 0 || psize
== SPA_MINBLOCKSIZE
) {
955 * ==========================================================================
956 * Range tree callbacks
957 * ==========================================================================
961 * Comparison function for the private size-ordered tree. Tree is sorted
962 * by size, larger sizes at the end of the tree.
965 metaslab_rangesize_compare(const void *x1
, const void *x2
)
967 const range_seg_t
*r1
= x1
;
968 const range_seg_t
*r2
= x2
;
969 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
970 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
972 if (rs_size1
< rs_size2
)
974 if (rs_size1
> rs_size2
)
977 if (r1
->rs_start
< r2
->rs_start
)
980 if (r1
->rs_start
> r2
->rs_start
)
987 * Create any block allocator specific components. The current allocators
988 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
991 metaslab_rt_create(range_tree_t
*rt
, void *arg
)
993 metaslab_t
*msp
= arg
;
995 ASSERT3P(rt
->rt_arg
, ==, msp
);
996 ASSERT(msp
->ms_tree
== NULL
);
998 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
999 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
1003 * Destroy the block allocator specific components.
1006 metaslab_rt_destroy(range_tree_t
*rt
, void *arg
)
1008 metaslab_t
*msp
= arg
;
1010 ASSERT3P(rt
->rt_arg
, ==, msp
);
1011 ASSERT3P(msp
->ms_tree
, ==, rt
);
1012 ASSERT0(avl_numnodes(&msp
->ms_size_tree
));
1014 avl_destroy(&msp
->ms_size_tree
);
1018 metaslab_rt_add(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
1020 metaslab_t
*msp
= arg
;
1022 ASSERT3P(rt
->rt_arg
, ==, msp
);
1023 ASSERT3P(msp
->ms_tree
, ==, rt
);
1024 VERIFY(!msp
->ms_condensing
);
1025 avl_add(&msp
->ms_size_tree
, rs
);
1029 metaslab_rt_remove(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
1031 metaslab_t
*msp
= arg
;
1033 ASSERT3P(rt
->rt_arg
, ==, msp
);
1034 ASSERT3P(msp
->ms_tree
, ==, rt
);
1035 VERIFY(!msp
->ms_condensing
);
1036 avl_remove(&msp
->ms_size_tree
, rs
);
1040 metaslab_rt_vacate(range_tree_t
*rt
, void *arg
)
1042 metaslab_t
*msp
= arg
;
1044 ASSERT3P(rt
->rt_arg
, ==, msp
);
1045 ASSERT3P(msp
->ms_tree
, ==, rt
);
1048 * Normally one would walk the tree freeing nodes along the way.
1049 * Since the nodes are shared with the range trees we can avoid
1050 * walking all nodes and just reinitialize the avl tree. The nodes
1051 * will be freed by the range tree, so we don't want to free them here.
1053 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
1054 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
1057 static range_tree_ops_t metaslab_rt_ops
= {
1059 metaslab_rt_destroy
,
1066 * ==========================================================================
1067 * Common allocator routines
1068 * ==========================================================================
1072 * Return the maximum contiguous segment within the metaslab.
1075 metaslab_block_maxsize(metaslab_t
*msp
)
1077 avl_tree_t
*t
= &msp
->ms_size_tree
;
1080 if (t
== NULL
|| (rs
= avl_last(t
)) == NULL
)
1083 return (rs
->rs_end
- rs
->rs_start
);
1086 static range_seg_t
*
1087 metaslab_block_find(avl_tree_t
*t
, uint64_t start
, uint64_t size
)
1089 range_seg_t
*rs
, rsearch
;
1092 rsearch
.rs_start
= start
;
1093 rsearch
.rs_end
= start
+ size
;
1095 rs
= avl_find(t
, &rsearch
, &where
);
1097 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1104 * This is a helper function that can be used by the allocator to find
1105 * a suitable block to allocate. This will search the specified AVL
1106 * tree looking for a block that matches the specified criteria.
1109 metaslab_block_picker(avl_tree_t
*t
, uint64_t *cursor
, uint64_t size
,
1112 range_seg_t
*rs
= metaslab_block_find(t
, *cursor
, size
);
1114 while (rs
!= NULL
) {
1115 uint64_t offset
= P2ROUNDUP(rs
->rs_start
, align
);
1117 if (offset
+ size
<= rs
->rs_end
) {
1118 *cursor
= offset
+ size
;
1121 rs
= AVL_NEXT(t
, rs
);
1125 * If we know we've searched the whole map (*cursor == 0), give up.
1126 * Otherwise, reset the cursor to the beginning and try again.
1132 return (metaslab_block_picker(t
, cursor
, size
, align
));
1136 * ==========================================================================
1137 * The first-fit block allocator
1138 * ==========================================================================
1141 metaslab_ff_alloc(metaslab_t
*msp
, uint64_t size
)
1144 * Find the largest power of 2 block size that evenly divides the
1145 * requested size. This is used to try to allocate blocks with similar
1146 * alignment from the same area of the metaslab (i.e. same cursor
1147 * bucket), but it does not guarantee that allocations of other
1148 * sizes will not exist in the same region.
1150 uint64_t align
= size
& -size
;
1151 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1152 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
1154 return (metaslab_block_picker(t
, cursor
, size
, align
));
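/*
 * Illustrative note (not part of the original code): "size & -size" isolates
 * the lowest set bit of the request, e.g. a 24K (0x6000) request yields an
 * 8K (0x2000) alignment, so the allocation above is drawn from the 8K cursor
 * bucket in ms_lbas.
 */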
1157 static metaslab_ops_t metaslab_ff_ops
= {
1162 * ==========================================================================
1163 * Dynamic block allocator -
1164 * Uses the first fit allocation scheme until space gets low and then
1165 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1166 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1167 * ==========================================================================
1170 metaslab_df_alloc(metaslab_t
*msp
, uint64_t size
)
1173 * Find the largest power of 2 block size that evenly divides the
1174 * requested size. This is used to try to allocate blocks with similar
1175 * alignment from the same area of the metaslab (i.e. same cursor
1176 * bucket), but it does not guarantee that allocations of other
1177 * sizes will not exist in the same region.
1179 uint64_t align
= size
& -size
;
1180 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
1181 range_tree_t
*rt
= msp
->ms_tree
;
1182 avl_tree_t
*t
= &rt
->rt_root
;
1183 uint64_t max_size
= metaslab_block_maxsize(msp
);
1184 int free_pct
= range_tree_space(rt
) * 100 / msp
->ms_size
;
1186 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1187 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
1189 if (max_size
< size
)
1193 * If we're running low on space switch to using the size
1194 * sorted AVL tree (best-fit).
1196 if (max_size
< metaslab_df_alloc_threshold
||
1197 free_pct
< metaslab_df_free_pct
) {
1198 t
= &msp
->ms_size_tree
;
1202 return (metaslab_block_picker(t
, cursor
, size
, 1ULL));
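/*
 * Illustrative note (not part of the original code): with the defaults above,
 * a metaslab switches from first-fit to best-fit once its largest free
 * segment drops below metaslab_df_alloc_threshold (SPA_OLD_MAXBLOCKSIZE,
 * i.e. 128K) or less than metaslab_df_free_pct (4%) of the metaslab is free.
 */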
1205 static metaslab_ops_t metaslab_df_ops
= {
1210 * ==========================================================================
1211 * Cursor fit block allocator -
1212 * Select the largest region in the metaslab, set the cursor to the beginning
1213 * of the range and the cursor_end to the end of the range. As allocations
1214 * are made advance the cursor. Continue allocating from the cursor until
1215 * the range is exhausted and then find a new range.
1216 * ==========================================================================
1219 metaslab_cf_alloc(metaslab_t
*msp
, uint64_t size
)
1221 range_tree_t
*rt
= msp
->ms_tree
;
1222 avl_tree_t
*t
= &msp
->ms_size_tree
;
1223 uint64_t *cursor
= &msp
->ms_lbas
[0];
1224 uint64_t *cursor_end
= &msp
->ms_lbas
[1];
1225 uint64_t offset
= 0;
1227 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1228 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&rt
->rt_root
));
1230 ASSERT3U(*cursor_end
, >=, *cursor
);
1232 if ((*cursor
+ size
) > *cursor_end
) {
1235 rs
= avl_last(&msp
->ms_size_tree
);
1236 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
)
1239 *cursor
= rs
->rs_start
;
1240 *cursor_end
= rs
->rs_end
;
1249 static metaslab_ops_t metaslab_cf_ops
= {
1254 * ==========================================================================
1255 * New dynamic fit allocator -
1256 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1257 * contiguous blocks. If no region is found then just use the largest segment that remains.
1259 * ==========================================================================
1263 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1264 * to request from the allocator.
1266 uint64_t metaslab_ndf_clump_shift
= 4;
1269 metaslab_ndf_alloc(metaslab_t
*msp
, uint64_t size
)
1271 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
1273 range_seg_t
*rs
, rsearch
;
1274 uint64_t hbit
= highbit64(size
);
1275 uint64_t *cursor
= &msp
->ms_lbas
[hbit
- 1];
1276 uint64_t max_size
= metaslab_block_maxsize(msp
);
1278 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1279 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
1281 if (max_size
< size
)
1284 rsearch
.rs_start
= *cursor
;
1285 rsearch
.rs_end
= *cursor
+ size
;
1287 rs
= avl_find(t
, &rsearch
, &where
);
1288 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
) {
1289 t
= &msp
->ms_size_tree
;
1291 rsearch
.rs_start
= 0;
1292 rsearch
.rs_end
= MIN(max_size
,
1293 1ULL << (hbit
+ metaslab_ndf_clump_shift
));
1294 rs
= avl_find(t
, &rsearch
, &where
);
1296 rs
= avl_nearest(t
, where
, AVL_AFTER
);
1300 if ((rs
->rs_end
- rs
->rs_start
) >= size
) {
1301 *cursor
= rs
->rs_start
+ size
;
1302 return (rs
->rs_start
);
1307 static metaslab_ops_t metaslab_ndf_ops
= {
1311 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_df_ops
;
1314 * ==========================================================================
1315 * Metaslabs
1316 * ==========================================================================
1320 * Wait for any in-progress metaslab loads to complete.
1323 metaslab_load_wait(metaslab_t
*msp
)
1325 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1327 while (msp
->ms_loading
) {
1328 ASSERT(!msp
->ms_loaded
);
1329 cv_wait(&msp
->ms_load_cv
, &msp
->ms_lock
);
1334 metaslab_load(metaslab_t
*msp
)
1337 boolean_t success
= B_FALSE
;
1339 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1340 ASSERT(!msp
->ms_loaded
);
1341 ASSERT(!msp
->ms_loading
);
1343 msp
->ms_loading
= B_TRUE
;
1346 * If the space map has not been allocated yet, then treat
1347 * all the space in the metaslab as free and add it to the ms_tree.
1350 if (msp
->ms_sm
!= NULL
)
1351 error
= space_map_load(msp
->ms_sm
, msp
->ms_tree
, SM_FREE
);
1353 range_tree_add(msp
->ms_tree
, msp
->ms_start
, msp
->ms_size
);
1355 success
= (error
== 0);
1356 msp
->ms_loading
= B_FALSE
;
1359 ASSERT3P(msp
->ms_group
, !=, NULL
);
1360 msp
->ms_loaded
= B_TRUE
;
1362 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1363 range_tree_walk(msp
->ms_defertree
[t
],
1364 range_tree_remove
, msp
->ms_tree
);
1366 msp
->ms_max_size
= metaslab_block_maxsize(msp
);
1368 cv_broadcast(&msp
->ms_load_cv
);
1373 metaslab_unload(metaslab_t
*msp
)
1375 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1376 range_tree_vacate(msp
->ms_tree
, NULL
, NULL
);
1377 msp
->ms_loaded
= B_FALSE
;
1378 msp
->ms_weight
&= ~METASLAB_ACTIVE_MASK
;
1379 msp
->ms_max_size
= 0;
1383 metaslab_init(metaslab_group_t
*mg
, uint64_t id
, uint64_t object
, uint64_t txg
,
1386 vdev_t
*vd
= mg
->mg_vd
;
1387 objset_t
*mos
= vd
->vdev_spa
->spa_meta_objset
;
1391 ms
= kmem_zalloc(sizeof (metaslab_t
), KM_SLEEP
);
1392 mutex_init(&ms
->ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1393 cv_init(&ms
->ms_load_cv
, NULL
, CV_DEFAULT
, NULL
);
1395 ms
->ms_start
= id
<< vd
->vdev_ms_shift
;
1396 ms
->ms_size
= 1ULL << vd
->vdev_ms_shift
;
1399 * We only open space map objects that already exist. All others
1400 * will be opened when we finally allocate an object for it.
1403 error
= space_map_open(&ms
->ms_sm
, mos
, object
, ms
->ms_start
,
1404 ms
->ms_size
, vd
->vdev_ashift
, &ms
->ms_lock
);
1407 kmem_free(ms
, sizeof (metaslab_t
));
1411 ASSERT(ms
->ms_sm
!= NULL
);
1415 * We create the main range tree here, but we don't create the
1416 * other range trees until metaslab_sync_done(). This serves
1417 * two purposes: it allows metaslab_sync_done() to detect the
1418 * addition of new space; and for debugging, it ensures that we'd
1419 * data fault on any attempt to use this metaslab before it's ready.
1421 ms
->ms_tree
= range_tree_create(&metaslab_rt_ops
, ms
, &ms
->ms_lock
);
1422 metaslab_group_add(mg
, ms
);
1424 metaslab_set_fragmentation(ms
);
1427 * If we're opening an existing pool (txg == 0) or creating
1428 * a new one (txg == TXG_INITIAL), all space is available now.
1429 * If we're adding space to an existing pool, the new space
1430 * does not become available until after this txg has synced.
1431 * The metaslab's weight will also be initialized when we sync
1432 * out this txg. This ensures that we don't attempt to allocate
1433 * from it before we have initialized it completely.
1435 if (txg
<= TXG_INITIAL
)
1436 metaslab_sync_done(ms
, 0);
1439 * If metaslab_debug_load is set and we're initializing a metaslab
1440 * that has an allocated space map object, then load its space
1441 * map so that we can verify frees.
1443 if (metaslab_debug_load
&& ms
->ms_sm
!= NULL
) {
1444 mutex_enter(&ms
->ms_lock
);
1445 VERIFY0(metaslab_load(ms
));
1446 mutex_exit(&ms
->ms_lock
);
1450 vdev_dirty(vd
, 0, NULL
, txg
);
1451 vdev_dirty(vd
, VDD_METASLAB
, ms
, txg
);
1460 metaslab_fini(metaslab_t
*msp
)
1462 metaslab_group_t
*mg
= msp
->ms_group
;
1464 metaslab_group_remove(mg
, msp
);
1466 mutex_enter(&msp
->ms_lock
);
1467 VERIFY(msp
->ms_group
== NULL
);
1468 vdev_space_update(mg
->mg_vd
, -space_map_allocated(msp
->ms_sm
),
1470 space_map_close(msp
->ms_sm
);
1472 metaslab_unload(msp
);
1473 range_tree_destroy(msp
->ms_tree
);
1474 range_tree_destroy(msp
->ms_freeingtree
);
1475 range_tree_destroy(msp
->ms_freedtree
);
1477 for (int t
= 0; t
< TXG_SIZE
; t
++) {
1478 range_tree_destroy(msp
->ms_alloctree
[t
]);
1481 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1482 range_tree_destroy(msp
->ms_defertree
[t
]);
1485 ASSERT0(msp
->ms_deferspace
);
1487 mutex_exit(&msp
->ms_lock
);
1488 cv_destroy(&msp
->ms_load_cv
);
1489 mutex_destroy(&msp
->ms_lock
);
1491 kmem_free(msp
, sizeof (metaslab_t
));
1494 #define FRAGMENTATION_TABLE_SIZE 17
1497 * This table defines a segment size based fragmentation metric that will
1498 * allow each metaslab to derive its own fragmentation value. This is done
1499 * by calculating the space in each bucket of the spacemap histogram and
1500 * multiplying that by the fragmentation metric in this table. Doing
1501 * this for all buckets and dividing it by the total amount of free
1502 * space in this metaslab (i.e. the total free space in all buckets) gives
1503 * us the fragmentation metric. This means that a high fragmentation metric
1504 * equates to most of the free space being comprised of small segments.
1505 * Conversely, if the metric is low, then most of the free space is in
1506 * large segments. A 10% change in fragmentation equates to approximately
1507 * double the number of segments.
1509 * This table defines 0% fragmented space using 16MB segments. Testing has
1510 * shown that segments that are greater than or equal to 16MB do not suffer
1511 * from drastic performance problems. Using this value, we derive the rest
1512 * of the table. Since the fragmentation value is never stored on disk, it
1513 * is possible to change these calculations in the future.
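/*
 * Worked example (hypothetical table values, except the 16MB entry implied
 * above): if half of a metaslab's free space sits in 4K segments with a
 * table factor of 95 and the other half in 16MB segments with a factor of 0,
 * the derived metric is (0.5 * 95) + (0.5 * 0) = ~47% fragmented.
 */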
1515 int zfs_frag_table
[FRAGMENTATION_TABLE_SIZE
] = {
1535 * Calculate the metaslab's fragmentation metric. A return value
1536 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1537 * not support this metric. Otherwise, the return value should be in the range [0, 100].
1541 metaslab_set_fragmentation(metaslab_t
*msp
)
1543 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1544 uint64_t fragmentation
= 0;
1546 boolean_t feature_enabled
= spa_feature_is_enabled(spa
,
1547 SPA_FEATURE_SPACEMAP_HISTOGRAM
);
1549 if (!feature_enabled
) {
1550 msp
->ms_fragmentation
= ZFS_FRAG_INVALID
;
1555 * A null space map means that the entire metaslab is free
1556 * and thus is not fragmented.
1558 if (msp
->ms_sm
== NULL
) {
1559 msp
->ms_fragmentation
= 0;
1564 * If this metaslab's space map has not been upgraded, flag it
1565 * so that we upgrade next time we encounter it.
1567 if (msp
->ms_sm
->sm_dbuf
->db_size
!= sizeof (space_map_phys_t
)) {
1568 uint64_t txg
= spa_syncing_txg(spa
);
1569 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1572 * If we've reached the final dirty txg, then we must
1573 * be shutting down the pool. We don't want to dirty
1574 * any data past this point so skip setting the condense
1575 * flag. We can retry this action the next time the pool
1578 if (spa_writeable(spa
) && txg
< spa_final_dirty_txg(spa
)) {
1579 msp
->ms_condense_wanted
= B_TRUE
;
1580 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
+ 1);
1581 spa_dbgmsg(spa
, "txg %llu, requesting force condense: "
1582 "ms_id %llu, vdev_id %llu", txg
, msp
->ms_id
,
1585 msp
->ms_fragmentation
= ZFS_FRAG_INVALID
;
1589 for (int i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE
; i
++) {
1591 uint8_t shift
= msp
->ms_sm
->sm_shift
;
1593 int idx
= MIN(shift
- SPA_MINBLOCKSHIFT
+ i
,
1594 FRAGMENTATION_TABLE_SIZE
- 1);
1596 if (msp
->ms_sm
->sm_phys
->smp_histogram
[i
] == 0)
1599 space
= msp
->ms_sm
->sm_phys
->smp_histogram
[i
] << (i
+ shift
);
1602 ASSERT3U(idx
, <, FRAGMENTATION_TABLE_SIZE
);
1603 fragmentation
+= space
* zfs_frag_table
[idx
];
1607 fragmentation
/= total
;
1608 ASSERT3U(fragmentation
, <=, 100);
1610 msp
->ms_fragmentation
= fragmentation
;
1614 * Compute a weight -- a selection preference value -- for the given metaslab.
1615 * This is based on the amount of free space, the level of fragmentation,
1616 * the LBA range, and whether the metaslab is loaded.
1619 metaslab_space_weight(metaslab_t
*msp
)
1621 metaslab_group_t
*mg
= msp
->ms_group
;
1622 vdev_t
*vd
= mg
->mg_vd
;
1623 uint64_t weight
, space
;
1625 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1626 ASSERT(!vd
->vdev_removing
);
1629 * The baseline weight is the metaslab's free space.
1631 space
= msp
->ms_size
- space_map_allocated(msp
->ms_sm
);
1633 if (metaslab_fragmentation_factor_enabled
&&
1634 msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
) {
1636 * Use the fragmentation information to inversely scale
1637 * down the baseline weight. We need to ensure that we
1638 * don't exclude this metaslab completely when it's 100%
1639 * fragmented. To avoid this we reduce the fragmented value by 1.
1642 space
= (space
* (100 - (msp
->ms_fragmentation
- 1))) / 100;
1645 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1646 * this metaslab again. The fragmentation metric may have
1647 * decreased the space to something smaller than
1648 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1649 * so that we can consume any remaining space.
1651 if (space
> 0 && space
< SPA_MINBLOCKSIZE
)
1652 space
= SPA_MINBLOCKSIZE
;
1657 * Modern disks have uniform bit density and constant angular velocity.
1658 * Therefore, the outer recording zones are faster (higher bandwidth)
1659 * than the inner zones by the ratio of outer to inner track diameter,
1660 * which is typically around 2:1. We account for this by assigning
1661 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1662 * In effect, this means that we'll select the metaslab with the most
1663 * free bandwidth rather than simply the one with the most free space.
1665 if (metaslab_lba_weighting_enabled
) {
1666 weight
= 2 * weight
- (msp
->ms_id
* weight
) / vd
->vdev_ms_count
;
1667 ASSERT(weight
>= space
&& weight
<= 2 * space
);
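/*
 * Illustrative example (not part of the original code): on a vdev with 100
 * metaslabs, metaslab 0 is weighted at 2x its free space, metaslab 50 at
 * roughly 1.5x, and metaslab 99 at just over 1x, matching the outer-to-inner
 * bandwidth ratio described above.
 */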
1671 * If this metaslab is one we're actively using, adjust its
1672 * weight to make it preferable to any inactive metaslab so
1673 * we'll polish it off. If the fragmentation on this metaslab
1674 * has exceeded our threshold, then don't mark it active.
1676 if (msp
->ms_loaded
&& msp
->ms_fragmentation
!= ZFS_FRAG_INVALID
&&
1677 msp
->ms_fragmentation
<= zfs_metaslab_fragmentation_threshold
) {
1678 weight
|= (msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1681 WEIGHT_SET_SPACEBASED(weight
);
1686 * Return the weight of the specified metaslab, according to the segment-based
1687 * weighting algorithm. The metaslab must be loaded. This function can
1688 * be called within a sync pass since it relies only on the metaslab's
1689 * range tree which is always accurate when the metaslab is loaded.
1692 metaslab_weight_from_range_tree(metaslab_t
*msp
)
1694 uint64_t weight
= 0;
1695 uint32_t segments
= 0;
1697 ASSERT(msp
->ms_loaded
);
1699 for (int i
= RANGE_TREE_HISTOGRAM_SIZE
- 1; i
>= SPA_MINBLOCKSHIFT
;
1701 uint8_t shift
= msp
->ms_group
->mg_vd
->vdev_ashift
;
1702 int max_idx
= SPACE_MAP_HISTOGRAM_SIZE
+ shift
- 1;
1705 segments
+= msp
->ms_tree
->rt_histogram
[i
];
1708 * The range tree provides more precision than the space map
1709 * and must be downgraded so that all values fit within the
1710 * space map's histogram. This allows us to compare loaded
1711 * vs. unloaded metaslabs to determine which metaslab is
1712 * considered "best".
1717 if (segments
!= 0) {
1718 WEIGHT_SET_COUNT(weight
, segments
);
1719 WEIGHT_SET_INDEX(weight
, i
);
1720 WEIGHT_SET_ACTIVE(weight
, 0);
1728 * Calculate the weight based on the on-disk histogram. This should only
1729 * be called after a sync pass has completely finished since the on-disk
1730 * information is updated in metaslab_sync().
1733 metaslab_weight_from_spacemap(metaslab_t
*msp
)
1735 uint64_t weight
= 0;
1737 for (int i
= SPACE_MAP_HISTOGRAM_SIZE
- 1; i
>= 0; i
--) {
1738 if (msp
->ms_sm
->sm_phys
->smp_histogram
[i
] != 0) {
1739 WEIGHT_SET_COUNT(weight
,
1740 msp
->ms_sm
->sm_phys
->smp_histogram
[i
]);
1741 WEIGHT_SET_INDEX(weight
, i
+
1742 msp
->ms_sm
->sm_shift
);
1743 WEIGHT_SET_ACTIVE(weight
, 0);
1751 * Compute a segment-based weight for the specified metaslab. The weight
1752 * is determined by highest bucket in the histogram. The information
1753 * for the highest bucket is encoded into the weight value.
1756 metaslab_segment_weight(metaslab_t
*msp
)
1758 metaslab_group_t
*mg
= msp
->ms_group
;
1759 uint64_t weight
= 0;
1760 uint8_t shift
= mg
->mg_vd
->vdev_ashift
;
1762 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1765 * The metaslab is completely free.
1767 if (space_map_allocated(msp
->ms_sm
) == 0) {
1768 int idx
= highbit64(msp
->ms_size
) - 1;
1769 int max_idx
= SPACE_MAP_HISTOGRAM_SIZE
+ shift
- 1;
1771 if (idx
< max_idx
) {
1772 WEIGHT_SET_COUNT(weight
, 1ULL);
1773 WEIGHT_SET_INDEX(weight
, idx
);
1775 WEIGHT_SET_COUNT(weight
, 1ULL << (idx
- max_idx
));
1776 WEIGHT_SET_INDEX(weight
, max_idx
);
1778 WEIGHT_SET_ACTIVE(weight
, 0);
1779 ASSERT(!WEIGHT_IS_SPACEBASED(weight
));
1784 ASSERT3U(msp
->ms_sm
->sm_dbuf
->db_size
, ==, sizeof (space_map_phys_t
));
1787 * If the metaslab is fully allocated then just make the weight 0.
1789 if (space_map_allocated(msp
->ms_sm
) == msp
->ms_size
)
1792 * If the metaslab is already loaded, then use the range tree to
1793 * determine the weight. Otherwise, we rely on the space map information
1794 * to generate the weight.
1796 if (msp
->ms_loaded
) {
1797 weight
= metaslab_weight_from_range_tree(msp
);
1799 weight
= metaslab_weight_from_spacemap(msp
);
1803 * If the metaslab was active the last time we calculated its weight
1804 * then keep it active. We want to consume the entire region that
1805 * is associated with this weight.
1807 if (msp
->ms_activation_weight
!= 0 && weight
!= 0)
1808 WEIGHT_SET_ACTIVE(weight
, WEIGHT_GET_ACTIVE(msp
->ms_weight
));
1813 * Determine if we should attempt to allocate from this metaslab. If the
1814 * metaslab has a maximum size then we can quickly determine if the desired
1815 * allocation size can be satisfied. Otherwise, if we're using segment-based
1816 * weighting then we can determine the maximum allocation that this metaslab
1817 * can accommodate based on the index encoded in the weight. If we're using
1818 * space-based weights then rely on the entire weight (excluding the weight type).
1822 metaslab_should_allocate(metaslab_t
*msp
, uint64_t asize
)
1824 boolean_t should_allocate
;
1826 if (msp
->ms_max_size
!= 0)
1827 return (msp
->ms_max_size
>= asize
);
1829 if (!WEIGHT_IS_SPACEBASED(msp
->ms_weight
)) {
1831 * The metaslab segment weight indicates segments in the
1832 * range [2^i, 2^(i+1)), where i is the index in the weight.
1833 * Since the asize might be in the middle of the range, we
1834 * should attempt the allocation if asize < 2^(i+1).
1836 should_allocate
= (asize
<
1837 1ULL << (WEIGHT_GET_INDEX(msp
->ms_weight
) + 1));
1839 should_allocate
= (asize
<=
1840 (msp
->ms_weight
& ~METASLAB_WEIGHT_TYPE
));
1842 return (should_allocate
);
1846 metaslab_weight(metaslab_t
*msp
)
1848 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1849 spa_t
*spa
= vd
->vdev_spa
;
1852 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1855 * This vdev is in the process of being removed so there is nothing
1856 * for us to do here.
1858 if (vd
->vdev_removing
) {
1859 ASSERT0(space_map_allocated(msp
->ms_sm
));
1860 ASSERT0(vd
->vdev_ms_shift
);
1864 metaslab_set_fragmentation(msp
);
1867 * Update the maximum size if the metaslab is loaded. This will
1868 * ensure that we get an accurate maximum size if newly freed space
1869 * has been added back into the free tree.
1872 msp
->ms_max_size
= metaslab_block_maxsize(msp
);
1875 * Segment-based weighting requires space map histogram support.
1877 if (zfs_metaslab_segment_weight_enabled
&&
1878 spa_feature_is_enabled(spa
, SPA_FEATURE_SPACEMAP_HISTOGRAM
) &&
1879 (msp
->ms_sm
== NULL
|| msp
->ms_sm
->sm_dbuf
->db_size
==
1880 sizeof (space_map_phys_t
))) {
1881 weight
= metaslab_segment_weight(msp
);
1883 weight
= metaslab_space_weight(msp
);
1889 metaslab_activate(metaslab_t
*msp
, uint64_t activation_weight
)
1891 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1893 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0) {
1894 metaslab_load_wait(msp
);
1895 if (!msp
->ms_loaded
) {
1896 int error
= metaslab_load(msp
);
1898 metaslab_group_sort(msp
->ms_group
, msp
, 0);
1903 msp
->ms_activation_weight
= msp
->ms_weight
;
1904 metaslab_group_sort(msp
->ms_group
, msp
,
1905 msp
->ms_weight
| activation_weight
);
1907 ASSERT(msp
->ms_loaded
);
1908 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1914 metaslab_passivate(metaslab_t
*msp
, uint64_t weight
)
1916 uint64_t size
= weight
& ~METASLAB_WEIGHT_TYPE
;
1919 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1920 * this metaslab again. In that case, it had better be empty,
1921 * or we would be leaving space on the table.
1923 ASSERT(size
>= SPA_MINBLOCKSIZE
||
1924 range_tree_space(msp
->ms_tree
) == 0);
1925 ASSERT0(weight
& METASLAB_ACTIVE_MASK
);
1927 msp
->ms_activation_weight
= 0;
1928 metaslab_group_sort(msp
->ms_group
, msp
, weight
);
1929 ASSERT((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0);
1933 * Segment-based metaslabs are activated once and remain active until
1934 * we either fail an allocation attempt (similar to space-based metaslabs)
1935 * or have exhausted the free space in zfs_metaslab_switch_threshold
1936 * buckets since the metaslab was activated. This function checks to see
1937 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
1938 * metaslab and passivates it proactively. This will allow us to select a
1939 * metaslab with a larger contiguous region, if any remain within this
1940 * metaslab group. If we're in sync pass > 1, then we continue using this
1941 * metaslab so that we don't dirty more blocks and cause more sync passes.
1944 metaslab_segment_may_passivate(metaslab_t
*msp
)
1946 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1948 if (WEIGHT_IS_SPACEBASED(msp
->ms_weight
) || spa_sync_pass(spa
) > 1)
1952 * Since we are in the middle of a sync pass, the most accurate
1953 * information that is accessible to us is the in-core range tree
1954 * histogram; calculate the new weight based on that information.
1956 uint64_t weight
= metaslab_weight_from_range_tree(msp
);
1957 int activation_idx
= WEIGHT_GET_INDEX(msp
->ms_activation_weight
);
1958 int current_idx
= WEIGHT_GET_INDEX(weight
);
1960 if (current_idx
<= activation_idx
- zfs_metaslab_switch_threshold
)
1961 metaslab_passivate(msp
, weight
);
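/*
 * Illustrative example (not part of the original code): if the metaslab was
 * activated with weight index 23 (8M segments) and
 * zfs_metaslab_switch_threshold is 2, it is passivated once the largest
 * remaining free segments fall to index 21 (2M) or below.
 */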
1965 metaslab_preload(void *arg
)
1967 metaslab_t
*msp
= arg
;
1968 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1970 ASSERT(!MUTEX_HELD(&msp
->ms_group
->mg_lock
));
1972 mutex_enter(&msp
->ms_lock
);
1973 metaslab_load_wait(msp
);
1974 if (!msp
->ms_loaded
)
1975 (void) metaslab_load(msp
);
1976 msp
->ms_selected_txg
= spa_syncing_txg(spa
);
1977 mutex_exit(&msp
->ms_lock
);
1981 metaslab_group_preload(metaslab_group_t
*mg
)
1983 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1985 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
1988 if (spa_shutting_down(spa
) || !metaslab_preload_enabled
) {
1989 taskq_wait(mg
->mg_taskq
);
1993 mutex_enter(&mg
->mg_lock
);
1995 * Load the next potential metaslabs
1997 for (msp
= avl_first(t
); msp
!= NULL
; msp
= AVL_NEXT(t
, msp
)) {
1999 * We preload only the maximum number of metaslabs specified
2000 * by metaslab_preload_limit. If a metaslab is being forced
2001 * to condense then we preload it too. This will ensure
2002 * that force condensing happens in the next txg.
2004 if (++m
> metaslab_preload_limit
&& !msp
->ms_condense_wanted
) {
2008 VERIFY(taskq_dispatch(mg
->mg_taskq
, metaslab_preload
,
2009 msp
, TQ_SLEEP
) != NULL
);
2011 mutex_exit(&mg
->mg_lock
);
2015 * Determine if the space map's on-disk footprint is past our tolerance
2016 * for inefficiency. We would like to use the following criteria to make our decision:
2019 * 1. The size of the space map object should not dramatically increase as a
2020 * result of writing out the free space range tree.
2022 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
2023 * times the size than the free space range tree representation
2024 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1.MB).
2026 * 3. The on-disk size of the space map should actually decrease.
2028 * Checking the first condition is tricky since we don't want to walk
2029 * the entire AVL tree calculating the estimated on-disk size. Instead we
2030 * use the size-ordered range tree in the metaslab and calculate the
2031 * size required to write out the largest segment in our free tree. If the
2032 * size required to represent that segment on disk is larger than the space
2033 * map object then we avoid condensing this map.
2035 * To determine the second criterion we use a best-case estimate and assume
2036 * each segment can be represented on-disk as a single 64-bit entry. We refer
2037 * to this best-case estimate as the space map's minimal form.
2039 * Unfortunately, we cannot compute the on-disk size of the space map in this
2040 * context because we cannot accurately compute the effects of compression, etc.
2041 * Instead, we apply the heuristic described in the block comment for
2042 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2043 * is greater than a threshold number of blocks.
2046 metaslab_should_condense(metaslab_t
*msp
)
2048 space_map_t
*sm
= msp
->ms_sm
;
2050 uint64_t size
, entries
, segsz
, object_size
, optimal_size
, record_size
;
2051 dmu_object_info_t doi
;
2052 uint64_t vdev_blocksize
= 1 << msp
->ms_group
->mg_vd
->vdev_ashift
;
2054 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
2055 ASSERT(msp
->ms_loaded
);
2058 * Use the ms_size_tree range tree, which is ordered by size, to
2059 * obtain the largest segment in the free tree. We always condense
2060 * metaslabs that are empty and metaslabs for which a condense
2061 * request has been made.
2063 rs
= avl_last(&msp
->ms_size_tree
);
2064 if (rs
== NULL
|| msp
->ms_condense_wanted
)
2068 * Calculate the number of 64-bit entries this segment would
2069 * require when written to disk. If this single segment would be
2070 * larger on-disk than the entire current on-disk structure, then
2071 * clearly condensing will increase the on-disk structure size.
2073 size
= (rs
->rs_end
- rs
->rs_start
) >> sm
->sm_shift
;
2074 entries
= size
/ (MIN(size
, SM_RUN_MAX
));
2075 segsz
= entries
* sizeof (uint64_t);
2077 optimal_size
= sizeof (uint64_t) * avl_numnodes(&msp
->ms_tree
->rt_root
);
2078 object_size
= space_map_length(msp
->ms_sm
);
2080 dmu_object_info_from_db(sm
->sm_dbuf
, &doi
);
2081 record_size
= MAX(doi
.doi_data_block_size
, vdev_blocksize
);
2083 return (segsz
<= object_size
&&
2084 object_size
>= (optimal_size
* zfs_condense_pct
/ 100) &&
2085 object_size
> zfs_metaslab_condense_block_threshold
* record_size
);

/*
 * Condense the on-disk space map representation to its minimized form.
 * The minimized form consists of a small number of allocations followed by
 * the entries of the free range tree.
 */
static void
metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	range_tree_t *condense_tree;
	space_map_t *sm = msp->ms_sm;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(spa_sync_pass(spa), ==, 1);
	ASSERT(msp->ms_loaded);

	spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
	    "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
	    msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
	    msp->ms_group->mg_vd->vdev_spa->spa_name,
	    space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
	    msp->ms_condense_wanted ? "TRUE" : "FALSE");

	msp->ms_condense_wanted = B_FALSE;

	/*
	 * Create a range tree that is 100% allocated. We remove segments
	 * that have been freed in this txg, any deferred frees that exist,
	 * and any allocation in the future. Removing segments should be
	 * a relatively inexpensive operation since we expect these trees to
	 * have a small number of nodes.
	 */
	condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
	range_tree_add(condense_tree, msp->ms_start, msp->ms_size);

	/*
	 * Remove what's been freed in this txg from the condense_tree.
	 * Since we're in sync_pass 1, we know that all the frees from
	 * this txg are in the freeingtree.
	 */
	range_tree_walk(msp->ms_freeingtree, range_tree_remove, condense_tree);

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_walk(msp->ms_defertree[t],
		    range_tree_remove, condense_tree);
	}

	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
		range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
		    range_tree_remove, condense_tree);
	}

	/*
	 * We're about to drop the metaslab's lock thus allowing
	 * other consumers to change its content. Set the
	 * metaslab's ms_condensing flag to ensure that
	 * allocations on this metaslab do not occur while we're
	 * in the middle of committing it to disk. This is only critical
	 * for the ms_tree as all other range trees use per txg
	 * views of their content.
	 */
	msp->ms_condensing = B_TRUE;

	mutex_exit(&msp->ms_lock);
	space_map_truncate(sm, tx);
	mutex_enter(&msp->ms_lock);

	/*
	 * While we would ideally like to create a space map representation
	 * that consists only of allocation records, doing so can be
	 * prohibitively expensive because the in-core free tree can be
	 * large, and therefore computationally expensive to subtract
	 * from the condense_tree. Instead we sync out two trees, a cheap
	 * allocation only tree followed by the in-core free tree. While not
	 * optimal, this is typically close to optimal, and much cheaper to
	 * compute.
	 */
	space_map_write(sm, condense_tree, SM_ALLOC, tx);
	range_tree_vacate(condense_tree, NULL, NULL);
	range_tree_destroy(condense_tree);

	space_map_write(sm, msp->ms_tree, SM_FREE, tx);
	msp->ms_condensing = B_FALSE;
}
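
/*
 * Illustrative sketch (editorial note, not from the original source): after
 * a condense, the on-disk stream for this metaslab is a short run of ALLOC
 * entries covering the metaslab minus this txg's frees, the deferred frees
 * and future-txg allocations, followed by one FREE entry per segment in
 * ms_tree; the net of the two is the allocated space. A metaslab with only
 * a handful of free segments therefore collapses to a handful of records,
 * regardless of how long its append-only history had grown.
 */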

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);
	range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
	dmu_tx_t *tx;
	uint64_t object = space_map_object(msp->ms_sm);

	ASSERT(!vd->vdev_ishole);

	/*
	 * This metaslab has just been added so there's no work to do now.
	 */
	if (msp->ms_freeingtree == NULL) {
		ASSERT3P(alloctree, ==, NULL);
		return;
	}

	ASSERT3P(alloctree, !=, NULL);
	ASSERT3P(msp->ms_freeingtree, !=, NULL);
	ASSERT3P(msp->ms_freedtree, !=, NULL);

	/*
	 * Normally, we don't want to process a metaslab if there
	 * are no allocations or frees to perform. However, if the metaslab
	 * is being forced to condense and it's loaded, we need to let it
	 * through.
	 */
	if (range_tree_space(alloctree) == 0 &&
	    range_tree_space(msp->ms_freeingtree) == 0 &&
	    !(msp->ms_loaded && msp->ms_condense_wanted))
		return;

	VERIFY(txg <= spa_final_dirty_txg(spa));

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_tree.  No other thread can
	 * be modifying this txg's alloctree, freeingtree, freedtree, or
	 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
	 * space map ASSERTs. We drop it whenever we call into the DMU,
	 * because the DMU can call down to us (e.g. via zio_free()) at
	 * any time.
	 */

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (msp->ms_sm == NULL) {
		uint64_t new_object;

		new_object = space_map_alloc(mos, tx);
		VERIFY3U(new_object, !=, 0);

		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
		    msp->ms_start, msp->ms_size, vd->vdev_ashift,
		    &msp->ms_lock));
		ASSERT(msp->ms_sm != NULL);
	}

	mutex_enter(&msp->ms_lock);

	/*
	 * Note: metaslab_condense() clears the space map's histogram.
	 * Therefore we must verify and remove this histogram before
	 * condensing.
	 */
	metaslab_group_histogram_verify(mg);
	metaslab_class_histogram_verify(mg->mg_class);
	metaslab_group_histogram_remove(mg, msp);

	if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
	    metaslab_should_condense(msp)) {
		metaslab_condense(msp, txg, tx);
	} else {
		space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
		space_map_write(msp->ms_sm, msp->ms_freeingtree, SM_FREE, tx);
	}

	if (msp->ms_loaded) {
		/*
		 * When the space map is loaded, we have an accurate
		 * histogram in the range tree. This gives us an opportunity
		 * to bring the space map's histogram up-to-date so we clear
		 * it first before updating it.
		 */
		space_map_histogram_clear(msp->ms_sm);
		space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);

		/*
		 * Since we've cleared the histogram we need to add back
		 * any free space that has already been processed, plus
		 * any deferred space. This allows the on-disk histogram
		 * to accurately reflect all free space even if some space
		 * is not yet available for allocation (i.e. deferred).
		 */
		space_map_histogram_add(msp->ms_sm, msp->ms_freedtree, tx);

		/*
		 * Add back any deferred free space that has not been
		 * added back into the in-core free tree yet. This will
		 * ensure that we don't end up with a space map histogram
		 * that is completely empty unless the metaslab is fully
		 * allocated.
		 */
		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			space_map_histogram_add(msp->ms_sm,
			    msp->ms_defertree[t], tx);
		}
	}

	/*
	 * Always add the free space from this sync pass to the space
	 * map histogram. We want to make sure that the on-disk histogram
	 * accounts for all free space. If the space map is not loaded,
	 * then we will lose some accuracy but will correct it the next
	 * time we load the space map.
	 */
	space_map_histogram_add(msp->ms_sm, msp->ms_freeingtree, tx);

	metaslab_group_histogram_add(mg, msp);
	metaslab_group_histogram_verify(mg);
	metaslab_class_histogram_verify(mg->mg_class);

	/*
	 * For sync pass 1, we avoid traversing this txg's free range tree
	 * and instead will just swap the pointers for freeingtree and
	 * freedtree. We can safely do this since the freed_tree is
	 * guaranteed to be empty on the initial pass.
	 */
	if (spa_sync_pass(spa) == 1) {
		range_tree_swap(&msp->ms_freeingtree, &msp->ms_freedtree);
	} else {
		range_tree_vacate(msp->ms_freeingtree,
		    range_tree_add, msp->ms_freedtree);
	}
	range_tree_vacate(alloctree, NULL, NULL);

	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK]));
	ASSERT0(range_tree_space(msp->ms_freeingtree));

	mutex_exit(&msp->ms_lock);

	if (object != space_map_object(msp->ms_sm)) {
		object = space_map_object(msp->ms_sm);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    msp->ms_id, sizeof (uint64_t), &object, tx);
	}

	dmu_tx_commit(tx);
}
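
/*
 * Illustrative note (editorial note, not from the original source): for a
 * loaded metaslab the on-disk histogram written above is rebuilt from
 * scratch each sync as ms_tree + ms_freedtree + all ms_defertree[] entries
 * + ms_freeingtree, so it reflects every range that is or will become
 * free, even though the deferred portion is not yet allocatable.
 */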

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	range_tree_t **defer_tree;
	int64_t alloc_delta, defer_delta;
	boolean_t defer_allowed = B_TRUE;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * range trees and add its capacity to the vdev.
	 */
	if (msp->ms_freedtree == NULL) {
		for (int t = 0; t < TXG_SIZE; t++) {
			ASSERT(msp->ms_alloctree[t] == NULL);

			msp->ms_alloctree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		ASSERT3P(msp->ms_freeingtree, ==, NULL);
		msp->ms_freeingtree = range_tree_create(NULL, msp,
		    &msp->ms_lock);

		ASSERT3P(msp->ms_freedtree, ==, NULL);
		msp->ms_freedtree = range_tree_create(NULL, msp,
		    &msp->ms_lock);

		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			ASSERT(msp->ms_defertree[t] == NULL);

			msp->ms_defertree[t] = range_tree_create(NULL, msp,
			    &msp->ms_lock);
		}

		vdev_space_update(vd, 0, 0, msp->ms_size);
	}

	defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];

	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
	    metaslab_class_get_alloc(spa_normal_class(spa));
	if (free_space <= spa_get_slop_space(spa)) {
		defer_allowed = B_FALSE;
	}

	defer_delta = 0;
	alloc_delta = space_map_alloc_delta(msp->ms_sm);
	if (defer_allowed) {
		defer_delta = range_tree_space(msp->ms_freedtree) -
		    range_tree_space(*defer_tree);
	} else {
		defer_delta -= range_tree_space(*defer_tree);
	}

	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

	/*
	 * If there's a metaslab_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 */
	metaslab_load_wait(msp);

	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and the
	 * defer_tree -- this is safe to do because we've just emptied out
	 * the defer_tree.
	 */
	range_tree_vacate(*defer_tree,
	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
	if (defer_allowed) {
		range_tree_swap(&msp->ms_freedtree, defer_tree);
	} else {
		range_tree_vacate(msp->ms_freedtree,
		    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
	}

	space_map_update(msp->ms_sm);

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	/*
	 * Calculate the new weights before unloading any metaslabs.
	 * This will give us the most accurate weighting.
	 */
	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	/*
	 * If the metaslab is loaded and we've not tried to load or allocate
	 * from it in 'metaslab_unload_delay' txgs, then unload it.
	 */
	if (msp->ms_loaded &&
	    msp->ms_selected_txg + metaslab_unload_delay < txg) {
		for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
			VERIFY0(range_tree_space(
			    msp->ms_alloctree[(txg + t) & TXG_MASK]));
		}

		if (!metaslab_debug_unload)
			metaslab_unload(msp);
	}

	mutex_exit(&msp->ms_lock);
}
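
/*
 * Illustrative timeline (editorial note, not from the original source): a
 * block freed in txg T sits in ms_freeingtree while T is syncing, moves to
 * ms_freedtree at the end of sync pass 1, becomes
 * ms_defertree[T % TXG_DEFER_SIZE] here in metaslab_sync_done(), and is
 * finally vacated back into ms_tree (and thus made allocatable)
 * TXG_DEFER_SIZE txgs later -- unless the pool is low on space, in which
 * case defer_allowed is false and the freed space is returned immediately.
 */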

void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	metaslab_group_alloc_update(mg);
	mg->mg_fragmentation = metaslab_group_fragmentation(mg);

	/*
	 * Preload the next potential metaslabs
	 */
	metaslab_group_preload(mg);
}

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_id;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (start < offset)
		return ((offset - start) << ms_shift);
	return (0);
}
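
/*
 * Illustrative example (editorial note, not from the original source): for
 * a DVA on the same top-level vdev whose offset falls in metaslab 7 while
 * this metaslab has ms_id 10, the distance is (10 - 7) << vdev_ms_shift,
 * i.e. three metaslab widths; a DVA on a different vdev is treated as
 * maximally distant (1ULL << 63).
 */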

/*
 * ==========================================================================
 * Metaslab allocation tracing facility
 * ==========================================================================
 */
kstat_t *metaslab_trace_ksp;
kstat_named_t metaslab_trace_over_limit;

void
metaslab_alloc_trace_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
	    "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
	if (metaslab_trace_ksp != NULL) {
		metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
		kstat_named_init(&metaslab_trace_over_limit,
		    "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
		kstat_install(metaslab_trace_ksp);
	}
}

void
metaslab_alloc_trace_fini(void)
{
	if (metaslab_trace_ksp != NULL) {
		kstat_delete(metaslab_trace_ksp);
		metaslab_trace_ksp = NULL;
	}
	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}

/*
 * Add an allocation trace element to the allocation tracing list.
 */
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
    metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset)
{
	if (!metaslab_trace_enabled)
		return;

	/*
	 * When the tracing list reaches its maximum we remove
	 * the second element in the list before adding a new one.
	 * By removing the second element we preserve the original
	 * entry as a clue to what allocation steps have already been
	 * performed.
	 */
	if (zal->zal_size == metaslab_trace_max_entries) {
		metaslab_alloc_trace_t *mat_next;
#ifdef	DEBUG
		panic("too many entries in allocation list");
#endif
		atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
		zal->zal_size--;
		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
		list_remove(&zal->zal_list, mat_next);
		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
	}

	metaslab_alloc_trace_t *mat =
	    kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
	list_link_init(&mat->mat_list_node);
	mat->mat_mg = mg;
	mat->mat_msp = msp;
	mat->mat_size = psize;
	mat->mat_dva_id = dva_id;
	mat->mat_offset = offset;
	mat->mat_weight = 0;

	if (msp != NULL)
		mat->mat_weight = msp->ms_weight;

	/*
	 * The list is part of the zio so locking is not required. Only
	 * a single thread will perform allocations for a given zio.
	 */
	list_insert_tail(&zal->zal_list, mat);
	zal->zal_size++;

	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
	    offsetof(metaslab_alloc_trace_t, mat_list_node));
	zal->zal_size = 0;
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
	metaslab_alloc_trace_t *mat;

	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
		kmem_cache_free(metaslab_alloc_trace_cache, mat);
	list_destroy(&zal->zal_list);
}
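
/*
 * Usage sketch (editorial note, not from the original source): a caller
 * owns one zio_alloc_list_t per allocation attempt, roughly:
 *
 *	zio_alloc_list_t zal;
 *	metaslab_trace_init(&zal);
 *	error = metaslab_alloc(spa, mc, psize, bp, ndvas, txg, NULL,
 *	    flags, &zal, zio);
 *	metaslab_trace_fini(&zal);
 *
 * Each step of the allocation appends an entry, so on failure the list
 * records which groups and metaslabs were tried and why each was rejected.
 */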

/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */

static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    flags & METASLAB_DONT_THROTTLE)
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) refcount_add(&mg->mg_alloc_queue_depth, tag);
}

void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    flags & METASLAB_DONT_THROTTLE)
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	(void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
}

void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
{
#ifdef ZFS_DEBUG
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	for (int d = 0; d < ndvas; d++) {
		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
		VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
	}
#endif
}
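
/*
 * Note (editorial note, not from the original source): mg_alloc_queue_depth
 * is a refcount keyed by the zio ("tag"), so each throttled async
 * allocation adds one hold per allocated DVA in metaslab_alloc() and the
 * zio layer drops the hold again once the I/O no longer occupies a slot;
 * metaslab_group_alloc_verify() asserts that no holds leak past the end of
 * a block pointer's life.
 */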

static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
	uint64_t start;
	range_tree_t *rt = msp->ms_tree;
	metaslab_class_t *mc = msp->ms_group->mg_class;

	VERIFY(!msp->ms_condensing);

	start = mc->mc_ops->msop_alloc(msp, size);
	if (start != -1ULL) {
		metaslab_group_t *mg = msp->ms_group;
		vdev_t *vd = mg->mg_vd;

		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
		range_tree_remove(rt, start, size);

		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], start, size);

		/* Track the last successful allocation */
		msp->ms_alloc_txg = txg;
		metaslab_verify_space(msp, txg);
	}

	/*
	 * Now that we've attempted the allocation we need to update the
	 * metaslab's maximum block size since it may have changed.
	 */
	msp->ms_max_size = metaslab_block_maxsize(msp);
	return (start);
}

static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
    uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
	search->ms_weight = UINT64_MAX;
	search->ms_start = 0;
	for (;;) {
		boolean_t was_active;
		avl_tree_t *t = &mg->mg_metaslab_tree;
		avl_index_t idx;

		mutex_enter(&mg->mg_lock);

		/*
		 * Find the metaslab with the highest weight that is less
		 * than what we've already tried.  In the common case, this
		 * means that we will examine each metaslab at most once.
		 * Note that concurrent callers could reorder metaslabs
		 * by activation/passivation once we have dropped the mg_lock.
		 * If a metaslab is activated by another thread, and we fail
		 * to allocate from the metaslab we have selected, we may
		 * not try the newly-activated metaslab, and instead activate
		 * another metaslab.  This is not optimal, but generally
		 * does not cause any problems (a possible exception being
		 * if every metaslab is completely full except for the
		 * newly-activated metaslab which we fail to examine).
		 */
		msp = avl_find(t, search, &idx);
		if (msp == NULL)
			msp = avl_nearest(t, idx, AVL_AFTER);
		for (; msp != NULL; msp = AVL_NEXT(t, msp)) {

			if (!metaslab_should_allocate(msp, asize)) {
				metaslab_trace_add(zal, mg, msp, asize, d,
				    TRACE_TOO_SMALL);
				continue;
			}

			/*
			 * If the selected metaslab is condensing, skip it.
			 */
			if (msp->ms_condensing)
				continue;

			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (space_map_allocated(msp->ms_sm) != 0 ? 0 :
			    min_distance >> 1);

			for (i = 0; i < d; i++) {
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			}
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL) {
			kmem_free(search, sizeof (*search));
			return (-1ULL);
		}
		search->ms_weight = msp->ms_weight;
		search->ms_start = msp->ms_start + 1;

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock. We check the
		 * active status first to see if we need to reselect
		 * a new metaslab.
		 */
		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}
		msp->ms_selected_txg = txg;

		/*
		 * Now that we have the lock, recheck to see if we should
		 * continue to use this metaslab for this allocation. The
		 * metaslab is now loaded so metaslab_should_allocate() can
		 * accurately determine if the allocation attempt should
		 * proceed.
		 */
		if (!metaslab_should_allocate(msp, asize)) {
			/* Passivate this metaslab and select a new one. */
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_TOO_SMALL);
			goto next;
		}

		/*
		 * If this metaslab is currently condensing then pick again as
		 * we can't manipulate this metaslab until it's committed
		 * to disk.
		 */
		if (msp->ms_condensing) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_CONDENSING);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		offset = metaslab_block_alloc(msp, asize, txg);
		metaslab_trace_add(zal, mg, msp, asize, d, offset);

		if (offset != -1ULL) {
			/* Proactively passivate the metaslab, if needed */
			metaslab_segment_may_passivate(msp);
			break;
		}
next:
		ASSERT(msp->ms_loaded);

		/*
		 * We were unable to allocate from this metaslab so determine
		 * a new weight for this metaslab. Now that we have loaded
		 * the metaslab we can provide a better hint to the metaslab
		 * selector.
		 *
		 * For space-based metaslabs, we use the maximum block size.
		 * This information is only available when the metaslab
		 * is loaded and is more accurate than the generic free
		 * space weight that was calculated by metaslab_weight().
		 * This information allows us to quickly compare the maximum
		 * available allocation in the metaslab to the allocation
		 * size being requested.
		 *
		 * For segment-based metaslabs, determine the new weight
		 * based on the highest bucket in the range tree. We
		 * explicitly use the loaded segment weight (i.e. the range
		 * tree histogram) since it contains the space that is
		 * currently available for allocation and is accurate
		 * even within a sync pass.
		 */
		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
			uint64_t weight = metaslab_block_maxsize(msp);
			WEIGHT_SET_SPACEBASED(weight);
			metaslab_passivate(msp, weight);
		} else {
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
		}

		/*
		 * We have just failed an allocation attempt, check
		 * that metaslab_should_allocate() agrees. Otherwise,
		 * we may end up in an infinite loop retrying the same
		 * metaslab.
		 */
		ASSERT(!metaslab_should_allocate(msp, asize));
		mutex_exit(&msp->ms_lock);
	}
	mutex_exit(&msp->ms_lock);
	kmem_free(search, sizeof (*search));
	return (offset);
}

static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
    uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
{
	uint64_t offset;
	ASSERT(mg->mg_initialized);

	offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
	    min_distance, dva, d);

	mutex_enter(&mg->mg_lock);
	if (offset == -1ULL) {
		mg->mg_failed_allocations++;
		metaslab_trace_add(zal, mg, NULL, asize, d,
		    TRACE_GROUP_FAILURE);
		if (asize == SPA_GANGBLOCKSIZE) {
			/*
			 * This metaslab group was unable to allocate
			 * the minimum gang block size so it must be out of
			 * space. We must notify the allocation throttle
			 * to start skipping allocation attempts to this
			 * metaslab group until more space becomes available.
			 * Note: this failure cannot be caused by the
			 * allocation throttle since the allocation throttle
			 * is only responsible for skipping devices and
			 * not failing block allocations.
			 */
			mg->mg_no_free_space = B_TRUE;
		}
	}
	mg->mg_allocations++;
	mutex_exit(&mg->mg_lock);
	return (offset);
}

/*
 * If we have to write a ditto block (i.e. more than one DVA for a given BP)
 * on the same vdev as an existing DVA of this BP, then try to allocate it
 * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
 * existing DVAs.
 */
int ditto_same_vdev_distance_shift = 3;

/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
    zio_alloc_list_t *zal)
{
	metaslab_group_t *mg, *rotor;
	vdev_t *vd;
	boolean_t try_hard = B_FALSE;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) {
		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG);
		return (SET_ERROR(ENOSPC));
	}

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	do {
		boolean_t allocatable;

		ASSERT(mg->mg_activation_count == 1);
		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (try_hard) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}

		/*
		 * Determine if the selected metaslab group is eligible
		 * for allocations. If we're ganging then don't allow
		 * this metaslab group to skip allocations since that would
		 * inadvertently return ENOSPC and suspend the pool
		 * even though space is still available.
		 */
		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
			allocatable = metaslab_group_allocatable(mg, rotor,
			    psize);
		}

		if (!allocatable) {
			metaslab_trace_add(zal, mg, NULL, psize, d,
			    TRACE_NOT_ALLOCATABLE);
			goto next;
		}

		ASSERT(mg->mg_initialized);

		/*
		 * Avoid writing single-copy data to a failing,
		 * non-redundant vdev, unless we've already tried all
		 * other vdevs.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && !try_hard && vd->vdev_children == 0) {
			metaslab_trace_add(zal, mg, NULL, psize, d,
			    TRACE_VDEV_ERROR);
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		/*
		 * If we don't need to try hard, then require that the
		 * block be 1/8th of the device away from any other DVAs
		 * in this BP.  If we are trying hard, allow any offset
		 * to be used (distance=0).
		 */
		uint64_t distance = 0;
		if (!try_hard) {
			distance = vd->vdev_asize >>
			    ditto_same_vdev_distance_shift;
			if (distance <= (1ULL << vd->vdev_ms_shift))
				distance = 0;
		}

		uint64_t asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
		    distance, dva, d);

		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vu, cu;

				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 * For example, if a device is 80% full
				 * and the pool is 20% full then we should
				 * reduce allocations by 60% on this device.
				 *
				 * mg_bias = (20 - 80) * 512K / 100 = -307K
				 *
				 * This reduces allocations by 307K for this
				 * iteration.
				 */
				mg->mg_bias = ((cu - vu) *
				    (int64_t)mg->mg_aliquot) / 100;
			} else if (!metaslab_bias_enabled) {
				mg->mg_bias = 0;
			}

			if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	/*
	 * If we haven't tried hard, do so now.
	 */
	if (!try_hard) {
		try_hard = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC);
	return (SET_ERROR(ENOSPC));
}

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
		    offset, size);

		VERIFY(!msp->ms_condensing);
		VERIFY3U(offset, >=, msp->ms_start);
		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
		    msp->ms_size);
		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		range_tree_add(msp->ms_tree, offset, size);
		msp->ms_max_size = metaslab_block_maxsize(msp);
	} else {
		VERIFY3U(txg, ==, spa->spa_syncing_txg);
		if (range_tree_space(msp->ms_freeingtree) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_freeingtree, offset, size);
	}

	mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
	range_tree_remove(msp->ms_tree, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}

/*
 * Reserve some allocation slots. The reservation system must be called
 * before we call into the allocator. If there aren't any available slots
 * then the I/O will be throttled until an I/O completes and its slots are
 * freed up. The function returns true if it was successful in placing
 * the reservation.
 */
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
    int flags)
{
	uint64_t available_slots = 0;
	boolean_t slot_reserved = B_FALSE;

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);

	uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots);
	if (reserved_slots < mc->mc_alloc_max_slots)
		available_slots = mc->mc_alloc_max_slots - reserved_slots;

	if (slots <= available_slots || GANG_ALLOCATION(flags)) {
		/*
		 * We reserve the slots individually so that we can unreserve
		 * them individually when an I/O completes.
		 */
		for (int d = 0; d < slots; d++) {
			reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
		}
		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
		slot_reserved = B_TRUE;
	}

	mutex_exit(&mc->mc_lock);
	return (slot_reserved);
}
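
/*
 * Usage sketch (editorial note, not from the original source): a caller in
 * the zio layer reserves one slot per DVA before calling into the allocator
 * and releases the same count when the I/O no longer occupies a slot:
 *
 *	if (metaslab_class_throttle_reserve(mc, copies, zio, 0)) {
 *		(void) metaslab_alloc(spa, mc, psize, bp, copies, txg,
 *		    NULL, flags, &zio->io_alloc_list, zio);
 *	}
 *	...
 *	metaslab_class_throttle_unreserve(mc, copies, zio);
 *
 * Gang allocations pass GANG_ALLOCATION(flags) and may exceed
 * mc_alloc_max_slots so that gang headers can always make progress.
 */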

void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
{
	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);
	for (int d = 0; d < slots; d++) {
		(void) refcount_remove(&mc->mc_alloc_slots, zio);
	}
	mutex_exit(&mc->mc_lock);
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
    zio_alloc_list_t *zal, zio_t *zio)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
	ASSERT3P(zal, !=, NULL);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags, zal);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				metaslab_group_alloc_decrement(spa,
				    DVA_GET_VDEV(&dva[d]), zio, flags);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		} else {
			/*
			 * Update the metaslab group's queue depth
			 * based on the newly allocated dva.
			 */
			metaslab_group_alloc_increment(spa,
			    DVA_GET_VDEV(&dva[d]), zio, flags);
		}

	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}

void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

		if (msp->ms_loaded)
			range_tree_verify(msp->ms_tree, offset, size);

		range_tree_verify(msp->ms_freeingtree, offset, size);
		range_tree_verify(msp->ms_freedtree, offset, size);
		for (int j = 0; j < TXG_DEFER_SIZE; j++)
			range_tree_verify(msp->ms_defertree[j], offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}