/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright (c) 2019 by Lawrence Livermore National Security, LLC.
 * Copyright (c) 2021 Hewlett Packard Enterprise Development LP
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc_impl.h>

/*
 * TRIM is a feature which is used to notify an SSD that some previously
 * written space is no longer allocated by the pool. This is useful because
 * writes to an SSD must be performed to blocks which have first been erased.
 * Ensuring the SSD always has a supply of erased blocks for new writes
 * helps prevent performance from deteriorating.
 *
 * There are two supported TRIM methods: manual and automatic.
 *
 * Manual TRIM:
 *
 * A manual TRIM is initiated by running the 'zpool trim' command. A single
 * 'vdev_trim' thread is created for each leaf vdev, and it is responsible for
 * managing that vdev's TRIM process. This involves iterating over all the
 * metaslabs, calculating the unallocated space ranges, and then issuing the
 * required TRIM I/Os.
 *
 * While a metaslab is being actively trimmed it is not eligible to perform
 * new allocations. After traversing all of the metaslabs the thread is
 * terminated. Finally, both the requested options and current progress of
 * the TRIM are regularly written to the pool. This allows the TRIM to be
 * suspended and resumed as needed.
 *
 * Automatic TRIM:
 *
 * An automatic TRIM is enabled by setting the 'autotrim' pool property
 * to 'on'. When enabled, a 'vdev_autotrim' thread is created for each
 * top-level (not leaf) vdev in the pool. These threads perform the same
 * core TRIM process as a manual TRIM, but with a few key differences.
 *
 * 1) Automatic TRIM happens continuously in the background and operates
 *    solely on recently freed blocks (ms_trim not ms_allocatable).
 *
 * 2) Each thread is associated with a top-level (not leaf) vdev. This has
 *    the benefit of simplifying the threading model, it makes it easier
 *    to coordinate administrative commands, and it ensures only a single
 *    metaslab is disabled at a time. Unlike manual TRIM, this means each
 *    'vdev_autotrim' thread is responsible for issuing TRIM I/Os for its
 *    children.
 *
 * 3) There is no automatic TRIM progress information stored on disk, nor
 *    is it reported by 'zpool status'.
 *
 * While the automatic TRIM process is highly effective it is more likely
 * than a manual TRIM to encounter tiny ranges. Ranges less than or equal to
 * 'zfs_trim_extent_bytes_min' (32 KiB) are considered too small to
 * efficiently TRIM and are skipped. This means small amounts of freed space
 * may not be automatically trimmed.
 *
 * Furthermore, devices with attached hot spares and devices being actively
 * replaced are skipped. This is done to avoid adding additional stress to
 * a potentially unhealthy device and to minimize the required rebuild time.
 *
 * For this reason it may be beneficial to occasionally manually TRIM a pool
 * even when automatic TRIM is enabled.
 */
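
/*
 * For example (the zpool(8) commands documented for these two methods;
 * 'tank' is a placeholder pool name):
 *
 *	zpool trim tank			initiate a manual TRIM
 *	zpool trim -s tank		suspend a manual TRIM in progress
 *	zpool set autotrim=on tank	enable automatic TRIM
 */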

/*
 * Maximum size of TRIM I/O, ranges will be chunked into 128 MiB lengths.
 */
static unsigned int zfs_trim_extent_bytes_max = 128 * 1024 * 1024;

/*
 * Minimum size of TRIM I/O, extents smaller than 32 KiB will be skipped.
 */
static unsigned int zfs_trim_extent_bytes_min = 32 * 1024;

/*
 * Skip uninitialized metaslabs during the TRIM process. This option is
 * useful for pools constructed from large thinly-provisioned devices where
 * TRIM operations are slow. As a pool ages, an increasing fraction of
 * the pool's metaslabs will be initialized, progressively degrading the
 * usefulness of this option. This setting is stored when starting a
 * manual TRIM and will persist for the duration of the requested TRIM.
 */
unsigned int zfs_trim_metaslab_skip = 0;

/*
 * Maximum number of queued TRIM I/Os per leaf vdev. The number of
 * concurrent TRIM I/Os issued to the device is controlled by the
 * zfs_vdev_trim_min_active and zfs_vdev_trim_max_active module options.
 */
static unsigned int zfs_trim_queue_limit = 10;

/*
 * The minimum number of transaction groups between automatic trims of a
 * metaslab. This setting represents a trade-off between issuing more
 * efficient TRIM operations, by allowing them to be aggregated longer,
 * and issuing them promptly so the trimmed space is available. Note
 * that this value is a minimum; metaslabs can be trimmed less frequently
 * when there are a large number of ranges which need to be trimmed.
 *
 * Increasing this value will allow frees to be aggregated for a longer
 * time. This can result in larger TRIM operations and increased memory
 * usage in order to track the ranges to be trimmed. Decreasing this value
 * has the opposite effect. The default value of 32 was determined through
 * testing to be a reasonable compromise.
 */
static unsigned int zfs_trim_txg_batch = 32;
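
/*
 * Illustrative only: on Linux these tunables surface as zfs module
 * parameters (per the ZFS_MODULE_PARAM declarations at the end of this
 * file), so a sketch of adjusting the batch size at runtime would be:
 *
 *	echo 64 > /sys/module/zfs/parameters/zfs_trim_txg_batch
 */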

/*
 * trim_args is a control structure which describes how a leaf vdev
 * should be trimmed. The core elements are the vdev, the metaslab being
 * trimmed and a range tree containing the extents to TRIM. All provided
 * ranges must be within the metaslab.
 */
typedef struct trim_args {
        /*
         * These fields are set by the caller of vdev_trim_ranges().
         */
        vdev_t          *trim_vdev;             /* Leaf vdev to TRIM */
        metaslab_t      *trim_msp;              /* Disabled metaslab */
        range_tree_t    *trim_tree;             /* TRIM ranges (in metaslab) */
        trim_type_t     trim_type;              /* Manual or auto TRIM */
        uint64_t        trim_extent_bytes_max;  /* Maximum TRIM I/O size */
        uint64_t        trim_extent_bytes_min;  /* Minimum TRIM I/O size */
        enum trim_flag  trim_flags;             /* TRIM flags (secure) */

        /*
         * These fields are updated by vdev_trim_ranges().
         */
        hrtime_t        trim_start_time;        /* Start time */
        uint64_t        trim_bytes_done;        /* Bytes trimmed */
} trim_args_t;
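
/*
 * Illustrative sketch (not part of the original code) of how a caller
 * populates a trim_args_t before invoking vdev_trim_ranges(); compare
 * vdev_trim_simple() at the bottom of this file, which follows exactly
 * this pattern:
 *
 *	trim_args_t ta = {0};
 *
 *	ta.trim_vdev = vd;	(leaf vdev to TRIM)
 *	ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
 *	ta.trim_type = TRIM_TYPE_SIMPLE;
 *	ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
 *	ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
 *	range_tree_add(ta.trim_tree, start, size);
 *	error = vdev_trim_ranges(&ta);
 */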

/*
 * Determines whether a vdev_trim_thread() should be stopped.
 */
static boolean_t
vdev_trim_should_stop(vdev_t *vd)
{
        return (vd->vdev_trim_exit_wanted || !vdev_writeable(vd) ||
            vd->vdev_detached || vd->vdev_top->vdev_removing);
}

/*
 * Determines whether a vdev_autotrim_thread() should be stopped.
 */
static boolean_t
vdev_autotrim_should_stop(vdev_t *tvd)
{
        return (tvd->vdev_autotrim_exit_wanted ||
            !vdev_writeable(tvd) || tvd->vdev_removing ||
            spa_get_autotrim(tvd->vdev_spa) == SPA_AUTOTRIM_OFF);
}

/*
 * The sync task for updating the on-disk state of a manual TRIM. This
 * is scheduled by vdev_trim_change_state().
 */
static void
vdev_trim_zap_update_sync(void *arg, dmu_tx_t *tx)
{
        /*
         * We pass in the guid instead of the vdev_t since the vdev may
         * have been freed prior to the sync task being processed. This
         * happens when a vdev is detached as we call spa_config_vdev_exit(),
         * stop the trimming thread, schedule the sync task, and free
         * the vdev. Later when the scheduled sync task is invoked, it would
         * find that the vdev has been freed.
         */
        uint64_t guid = *(uint64_t *)arg;
        uint64_t txg = dmu_tx_get_txg(tx);
        kmem_free(arg, sizeof (uint64_t));

        vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
        if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
                return;

        uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];
        vd->vdev_trim_offset[txg & TXG_MASK] = 0;

        VERIFY3U(vd->vdev_leaf_zap, !=, 0);

        objset_t *mos = vd->vdev_spa->spa_meta_objset;

        if (last_offset > 0 || vd->vdev_trim_last_offset == UINT64_MAX) {

                if (vd->vdev_trim_last_offset == UINT64_MAX)
                        last_offset = 0;

                vd->vdev_trim_last_offset = last_offset;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
                    sizeof (last_offset), 1, &last_offset, tx));
        }

        if (vd->vdev_trim_action_time > 0) {
                uint64_t val = (uint64_t)vd->vdev_trim_action_time;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_TRIM_ACTION_TIME, sizeof (val),
                    1, &val, tx));
        }

        if (vd->vdev_trim_rate > 0) {
                uint64_t rate = (uint64_t)vd->vdev_trim_rate;

                if (rate == UINT64_MAX)
                        rate = 0;

                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_TRIM_RATE, sizeof (rate), 1, &rate, tx));
        }

        uint64_t partial = vd->vdev_trim_partial;
        if (partial == UINT64_MAX)
                partial = 0;

        VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
            sizeof (partial), 1, &partial, tx));

        uint64_t secure = vd->vdev_trim_secure;
        if (secure == UINT64_MAX)
                secure = 0;

        VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
            sizeof (secure), 1, &secure, tx));

        uint64_t trim_state = vd->vdev_trim_state;
        VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
            sizeof (trim_state), 1, &trim_state, tx));
}

/*
 * Update the on-disk state of a manual TRIM. This is called to request
 * that a TRIM be started/suspended/canceled, or to change one of the
 * TRIM options (partial, secure, rate).
 */
static void
vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
    uint64_t rate, boolean_t partial, boolean_t secure)
{
        ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
        spa_t *spa = vd->vdev_spa;

        if (new_state == vd->vdev_trim_state)
                return;

        /*
         * Copy the vd's guid, this will be freed by the sync task.
         */
        uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
        *guid = vd->vdev_guid;

        /*
         * If we're suspending, then preserve the original start time.
         */
        if (vd->vdev_trim_state != VDEV_TRIM_SUSPENDED) {
                vd->vdev_trim_action_time = gethrestime_sec();
        }

        /*
         * If we're activating, then preserve the requested rate and trim
         * method. Setting the last offset and rate to UINT64_MAX is used
         * as a sentinel to indicate they should be reset to default values.
         */
        if (new_state == VDEV_TRIM_ACTIVE) {
                if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE ||
                    vd->vdev_trim_state == VDEV_TRIM_CANCELED) {
                        vd->vdev_trim_last_offset = UINT64_MAX;
                        vd->vdev_trim_rate = UINT64_MAX;
                        vd->vdev_trim_partial = UINT64_MAX;
                        vd->vdev_trim_secure = UINT64_MAX;
                }

                if (rate != 0)
                        vd->vdev_trim_rate = rate;

                if (partial != 0)
                        vd->vdev_trim_partial = partial;

                if (secure != 0)
                        vd->vdev_trim_secure = secure;
        }

        vdev_trim_state_t old_state = vd->vdev_trim_state;
        boolean_t resumed = (old_state == VDEV_TRIM_SUSPENDED);
        vd->vdev_trim_state = new_state;

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
        dsl_sync_task_nowait(spa_get_dsl(spa), vdev_trim_zap_update_sync,
            guid, tx);

        switch (new_state) {
        case VDEV_TRIM_ACTIVE:
                spa_event_notify(spa, vd, NULL,
                    resumed ? ESC_ZFS_TRIM_RESUME : ESC_ZFS_TRIM_START);
                spa_history_log_internal(spa, "trim", tx,
                    "vdev=%s activated", vd->vdev_path);
                break;
        case VDEV_TRIM_SUSPENDED:
                spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_SUSPEND);
                spa_history_log_internal(spa, "trim", tx,
                    "vdev=%s suspended", vd->vdev_path);
                break;
        case VDEV_TRIM_CANCELED:
                if (old_state == VDEV_TRIM_ACTIVE ||
                    old_state == VDEV_TRIM_SUSPENDED) {
                        spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_CANCEL);
                        spa_history_log_internal(spa, "trim", tx,
                            "vdev=%s canceled", vd->vdev_path);
                }
                break;
        case VDEV_TRIM_COMPLETE:
                spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_FINISH);
                spa_history_log_internal(spa, "trim", tx,
                    "vdev=%s complete", vd->vdev_path);
                break;
        default:
                panic("invalid state %llu", (unsigned long long)new_state);
        }

        dmu_tx_commit(tx);

        if (new_state != VDEV_TRIM_ACTIVE)
                spa_notify_waiters(spa);
}

/*
 * The zio_done_func_t done callback for each manual TRIM issued. It is
 * responsible for updating the TRIM stats, reissuing failed TRIM I/Os,
 * and limiting the number of in flight TRIM I/Os.
 */
static void
vdev_trim_cb(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;

        mutex_enter(&vd->vdev_trim_io_lock);
        if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
                /*
                 * The I/O failed because the vdev was unavailable; roll the
                 * last offset back. (This works because spa_sync waits on
                 * spa_txg_zio before it runs sync tasks.)
                 */
                uint64_t *offset =
                    &vd->vdev_trim_offset[zio->io_txg & TXG_MASK];
                *offset = MIN(*offset, zio->io_offset);
        } else {
                if (zio->io_error != 0) {
                        vd->vdev_stat.vs_trim_errors++;
                        spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
                            0, 0, 0, 0, 1, zio->io_orig_size);
                } else {
                        spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
                            1, zio->io_orig_size, 0, 0, 0, 0);
                }

                vd->vdev_trim_bytes_done += zio->io_orig_size;
        }

        ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_MANUAL], >, 0);
        vd->vdev_trim_inflight[TRIM_TYPE_MANUAL]--;
        cv_broadcast(&vd->vdev_trim_io_cv);
        mutex_exit(&vd->vdev_trim_io_lock);

        spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * The zio_done_func_t done callback for each automatic TRIM issued. It
 * is responsible for updating the TRIM stats and limiting the number of
 * in flight TRIM I/Os. Automatic TRIM I/Os are best effort and are
 * never reissued on failure.
 */
static void
vdev_autotrim_cb(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;

        mutex_enter(&vd->vdev_trim_io_lock);

        if (zio->io_error != 0) {
                vd->vdev_stat.vs_trim_errors++;
                spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
                    0, 0, 0, 0, 1, zio->io_orig_size);
        } else {
                spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
                    1, zio->io_orig_size, 0, 0, 0, 0);
        }

        ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_AUTO], >, 0);
        vd->vdev_trim_inflight[TRIM_TYPE_AUTO]--;
        cv_broadcast(&vd->vdev_trim_io_cv);
        mutex_exit(&vd->vdev_trim_io_lock);

        spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * The zio_done_func_t done callback for each TRIM issued via
 * vdev_trim_simple(). It is responsible for updating the TRIM stats and
 * limiting the number of in flight TRIM I/Os. Simple TRIM I/Os are best
 * effort and are never reissued on failure.
 */
static void
vdev_trim_simple_cb(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;

        mutex_enter(&vd->vdev_trim_io_lock);

        if (zio->io_error != 0) {
                vd->vdev_stat.vs_trim_errors++;
                spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE,
                    0, 0, 0, 0, 1, zio->io_orig_size);
        } else {
                spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE,
                    1, zio->io_orig_size, 0, 0, 0, 0);
        }

        ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE], >, 0);
        vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE]--;
        cv_broadcast(&vd->vdev_trim_io_cv);
        mutex_exit(&vd->vdev_trim_io_lock);

        spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * Returns the average trim rate in bytes/sec for the ta->trim_vdev.
 */
static uint64_t
vdev_trim_calculate_rate(trim_args_t *ta)
{
        return (ta->trim_bytes_done * 1000 /
            (NSEC2MSEC(gethrtime() - ta->trim_start_time) + 1));
}

/*
 * Issues a physical TRIM and takes care of rate limiting (bytes/sec)
 * and number of concurrent TRIM I/Os.
 */
static int
vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
{
        vdev_t *vd = ta->trim_vdev;
        spa_t *spa = vd->vdev_spa;
        zio_done_func_t *cb;

        mutex_enter(&vd->vdev_trim_io_lock);

        /*
         * Limit manual TRIM I/Os to the requested rate. This does not
         * apply to automatic TRIM since no per vdev rate can be specified.
         */
        if (ta->trim_type == TRIM_TYPE_MANUAL) {
                while (vd->vdev_trim_rate != 0 && !vdev_trim_should_stop(vd) &&
                    vdev_trim_calculate_rate(ta) > vd->vdev_trim_rate) {
                        cv_timedwait_idle(&vd->vdev_trim_io_cv,
                            &vd->vdev_trim_io_lock, ddi_get_lbolt() +
                            MSEC_TO_TICK(10));
                }
        }
        ta->trim_bytes_done += size;

        /* Limit in flight trimming I/Os */
        while (vd->vdev_trim_inflight[0] + vd->vdev_trim_inflight[1] +
            vd->vdev_trim_inflight[2] >= zfs_trim_queue_limit) {
                cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
        }
        vd->vdev_trim_inflight[ta->trim_type]++;
        mutex_exit(&vd->vdev_trim_io_lock);

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
        uint64_t txg = dmu_tx_get_txg(tx);

        spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
        mutex_enter(&vd->vdev_trim_lock);

        if (ta->trim_type == TRIM_TYPE_MANUAL &&
            vd->vdev_trim_offset[txg & TXG_MASK] == 0) {
                uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
                *guid = vd->vdev_guid;

                /* This is the first write of this txg. */
                dsl_sync_task_nowait(spa_get_dsl(spa),
                    vdev_trim_zap_update_sync, guid, tx);
        }

        /*
         * We know the vdev_t will still be around since all consumers of
         * vdev_free must stop the trimming first.
         */
        if ((ta->trim_type == TRIM_TYPE_MANUAL &&
            vdev_trim_should_stop(vd)) ||
            (ta->trim_type == TRIM_TYPE_AUTO &&
            vdev_autotrim_should_stop(vd->vdev_top))) {
                mutex_enter(&vd->vdev_trim_io_lock);
                vd->vdev_trim_inflight[ta->trim_type]--;
                mutex_exit(&vd->vdev_trim_io_lock);
                spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
                mutex_exit(&vd->vdev_trim_lock);
                dmu_tx_commit(tx);
                return (SET_ERROR(EINTR));
        }
        mutex_exit(&vd->vdev_trim_lock);

        if (ta->trim_type == TRIM_TYPE_MANUAL)
                vd->vdev_trim_offset[txg & TXG_MASK] = start + size;

        if (ta->trim_type == TRIM_TYPE_MANUAL) {
                cb = vdev_trim_cb;
        } else if (ta->trim_type == TRIM_TYPE_AUTO) {
                cb = vdev_autotrim_cb;
        } else {
                cb = vdev_trim_simple_cb;
        }

        zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd,
            start, size, cb, NULL, ZIO_PRIORITY_TRIM, ZIO_FLAG_CANFAIL,
            ta->trim_flags));
        /* vdev_trim_cb and vdev_autotrim_cb release SCL_STATE_ALL */

        dmu_tx_commit(tx);

        return (0);
}

/*
 * Issues TRIM I/Os for all ranges in the provided ta->trim_tree range tree.
 * Additional parameters describing how the TRIM should be performed must
 * be set in the trim_args structure. See the trim_args definition for
 * additional information.
 */
static int
vdev_trim_ranges(trim_args_t *ta)
{
        vdev_t *vd = ta->trim_vdev;
        zfs_btree_t *t = &ta->trim_tree->rt_root;
        zfs_btree_index_t idx;
        uint64_t extent_bytes_max = ta->trim_extent_bytes_max;
        uint64_t extent_bytes_min = ta->trim_extent_bytes_min;
        spa_t *spa = vd->vdev_spa;

        ta->trim_start_time = gethrtime();
        ta->trim_bytes_done = 0;

        for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
            rs = zfs_btree_next(t, &idx, &idx)) {
                uint64_t size = rs_get_end(rs, ta->trim_tree) - rs_get_start(rs,
                    ta->trim_tree);

                if (extent_bytes_min && size < extent_bytes_min) {
                        spa_iostats_trim_add(spa, ta->trim_type,
                            0, 0, 1, size, 0, 0);
                        continue;
                }

                /* Split range into legally-sized physical chunks */
                uint64_t writes_required = ((size - 1) / extent_bytes_max) + 1;

                for (uint64_t w = 0; w < writes_required; w++) {
                        int error;

                        error = vdev_trim_range(ta, VDEV_LABEL_START_SIZE +
                            rs_get_start(rs, ta->trim_tree) +
                            (w * extent_bytes_max), MIN(size -
                            (w * extent_bytes_max), extent_bytes_max));
                        if (error != 0) {
                                return (error);
                        }
                }
        }

        return (0);
}

static void
vdev_trim_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
{
        uint64_t *last_rs_end = (uint64_t *)arg;

        if (physical_rs->rs_end > *last_rs_end)
                *last_rs_end = physical_rs->rs_end;
}

static void
vdev_trim_xlate_progress(void *arg, range_seg64_t *physical_rs)
{
        vdev_t *vd = (vdev_t *)arg;

        uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
        vd->vdev_trim_bytes_est += size;

        if (vd->vdev_trim_last_offset >= physical_rs->rs_end) {
                vd->vdev_trim_bytes_done += size;
        } else if (vd->vdev_trim_last_offset > physical_rs->rs_start &&
            vd->vdev_trim_last_offset <= physical_rs->rs_end) {
                vd->vdev_trim_bytes_done +=
                    vd->vdev_trim_last_offset - physical_rs->rs_start;
        }
}

/*
 * Calculates the completion percentage of a manual TRIM.
 */
static void
vdev_trim_calculate_progress(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        vd->vdev_trim_bytes_est = 0;
        vd->vdev_trim_bytes_done = 0;

        for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];
                mutex_enter(&msp->ms_lock);

                uint64_t ms_free = (msp->ms_size -
                    metaslab_allocated_space(msp)) /
                    vdev_get_ndisks(vd->vdev_top);

                /*
                 * Convert the metaslab range to a physical range
                 * on our vdev. We use this to determine if we are
                 * in the middle of this metaslab range.
                 */
                range_seg64_t logical_rs, physical_rs, remain_rs;
                logical_rs.rs_start = msp->ms_start;
                logical_rs.rs_end = msp->ms_start + msp->ms_size;

                /* Metaslab space after this offset has not been trimmed. */
                vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
                if (vd->vdev_trim_last_offset <= physical_rs.rs_start) {
                        vd->vdev_trim_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                /* Metaslab space before this offset has been trimmed */
                uint64_t last_rs_end = physical_rs.rs_end;
                if (!vdev_xlate_is_empty(&remain_rs)) {
                        vdev_xlate_walk(vd, &remain_rs,
                            vdev_trim_xlate_last_rs_end, &last_rs_end);
                }

                if (vd->vdev_trim_last_offset > last_rs_end) {
                        vd->vdev_trim_bytes_done += ms_free;
                        vd->vdev_trim_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                /*
                 * If we get here, we're in the middle of trimming this
                 * metaslab. Load it and walk the free tree for more
                 * accurate progress estimation.
                 */
                VERIFY0(metaslab_load(msp));

                range_tree_t *rt = msp->ms_allocatable;
                zfs_btree_t *bt = &rt->rt_root;
                zfs_btree_index_t idx;
                for (range_seg_t *rs = zfs_btree_first(bt, &idx);
                    rs != NULL; rs = zfs_btree_next(bt, &idx, &idx)) {
                        logical_rs.rs_start = rs_get_start(rs, rt);
                        logical_rs.rs_end = rs_get_end(rs, rt);

                        vdev_xlate_walk(vd, &logical_rs,
                            vdev_trim_xlate_progress, vd);
                }
                mutex_exit(&msp->ms_lock);
        }
}

/*
 * Load from disk the vdev's manual TRIM information. This includes the
 * state, progress, and options provided when initiating the manual TRIM.
 */
static int
vdev_trim_load(vdev_t *vd)
{
        int err = 0;
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE ||
            vd->vdev_trim_state == VDEV_TRIM_SUSPENDED) {
                err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
                    sizeof (vd->vdev_trim_last_offset), 1,
                    &vd->vdev_trim_last_offset);
                if (err == ENOENT) {
                        vd->vdev_trim_last_offset = 0;
                        err = 0;
                }

                if (err == 0) {
                        err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                            vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_RATE,
                            sizeof (vd->vdev_trim_rate), 1,
                            &vd->vdev_trim_rate);
                        if (err == ENOENT) {
                                vd->vdev_trim_rate = 0;
                                err = 0;
                        }
                }

                if (err == 0) {
                        err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                            vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
                            sizeof (vd->vdev_trim_partial), 1,
                            &vd->vdev_trim_partial);
                        if (err == ENOENT) {
                                vd->vdev_trim_partial = 0;
                                err = 0;
                        }
                }

                if (err == 0) {
                        err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                            vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
                            sizeof (vd->vdev_trim_secure), 1,
                            &vd->vdev_trim_secure);
                        if (err == ENOENT) {
                                vd->vdev_trim_secure = 0;
                                err = 0;
                        }
                }
        }

        vdev_trim_calculate_progress(vd);

        return (err);
}

static void
vdev_trim_xlate_range_add(void *arg, range_seg64_t *physical_rs)
{
        trim_args_t *ta = arg;
        vdev_t *vd = ta->trim_vdev;

        /*
         * Only a manual trim will be traversing the vdev sequentially.
         * For an auto trim all valid ranges should be added.
         */
        if (ta->trim_type == TRIM_TYPE_MANUAL) {

                /* Only add segments that we have not visited yet */
                if (physical_rs->rs_end <= vd->vdev_trim_last_offset)
                        return;

                /* Pick up where we left off mid-range. */
                if (vd->vdev_trim_last_offset > physical_rs->rs_start) {
                        ASSERT3U(physical_rs->rs_end, >,
                            vd->vdev_trim_last_offset);
                        physical_rs->rs_start = vd->vdev_trim_last_offset;
                }
        }

        ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);

        range_tree_add(ta->trim_tree, physical_rs->rs_start,
            physical_rs->rs_end - physical_rs->rs_start);
}

/*
 * Convert the logical range into physical ranges and add them to the
 * range tree passed in the trim_args_t.
 */
static void
vdev_trim_range_add(void *arg, uint64_t start, uint64_t size)
{
        trim_args_t *ta = arg;
        vdev_t *vd = ta->trim_vdev;
        range_seg64_t logical_rs;
        logical_rs.rs_start = start;
        logical_rs.rs_end = start + size;

        /*
         * Every range to be trimmed must be part of ms_allocatable.
         * When ZFS_DEBUG_TRIM is set load the metaslab to verify this
         * is always the case.
         */
        if (zfs_flags & ZFS_DEBUG_TRIM) {
                metaslab_t *msp = ta->trim_msp;
                VERIFY0(metaslab_load(msp));
                VERIFY3B(msp->ms_loaded, ==, B_TRUE);
                VERIFY(range_tree_contains(msp->ms_allocatable, start, size));
        }

        ASSERT(vd->vdev_ops->vdev_op_leaf);
        vdev_xlate_walk(vd, &logical_rs, vdev_trim_xlate_range_add, arg);
}

/*
 * Each manual TRIM thread is responsible for trimming the unallocated
 * space for each leaf vdev. This is accomplished by sequentially iterating
 * over its top-level metaslabs and issuing TRIM I/O for the space described
 * by its ms_allocatable. While a metaslab is undergoing trimming it is
 * not eligible for new allocations.
 */
static __attribute__((noreturn)) void
vdev_trim_thread(void *arg)
{
        vdev_t *vd = arg;
        spa_t *spa = vd->vdev_spa;
        trim_args_t ta;
        int error = 0;

        /*
         * The VDEV_LEAF_ZAP_TRIM_* entries may have been updated by
         * vdev_trim(). Wait for the updated values to be reflected
         * in the zap in order to start with the requested settings.
         */
        txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);

        ASSERT(vdev_is_concrete(vd));
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        vd->vdev_trim_last_offset = 0;
        vd->vdev_trim_rate = 0;
        vd->vdev_trim_partial = 0;
        vd->vdev_trim_secure = 0;

        VERIFY0(vdev_trim_load(vd));

        ta.trim_vdev = vd;
        ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
        ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
        ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
        ta.trim_type = TRIM_TYPE_MANUAL;
        ta.trim_flags = 0;

        /*
         * When a secure TRIM has been requested infer that the intent
         * is that everything must be trimmed. Override the default
         * minimum TRIM size to prevent ranges from being skipped.
         */
        if (vd->vdev_trim_secure) {
                ta.trim_flags |= ZIO_TRIM_SECURE;
                ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
        }

        uint64_t ms_count = 0;
        for (uint64_t i = 0; !vd->vdev_detached &&
            i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];

                /*
                 * If we've expanded the top-level vdev or it's our
                 * first pass, calculate our progress.
                 */
                if (vd->vdev_top->vdev_ms_count != ms_count) {
                        vdev_trim_calculate_progress(vd);
                        ms_count = vd->vdev_top->vdev_ms_count;
                }

                spa_config_exit(spa, SCL_CONFIG, FTAG);
                metaslab_disable(msp);
                mutex_enter(&msp->ms_lock);
                VERIFY0(metaslab_load(msp));

                /*
                 * If a partial TRIM was requested skip metaslabs which have
                 * never been initialized and thus have never been written.
                 */
                if (msp->ms_sm == NULL && vd->vdev_trim_partial) {
                        mutex_exit(&msp->ms_lock);
                        metaslab_enable(msp, B_FALSE, B_FALSE);
                        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
                        vdev_trim_calculate_progress(vd);
                        continue;
                }

                ta.trim_msp = msp;
                range_tree_walk(msp->ms_allocatable, vdev_trim_range_add, &ta);
                range_tree_vacate(msp->ms_trim, NULL, NULL);
                mutex_exit(&msp->ms_lock);

                error = vdev_trim_ranges(&ta);
                metaslab_enable(msp, B_TRUE, B_FALSE);
                spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

                range_tree_vacate(ta.trim_tree, NULL, NULL);
                if (error != 0)
                        break;
        }

        spa_config_exit(spa, SCL_CONFIG, FTAG);
        mutex_enter(&vd->vdev_trim_io_lock);
        while (vd->vdev_trim_inflight[0] > 0) {
                cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
        }
        mutex_exit(&vd->vdev_trim_io_lock);

        range_tree_destroy(ta.trim_tree);

        mutex_enter(&vd->vdev_trim_lock);
        if (!vd->vdev_trim_exit_wanted) {
                if (vdev_writeable(vd)) {
                        vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
                            vd->vdev_trim_rate, vd->vdev_trim_partial,
                            vd->vdev_trim_secure);
                } else if (vd->vdev_faulted) {
                        vdev_trim_change_state(vd, VDEV_TRIM_CANCELED,
                            vd->vdev_trim_rate, vd->vdev_trim_partial,
                            vd->vdev_trim_secure);
                }
        }
        ASSERT(vd->vdev_trim_thread != NULL || vd->vdev_trim_inflight[0] == 0);

        /*
         * Drop the vdev_trim_lock while we sync out the txg since it's
         * possible that a device might be trying to come online and must
         * check to see if it needs to restart a trim. That thread will be
         * holding the spa_config_lock which would prevent the txg_wait_synced
         * from completing.
         */
        mutex_exit(&vd->vdev_trim_lock);
        txg_wait_synced(spa_get_dsl(spa), 0);
        mutex_enter(&vd->vdev_trim_lock);

        vd->vdev_trim_thread = NULL;
        cv_broadcast(&vd->vdev_trim_cv);
        mutex_exit(&vd->vdev_trim_lock);

        thread_exit();
}

/*
 * Initiates a manual TRIM for the vdev_t. Callers must hold vdev_trim_lock;
 * the vdev_t must be a leaf and cannot already be manually trimming.
 */
void
vdev_trim(vdev_t *vd, uint64_t rate, boolean_t partial, boolean_t secure)
{
        ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));
        ASSERT3P(vd->vdev_trim_thread, ==, NULL);
        ASSERT(!vd->vdev_detached);
        ASSERT(!vd->vdev_trim_exit_wanted);
        ASSERT(!vd->vdev_top->vdev_removing);

        vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, rate, partial, secure);
        vd->vdev_trim_thread = thread_create(NULL, 0,
            vdev_trim_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}

/*
 * Wait for the trimming thread to be terminated (canceled or stopped).
 */
static void
vdev_trim_stop_wait_impl(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));

        while (vd->vdev_trim_thread != NULL)
                cv_wait(&vd->vdev_trim_cv, &vd->vdev_trim_lock);

        ASSERT3P(vd->vdev_trim_thread, ==, NULL);
        vd->vdev_trim_exit_wanted = B_FALSE;
}

/*
 * Wait for vdev trim threads which were listed to cleanly exit.
 */
void
vdev_trim_stop_wait(spa_t *spa, list_t *vd_list)
{
        vdev_t *vd;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        while ((vd = list_remove_head(vd_list)) != NULL) {
                mutex_enter(&vd->vdev_trim_lock);
                vdev_trim_stop_wait_impl(vd);
                mutex_exit(&vd->vdev_trim_lock);
        }
}

/*
 * Stop trimming a device, with the resultant trimming state being tgt_state.
 * For blocking behavior pass NULL for vd_list. Otherwise, when a list_t is
 * provided the stopping vdev is inserted into the list. Callers are then
 * required to call vdev_trim_stop_wait() to block for all the trim threads
 * to exit. The caller must hold vdev_trim_lock and must not be writing to
 * the spa config, as the trimming thread may try to enter the config as a
 * reader before exiting.
 */
void
vdev_trim_stop(vdev_t *vd, vdev_trim_state_t tgt_state, list_t *vd_list)
{
        ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
        ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));

        /*
         * Allow cancel requests to proceed even if the trim thread has
         * stopped.
         */
        if (vd->vdev_trim_thread == NULL && tgt_state != VDEV_TRIM_CANCELED)
                return;

        vdev_trim_change_state(vd, tgt_state, 0, 0, 0);
        vd->vdev_trim_exit_wanted = B_TRUE;

        if (vd_list == NULL) {
                vdev_trim_stop_wait_impl(vd);
        } else {
                ASSERT(MUTEX_HELD(&spa_namespace_lock));
                list_insert_tail(vd_list, vd);
        }
}

/*
 * Requests that all listed vdevs stop trimming.
 */
static void
vdev_trim_stop_all_impl(vdev_t *vd, vdev_trim_state_t tgt_state,
    list_t *vd_list)
{
        if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
                mutex_enter(&vd->vdev_trim_lock);
                vdev_trim_stop(vd, tgt_state, vd_list);
                mutex_exit(&vd->vdev_trim_lock);
                return;
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_trim_stop_all_impl(vd->vdev_child[i], tgt_state,
                    vd_list);
        }
}

/*
 * Convenience function to stop trimming of a vdev tree and set all trim
 * thread pointers to NULL.
 */
void
vdev_trim_stop_all(vdev_t *vd, vdev_trim_state_t tgt_state)
{
        spa_t *spa = vd->vdev_spa;
        list_t vd_list;
        vdev_t *vd_l2cache;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        list_create(&vd_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_trim_node));

        vdev_trim_stop_all_impl(vd, tgt_state, &vd_list);

        /*
         * Iterate over cache devices and request stop trimming the
         * whole device in case we export the pool or remove the cache
         * device prematurely.
         */
        for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
                vd_l2cache = spa->spa_l2cache.sav_vdevs[i];
                vdev_trim_stop_all_impl(vd_l2cache, tgt_state, &vd_list);
        }

        vdev_trim_stop_wait(spa, &vd_list);

        if (vd->vdev_spa->spa_sync_on) {
                /* Make sure that our state has been synced to disk */
                txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
        }

        list_destroy(&vd_list);
}

/*
 * Conditionally restarts a manual TRIM given its on-disk state.
 */
void
vdev_trim_restart(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

        if (vd->vdev_leaf_zap != 0) {
                mutex_enter(&vd->vdev_trim_lock);
                uint64_t trim_state = VDEV_TRIM_NONE;
                int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
                    sizeof (trim_state), 1, &trim_state);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_trim_state = trim_state;

                uint64_t timestamp = 0;
                err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_ACTION_TIME,
                    sizeof (timestamp), 1, &timestamp);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_trim_action_time = timestamp;

                if (vd->vdev_trim_state == VDEV_TRIM_SUSPENDED ||
                    vd->vdev_offline) {
                        /* load progress for reporting, but don't resume */
                        VERIFY0(vdev_trim_load(vd));
                } else if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE &&
                    vdev_writeable(vd) && !vd->vdev_top->vdev_removing &&
                    vd->vdev_trim_thread == NULL) {
                        VERIFY0(vdev_trim_load(vd));
                        vdev_trim(vd, vd->vdev_trim_rate,
                            vd->vdev_trim_partial, vd->vdev_trim_secure);
                }

                mutex_exit(&vd->vdev_trim_lock);
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_trim_restart(vd->vdev_child[i]);
        }
}

/*
 * Used by the automatic TRIM when ZFS_DEBUG_TRIM is set to verify that
 * every TRIM range is contained within ms_allocatable.
 */
static void
vdev_trim_range_verify(void *arg, uint64_t start, uint64_t size)
{
        trim_args_t *ta = arg;
        metaslab_t *msp = ta->trim_msp;

        VERIFY3B(msp->ms_loaded, ==, B_TRUE);
        VERIFY3U(msp->ms_disabled, >, 0);
        VERIFY(range_tree_contains(msp->ms_allocatable, start, size));
}

/*
 * Each automatic TRIM thread is responsible for managing the trimming of a
 * top-level vdev in the pool. No automatic TRIM state is maintained on-disk.
 *
 * N.B. This behavior is different from a manual TRIM where a thread
 * is created for each leaf vdev, instead of each top-level vdev.
 */
static __attribute__((noreturn)) void
vdev_autotrim_thread(void *arg)
{
        vdev_t *vd = arg;
        spa_t *spa = vd->vdev_spa;
        int shift = 0;

        mutex_enter(&vd->vdev_autotrim_lock);
        ASSERT3P(vd->vdev_top, ==, vd);
        ASSERT3P(vd->vdev_autotrim_thread, !=, NULL);
        mutex_exit(&vd->vdev_autotrim_lock);
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        uint64_t extent_bytes_max = zfs_trim_extent_bytes_max;
        uint64_t extent_bytes_min = zfs_trim_extent_bytes_min;

        while (!vdev_autotrim_should_stop(vd)) {
                int txgs_per_trim = MAX(zfs_trim_txg_batch, 1);
                boolean_t issued_trim = B_FALSE;

                /*
                 * All of the metaslabs are divided into groups of size
                 * num_metaslabs / zfs_trim_txg_batch. Each of these groups
                 * is composed of metaslabs which are spread evenly over the
                 * device.
                 *
                 * For example, when zfs_trim_txg_batch = 32 (default) then
                 * group 0 will contain metaslabs 0, 32, 64, ...;
                 * group 1 will contain metaslabs 1, 33, 65, ...;
                 * group 2 will contain metaslabs 2, 34, 66, ...; and so on.
                 *
                 * On each pass through the while() loop one of these groups
                 * is selected. This is accomplished by using a shift value
                 * to select the starting metaslab, then striding over the
                 * metaslabs using the zfs_trim_txg_batch size. This is
                 * done to accomplish two things.
                 *
                 * 1) By dividing the metaslabs into groups and making sure
                 *    that each group takes a minimum of one txg to process,
                 *    zfs_trim_txg_batch controls the minimum number of txgs
                 *    which must occur before a metaslab is revisited.
                 *
                 * 2) Selecting non-consecutive metaslabs distributes the
                 *    TRIM commands for a group evenly over the entire device.
                 *    This can be advantageous for certain types of devices.
                 */
                for (uint64_t i = shift % txgs_per_trim; i < vd->vdev_ms_count;
                    i += txgs_per_trim) {
                        metaslab_t *msp = vd->vdev_ms[i];
                        range_tree_t *trim_tree;

                        spa_config_exit(spa, SCL_CONFIG, FTAG);
                        metaslab_disable(msp);
                        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

                        mutex_enter(&msp->ms_lock);

                        /*
                         * Skip the metaslab when it has never been allocated
                         * or when there are no recent frees to trim.
                         */
                        if (msp->ms_sm == NULL ||
                            range_tree_is_empty(msp->ms_trim)) {
                                mutex_exit(&msp->ms_lock);
                                metaslab_enable(msp, B_FALSE, B_FALSE);
                                continue;
                        }

                        /*
                         * Skip the metaslab when it has already been disabled.
                         * This may happen when a manual TRIM or initialize
                         * operation is running concurrently. In the case
                         * of a manual TRIM, the ms_trim tree will have been
                         * vacated. Only ranges added after the manual TRIM
                         * disabled the metaslab will be included in the tree.
                         * These will be processed when the automatic TRIM
                         * next revisits this metaslab.
                         */
                        if (msp->ms_disabled > 1) {
                                mutex_exit(&msp->ms_lock);
                                metaslab_enable(msp, B_FALSE, B_FALSE);
                                continue;
                        }

                        /*
                         * Allocate an empty range tree which is swapped in
                         * for the existing ms_trim tree while it is processed.
                         */
                        trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
                            0, 0);
                        range_tree_swap(&msp->ms_trim, &trim_tree);
                        ASSERT(range_tree_is_empty(msp->ms_trim));

                        /*
                         * There are two cases when constructing the per-vdev
                         * trim trees for a metaslab. If the top-level vdev
                         * has no children then it is also a leaf and should
                         * be trimmed. Otherwise our children are the leaves
                         * and a trim tree should be constructed for each.
                         */
                        trim_args_t *tap;
                        uint64_t children = vd->vdev_children;
                        if (children == 0) {
                                children = 1;
                                tap = kmem_zalloc(sizeof (trim_args_t) *
                                    children, KM_SLEEP);
                                tap[0].trim_vdev = vd;
                        } else {
                                tap = kmem_zalloc(sizeof (trim_args_t) *
                                    children, KM_SLEEP);

                                for (uint64_t c = 0; c < children; c++) {
                                        tap[c].trim_vdev = vd->vdev_child[c];
                                }
                        }

                        for (uint64_t c = 0; c < children; c++) {
                                trim_args_t *ta = &tap[c];
                                vdev_t *cvd = ta->trim_vdev;

                                ta->trim_msp = msp;
                                ta->trim_extent_bytes_max = extent_bytes_max;
                                ta->trim_extent_bytes_min = extent_bytes_min;
                                ta->trim_type = TRIM_TYPE_AUTO;
                                ta->trim_flags = 0;

                                if (cvd->vdev_detached ||
                                    !vdev_writeable(cvd) ||
                                    !cvd->vdev_has_trim ||
                                    cvd->vdev_trim_thread != NULL) {
                                        continue;
                                }

                                /*
                                 * When a device has an attached hot spare, or
                                 * is being replaced it will not be trimmed.
                                 * This is done to avoid adding additional
                                 * stress to a potentially unhealthy device,
                                 * and to minimize the required rebuild time.
                                 */
                                if (!cvd->vdev_ops->vdev_op_leaf)
                                        continue;

                                ta->trim_tree = range_tree_create(NULL,
                                    RANGE_SEG64, NULL, 0, 0);
                                range_tree_walk(trim_tree,
                                    vdev_trim_range_add, ta);
                        }

                        mutex_exit(&msp->ms_lock);
                        spa_config_exit(spa, SCL_CONFIG, FTAG);

                        /*
                         * Issue the TRIM I/Os for all ranges covered by the
                         * TRIM trees. These ranges are safe to TRIM because
                         * no new allocations will be performed until the call
                         * to metaslab_enable() below.
                         */
                        for (uint64_t c = 0; c < children; c++) {
                                trim_args_t *ta = &tap[c];

                                /*
                                 * Always yield to a manual TRIM if one has
                                 * been started for the child vdev.
                                 */
                                if (ta->trim_tree == NULL ||
                                    ta->trim_vdev->vdev_trim_thread != NULL) {
                                        continue;
                                }

                                /*
                                 * After this point metaslab_enable() must be
                                 * called with the sync flag set. This is done
                                 * here because vdev_trim_ranges() is allowed
                                 * to be interrupted (EINTR) before issuing all
                                 * of the required TRIM I/Os.
                                 */
                                issued_trim = B_TRUE;

                                int error = vdev_trim_ranges(ta);
                                if (error)
                                        break;
                        }

                        /*
                         * Verify every range which was trimmed is still
                         * contained within the ms_allocatable tree.
                         */
                        if (zfs_flags & ZFS_DEBUG_TRIM) {
                                mutex_enter(&msp->ms_lock);
                                VERIFY0(metaslab_load(msp));
                                VERIFY3P(tap[0].trim_msp, ==, msp);
                                range_tree_walk(trim_tree,
                                    vdev_trim_range_verify, &tap[0]);
                                mutex_exit(&msp->ms_lock);
                        }

                        range_tree_vacate(trim_tree, NULL, NULL);
                        range_tree_destroy(trim_tree);

                        metaslab_enable(msp, issued_trim, B_FALSE);
                        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

                        for (uint64_t c = 0; c < children; c++) {
                                trim_args_t *ta = &tap[c];

                                if (ta->trim_tree == NULL)
                                        continue;

                                range_tree_vacate(ta->trim_tree, NULL, NULL);
                                range_tree_destroy(ta->trim_tree);
                        }

                        kmem_free(tap, sizeof (trim_args_t) * children);
                }

                spa_config_exit(spa, SCL_CONFIG, FTAG);

                /*
                 * After completing the group of metaslabs wait for the next
                 * open txg. This is done to make sure that a minimum of
                 * zfs_trim_txg_batch txgs will occur before these metaslabs
                 * are trimmed again.
                 */
                txg_wait_open(spa_get_dsl(spa), 0, issued_trim);

                shift++;
                spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
        }

        for (uint64_t c = 0; c < vd->vdev_children; c++) {
                vdev_t *cvd = vd->vdev_child[c];
                mutex_enter(&cvd->vdev_trim_io_lock);

                while (cvd->vdev_trim_inflight[1] > 0) {
                        cv_wait(&cvd->vdev_trim_io_cv,
                            &cvd->vdev_trim_io_lock);
                }
                mutex_exit(&cvd->vdev_trim_io_lock);
        }

        spa_config_exit(spa, SCL_CONFIG, FTAG);

        /*
         * When exiting because the autotrim property was set to off, then
         * abandon any unprocessed ms_trim ranges to reclaim the memory.
         */
        if (spa_get_autotrim(spa) == SPA_AUTOTRIM_OFF) {
                for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
                        metaslab_t *msp = vd->vdev_ms[i];

                        mutex_enter(&msp->ms_lock);
                        range_tree_vacate(msp->ms_trim, NULL, NULL);
                        mutex_exit(&msp->ms_lock);
                }
        }

        mutex_enter(&vd->vdev_autotrim_lock);
        ASSERT(vd->vdev_autotrim_thread != NULL);
        vd->vdev_autotrim_thread = NULL;
        cv_broadcast(&vd->vdev_autotrim_cv);
        mutex_exit(&vd->vdev_autotrim_lock);

        thread_exit();
}

/*
 * Starts an autotrim thread, if needed, for each top-level vdev which can be
 * trimmed. A top-level vdev which has been evacuated will never be trimmed.
 */
void
vdev_autotrim(spa_t *spa)
{
        vdev_t *root_vd = spa->spa_root_vdev;

        for (uint64_t i = 0; i < root_vd->vdev_children; i++) {
                vdev_t *tvd = root_vd->vdev_child[i];

                mutex_enter(&tvd->vdev_autotrim_lock);
                if (vdev_writeable(tvd) && !tvd->vdev_removing &&
                    tvd->vdev_autotrim_thread == NULL) {
                        ASSERT3P(tvd->vdev_top, ==, tvd);

                        tvd->vdev_autotrim_thread = thread_create(NULL, 0,
                            vdev_autotrim_thread, tvd, 0, &p0, TS_RUN,
                            maxclsyspri);
                        ASSERT(tvd->vdev_autotrim_thread != NULL);
                }
                mutex_exit(&tvd->vdev_autotrim_lock);
        }
}

/*
 * Wait for the vdev_autotrim_thread associated with the passed top-level
 * vdev to be terminated (canceled or stopped).
 */
void
vdev_autotrim_stop_wait(vdev_t *tvd)
{
        mutex_enter(&tvd->vdev_autotrim_lock);
        if (tvd->vdev_autotrim_thread != NULL) {
                tvd->vdev_autotrim_exit_wanted = B_TRUE;

                while (tvd->vdev_autotrim_thread != NULL) {
                        cv_wait(&tvd->vdev_autotrim_cv,
                            &tvd->vdev_autotrim_lock);
                }

                ASSERT3P(tvd->vdev_autotrim_thread, ==, NULL);
                tvd->vdev_autotrim_exit_wanted = B_FALSE;
        }
        mutex_exit(&tvd->vdev_autotrim_lock);
}

/*
 * Wait for all of the vdev_autotrim threads associated with the pool to
 * be terminated (canceled or stopped).
 */
void
vdev_autotrim_stop_all(spa_t *spa)
{
        vdev_t *root_vd = spa->spa_root_vdev;

        for (uint64_t i = 0; i < root_vd->vdev_children; i++)
                vdev_autotrim_stop_wait(root_vd->vdev_child[i]);
}

/*
 * Conditionally restart all of the vdev_autotrim threads for the pool.
 */
void
vdev_autotrim_restart(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        if (spa->spa_autotrim)
                vdev_autotrim(spa);
}

static __attribute__((noreturn)) void
vdev_trim_l2arc_thread(void *arg)
{
        vdev_t *vd = arg;
        spa_t *spa = vd->vdev_spa;
        l2arc_dev_t *dev = l2arc_vdev_get(vd);
        trim_args_t ta = {0};
        range_seg64_t physical_rs;

        ASSERT(vdev_is_concrete(vd));
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        vd->vdev_trim_last_offset = 0;
        vd->vdev_trim_rate = 0;
        vd->vdev_trim_partial = 0;
        vd->vdev_trim_secure = 0;

        ta.trim_vdev = vd;
        ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
        ta.trim_type = TRIM_TYPE_MANUAL;
        ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
        ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
        ta.trim_flags = 0;

        physical_rs.rs_start = vd->vdev_trim_bytes_done = 0;
        physical_rs.rs_end = vd->vdev_trim_bytes_est =
            vdev_get_min_asize(vd);

        range_tree_add(ta.trim_tree, physical_rs.rs_start,
            physical_rs.rs_end - physical_rs.rs_start);

        mutex_enter(&vd->vdev_trim_lock);
        vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, 0, 0, 0);
        mutex_exit(&vd->vdev_trim_lock);

        (void) vdev_trim_ranges(&ta);

        spa_config_exit(spa, SCL_CONFIG, FTAG);
        mutex_enter(&vd->vdev_trim_io_lock);
        while (vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] > 0) {
                cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
        }
        mutex_exit(&vd->vdev_trim_io_lock);

        range_tree_vacate(ta.trim_tree, NULL, NULL);
        range_tree_destroy(ta.trim_tree);

        mutex_enter(&vd->vdev_trim_lock);
        if (!vd->vdev_trim_exit_wanted && vdev_writeable(vd)) {
                vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
                    vd->vdev_trim_rate, vd->vdev_trim_partial,
                    vd->vdev_trim_secure);
        }
        ASSERT(vd->vdev_trim_thread != NULL ||
            vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] == 0);

        /*
         * Drop the vdev_trim_lock while we sync out the txg since it's
         * possible that a device might be trying to come online and
         * must check to see if it needs to restart a trim. That thread
         * will be holding the spa_config_lock which would prevent the
         * txg_wait_synced from completing. Same strategy as in
         * vdev_trim_thread().
         */
        mutex_exit(&vd->vdev_trim_lock);
        txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
        mutex_enter(&vd->vdev_trim_lock);

        /*
         * Update the header of the cache device here, before
         * broadcasting vdev_trim_cv which may lead to the removal
         * of the device. The same applies for setting l2ad_trim_all to
         * false.
         */
        spa_config_enter(vd->vdev_spa, SCL_L2ARC, vd,
            RW_READER);
        memset(dev->l2ad_dev_hdr, 0, dev->l2ad_dev_hdr_asize);
        l2arc_dev_hdr_update(dev);
        spa_config_exit(vd->vdev_spa, SCL_L2ARC, vd);

        vd->vdev_trim_thread = NULL;
        if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE)
                dev->l2ad_trim_all = B_FALSE;

        cv_broadcast(&vd->vdev_trim_cv);
        mutex_exit(&vd->vdev_trim_lock);

        thread_exit();
}

/*
 * Punches out TRIM threads for the L2ARC devices in a spa and assigns them
 * to vd->vdev_trim_thread variable. This facilitates the management of
 * trimming the whole cache device using TRIM_TYPE_MANUAL upon addition
 * to a pool or pool creation or when the header of the device is invalid.
 */
void
vdev_trim_l2arc(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        /*
         * Locate the spa's l2arc devices and kick off TRIM threads.
         */
        for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
                vdev_t *vd = spa->spa_l2cache.sav_vdevs[i];
                l2arc_dev_t *dev = l2arc_vdev_get(vd);

                if (dev == NULL || !dev->l2ad_trim_all) {
                        /*
                         * Don't attempt TRIM if the vdev is UNAVAIL or if the
                         * cache device was not marked for whole device TRIM
                         * (i.e. l2arc_trim_ahead = 0, or the L2ARC device
                         * header is valid with trim_state = VDEV_TRIM_COMPLETE
                         * and l2ad_log_entries > 0).
                         */
                        continue;
                }

                mutex_enter(&vd->vdev_trim_lock);
                ASSERT(vd->vdev_ops->vdev_op_leaf);
                ASSERT(vdev_is_concrete(vd));
                ASSERT3P(vd->vdev_trim_thread, ==, NULL);
                ASSERT(!vd->vdev_detached);
                ASSERT(!vd->vdev_trim_exit_wanted);
                ASSERT(!vd->vdev_top->vdev_removing);
                vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, 0, 0, 0);
                vd->vdev_trim_thread = thread_create(NULL, 0,
                    vdev_trim_l2arc_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
                mutex_exit(&vd->vdev_trim_lock);
        }
}

/*
 * A wrapper which calls vdev_trim_ranges(). It is intended to be called
 * on leaf vdevs.
 */
int
vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
{
        trim_args_t ta = {0};
        range_seg64_t physical_rs;
        int error;
        physical_rs.rs_start = start;
        physical_rs.rs_end = start + size;

        ASSERT(vdev_is_concrete(vd));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(!vd->vdev_detached);
        ASSERT(!vd->vdev_top->vdev_removing);

        ta.trim_vdev = vd;
        ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
        ta.trim_type = TRIM_TYPE_SIMPLE;
        ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
        ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
        ta.trim_flags = 0;

        ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);

        if (physical_rs.rs_end > physical_rs.rs_start) {
                range_tree_add(ta.trim_tree, physical_rs.rs_start,
                    physical_rs.rs_end - physical_rs.rs_start);
        } else {
                ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
        }

        error = vdev_trim_ranges(&ta);

        mutex_enter(&vd->vdev_trim_io_lock);
        while (vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE] > 0) {
                cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
        }
        mutex_exit(&vd->vdev_trim_io_lock);

        range_tree_vacate(ta.trim_tree, NULL, NULL);
        range_tree_destroy(ta.trim_tree);

        return (error);
}

EXPORT_SYMBOL(vdev_trim);
EXPORT_SYMBOL(vdev_trim_stop);
EXPORT_SYMBOL(vdev_trim_stop_all);
EXPORT_SYMBOL(vdev_trim_stop_wait);
EXPORT_SYMBOL(vdev_trim_restart);
EXPORT_SYMBOL(vdev_autotrim);
EXPORT_SYMBOL(vdev_autotrim_stop_all);
EXPORT_SYMBOL(vdev_autotrim_stop_wait);
EXPORT_SYMBOL(vdev_autotrim_restart);
EXPORT_SYMBOL(vdev_trim_l2arc);
EXPORT_SYMBOL(vdev_trim_simple);

ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, extent_bytes_max, UINT, ZMOD_RW,
        "Max size of TRIM commands, larger will be split");

ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, extent_bytes_min, UINT, ZMOD_RW,
        "Min size of TRIM commands, smaller will be skipped");

ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, metaslab_skip, UINT, ZMOD_RW,
        "Skip metaslabs which have never been initialized");

ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, txg_batch, UINT, ZMOD_RW,
        "Min number of txgs to aggregate frees before issuing TRIM");

ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, queue_limit, UINT, ZMOD_RW,
        "Max queued TRIMs outstanding per leaf vdev");