/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/zil.h>
#include <sys/callb.h>
#include <sys/trace_zfs.h>

/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons, such as hitting a time or size threshold, or the execution of an
 * administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them;
 * those allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, contiguous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are
 * allocated, since each allocation requires a modification of persistent
 * metadata. Further, to hasten convergence, after a prescribed number of
 * passes, ZFS also defers frees, and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work such as creating and destroying snapshots or
 * datasets. Note that when a synctask is initiated it enters the open txg,
 * and ZFS then pushes that txg as quickly as possible to completion of the
 * syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */

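/*
 * Illustrative sketch (not part of the original source): consumers drive
 * these states indirectly through the DMU. Assuming the standard dmu_tx_*()
 * API, a single modification flows through the txg machinery roughly as
 * follows (names such as os, object, offset, length are placeholders):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, length);
 *	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));	(join the open txg)
 *	uint64_t txg = dmu_tx_get_txg(tx);
 *	... modify in-memory structures ...
 *	dmu_tx_commit(tx);			(the txg may now quiesce)
 *	txg_wait_synced(dp, txg);		(block until on stable storage)
 */
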
static __attribute__((noreturn)) void txg_sync_thread(void *arg);
static __attribute__((noreturn)) void txg_quiesce_thread(void *arg);

uint_t zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	memset(tx, 0, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT0(tx->tx_threads);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	memset(tx, 0, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT0(tx->tx_threads);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86. This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time) {
		(void) cv_timedwait_idle(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	} else {
		cv_wait_idle(cv, &tx->tx_sync_lock);
	}

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT3U(tx->tx_threads, ==, 2);

	/*
	 * We need to ensure that we've vacated the deferred metaslab trees.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT3U(tx->tx_threads, ==, 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

/*
 * Get a handle on the currently open txg and keep it open.
 *
 * The txg is guaranteed to stay open until txg_rele_to_quiesce() is called for
 * the handle. Once txg_rele_to_quiesce() has been called, the txg stays
 * in quiescing state until txg_rele_to_sync() is called for the handle.
 *
 * It is guaranteed that subsequent calls return monotonically increasing
 * txgs for the same dsl_pool_t. Of course this is not strong monotonicity,
 * because the same txg can be returned multiple times in a row. This
 * guarantee holds both for subsequent calls from one thread and for multiple
 * threads. For example, it is impossible to observe the following sequence
 * of events:
 *
 *          Thread 1                            Thread 2
 *
 *     1 <- txg_hold_open(P, ...)
 *                                         2 <- txg_hold_open(P, ...)
 *     1 <- txg_hold_open(P, ...)
 *
 */
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * The processor id is simply used as a "random" number to index
	 * into the tx_cpu array; there is no other significance to the
	 * chosen tx_cpu. Using the current cpu is a cheap way to spread
	 * contention across the per-cpu structures.
	 */
	tc = &tx->tx_cpu[CPU_SEQID_UNSTABLE];

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

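/*
 * Illustrative sketch (not in the original source) of the hold/release
 * protocol above, as the DMU uses it when assigning a transaction:
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);	(txg cannot quiesce yet)
 *	... charge the transaction against this txg ...
 *	txg_rele_to_quiesce(&th);		(quiescing may begin)
 *	... apply the in-memory changes ...
 *	txg_rele_to_sync(&th);			(quiescing may complete)
 */
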
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	uint64_t tx_open_time;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = tx_open_time = gethrtime();

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
	spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync() for their open transaction handles.
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}

static void
txg_do_callbacks(void *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    100, defclsyspri, boot_ncpus, boot_ncpus * 2,
			    TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
			    TASKQ_THREADS_CPU_PCT);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq,
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

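/*
 * Illustrative sketch (not in the original source): callbacks land on the
 * per-txg lists above via dmu_tx_callback_register() and are invoked from
 * the tx_commit_cb taskq once the transaction's txg has synced. In the
 * synced path dispatched above they are called with error == 0:
 *
 *	static void
 *	my_commit_cb(void *arg, int error)	(hypothetical callback)
 *	{
 *		... runs after the data is on stable storage ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);
 */
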
/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}

static boolean_t
txg_is_quiescing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiescing_txg != 0);
}

static boolean_t
txg_has_quiesced_to_sync(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiesced_txg != 0);
}

static __attribute__((noreturn)) void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	clock_t start, delta;

	(void) spl_fstrans_mark();
	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		clock_t timeout = zfs_txg_timeout * hz;
		clock_t timer;
		uint64_t txg;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    !txg_has_quiesced_to_sync(dp)) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    (u_longlong_t)tx->tx_synced_txg,
			    (u_longlong_t)tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * When we're suspended, nothing should be changing and for
		 * MMP we don't want to bump anything that would make it
		 * harder to detect if another host is changing it when
		 * resuming after a MMP suspend.
		 */
		if (spa_suspended(spa))
			continue;

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && !txg_has_quiesced_to_sync(dp)) {
			if (txg_is_quiescing(dp)) {
				txg_thread_wait(tx, &cpr,
				    &tx->tx_quiesce_done_cv, 0);
				continue;
			}
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us. This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		ASSERT(tx->tx_quiesced_txg != 0);
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    (u_longlong_t)txg, (u_longlong_t)tx->tx_quiesce_txg_waiting,
		    (u_longlong_t)tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		txg_stat_t *ts = spa_txg_history_init_io(spa, txg, dp);
		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;
		spa_txg_history_fini_io(spa, ts);

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

static __attribute__((noreturn)) void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state. So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    txg_has_quiesced_to_sync(dp)))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    (u_longlong_t)txg,
		    (u_longlong_t)tx->tx_quiesce_txg_waiting,
		    (u_longlong_t)tx->tx_sync_txg_waiting);
		tx->tx_quiescing_txg = txg;

		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n",
		    (u_longlong_t)txg);
		tx->tx_quiescing_txg = 0;
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by delay nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}

static boolean_t
txg_wait_synced_impl(dsl_pool_t *dp, uint64_t txg, boolean_t wait_sig)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    (u_longlong_t)txg, (u_longlong_t)tx->tx_quiesce_txg_waiting,
	    (u_longlong_t)tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%px\n",
		    (u_longlong_t)tx->tx_synced_txg,
		    (u_longlong_t)tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		if (wait_sig) {
			/*
			 * Condition wait here but stop if the thread receives a
			 * signal. The caller may call txg_wait_synced*() again
			 * to resume waiting for this txg.
			 */
			if (cv_wait_io_sig(&tx->tx_sync_done_cv,
			    &tx->tx_sync_lock) == 0) {
				mutex_exit(&tx->tx_sync_lock);
				return (B_TRUE);
			}
		} else {
			cv_wait_io(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
	return (B_FALSE);
}

void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	VERIFY0(txg_wait_synced_impl(dp, txg, B_FALSE));
}

/*
 * Similar to txg_wait_synced(), but it can be interrupted by a signal.
 * Returns B_TRUE if the thread was signaled while waiting.
 */
boolean_t
txg_wait_synced_sig(dsl_pool_t *dp, uint64_t txg)
{
	return (txg_wait_synced_impl(dp, txg, B_TRUE));
}

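/*
 * Usage note (not in the original source): callers that simply need all
 * currently open changes on stable storage pass txg == 0, which the
 * implementation above rounds up to tx_open_txg + TXG_DEFER_SIZE so that
 * deferred work is covered as well (see also txg_sync_stop()):
 *
 *	txg_wait_synced(dp, 0);		(wait for everything outstanding)
 */
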
/*
 * Wait for the specified open transaction group. Set should_quiesce
 * when the current open txg should be quiesced immediately.
 */
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg && should_quiesce)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    (u_longlong_t)txg, (u_longlong_t)tx->tx_quiesce_txg_waiting,
	    (u_longlong_t)tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		/*
		 * Callers setting should_quiesce will use cv_wait_io() and
		 * be accounted for as iowait time. Otherwise, the caller is
		 * understood to be idle and cv_wait_idle() is used to prevent
		 * incorrectly inflating the system load average.
		 */
		if (should_quiesce == B_TRUE) {
			cv_wait_io(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
		} else {
			cv_wait_idle(&tx->tx_quiesce_done_cv,
			    &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
}

/*
 * Pass in the txg number that should be synced.
 */
void
txg_kick(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	if (tx->tx_sync_txg_waiting >= txg)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_sync_txg_waiting < txg) {
		tx->tx_sync_txg_waiting = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Verify that this txg is active (open, quiescing, syncing). Non-active
 * txgs should not be manipulated.
 */
#ifdef ZFS_DEBUG
void
txg_verify(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp __maybe_unused = spa_get_dsl(spa);
	if (txg <= TXG_INITIAL || txg == ZILTEST_TXG)
		return;
	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}
#endif

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, spa_t *spa, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;
	tl->tl_spa = spa;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

static boolean_t
txg_list_empty_impl(txg_list_t *tl, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&tl->tl_lock));
	TXG_VERIFY(tl->tl_spa, txg);
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	mutex_enter(&tl->tl_lock);
	boolean_t ret = txg_list_empty_impl(tl, txg);
	mutex_exit(&tl->tl_lock);

	return (ret);
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	mutex_enter(&tl->tl_lock);
	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty_impl(tl, t));
	mutex_exit(&tl->tl_lock);

	mutex_destroy(&tl->tl_lock);
}

/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	boolean_t res = B_TRUE;
	for (int i = 0; i < TXG_SIZE; i++)
		res &= (tl->tl_head[i] == NULL);
	return (res);
}

/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		ASSERT(tn->tn_member[t]);
		ASSERT(tn->tn_next[t] == NULL || tn->tn_next[t]->tn_member[t]);
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn->tn_member[t] != 0);
}

/*
 * Walk a txg list.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;

	mutex_enter(&tl->tl_lock);
	tn = tl->tl_head[t];
	mutex_exit(&tl->tl_lock);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);

	mutex_enter(&tl->tl_lock);
	tn = tn->tn_next[t];
	mutex_exit(&tl->tl_lock);

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

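/*
 * Illustrative sketch (not in the original source): two common walk
 * patterns over these lists, built from the primitives above:
 *
 *	for (p = txg_list_head(tl, txg); p != NULL;
 *	    p = txg_list_next(tl, p, txg))
 *		... visit p without removing it ...
 *
 *	while ((p = txg_list_remove(tl, txg)) != NULL)
 *		... process and consume each entry ...
 */
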
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, timeout, UINT, ZMOD_RW,
	"Max seconds worth of delta per txg");