/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 */

#ifndef _SYS_TXG_IMPL_H
#define	_SYS_TXG_IMPL_H

#include <sys/spa.h>
#include <sys/txg.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * The tx_cpu structure is a per-cpu structure that is used to track
 * the number of active transaction holds (tc_count). As transactions
 * are assigned into a transaction group the appropriate tc_count is
 * incremented to indicate that there are pending changes that have yet
 * to quiesce. Consumers eventually call txg_rele_to_sync() to decrement
 * the tc_count. A transaction group is not considered quiesced until all
 * tx_cpu structures have reached a tc_count of zero.
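 *
 * As an illustrative sketch (not the verbatim txg.c code), a consumer's
 * hold lifecycle uses the txg_hold_open(), txg_rele_to_quiesce(), and
 * txg_rele_to_sync() interfaces declared in sys/txg.h, where dp is the
 * pool's dsl_pool_t:
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);	(tc_count[txg & TXG_MASK]++,
 *						 returns with tc_open_lock held)
 *	txg_rele_to_quiesce(&th);		(drops tc_open_lock once the
 *						 tx is assigned to the txg)
 *	... dirty data in the open txg ...
 *	txg_rele_to_sync(&th);			(tc_count[txg & TXG_MASK]--,
 *						 wakes the quiescer on zero)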
 *
 * This structure is a per-cpu structure by design. Updates to this structure
 * are frequent and concurrent. Having a single structure would result in
 * heavy lock contention so a per-cpu design was implemented. With the fanned
 * out mutex design, consumers only need to lock the mutex associated with
 * the thread's cpu.
 *
 * The tx_cpu contains two locks, the tc_lock and tc_open_lock.
 * The tc_lock is used to protect all members of the tx_cpu structure with
 * the exception of the tc_open_lock. This lock should only be held for a
 * short period of time, typically when updating the value of tc_count.
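 *
 * For example, the internal release path (a simplified sketch of
 * txg_exit() in txg.c; details may differ from the current source) holds
 * tc_lock just long enough to drop the count and wake any quiescer:
 *
 *	mutex_enter(&tc->tc_lock);
 *	if (--tc->tc_count[txg & TXG_MASK] == 0)
 *		cv_broadcast(&tc->tc_cv[txg & TXG_MASK]);
 *	mutex_exit(&tc->tc_lock);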
 *
 * The tc_open_lock protects the tx_open_txg member of the tx_state structure.
 * This lock is used to ensure that transactions are only assigned into
 * the current open transaction group. In order to move the current open
 * transaction group to the quiesce phase, the txg_quiesce thread must
 * grab all tc_open_locks, increment the tx_open_txg, and drop the locks
 * (see the sketch following struct tx_cpu below).
 * The tc_open_lock is held until the transaction is assigned into the
 * transaction group. Typically, this is a short operation but if throttling
 * is occurring it may be held for longer periods of time.
 */
struct tx_cpu {
	kmutex_t	tc_open_lock;	/* protects tx_open_txg */
	kmutex_t	tc_lock;	/* protects the rest of this struct */
	kcondvar_t	tc_cv[TXG_SIZE];
	uint64_t	tc_count[TXG_SIZE];	/* tx hold count on each txg */
	list_t		tc_callbacks[TXG_SIZE];	/* commit cb list */
} ____cacheline_aligned;
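
/*
 * To make the lock dance concrete, txg_quiesce() in txg.c advances the
 * open txg roughly as follows. This is a simplified sketch rather than
 * the verbatim implementation; tx is the pool's tx_state_t and txg is
 * the group being quiesced:
 *
 *	for (c = 0; c < max_ncpus; c++)
 *		mutex_enter(&tx->tx_cpu[c].tc_open_lock);
 *	tx->tx_open_txg++;		(no new holds can enter the old txg)
 *	for (c = 0; c < max_ncpus; c++)
 *		mutex_exit(&tx->tx_cpu[c].tc_open_lock);
 *
 *	for (c = 0; c < max_ncpus; c++) {
 *		tx_cpu_t *tc = &tx->tx_cpu[c];
 *		mutex_enter(&tc->tc_lock);
 *		while (tc->tc_count[txg & TXG_MASK] != 0)
 *			cv_wait(&tc->tc_cv[txg & TXG_MASK], &tc->tc_lock);
 *		mutex_exit(&tc->tc_lock);
 *	}
 */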

/*
 * The tx_state structure maintains the state information about the different
 * stages of the pool's transaction groups. A per pool tx_state structure
 * is used to track this information. The tx_state structure also points to
 * an array of tx_cpu structures (described above). Although the tx_sync_lock
 * is used to protect the members of this structure, it is not used to
 * protect the tx_open_txg. Instead a special lock in the tx_cpu structure
 * is used. Readers of tx_open_txg must grab the per-cpu tc_open_lock.
 * Any thread wishing to update tx_open_txg must grab the tc_open_lock on
 * every cpu (see txg_quiesce()).
 */
typedef struct tx_state {
	tx_cpu_t	*tx_cpu;	/* protects access to tx_open_txg */
	kmutex_t	tx_sync_lock;	/* protects the rest of this struct */

	uint64_t	tx_open_txg;	/* currently open txg id */
	uint64_t	tx_quiescing_txg; /* currently quiescing txg id */
	uint64_t	tx_quiesced_txg; /* quiesced txg waiting for sync */
	uint64_t	tx_syncing_txg;	/* currently syncing txg id */
	uint64_t	tx_synced_txg;	/* last synced txg id */

	hrtime_t	tx_open_time;	/* start time of tx_open_txg */

	uint64_t	tx_sync_txg_waiting; /* txg we're waiting to sync */
	uint64_t	tx_quiesce_txg_waiting; /* txg we're waiting to open */

	kcondvar_t	tx_sync_more_cv;
	kcondvar_t	tx_sync_done_cv;
	kcondvar_t	tx_quiesce_more_cv;
	kcondvar_t	tx_quiesce_done_cv;
	kcondvar_t	tx_timeout_cv;
	kcondvar_t	tx_exit_cv;	/* wait for all threads to exit */

	uint8_t		tx_threads;	/* number of threads */
	uint8_t		tx_exiting;	/* set when we're exiting */

	kthread_t	*tx_sync_thread;
	kthread_t	*tx_quiesce_thread;

	taskq_t		*tx_commit_cb_taskq; /* commit callback taskq */
} tx_state_t;
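
/*
 * As a sketch of how the waiting fields and condition variables interact
 * (simplified from txg_wait_synced() in txg.c; not the verbatim code),
 * a thread waiting for a txg to reach stable storage does roughly the
 * following, with tx being the pool's tx_state_t:
 *
 *	mutex_enter(&tx->tx_sync_lock);
 *	if (tx->tx_sync_txg_waiting < txg)
 *		tx->tx_sync_txg_waiting = txg;
 *	while (tx->tx_synced_txg < txg) {
 *		cv_broadcast(&tx->tx_sync_more_cv);	(kick the sync thread)
 *		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
 *	}
 *	mutex_exit(&tx->tx_sync_lock);
 */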

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_TXG_IMPL_H */