 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/rrwlock.h>
#include <sys/trace_zfs.h>

/*
 * This file contains the implementation of a re-entrant read
 * reader/writer lock (aka "rrwlock").
 *
 * This is a normal reader/writer lock with the additional feature
 * of allowing threads who have already obtained a read lock to
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers (to prevent recursive deadlock).
 *
 * Callers who have not obtained a read lock give waiting writers priority.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a caller who has already obtained a read lock to be able to
 * then grab a write lock without first dropping all read locks, and
 * vice versa).
 *
 * The rrwlock_t uses tsd (thread specific data) to keep a list of
 * nodes (rrw_node_t), where each node keeps track of which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed.  Since re-entering
 * should be rare, a thread that grabs multiple reads on the same rrwlock_t
 * will store multiple rrw_node_ts with the same 'rn_rrl'.  Nodes on the
 * tsd list can represent a different rrwlock_t.  This allows a thread
 * to enter multiple and unique rrwlock_ts for read locks at the same time.
 *
 * Since using tsd exposes some overhead, the rrwlock_t only needs to
 * keep tsd data when writers are waiting.  If no writers are waiting, then
 * a reader just bumps the anonymous read count (rr_anon_rcount) - no tsd
 * data is needed.  Once a writer attempts to grab the lock, readers then
 * keep tsd data and bump the linked readers count (rr_linked_rcount).
 *
 * If there are waiting writers and there are anonymous readers, then a
 * reader doesn't know whether its read is re-entrant.  But since it may be,
 * we allow the read to proceed (otherwise it could deadlock).  Since once
 * waiting writers are active, readers no longer bump the anonymous count,
 * the anonymous readers will eventually flush themselves out.  At this point,
 * readers will be able to tell whether their read is re-entrant (they have
 * a rrw_node_t entry for the lock) or not.  If it is, we must let them
 * proceed.  If it is not, then the reader blocks for the waiting writers.
 * Hence, we do not starve writers.
 */
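
/*
 * A minimal usage sketch (the lock and the calling sequence below are
 * hypothetical, not part of this file): a thread that already holds the
 * lock as reader may take another read hold without blocking, even if a
 * writer is waiting, but every hold must be dropped with its own rrw_exit():
 *
 *	rrwlock_t lock;
 *
 *	rrw_init(&lock, B_FALSE);
 *	rrw_enter(&lock, RW_READER, FTAG);	(first read hold)
 *	rrw_enter(&lock, RW_READER, FTAG);	(re-entrant read, cannot deadlock)
 *	rrw_exit(&lock, FTAG);
 *	rrw_exit(&lock, FTAG);			(lock is now free for writers)
 *	rrw_enter(&lock, RW_WRITER, FTAG);	(exclusive, not re-entrant)
 *	rrw_exit(&lock, FTAG);
 *	rrw_destroy(&lock);
 *
 * FTAG is the usual ZFS holder tag; whatever tag is passed to the enter
 * call must also be passed to the matching rrw_exit().
 */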

/* global key for TSD */
uint_t rrw_tsd_key;

typedef struct rrw_node {
        struct rrw_node *rn_next;
        rrwlock_t       *rn_rrl;
        const void      *rn_tag;
} rrw_node_t;
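
/*
 * Look up the current thread's TSD list for a node that references 'rrl'.
 * Returns the node if this thread already holds a tracked (linked) read
 * lock on 'rrl', or NULL if it does not or if no linked readers exist.
 */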
static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
        rrw_node_t *rn;

        if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
                return (NULL);

        for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
                if (rn->rn_rrl == rrl)
                        return (rn);
        }
        return (NULL);
}

/*
 * Add a node to the head of the singly linked list.
 */
static void
rrn_add(rrwlock_t *rrl, const void *tag)
{
        rrw_node_t *rn;

        rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
        rn->rn_rrl = rrl;
        rn->rn_next = tsd_get(rrw_tsd_key);
        rn->rn_tag = tag;
        VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}

/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl, const void *tag)
{
        rrw_node_t *rn;
        rrw_node_t *prev = NULL;

        if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
                return (B_FALSE);

        for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
                if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
                        if (prev != NULL)
                                prev->rn_next = rn->rn_next;
                        else
                                VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
                        kmem_free(rn, sizeof (*rn));
                        return (B_TRUE);
                }
                prev = rn;
        }
        return (B_FALSE);
}

void
rrw_init(rrwlock_t *rrl, boolean_t track_all)
{
        mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
        rrl->rr_writer = NULL;
        zfs_refcount_create(&rrl->rr_anon_rcount);
        zfs_refcount_create(&rrl->rr_linked_rcount);
        rrl->rr_writer_wanted = B_FALSE;
        rrl->rr_track_all = track_all;
}

void
rrw_destroy(rrwlock_t *rrl)
{
        mutex_destroy(&rrl->rr_lock);
        cv_destroy(&rrl->rr_cv);
        ASSERT(rrl->rr_writer == NULL);
        zfs_refcount_destroy(&rrl->rr_anon_rcount);
        zfs_refcount_destroy(&rrl->rr_linked_rcount);
}

static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, const void *tag)
{
        mutex_enter(&rrl->rr_lock);
#if !defined(ZFS_DEBUG) && defined(_KERNEL)
        if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
            !rrl->rr_track_all) {
                rrl->rr_anon_rcount.rc_count++;
                mutex_exit(&rrl->rr_lock);
                return;
        }
        DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
        ASSERT(rrl->rr_writer != curthread);
        ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);

        while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
            zfs_refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
            rrn_find(rrl) == NULL))
                cv_wait(&rrl->rr_cv, &rrl->rr_lock);

        if (rrl->rr_writer_wanted || rrl->rr_track_all) {
                /* may or may not be a re-entrant enter */
                rrn_add(rrl, tag);
                (void) zfs_refcount_add(&rrl->rr_linked_rcount, tag);
        } else {
                (void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
        }
        ASSERT(rrl->rr_writer == NULL);
        mutex_exit(&rrl->rr_lock);
}

void
rrw_enter_read(rrwlock_t *rrl, const void *tag)
{
        rrw_enter_read_impl(rrl, B_FALSE, tag);
}

/*
 * Take a read lock even if there are pending write lock requests.  If we want
 * to take a lock reentrantly, but from different threads (that have a
 * relationship to each other), the normal detection mechanism to overrule
 * the pending writer does not work, so we have to give an explicit hint here.
 */
void
rrw_enter_read_prio(rrwlock_t *rrl, const void *tag)
{
        rrw_enter_read_impl(rrl, B_TRUE, tag);
}
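
/*
 * For example (hypothetical threads, for illustration only): if thread A
 * takes the lock as reader and then hands work to a helper thread B that
 * must also read-lock it, B has no rrw_node_t for the lock, so a pending
 * writer would normally make B wait - and A, waiting on B, could deadlock.
 * Having B use rrw_enter_read_prio() instead of rrw_enter_read() overrides
 * the writer-priority check for that acquisition.
 */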

void
rrw_enter_write(rrwlock_t *rrl)
{
        mutex_enter(&rrl->rr_lock);
        ASSERT(rrl->rr_writer != curthread);

        while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
            zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
            rrl->rr_writer != NULL) {
                rrl->rr_writer_wanted = B_TRUE;
                cv_wait(&rrl->rr_cv, &rrl->rr_lock);
        }
        rrl->rr_writer_wanted = B_FALSE;
        rrl->rr_writer = curthread;
        mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, const void *tag)
{
        if (rw == RW_READER)
                rrw_enter_read(rrl, tag);
        else
                rrw_enter_write(rrl);
}

void
rrw_exit(rrwlock_t *rrl, const void *tag)
{
        mutex_enter(&rrl->rr_lock);
#if !defined(ZFS_DEBUG) && defined(_KERNEL)
        if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
                rrl->rr_anon_rcount.rc_count--;
                if (rrl->rr_anon_rcount.rc_count == 0)
                        cv_broadcast(&rrl->rr_cv);
                mutex_exit(&rrl->rr_lock);
                return;
        }
        DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
        ASSERT(!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
            !zfs_refcount_is_zero(&rrl->rr_linked_rcount) ||
            rrl->rr_writer != NULL);

        if (rrl->rr_writer == NULL) {
                int64_t count;
                if (rrn_find_and_remove(rrl, tag)) {
                        count = zfs_refcount_remove(
                            &rrl->rr_linked_rcount, tag);
                } else {
                        ASSERT(!rrl->rr_track_all);
                        count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);
                }
                if (count == 0)
                        cv_broadcast(&rrl->rr_cv);
        } else {
                ASSERT(rrl->rr_writer == curthread);
                ASSERT(zfs_refcount_is_zero(&rrl->rr_anon_rcount) &&
                    zfs_refcount_is_zero(&rrl->rr_linked_rcount));
                rrl->rr_writer = NULL;
                cv_broadcast(&rrl->rr_cv);
        }
        mutex_exit(&rrl->rr_lock);
}

/*
 * If the lock was created with track_all, rrw_held(RW_READER) will return
 * B_TRUE iff the current thread has the lock for reader.  Otherwise it may
 * return B_TRUE if any thread has the lock for reader.
 */
boolean_t
rrw_held(rrwlock_t *rrl, krw_t rw)
{
        boolean_t held;

        mutex_enter(&rrl->rr_lock);
        if (rw == RW_WRITER) {
                held = (rrl->rr_writer == curthread);
        } else {
                held = (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
                    rrn_find(rrl) != NULL);
        }
        mutex_exit(&rrl->rr_lock);

        return (held);
}
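
/*
 * TSD destructor, invoked when a thread exits.  A thread must drop all of
 * its re-entrant read locks before terminating; if any rrw_node_t is still
 * on the thread's TSD list at exit, a hold was leaked and we panic.
 */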
void
rrw_tsd_destroy(void *arg)
{
        rrw_node_t *rn = arg;
        if (rn != NULL) {
                panic("thread %p terminating with rrw lock %p held",
                    (void *)curthread, (void *)rn->rn_rrl);
        }
}

/*
 * A reader-mostly lock implementation, tuning the above reader/writer locks
 * for highly parallel read acquisitions, while pessimizing writes.
 *
 * The idea is to split a single busy lock into an array of locks, so that
 * each reader can lock only one of them for read, depending on the result
 * of a simple hash function.  That proportionally reduces lock congestion.
 * A writer, at the same time, has to sequentially acquire write on all the
 * locks.  That makes write acquisition proportionally slower, but in places
 * where it is used (filesystem unmount) performance is not critical.
 *
 * All the functions below are direct wrappers around the functions above.
 */
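
/*
 * A typical pattern (hypothetical caller, for illustration only): many
 * frequent readers and a rare exclusive writer such as unmount:
 *
 *	rrmlock_t lk;
 *
 *	rrm_init(&lk, B_FALSE);
 *	rrm_enter(&lk, RW_READER, FTAG);	(cheap; touches only one lock)
 *	rrm_exit(&lk, FTAG);			(same thread and tag required)
 *	rrm_enter(&lk, RW_WRITER, FTAG);	(takes all RRM_NUM_LOCKS locks)
 *	rrm_exit(&lk, FTAG);
 *	rrm_destroy(&lk);
 */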

void
rrm_init(rrmlock_t *rrl, boolean_t track_all)
{
        int i;

        for (i = 0; i < RRM_NUM_LOCKS; i++)
                rrw_init(&rrl->locks[i], track_all);
}

void
rrm_destroy(rrmlock_t *rrl)
{
        int i;

        for (i = 0; i < RRM_NUM_LOCKS; i++)
                rrw_destroy(&rrl->locks[i]);
}

void
rrm_enter(rrmlock_t *rrl, krw_t rw, const void *tag)
{
        if (rw == RW_READER)
                rrm_enter_read(rrl, tag);
        else
                rrm_enter_write(rrl);
}

/*
 * This maps the current thread to a specific lock.  Note that the lock
 * must be released by the same thread that acquired it.  We do this
 * mapping by taking the thread pointer mod a prime number.  We examine
 * only the low 32 bits of the thread pointer, because 32-bit division
 * is faster than 64-bit division, and the high 32 bits have little
 * entropy anyway.
 */
#define RRM_TD_LOCK()   (((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)
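
/*
 * For example, assuming RRM_NUM_LOCKS is a small prime such as 17 (see
 * sys/rrwlock.h for the actual value), a curthread pointer whose low 32 bits
 * are 0xdeadbeef maps to index 0xdeadbeef % 17 == 8, so every read
 * acquisition and release by that thread goes through rrl->locks[8].
 */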

void
rrm_enter_read(rrmlock_t *rrl, const void *tag)
{
        rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
}

void
rrm_enter_write(rrmlock_t *rrl)
{
        int i;

        for (i = 0; i < RRM_NUM_LOCKS; i++)
                rrw_enter_write(&rrl->locks[i]);
}

void
rrm_exit(rrmlock_t *rrl, const void *tag)
{
        int i;

        if (rrl->locks[0].rr_writer == curthread) {
                for (i = 0; i < RRM_NUM_LOCKS; i++)
                        rrw_exit(&rrl->locks[i], tag);
        } else {
                rrw_exit(&rrl->locks[RRM_TD_LOCK()], tag);
        }
}

boolean_t
rrm_held(rrmlock_t *rrl, krw_t rw)
{
        if (rw == RW_WRITER) {
                return (rrw_held(&rrl->locks[0], rw));
        } else {
                return (rrw_held(&rrl->locks[RRM_TD_LOCK()], rw));
        }
}