 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes to mind is
 * support for growing the blocksize).
 *
 * Interface
 * ---------
 * Defined in zfs_rlock.h but essentially:
 *        rl = zfs_range_lock(zp, off, len, lock_type);
 *        zfs_range_unlock(rl);
 *        zfs_range_reduce(rl, off, len);
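 *
 * For illustration only, a typical shared-read caller might look roughly
 * like this (error handling and the actual I/O are omitted; off and len
 * are just placeholder names):
 *
 *        rl = zfs_range_lock(zp, off, len, RL_READER);
 *        ... read the range [off, off + len) ...
 *        zfs_range_unlock(rl);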
 *
 * AVL tree
 * --------
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * Common case
 * -----------
 * The (hopefully) usual case is of no overlaps or contention for
 * locks. On entry to zfs_range_lock() a rl_t is allocated; the tree is
 * searched and no overlap is found, so *this* rl_t is placed in the tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The AVL code only allows one node at a particular offset. It is also very
 * inefficient to search through all previous entries looking for overlaps
 * (because the very first entry in the ordered list might be at offset 0
 * but cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks are created for the non-overlapping ranges.
 * The reference counts are adjusted accordingly.
 * Meanwhile, the original lock is kept around (this is the caller's handle)
 * and its offset and length are used when releasing the lock.
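 *
 * As a worked example (the offsets are purely illustrative): if reader A
 * locks [0, 100) while the tree is empty, A's rl_t goes into the tree with
 * r_cnt == 1. If reader B then locks [50, 150), A's entry is replaced by
 * proxies and the tree ends up holding:
 *
 *        [0, 50)     r_cnt = 1
 *        [50, 100)   r_cnt = 2
 *        [100, 150)  r_cnt = 1
 *
 * A and B keep their original rl_t handles (offset and length unchanged),
 * which zfs_range_unlock_reader() later uses to find and decrement exactly
 * these proxies.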
 *
 * Thread-wakeup
 * -------------
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each rl_t.
 * If a writer (or reader) can't get a range it initialises the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
 * and waits on that cv. When a thread unlocks that range it wakes up all
 * writers then all readers before destroying the lock.
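 *
 * For example, the writer-side wait in zfs_range_lock_writer() below
 * follows this pattern (shown here only as a sketch):
 *
 *        if (!rl->r_write_wanted) {
 *                cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
 *                rl->r_write_wanted = B_TRUE;
 *        }
 *        cv_wait(&rl->r_wr_cv, &zp->z_range_lock);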
 *
 * Append mode writes
 * ------------------
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, the lock type is converted from RL_APPEND to
 * RL_WRITER, and the range is locked.
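 *
 * A sketch of an append-mode caller (nbytes and woff are illustrative
 * names, not part of this interface):
 *
 *        rl = zfs_range_lock(zp, 0, nbytes, RL_APPEND);
 *        woff = rl->r_off;    /* end of file, sampled under the lock */
 *        ... write nbytes starting at woff ...
 *        zfs_range_unlock(rl);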
 *
 * Grow block handling
 * -------------------
 * ZFS supports multiple block sizes, currently up to 128K. The smallest
 * block size is used for the file, which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, then later the caller will reduce the lock
 * range to just the range to be written using zfs_range_reduce().
 */
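
/*
 * A minimal sketch of that flow, assuming a caller along the lines of
 * zfs_write() (the block-size growth itself is elided):
 *
 *        rl = zfs_range_lock(zp, off, len, RL_WRITER);
 *        if (rl->r_len == UINT64_MAX) {
 *                ... grow the file's block size ...
 *                zfs_range_reduce(rl, off, len);
 *        }
 *        ... write the range [off, off + len) ...
 *        zfs_range_unlock(rl);
 */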

#include <sys/zfs_rlock.h>

/*
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_writer(znode_t *zp, rl_t *new)
{
        avl_tree_t *tree = &zp->z_range_avl;
        rl_t *rl;
        avl_index_t where;
        uint64_t end_size;
        uint64_t off = new->r_off;
        uint64_t len = new->r_len;

        for (;;) {
                /*
                 * Range locking is also used by zvol and uses a
                 * dummied up znode. However, for zvol, we don't need to
                 * append or grow blocksize, and besides we don't have
                 * a "sa" data or z_zfsvfs - so skip that processing.
                 *
                 * Yes, this is ugly, and would be solved by not handling
                 * grow or append in range lock code. If that was done then
                 * we could make the range locking code generically available
                 * to other non-zfs consumers.
                 */
                if (zp->z_vnode) { /* caller is ZPL */
                        /*
                         * If in append mode pick up the current end of file.
                         * This is done under z_range_lock to avoid races.
                         */
                        if (new->r_type == RL_APPEND)
                                new->r_off = zp->z_size;

                        /*
                         * If we need to grow the block size then grab the whole
                         * file range. This is also done under z_range_lock to
                         * avoid races.
                         */
                        end_size = MAX(zp->z_size, new->r_off + len);
                        if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
                            zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
                                new->r_off = 0;
                                new->r_len = UINT64_MAX;
                        }
                }

                /*
                 * First check for the usual case of no locks
                 */
                if (avl_numnodes(tree) == 0) {
                        new->r_type = RL_WRITER; /* convert to writer */
                        avl_add(tree, new);
                        return;
                }

                /*
                 * Look for any locks in the range.
                 */
                rl = avl_find(tree, new, &where);
                if (rl)
                        goto wait; /* already locked at same offset */

                rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
                if (rl && (rl->r_off < new->r_off + new->r_len))
                        goto wait;

                rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
                if (rl && rl->r_off + rl->r_len > new->r_off)
                        goto wait;

                new->r_type = RL_WRITER; /* convert possible RL_APPEND */
                avl_insert(tree, new, where);
                return;
wait:
                if (!rl->r_write_wanted) {
                        cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
                        rl->r_write_wanted = B_TRUE;
                }
                cv_wait(&rl->r_wr_cv, &zp->z_range_lock);

                /* reset to original */
                new->r_off = off;
                new->r_len = len;
        }
}

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static rl_t *
zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
{
        rl_t *proxy;

        if (rl->r_proxy)
                return (rl); /* already a proxy */

        ASSERT3U(rl->r_cnt, ==, 1);
        ASSERT(rl->r_write_wanted == B_FALSE);
        ASSERT(rl->r_read_wanted == B_FALSE);
        avl_remove(tree, rl);
        rl->r_cnt = 0;

        /* create a proxy range lock */
        proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
        proxy->r_off = rl->r_off;
        proxy->r_len = rl->r_len;
        proxy->r_cnt = 1;
        proxy->r_type = RL_READER;
        proxy->r_proxy = B_TRUE;
        proxy->r_write_wanted = B_FALSE;
        proxy->r_read_wanted = B_FALSE;
        avl_add(tree, proxy);

        return (proxy);
}

/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
 */
static rl_t *
zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
{
        rl_t *front, *rear;

        ASSERT3U(rl->r_len, >, 1);
        ASSERT3U(off, >, rl->r_off);
        ASSERT3U(off, <, rl->r_off + rl->r_len);
        ASSERT(rl->r_write_wanted == B_FALSE);
        ASSERT(rl->r_read_wanted == B_FALSE);

        /* create the rear proxy range lock */
        rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
        rear->r_off = off;
        rear->r_len = rl->r_off + rl->r_len - off;
        rear->r_cnt = rl->r_cnt;
        rear->r_type = RL_READER;
        rear->r_proxy = B_TRUE;
        rear->r_write_wanted = B_FALSE;
        rear->r_read_wanted = B_FALSE;

        front = zfs_range_proxify(tree, rl);
        front->r_len = off - rl->r_off;

        avl_insert_here(tree, rear, front, AVL_AFTER);
        return (front);
}

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
        rl_t *rl;

        ASSERT(len);
        rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
        rl->r_off = off;
        rl->r_len = len;
        rl->r_cnt = 1;
        rl->r_type = RL_READER;
        rl->r_proxy = B_TRUE;
        rl->r_write_wanted = B_FALSE;
        rl->r_read_wanted = B_FALSE;
        avl_add(tree, rl);
}

static void
zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
{
        rl_t *next;
        uint64_t off = new->r_off;
        uint64_t len = new->r_len;

        /*
         * prev arrives either:
         * - pointing to an entry at the same offset
         * - pointing to the entry with the closest previous offset whose
         *   range may overlap with the new range
         * - null, if there were no ranges starting before the new one
         */
        if (prev) {
                if (prev->r_off + prev->r_len <= off) {
                        prev = NULL;
                } else if (prev->r_off != off) {
                        /*
                         * convert to proxy if needed then
                         * split this entry and bump ref count
                         */
                        prev = zfs_range_split(tree, prev, off);
                        prev = AVL_NEXT(tree, prev); /* move to rear range */
                }
        }
        ASSERT((prev == NULL) || (prev->r_off == off));

        if (prev)
                next = prev;
        else
                next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);

        if (next == NULL || off + len <= next->r_off) {
                /* no overlaps, use the original new rl_t in the tree */
                avl_insert(tree, new, where);
                return;
        }

        if (off < next->r_off) {
                /* Add a proxy for initial range before the overlap */
                zfs_range_new_proxy(tree, off, next->r_off - off);
        }

        new->r_cnt = 0; /* will use proxies in tree */
        /*
         * We now search forward through the ranges, until we go past the end
         * of the new range. For each entry we make it a proxy if it
         * isn't already, then bump its reference count. If there are any
         * gaps between the ranges then we create a new proxy range.
         */
        for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
                if (off + len <= next->r_off)
                        break;
                if (prev && prev->r_off + prev->r_len < next->r_off) {
                        /* there's a gap */
                        ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
                        zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
                            next->r_off - (prev->r_off + prev->r_len));
                }
                if (off + len == next->r_off + next->r_len) {
                        /* exact overlap with end */
                        next = zfs_range_proxify(tree, next);
                        next->r_cnt++;
                        return;
                }
                if (off + len < next->r_off + next->r_len) {
                        /* new range ends in the middle of this block */
                        next = zfs_range_split(tree, next, off + len);
                        next->r_cnt++;
                        return;
                }
                ASSERT3U(off + len, >, next->r_off + next->r_len);
                next = zfs_range_proxify(tree, next);
                next->r_cnt++;
        }

        /* Add the remaining end range. */
        zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
            (off + len) - (prev->r_off + prev->r_len));
}

/*
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_reader(znode_t *zp, rl_t *new)
{
        avl_tree_t *tree = &zp->z_range_avl;
        rl_t *prev, *next;
        avl_index_t where;
        uint64_t off = new->r_off;
        uint64_t len = new->r_len;

        /*
         * Look for any writer locks in the range.
         */
retry:
        prev = avl_find(tree, new, &where);
        if (prev == NULL)
                prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);

        /*
         * Check the previous range for a writer lock overlap.
         */
        if (prev && (off < prev->r_off + prev->r_len)) {
                if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
                        if (!prev->r_read_wanted) {
                                cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL);
                                prev->r_read_wanted = B_TRUE;
                        }
                        cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
                        goto retry;
                }
                if (off + len < prev->r_off + prev->r_len)
                        goto got_lock;
        }

        /*
         * Search through the following ranges to see if there's
         * any write lock overlap.
         */
        if (prev)
                next = AVL_NEXT(tree, prev);
        else
                next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
        for (; next; next = AVL_NEXT(tree, next)) {
                if (off + len <= next->r_off)
                        goto got_lock;
                if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
                        if (!next->r_read_wanted) {
                                cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL);
                                next->r_read_wanted = B_TRUE;
                        }
                        cv_wait(&next->r_rd_cv, &zp->z_range_lock);
                        goto retry;
                }
                if (off + len <= next->r_off + next->r_len)
                        goto got_lock;
        }

got_lock:
        /*
         * Add the read lock, which may involve splitting existing
         * locks and bumping ref counts (r_cnt).
         */
        zfs_range_add_reader(tree, new, prev, where);
}

/*
 * Lock a range (offset, length) as either shared (RL_READER)
 * or exclusive (RL_WRITER). Returns the range lock structure
 * for later unlocking, or for reducing the range (if the entire
 * file was previously locked as RL_WRITER).
 */
rl_t *
zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
{
        rl_t *new;

        ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

        new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
        new->r_zp = zp;
        new->r_off = off;
        if (len + off < off)    /* overflow */
                len = UINT64_MAX - off;
        new->r_len = len;
        new->r_cnt = 1; /* assume it's going to be in the tree */
        new->r_type = type;
        new->r_proxy = B_FALSE;
        new->r_write_wanted = B_FALSE;
        new->r_read_wanted = B_FALSE;

        mutex_enter(&zp->z_range_lock);
        if (type == RL_READER) {
                /*
                 * First check for the usual case of no locks
                 */
                if (avl_numnodes(&zp->z_range_avl) == 0)
                        avl_add(&zp->z_range_avl, new);
                else
                        zfs_range_lock_reader(zp, new);
        } else
                zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
        mutex_exit(&zp->z_range_lock);
        return (new);
}

/*
 * Unlock a reader lock
 */
static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
        avl_tree_t *tree = &zp->z_range_avl;
        rl_t *rl, *next = NULL;
        uint64_t len;

        /*
         * The common case is when the remove entry is in the tree
         * (cnt == 1) meaning there have been no other reader locks
         * overlapping with this one. Otherwise the remove entry will have
         * been removed from the tree and replaced by proxies (one or
         * more ranges mapping to the entire range).
         */
        if (remove->r_cnt == 1) {
                avl_remove(tree, remove);
                if (remove->r_write_wanted) {
                        cv_broadcast(&remove->r_wr_cv);
                        cv_destroy(&remove->r_wr_cv);
                }
                if (remove->r_read_wanted) {
                        cv_broadcast(&remove->r_rd_cv);
                        cv_destroy(&remove->r_rd_cv);
                }
        } else {
                ASSERT0(remove->r_cnt);
                ASSERT0(remove->r_write_wanted);
                ASSERT0(remove->r_read_wanted);
                /*
                 * Find start proxy representing this reader lock,
                 * then decrement ref count on all proxies
                 * that make up this range, freeing them as needed.
                 */
                rl = avl_find(tree, remove, NULL);
                ASSERT(rl);
                ASSERT(rl->r_cnt);
                ASSERT(rl->r_type == RL_READER);
                for (len = remove->r_len; len != 0; rl = next) {
                        len -= rl->r_len;
                        if (len) {
                                next = AVL_NEXT(tree, rl);
                                ASSERT(next);
                                ASSERT(rl->r_off + rl->r_len == next->r_off);
                                ASSERT(next->r_cnt);
                                ASSERT(next->r_type == RL_READER);
                        }
                        rl->r_cnt--;
                        if (rl->r_cnt == 0) {
                                avl_remove(tree, rl);
                                if (rl->r_write_wanted) {
                                        cv_broadcast(&rl->r_wr_cv);
                                        cv_destroy(&rl->r_wr_cv);
                                }
                                if (rl->r_read_wanted) {
                                        cv_broadcast(&rl->r_rd_cv);
                                        cv_destroy(&rl->r_rd_cv);
                                }
                                kmem_free(rl, sizeof (rl_t));
                        }
                }
        }
        kmem_free(remove, sizeof (rl_t));
}

/*
 * Unlock range and destroy range lock structure.
 */
void
zfs_range_unlock(rl_t *rl)
{
        znode_t *zp = rl->r_zp;

        ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
        ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
        ASSERT(!rl->r_proxy);

        mutex_enter(&zp->z_range_lock);
        if (rl->r_type == RL_WRITER) {
                /* writer locks can't be shared or split */
                avl_remove(&zp->z_range_avl, rl);
                mutex_exit(&zp->z_range_lock);
                if (rl->r_write_wanted) {
                        cv_broadcast(&rl->r_wr_cv);
                        cv_destroy(&rl->r_wr_cv);
                }
                if (rl->r_read_wanted) {
                        cv_broadcast(&rl->r_rd_cv);
                        cv_destroy(&rl->r_rd_cv);
                }
                kmem_free(rl, sizeof (rl_t));
        } else {
                /*
                 * lock may be shared, let zfs_range_unlock_reader()
                 * release the lock and free the rl_t
                 */
                zfs_range_unlock_reader(zp, rl);
                mutex_exit(&zp->z_range_lock);
        }
}

/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
 */
void
zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
{
        znode_t *zp = rl->r_zp;

        /* Ensure there are no other locks */
        ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
        ASSERT(rl->r_off == 0);
        ASSERT(rl->r_type == RL_WRITER);
        ASSERT(!rl->r_proxy);
        ASSERT3U(rl->r_len, ==, UINT64_MAX);
        ASSERT3U(rl->r_cnt, ==, 1);

        mutex_enter(&zp->z_range_lock);
        rl->r_off = off;
        rl->r_len = len;
        mutex_exit(&zp->z_range_lock);
        if (rl->r_write_wanted)
                cv_broadcast(&rl->r_wr_cv);
        if (rl->r_read_wanted)
                cv_broadcast(&rl->r_rd_cv);
}

/*
 * AVL comparison function used to order range locks.
 * Locks are ordered on the start offset of the range.
 */
static int
zfs_range_compare(const void *arg1, const void *arg2)
{
        const rl_t *rl1 = arg1;
        const rl_t *rl2 = arg2;

        if (rl1->r_off > rl2->r_off)
                return (1);
        if (rl1->r_off < rl2->r_off)