/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes to mind is
 * support for growing the blocksize).
 *
 * Interface
 * ---------
 * Defined in zfs_rlock.h but essentially:
 *	rl = zfs_range_lock(zp, off, len, lock_type);
 *	zfs_range_unlock(rl);
 *	zfs_range_reduce(rl, off, len);
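 *
 * A minimal usage sketch (zp, off and len are assumed to be supplied by
 * the caller; the flow is illustrative, not lifted from a real caller):
 *
 *	rl_t *rl = zfs_range_lock(zp, off, len, RL_READER);
 *	(access the file data in [off, off + len))
 *	zfs_range_unlock(rl);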
 *
 * AVL tree
 * --------
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * Common case
 * -----------
 * The (hopefully) usual case is of no overlaps or contention for locks.
 * On entry to zfs_range_lock() an rl_t is allocated; the tree is searched
 * and, if no overlap is found, *this* rl_t is placed in the tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The AVL code only allows one node at a particular offset. Also it's very
 * inefficient to search through all previous entries looking for overlaps
 * (because the very 1st entry in the ordered list might be at offset 0 but
 * cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks are created for the non-overlapping ranges.
 * The reference counts are adjusted accordingly.
 * Meanwhile, the original lock is kept around (this is the caller's handle)
 * and its offset and length are used when releasing the lock.
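 *
 * As a worked example (the offsets are illustrative only): if one reader
 * holds [0, 100) and a second reader then locks [50, 150), the tree ends
 * up holding three proxies: [0, 50) with r_cnt == 1, [50, 100) with
 * r_cnt == 2 and [100, 150) with r_cnt == 1, while both original rl_t
 * handles stay outside the tree and are used to release their ranges.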
 *
 * Thread processing
 * -----------------
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each rl_t.
 * If a writer (or reader) can't get a range it initialises the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
 * and waits on that cv. When a thread unlocks that range it wakes up all
 * writers then all readers before destroying the lock.
 *
 * Append mode writes
 * ------------------
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, and the lock type converted from RL_APPEND to
 * RL_WRITER and the range locked.
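 *
 * A sketch of the append case (woff and n are illustrative caller
 * variables); the returned lock's r_off holds the end-of-file offset
 * chosen under the mutex:
 *
 *	rl_t *rl = zfs_range_lock(zp, 0, n, RL_APPEND);
 *	woff = rl->r_off;	(write the n bytes starting at woff)
 *	zfs_range_unlock(rl);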
 *
 * Grow block size handling
 * ------------------------
 * ZFS supports multiple block sizes, currently up to 128K. The smallest
 * block size is used for the file which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, then later the caller will reduce the lock
 * range to just the range to be written using zfs_range_reduce().
 */
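
/*
 * A sketch of the grow-then-reduce flow described above; whether the
 * blocksize must grow is decided inside zfs_range_lock_writer(), and the
 * caller-side steps shown here are illustrative:
 *
 *	rl_t *rl = zfs_range_lock(zp, off, len, RL_WRITER);
 *	if (rl->r_len == UINT64_MAX) {
 *		(grow the blocksize as needed)
 *		zfs_range_reduce(rl, off, len);
 *	}
 *	(perform the write)
 *	zfs_range_unlock(rl);
 */
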
#include <sys/zfs_rlock.h>

/*
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_writer(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl;
	avl_index_t where;
	uint64_t end_size;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	for (;;) {
		/*
		 * Range locking is also used by zvol and uses a
		 * dummied up znode. However, for zvol, we don't need to
		 * append or grow blocksize, and besides we don't have
		 * a z_phys or z_zfsvfs - so skip that processing.
		 *
		 * Yes, this is ugly, and would be solved by not handling
		 * grow or append in range lock code. If that was done then
		 * we could make the range locking code generically available
		 * to other non-zfs consumers.
		 */
		if (zp->z_vnode) { /* caller is ZPL */
			/*
			 * If in append mode pick up the current end of file.
			 * This is done under z_range_lock to avoid races.
			 */
			if (new->r_type == RL_APPEND)
				new->r_off = zp->z_phys->zp_size;

			/*
			 * If we need to grow the block size then grab the
			 * whole file range. This is also done under
			 * z_range_lock to avoid races.
			 */
			end_size = MAX(zp->z_phys->zp_size, new->r_off + len);
			if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
			    zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
				new->r_off = 0;
				new->r_len = UINT64_MAX;
			}
		}

		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(tree) == 0) {
			new->r_type = RL_WRITER; /* convert to writer */
			avl_add(tree, new);
			return;
		}

		/*
		 * Look for any locks in the range.
		 */
		rl = avl_find(tree, new, &where);
		if (rl)
			goto wait; /* already locked at same offset */

		rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
		if (rl && (rl->r_off < new->r_off + new->r_len))
			goto wait;

		rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
		if (rl && rl->r_off + rl->r_len > new->r_off)
			goto wait;

		new->r_type = RL_WRITER; /* convert possible RL_APPEND */
		avl_insert(tree, new, where);
		return;
wait:
		if (!rl->r_write_wanted) {
			cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
			rl->r_write_wanted = B_TRUE;
		}
		cv_wait(&rl->r_wr_cv, &zp->z_range_lock);

		/* reset to original */
		new->r_off = off;
		new->r_len = len;
	}
}

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static rl_t *
zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
{
	rl_t *proxy;

	if (rl->r_proxy)
		return (rl); /* already a proxy */

	ASSERT3U(rl->r_cnt, ==, 1);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);
	avl_remove(tree, rl);
	rl->r_cnt = 0;

	/* create a proxy range lock */
	proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	proxy->r_off = rl->r_off;
	proxy->r_len = rl->r_len;
	proxy->r_cnt = 1;
	proxy->r_type = RL_READER;
	proxy->r_proxy = B_TRUE;
	proxy->r_write_wanted = B_FALSE;
	proxy->r_read_wanted = B_FALSE;
	avl_add(tree, proxy);

	return (proxy);
}

/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
 */
static rl_t *
zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
{
	rl_t *front, *rear;

	ASSERT3U(rl->r_len, >, 1);
	ASSERT3U(off, >, rl->r_off);
	ASSERT3U(off, <, rl->r_off + rl->r_len);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);

	/* create the rear proxy range lock */
	rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rear->r_off = off;
	rear->r_len = rl->r_off + rl->r_len - off;
	rear->r_cnt = rl->r_cnt;
	rear->r_type = RL_READER;
	rear->r_proxy = B_TRUE;
	rear->r_write_wanted = B_FALSE;
	rear->r_read_wanted = B_FALSE;

	front = zfs_range_proxify(tree, rl);
	front->r_len = off - rl->r_off;

	avl_insert_here(tree, rear, front, AVL_AFTER);
	return (front);
}

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
	rl_t *rl;

	ASSERT(len);
	rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rl->r_off = off;
	rl->r_len = len;
	rl->r_cnt = 1;
	rl->r_type = RL_READER;
	rl->r_proxy = B_TRUE;
	rl->r_write_wanted = B_FALSE;
	rl->r_read_wanted = B_FALSE;
	avl_add(tree, rl);
}

static void
zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
{
	rl_t *next;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * prev arrives either:
	 * - pointing to an entry at the same offset
	 * - pointing to the entry with the closest previous offset whose
	 *   range may overlap with the new range
	 * - null, if there were no ranges starting before the new one
	 */
	if (prev) {
		if (prev->r_off + prev->r_len <= off) {
			prev = NULL;
		} else if (prev->r_off != off) {
			/*
			 * convert to proxy if needed then
			 * split this entry and bump ref count
			 */
			prev = zfs_range_split(tree, prev, off);
			prev = AVL_NEXT(tree, prev); /* move to rear range */
		}
	}
	ASSERT((prev == NULL) || (prev->r_off == off));

	if (prev)
		next = prev;
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);

	if (next == NULL || off + len <= next->r_off) {
		/* no overlaps, use the original new rl_t in the tree */
		avl_insert(tree, new, where);
		return;
	}

	if (off < next->r_off) {
		/* Add a proxy for initial range before the overlap */
		zfs_range_new_proxy(tree, off, next->r_off - off);
	}

	new->r_cnt = 0; /* will use proxies in tree */
	/*
	 * We now search forward through the ranges, until we go past the end
	 * of the new range. For each entry we make it a proxy if it
	 * isn't already, then bump its reference count. If there's any
	 * gaps between the ranges then we create a new proxy range.
	 */
	for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			break;
		if (prev && prev->r_off + prev->r_len < next->r_off) {
			/* there's a gap */
			ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
			zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
			    next->r_off - (prev->r_off + prev->r_len));
		}
		if (off + len == next->r_off + next->r_len) {
			/* exact overlap with end */
			next = zfs_range_proxify(tree, next);
			next->r_cnt++;
			return;
		}
		if (off + len < next->r_off + next->r_len) {
			/* new range ends in the middle of this block */
			next = zfs_range_split(tree, next, off + len);
			next->r_cnt++;
			return;
		}
		ASSERT3U(off + len, >, next->r_off + next->r_len);
		next = zfs_range_proxify(tree, next);
		next->r_cnt++;
	}

	/* Add the remaining end range. */
	zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
	    (off + len) - (prev->r_off + prev->r_len));
}

/*
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_reader(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *prev, *next;
	avl_index_t where;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * Look for any writer locks in the range.
	 */
retry:
	prev = avl_find(tree, new, &where);
	if (prev == NULL)
		prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);

	/*
	 * Check the previous range for a writer lock overlap.
	 */
	if (prev && (off < prev->r_off + prev->r_len)) {
		if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
			if (!prev->r_read_wanted) {
				cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL);
				prev->r_read_wanted = B_TRUE;
			}
			cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len < prev->r_off + prev->r_len)
			goto got_lock;
	}

	/*
	 * Search through the following ranges to see if there's
	 * any write lock overlap.
	 */
	if (prev)
		next = AVL_NEXT(tree, prev);
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
	for (; next; next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			goto got_lock;
		if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
			if (!next->r_read_wanted) {
				cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL);
				next->r_read_wanted = B_TRUE;
			}
			cv_wait(&next->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len <= next->r_off + next->r_len)
			goto got_lock;
	}

got_lock:
	/*
	 * Add the read lock, which may involve splitting existing
	 * locks and bumping ref counts (r_cnt).
	 */
	zfs_range_add_reader(tree, new, prev, where);
}

/*
 * Lock a range (offset, length) as either shared (RL_READER)
 * or exclusive (RL_WRITER). Returns the range lock structure
 * for later unlocking or reducing the range (if the entire file
 * was previously locked as RL_WRITER).
 */
rl_t *
zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
{
	rl_t *new;

	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

	new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	new->r_zp = zp;
	new->r_off = off;
	new->r_len = len;
	new->r_cnt = 1; /* assume it's going to be in the tree */
	new->r_type = type;
	new->r_proxy = B_FALSE;
	new->r_write_wanted = B_FALSE;
	new->r_read_wanted = B_FALSE;

	mutex_enter(&zp->z_range_lock);
	if (type == RL_READER) {
		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(&zp->z_range_avl) == 0)
			avl_add(&zp->z_range_avl, new);
		else
			zfs_range_lock_reader(zp, new);
	} else
		zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
	mutex_exit(&zp->z_range_lock);
	return (new);
}

/*
 * Unlock a reader lock
 */
static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl, *next;
	uint64_t len;

	/*
	 * The common case is when the remove entry is in the tree
	 * (cnt == 1) meaning there have been no other reader locks
	 * overlapping with this one. Otherwise the remove entry will have
	 * been removed from the tree and replaced by proxies (one or
	 * more ranges mapping to the entire range).
	 */
	if (remove->r_cnt == 1) {
		avl_remove(tree, remove);
		if (remove->r_write_wanted) {
			cv_broadcast(&remove->r_wr_cv);
			cv_destroy(&remove->r_wr_cv);
		}
		if (remove->r_read_wanted) {
			cv_broadcast(&remove->r_rd_cv);
			cv_destroy(&remove->r_rd_cv);
		}
	} else {
		ASSERT3U(remove->r_cnt, ==, 0);
		ASSERT3U(remove->r_write_wanted, ==, 0);
		ASSERT3U(remove->r_read_wanted, ==, 0);
		/*
		 * Find start proxy representing this reader lock,
		 * then decrement ref count on all proxies
		 * that make up this range, freeing them as needed.
		 */
		rl = avl_find(tree, remove, NULL);
		ASSERT(rl);
		ASSERT(rl->r_cnt);
		ASSERT(rl->r_type == RL_READER);
		for (len = remove->r_len; len != 0; rl = next) {
			len -= rl->r_len;
			if (len) {
				next = AVL_NEXT(tree, rl);
				ASSERT(next);
				ASSERT(rl->r_off + rl->r_len == next->r_off);
				ASSERT(next->r_cnt);
				ASSERT(next->r_type == RL_READER);
			}
			rl->r_cnt--;
			if (rl->r_cnt == 0) {
				avl_remove(tree, rl);
				if (rl->r_write_wanted) {
					cv_broadcast(&rl->r_wr_cv);
					cv_destroy(&rl->r_wr_cv);
				}
				if (rl->r_read_wanted) {
					cv_broadcast(&rl->r_rd_cv);
					cv_destroy(&rl->r_rd_cv);
				}
				kmem_free(rl, sizeof (rl_t));
			}
		}
	}
	kmem_free(remove, sizeof (rl_t));
}

/*
 * Unlock range and destroy range lock structure.
 */
void
zfs_range_unlock(rl_t *rl)
{
	znode_t *zp = rl->r_zp;

	ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
	ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
	ASSERT(!rl->r_proxy);

	mutex_enter(&zp->z_range_lock);
	if (rl->r_type == RL_WRITER) {
		/* writer locks can't be shared or split */
		avl_remove(&zp->z_range_avl, rl);
		mutex_exit(&zp->z_range_lock);
		if (rl->r_write_wanted) {
			cv_broadcast(&rl->r_wr_cv);
			cv_destroy(&rl->r_wr_cv);
		}
		if (rl->r_read_wanted) {
			cv_broadcast(&rl->r_rd_cv);
			cv_destroy(&rl->r_rd_cv);
		}
		kmem_free(rl, sizeof (rl_t));
	} else {
		/*
		 * lock may be shared, let zfs_range_unlock_reader()
		 * release the lock and free the rl_t
		 */
		zfs_range_unlock_reader(zp, rl);
		mutex_exit(&zp->z_range_lock);
	}
}

/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
 */
void
zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
{
	znode_t *zp = rl->r_zp;

	/* Ensure there are no other locks */
	ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
	ASSERT(rl->r_off == 0);
	ASSERT(rl->r_type == RL_WRITER);
	ASSERT(!rl->r_proxy);
	ASSERT3U(rl->r_len, ==, UINT64_MAX);
	ASSERT3U(rl->r_cnt, ==, 1);

	mutex_enter(&zp->z_range_lock);
	rl->r_off = off;
	rl->r_len = len;
	mutex_exit(&zp->z_range_lock);
	if (rl->r_write_wanted)
		cv_broadcast(&rl->r_wr_cv);
	if (rl->r_read_wanted)
		cv_broadcast(&rl->r_rd_cv);
}

/*
 * AVL comparison function used to order range locks.
 * Locks are ordered on the start offset of the range.
 */
int
zfs_range_compare(const void *arg1, const void *arg2)
{
	const rl_t *rl1 = arg1;
	const rl_t *rl2 = arg2;

	if (rl1->r_off > rl2->r_off)
		return (1);
	if (rl1->r_off < rl2->r_off)
		return (-1);
	return (0);
}