/*	$NetBSD: vfs_lockf.c,v 1.71 2009/06/10 22:34:35 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.71 2009/06/10 22:34:35 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/atomic.h>
#include <sys/kauth.h>
#include <sys/uidinfo.h>
/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the vnode structure.  Locks are sorted by the starting byte of the lock for
 * efficiency.
 *
 * lf_next is used for two purposes, depending on whether the lock is
 * being held, or is in conflict with an existing lock.  If this lock
 * is held, it indicates the next lock on the same vnode.
 * For pending locks, if lock->lf_next is non-NULL, then lock->lf_block
 * must be queued on the lf_blkhd TAILQ of lock->lf_next.
 */
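
/*
 * For illustration: suppose locks A and B are granted on a vnode and
 * request C is blocked behind B.  The links then look like
 *
 *	*head -> A -> B -> NULL		(granted list, chained by lf_next)
 *	B.lf_blkhd:  { C }		(C queued via its lf_block entry)
 *	C.lf_next = B			(the lock C is waiting on)
 *
 * so lf_next chains the per-vnode list for granted locks but names the
 * blocking lock for a pending request.
 */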
TAILQ_HEAD(locklist, lockf);

struct lockf {
	kcondvar_t lf_cv;	 /* Signalling */
	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	 /* The byte # of the start of the lock */
	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF) */
	void	*lf_id;		 /* process or file description holding lock */
	struct	lockf **lf_head; /* Back pointer to the head of lockf list */
	struct	lockf *lf_next;	 /* Next lock on this vnode, or blocking lock */
	struct	locklist lf_blkhd; /* List of requests blocked on this lock */
	TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */
	uid_t	lf_uid;		 /* User ID responsible */
};
/* Maximum length of sleep chains to traverse to try and detect deadlock. */
#define MAXDEPTH 50

static pool_cache_t lockf_cache;
static kmutex_t *lockf_lock;
static char lockstr[] = "lockf";

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;
/*
 * Misc cleanups: "void *id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cutoff.
 */
int maxlocksperuid = 1024;
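
/*
 * For example, with the default maxlocksperuid = 1024, a non-root uid
 * already holding 1500 lockf records is refused by lf_alloc(1) (over the
 * limit), still served by lf_alloc(2) (under the doubled limit of 2048),
 * and always served by lf_alloc(0).  Root (uid 0) is never refused.
 */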
lf_print(const char *tag, struct lockf *lock)

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file %p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
lf_printlist(const char *tag, struct lockf *lock)

	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file %p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("; proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("; file %p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");

#endif /* LOCKF_DEBUG */
/*
 * 3 options for allowfail.
 * 0 - always allocate. 1 - cutoff at limit. 2 - cutoff at double limit.
 */
static struct lockf *
lf_alloc(int allowfail)

	const uid_t uid = kauth_cred_geteuid(kauth_cred_get());

	lcnt = atomic_inc_ulong_nv(&uip->ui_lockcnt);
	if (uid && allowfail && lcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		atomic_dec_ulong(&uip->ui_lockcnt);

	lock = pool_cache_get(lockf_cache, PR_WAITOK);
lf_free(struct lockf *lock)

	uip = uid_find(lock->lf_uid);
	atomic_dec_ulong(&uip->ui_lockcnt);
	pool_cache_put(lockf_cache, lock);
lf_ctor(void *arg, void *obj, int flag)

	cv_init(&lock->lf_cv, lockstr);

lf_dtor(void *arg, void *obj)

	cv_destroy(&lock->lf_cv);
/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
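
/*
 * Callers that need every overlap therefore invoke lf_findoverlap() in a
 * loop, continuing from the *prev / *overlap position it hands back; see
 * the while loops in lf_clearlock() and lf_getblock() below.
 */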
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)

#ifdef LOCKF_DEBUG
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;

		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;

#ifdef LOCKF_DEBUG
		lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
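
		/*
		 * Rough picture of the overlap cases, with "lock" the request
		 * and "lf" the existing lock being checked (illustration
		 * only):
		 *
		 *	1:  lock   |=====|        2:  lock     |==|
		 *	    lf     |=====|            lf     |======|
		 *	3:  lock   |======|       4:  lock      |====|
		 *	    lf       |==|             lf     |====|
		 *	5:  lock   |====|
		 *	    lf        |====|
		 */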
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
#ifdef LOCKF_DEBUG
			printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;

		if ((lf->lf_start == start) && (lf->lf_end == end)) {
#ifdef LOCKF_DEBUG
			printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */

		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
#ifdef LOCKF_DEBUG
			printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */

		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
#ifdef LOCKF_DEBUG
			printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */

		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
#ifdef LOCKF_DEBUG
			printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */

		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
#ifdef LOCKF_DEBUG
			printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */

	panic("lf_findoverlap: default");
/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
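
/*
 * For example, if lock1 covers bytes 0..999 and lock2 covers 400..599,
 * lock1 is trimmed to 0..399 and *sparelock is used to build a new lock
 * for 600..999, giving three pieces; if lock2 shares lock1's start or
 * end, only two pieces result and the spare lock is not consumed.
 */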
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)

	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;

	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;

	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = *sparelock;

	cv_destroy(&splitlock->lf_cv);
	memcpy(splitlock, lock1, sizeof(*splitlock));
	cv_init(&splitlock->lf_cv, lockstr);

	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;

	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
lf_wakelock(struct lockf *listhead)

	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;

		lf_print("lf_wakelock: awakening", wakelock);

		cv_broadcast(&wakelock->lf_cv);
/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
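
/*
 * For example, unlocking bytes 400..599 of a held 0..999 lock is the
 * "overlap contains lock" case: the held lock is split (consuming the
 * caller-supplied sparelock) into 0..399 and 600..999, and any requests
 * queued on it are woken so they can retry against the new layout.
 */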
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)

	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;

	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
#ifdef LOCKF_DEBUG
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;

			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;

#ifdef LOCKF_DEBUG
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
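
#if 0
/*
 * Illustrative sketch, kept disabled: the conflict rule applied inside
 * lf_getblock(), written out as a stand-alone predicate.  (The helper
 * name is made up; lf_findoverlap(..., OTHERS, ...) has already filtered
 * out our own locks, so an overlap blocks the request exactly when at
 * least one of the two locks is exclusive -- two shared locks coexist.)
 */
static int
lf_blocks_us(const struct lockf *request, const struct lockf *overlap)
{

	return request->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK;
}
#endif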
static struct lockf *
lf_getblock(struct lockf *lock)

	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
/*
 * Set a byte-range lock.
 */
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    kmutex_t *interlock)

	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	int ovcase, needtolink, error;

#ifdef LOCKF_DEBUG
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {

		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
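
		/*
		 * For instance, if we hold a POSIX lock on bytes 0..99 and
		 * now wait for 100..199 held by process B, while B is itself
		 * asleep waiting for our 0..99, following the wait channels
		 * from our blocker leads back to ourselves and the request is
		 * refused rather than queued.
		 */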
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			volatile const struct lockf *waitblock;

			p = (struct proc *)block->lf_id;

			while (i++ < maxlockdepth) {
				mutex_enter(p->p_lock);
				if (p->p_nlwps > 1) {
					mutex_exit(p->p_lock);

				wlwp = LIST_FIRST(&p->p_lwps);

				if (wlwp->l_wchan == NULL ||
				    wlwp->l_wmesg != lockstr) {
					mutex_exit(p->p_lock);

				waitblock = wlwp->l_wchan;

				mutex_exit(p->p_lock);
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				p = (struct proc *)waitblock->lf_id;

			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
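
		/*
		 * For example, two processes that each hold a shared flock(2)
		 * lock and both request an upgrade to exclusive would sleep on
		 * each other forever if they kept their shared locks while
		 * waiting (flock locks are not covered by the deadlock check
		 * above); dropping our own shared lock first avoids that.
		 */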
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;

		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = cv_wait_sig(&lock->lf_cv, interlock);

		/*
		 * We may have been awoken by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NULL).
		 */
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */

		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);

			block = overlap->lf_next;

		/*
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		case 0: /* no overlap */

			lock->lf_next = overlap;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;

			lock = overlap; /* for debug output below */

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {

				lock = overlap; /* for debug output below */

			if (overlap->lf_start == lock->lf_start) {

				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;

				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);

				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);

			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;

				*prev = overlap->lf_next;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			lock->lf_next = overlap;

			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
lf_getlock(struct lockf *lock, struct flock *fl)

#ifdef LOCKF_DEBUG
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;

		fl->l_type = F_UNLCK;
/*
 * Do an advisory lock operation.
 */
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)

	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	kmutex_t *interlock = lockf_lock;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */

	case SEEK_END:
		start = size + fl->l_start;

		end = start + fl->l_len - 1;

		/* lockf() allows -ve lengths */
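
	/*
	 * For example, on a 1000-byte file a request with l_whence ==
	 * SEEK_END, l_start == -10 and l_len == 10 yields start = size +
	 * l_start = 990 and end = start + l_len - 1 = 999, i.e. the last
	 * ten bytes.  A zero l_len means "through end of file" and is
	 * carried internally as end = -1 (cf. the lf_end field comment).
	 */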
	/*
	 * Allocate locks before acquiring the interlock.  We need two
	 * locks in the worst case.
	 *
	 * XXX For F_UNLCK case, we can re-use the lock.
	 */
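
	/*
	 * The second (spare) lock covers the case where the request carves
	 * the middle out of a single existing lock -- e.g. unlocking
	 * 400..599 of a held 0..999 region -- which leaves two pieces and
	 * so needs one lockf record beyond the request itself.
	 */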
	if ((ap->a_flags & F_FLOCK) == 0) {
		/*
		 * Byte-range lock might need one more lock.
		 */
		sparelock = lf_alloc(0);
		if (sparelock == NULL) {

	if (start == 0 || end == -1) {

	mutex_enter(interlock);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;

	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);

	lock->lf_id = ap->a_id;

	/*
	 * Do the requested operation.
	 */

		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock freed it */

		error = lf_clearlock(lock, &sparelock);

		error = lf_getlock(lock, fl);

	mutex_exit(interlock);
/*
 * Initialize subsystem.   XXX We use a global lock.  This could be the
 * vnode interlock, but the deadlock detection code may need to inspect
 * locks belonging to other files.
 */

	lockf_cache = pool_cache_init(sizeof(struct lockf), 0, 0, 0, "lockf",
	    NULL, IPL_NONE, lf_ctor, lf_dtor, NULL);
	lockf_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
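
#if 0
/*
 * Usage sketch, kept disabled: a file system's VOP_ADVLOCK implementation
 * normally just forwards to lf_advlock(), passing the address of its
 * per-inode lock list head and the current file size.  The names below
 * (xxx_advlock, struct inode, i_lockf, i_size, VTOI) follow the ufs
 * convention and are an assumed example only.
 */
int
xxx_advlock(void *v)
{
	struct vop_advlock_args *ap = v;
	struct inode *ip = VTOI(ap->a_vp);

	return lf_advlock(ap, &ip->i_lockf, ip->i_size);
}
#endif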