/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* Afs-based standard headers */
#include "afs/afs_stats.h"	/* statistics */
#include "afs/afs_cbqueue.h"
#include "afs/nfsclient.h"
#include "afs/afs_osidnlc.h"
#include "afs/unified_afs.h"
/* Static prototypes */
static int HandleGetLock(struct vcache *avc, struct AFS_FLOCK *af,
                         struct vrequest *areq, int clid);
static int GetFlockCount(struct vcache *avc, struct vrequest *areq);
static int lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
                      struct SimpleLocks *alp, int onlymine, int clid);

/* int clid;  * non-zero on SGI, OSF, SunOS, Darwin, xBSD ** XXX ptr type */
#if defined(AFS_SUN5_ENV)
void
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
{
    proc_t *procp = ttoproc(curthread);

    if (slp) {
        slp->sysid = 0;
        slp->pid = procp->p_pid;
    } else {
        flock->l_sysid = 0;
        flock->l_pid = procp->p_pid;
    }
}
#elif defined(AFS_SGI_ENV)
void
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
{
# if defined(AFS_SGI65_ENV)
    flid_t flid;
    get_current_flid(&flid);
# else
    afs_proc_t *procp = OSI_GET_CURRENT_PROCP();
# endif

    if (slp) {
# if defined(AFS_SGI65_ENV)
        slp->sysid = flid.fl_sysid;
# else
        slp->sysid = OSI_GET_CURRENT_SYSID();
# endif
        slp->pid = clid;
    } else {
# if defined(AFS_SGI65_ENV)
        flock->l_sysid = flid.fl_sysid;
# else
        flock->l_sysid = OSI_GET_CURRENT_SYSID();
# endif
        flock->l_pid = clid;
    }
}
#elif defined(AFS_AIX_ENV)
void
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
{
# if !defined(AFS_AIX32_ENV)
    afs_proc_t *procp = u.u_procp;
# endif

    if (slp) {
# if defined(AFS_AIX41_ENV)
        slp->sysid = 0;
        slp->pid = getpid();
# elif defined(AFS_AIX32_ENV)
        slp->sysid = u.u_sysid;
        slp->pid = u.u_epid;
# else
        slp->sysid = procp->p_sysid;
        slp->pid = procp->p_epid;
# endif
    } else {
# if defined(AFS_AIX41_ENV)
        flock->l_sysid = 0;
        flock->l_pid = getpid();
# elif defined(AFS_AIX32_ENV)
        flock->l_sysid = u.u_sysid;
        flock->l_pid = u.u_epid;
# else
        flock->l_sysid = procp->p_sysid;
        flock->l_pid = procp->p_epid;
# endif
    }
}
#elif defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
void
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
{
    if (slp) {
        slp->pid = clid;
    } else {
        flock->l_pid = clid;
    }
}
#elif defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
void
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
{
    if (slp) {
        slp->pid = getpid();
    } else {
        flock->l_pid = getpid();
    }
}
#elif defined(UKERNEL)
void
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
{
    if (slp) {
        slp->pid = get_user_struct()->u_procp->p_pid;
    } else {
        flock->l_pid = get_user_struct()->u_procp->p_pid;
    }
}
#else
void
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
{
    if (slp) {
        slp->pid = u.u_procp->p_pid;
    } else {
        flock->l_pid = u.u_procp->p_pid;
    }
}
#endif
/* return 1 (true) if specified flock does not match alp (if
 * specified), or any of the slp structs (if alp == 0)
 */
/* I'm not sure that the comparison of flock->pid to p_ppid
 * is correct.  Should that be a comparison of alp (or slp)->pid
 * to p_ppid?  Especially in the context of the lower loop, where
 * the repeated comparison doesn't make much sense...
 */
/* onlymine - don't match any locks which are held by my parent */
/* clid - only irix 6.5 */
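
/*
 * Roughly, as used by the callers below: lockIdcmp2(&flock, avc, NULL,
 * onlymine, clid) returns 0 when the lock id in 'flock' identifies the
 * calling process (or, with onlymine == 0, its parent), and 1 when the
 * lock appears to belong to someone else; HandleFlock(), for example,
 * refuses an unlock request when it returns non-zero.
 */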
static int
lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
           struct SimpleLocks *alp, int onlymine, int clid)
{
    struct SimpleLocks *slp;
#if defined(AFS_SUN5_ENV)
    proc_t *procp = ttoproc(curthread);
#else
#if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
#ifdef AFS_SGI64_ENV
    afs_proc_t *procp = curprocp;
#elif defined(UKERNEL)
    afs_proc_t *procp = get_user_struct()->u_procp;
#else
    afs_proc_t *procp = u.u_procp;
#endif /* AFS_SGI64_ENV */
#endif
#endif

    if (alp) {
#if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
        if (flock1->l_sysid != alp->sysid) {
            return 1;
        }
#endif
        if ((flock1->l_pid == alp->pid) ||
#if defined(AFS_AIX41_ENV) || defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
            (!onlymine && (flock1->l_pid == getppid()))
#else
#if defined(AFS_SGI65_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
            /* XXX check this. used to be *only* irix for some reason. */
            (!onlymine && (flock1->l_pid == clid))
#else
            (!onlymine && (flock1->l_pid == procp->p_ppid))
#endif
#endif
            ) {
            return 0;
        }
        return 1;
    }

    for (slp = vp->slocks; slp; slp = slp->next) {
#if defined(AFS_HAVE_FLOCK_SYSID)
        if (flock1->l_sysid != slp->sysid) {
            continue;
        }
#endif
        if (flock1->l_pid == slp->pid) {
            return 0;
        }
    }
    return (1);			/* failure */
}
/* we don't send multiple read flocks to the server, but rather just count
    them up ourselves.  Of course, multiple write locks are incompatible.

    Note that we should always try to release a lock, even if we have
    a network problem sending the release command through, since often
    a lock is released on a close call, when the user can't retry anyway.

    After we remove it from our structure, the lock will no longer be
    kept alive, and the server should time it out within a few minutes.

    94.04.13 add "force" parameter.  If a child explicitly unlocks a
    file, I guess we'll permit it.  However, we don't want simple,
    innocent closes by children to unlock files in the parent process.

    If called when disconnected support is enabled, the discon_lock must
    be held.
*/
/* clid - nonzero on sgi sunos osf1 only */
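
/*
 * For orientation only (an assumption about typical callers, not something
 * stated in this file): a userland whole-file lock such as
 *
 *     flock(fd, LOCK_EX | LOCK_NB);    -- exclusive, don't block
 *     flock(fd, LOCK_UN);              -- release
 *
 * arrives here with those LOCK_* bits in 'acom'; afs_lockctl() below also
 * funnels whole-file fcntl() locks to HandleFlock() after translating
 * F_RDLCK/F_WRLCK/F_UNLCK into LOCK_SH/LOCK_EX/LOCK_UN.
 */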
int
HandleFlock(struct vcache *avc, int acom, struct vrequest *areq,
            pid_t clid, int onlymine)
{
    struct afs_conn *tc;
    struct SimpleLocks *slp, *tlp, **slpp;
    afs_int32 code;
    struct AFSVolSync tsync;
    afs_int32 lockType;
    struct AFS_FLOCK flock;

    XSTATS_DECLS;
    AFS_STATCNT(HandleFlock);
    code = 0;			/* default when we don't make any network calls */
    lockIdSet(&flock, NULL, clid);
#if defined(AFS_SGI_ENV)
    osi_Assert(valusema(&avc->vc_rwlock) <= 0);
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
#endif
    ObtainWriteLock(&avc->lock, 118);
    if (acom & LOCK_UN) {
        int stored_segments = 0;
     retry_unlock:

#ifdef AFS_AIX_ENV
        /* If the lock is held exclusive, then only the owning process
         * or a child can unlock it. Use pid and ppid because they are
         * unique identifiers.
         */
        if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
#ifdef AFS_AIX41_ENV
            if (onlymine || (getppid() != avc->ownslock)) {
#else
            if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
#endif
                ReleaseWriteLock(&avc->lock);
                return 0;
            }
        }
#endif
        if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
            ReleaseWriteLock(&avc->lock);
            return 0;
        }
#ifdef AFS_AIX_ENV
        if (avc->flockCount == 0) {
            ReleaseWriteLock(&avc->lock);
            return 0;
        }
#endif
        /* unlock the lock */
        if (avc->flockCount > 0) {
            slpp = &avc->slocks;
            for (slp = *slpp; slp;) {
                if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
                    avc->flockCount--;
                    tlp = *slpp = slp->next;
                    osi_FreeSmallSpace(slp);
                    slp = tlp;
                } else {
                    slpp = &slp->next;
                    slp = *slpp;
                }
            }
        } else if (avc->flockCount == -1) {
            if (!stored_segments) {
                afs_StoreAllSegments(avc, areq, AFS_SYNC | AFS_VMSYNC);	/* fsync file early */
                /* afs_StoreAllSegments can drop and reacquire the write lock
                 * on avc and GLOCK, so the flocks may be completely different
                 * now. Go back and perform all checks again. */
                stored_segments = 1;
                goto retry_unlock;
            }
            avc->flockCount = 0;
            /* And remove the (only) exclusive lock entry from the list... */
            osi_FreeSmallSpace(avc->slocks);
            avc->slocks = 0;
        }
        if (avc->flockCount == 0) {
            if (!AFS_IS_DISCONNECTED) {
                struct rx_connection *rxconn;
                do {
                    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
                    if (tc) {
                        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
                        RX_AFS_GUNLOCK();
                        code = RXAFS_ReleaseLock(rxconn, (struct AFSFid *)
                                                 &avc->f.fid.Fid, &tsync);
                        RX_AFS_GLOCK();
                        XSTATS_END_TIME;
                    } else
                        code = -1;
                } while (afs_Analyze
                         (tc, rxconn, code, &avc->f.fid, areq,
                          AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
            } else {
                /*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
                code = ENETDOWN;
            }
        }
    } else {
        while (1) {		/* set a new lock */
            /*
             * Upgrading from shared locks to Exclusive and vice versa
             * is a bit tricky and we don't really support it yet. But
             * we try to support the common used one which is upgrade
             * a shared lock to an exclusive for the same process...
             */
            if ((avc->flockCount > 0 && (acom & LOCK_EX))
                || (avc->flockCount == -1 && (acom & LOCK_SH))) {
                /*
                 * Upgrading from shared locks to an exclusive one:
                 * For now if all the shared locks belong to the
                 * same process then we unlock them on the server
                 * and proceed with the upgrade.  Unless we change the
                 * server's locking interface impl we prohibit from
                 * unlocking other processes's shared locks...
                 * Upgrading from an exclusive lock to a shared one:
                 * Again only allowed to be done by the same process.
                 */
                slpp = &avc->slocks;
                for (slp = *slpp; slp;) {
                    if (!lockIdcmp2
                        (&flock, avc, slp, 1 /*!onlymine */ , clid)) {
                        if (slp->type == LockWrite)
                            avc->flockCount = 0;
                        else
                            avc->flockCount--;
                        tlp = *slpp = slp->next;
                        osi_FreeSmallSpace(slp);
                        slp = tlp;
                    } else {
                        code = EWOULDBLOCK;
                        slpp = &slp->next;
                        slp = *slpp;
                    }
                }
                if (!code && avc->flockCount == 0) {
                    if (!AFS_IS_DISCONNECTED) {
                        struct rx_connection *rxconn;
                        do {
                            tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
                            if (tc) {
                                XSTATS_START_TIME
                                    (AFS_STATS_FS_RPCIDX_RELEASELOCK);
                                RX_AFS_GUNLOCK();
                                code =
                                    RXAFS_ReleaseLock(rxconn,
                                                      (struct AFSFid *)&avc->
                                                      f.fid.Fid, &tsync);
                                RX_AFS_GLOCK();
                                XSTATS_END_TIME;
                            } else
                                code = -1;
                        } while (afs_Analyze
                                 (tc, rxconn, code, &avc->f.fid, areq,
                                  AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
                                  NULL));
                    }
                }
            } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
                if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
                    code = EWOULDBLOCK;
                } else {
                    code = 0;
                    /* We've just re-grabbed an exclusive lock, so we don't
                     * need to contact the fileserver, and we don't need to
                     * add the lock to avc->slocks (since we already have a
                     * lock there). So, we are done. */
                    break;
                }
            }
            if (code == 0) {
                /* compatible here, decide if needs to go to file server.  If
                 * we've already got the file locked (and thus read-locked, since
                 * we've already checked for compatibility), we shouldn't send
                 * the call through to the server again */
                if (avc->flockCount == 0) {
                    struct rx_connection *rxconn;
                    /* we're the first on our block, send the call through */
                    lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
                    if (!AFS_IS_DISCONNECTED) {
                        do {
                            tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
                            if (tc) {
                                XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
                                RX_AFS_GUNLOCK();
                                code = RXAFS_SetLock(rxconn, (struct AFSFid *)
                                                     &avc->f.fid.Fid, lockType,
                                                     &tsync);
                                RX_AFS_GLOCK();
                                XSTATS_END_TIME;
                            } else
                                code = -1;
                        } while (afs_Analyze
                                 (tc, rxconn, code, &avc->f.fid, areq,
                                  AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
                                  NULL));
                        if ((lockType == LockWrite) && (code == VREADONLY))
                            code = EBADF;	/* per POSIX; VREADONLY == EROFS */
                    } else
                        /* XXX - Should probably try and log this when we're
                         * XXX - running with logging enabled. But it's horrid
                         */
                        code = 0;	/* pretend we worked - ick!!! */
                } else
                    code = 0;	/* otherwise, pretend things worked */
            }
            if (code == 0) {
                slp = (struct SimpleLocks *)
                    osi_AllocSmallSpace(sizeof(struct SimpleLocks));
                if (acom & LOCK_EX) {
#ifdef AFS_AIX_ENV
                    /* Record unique id of process owning exclusive lock. */
                    avc->ownslock = getpid();
#endif
                    slp->type = LockWrite;
                    slp->next = NULL;
                    avc->slocks = slp;
                    avc->flockCount = -1;
                } else {
                    slp->type = LockRead;
                    slp->next = avc->slocks;
                    avc->slocks = slp;
                    avc->flockCount++;
                }

                lockIdSet(&flock, slp, clid);
                break;
            }
            /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
            if (((code == EWOULDBLOCK) || (code == EAGAIN) ||
                 (code == UAEWOULDBLOCK) || (code == UAEAGAIN))
                && !(acom & LOCK_NB)) {
                /* sleep for a second, allowing interrupts */
                ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
                AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
                code = afs_osi_Wait(1000, NULL, 1);
#if defined(AFS_SGI_ENV)
                AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
                ObtainWriteLock(&avc->lock, 120);
                if (code) {
                    code = EINTR;	/* return this if ^C typed */
                    break;
                }
            } else
                break;
        }			/* while loop */
    }
    ReleaseWriteLock(&avc->lock);
    code = afs_CheckCode(code, areq, 1);	/* defeat a buggy AIX optimization */
    return code;
}
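
/*
 * Rough usage sketch (drawn from afs_lockctl() below, not an additional
 * interface guarantee): a blocking whole-file request looks like
 *
 *     code = HandleFlock(avc, LOCK_EX, treq, clid, 0);
 *
 * and fcntl(F_SETLK) callers OR in LOCK_NB so that a conflicting lock is
 * reported immediately instead of sleeping in the one-second retry loop
 * above.
 */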
/* warn a user that a lock has been ignored */
static void
DoLockWarning(struct vcache *avc, afs_ucred_t * acred)
{
    static afs_uint32 lastWarnTime;
    static pid_t lastWarnPid;

    afs_uint32 now;
    pid_t pid = MyPidxx2Pid(MyPidxx);
    char *procname;
    const char *message;

    now = osi_Time();

    AFS_STATCNT(DoLockWarning);

    /* check if we've already warned this user recently */
    if ((now < lastWarnTime + 120) && (lastWarnPid == pid)) {
        return;
    }
    if (now < avc->lastBRLWarnTime + 120) {
        return;
    }

    procname = afs_osi_Alloc(256);

    if (!procname)
        return;

    /* Copies process name to allocated procname, see osi_machdeps for details of macro */
    osi_procname(procname, 256);
    procname[255] = '\0';

    lastWarnTime = avc->lastBRLWarnTime = now;
    lastWarnPid = pid;

#ifdef AFS_LINUX26_ENV
    message = "byte-range locks only enforced for processes on this machine";
#else
    message = "byte-range lock/unlock ignored; make sure no one else is running this program";
#endif

    afs_warnuser("afs: %s (pid %d (%s), user %ld, fid %lu.%lu.%lu).\n",
                 message, pid, procname, (long)afs_cr_uid(acred),
                 (unsigned long)avc->f.fid.Fid.Volume,
                 (unsigned long)avc->f.fid.Fid.Vnode,
                 (unsigned long)avc->f.fid.Fid.Unique);

    afs_osi_Free(procname, 256);
    return;
}
#if defined(AFS_SGI_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
                afs_ucred_t * acred, pid_t clid)
#else
u_int clid = 0;
int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
                afs_ucred_t * acred)
#endif
{
    struct vrequest *treq = NULL;
    afs_int32 code;
    struct afs_fakestat_state fakestate;

    AFS_STATCNT(afs_lockctl);
    if ((code = afs_CreateReq(&treq, acred)))
        return code;
    afs_InitFakeStat(&fakestate);

    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&avc, &fakestate, treq);
    if (code) {
        goto done;
    }
#if defined(AFS_SGI_ENV)
    if ((acmd == F_GETLK) || (acmd == F_RGETLK)) {
#else
    if (acmd == F_GETLK) {
#endif
        if (af->l_type == F_UNLCK) {
            code = 0;
            goto done;
        }
        code = HandleGetLock(avc, af, treq, clid);
        code = afs_CheckCode(code, treq, 2);	/* defeat buggy AIX optimz */
        goto done;
    } else if ((acmd == F_SETLK) || (acmd == F_SETLKW)
#if defined(AFS_SGI_ENV)
               || (acmd == F_RSETLK) || (acmd == F_RSETLKW)) {
#else
        ) {
#endif

        if ((avc->f.states & CRO)) {
            /* for RO volumes, don't do anything for locks; the fileserver doesn't
             * even track them. A write lock should not be possible, though. */
            if (af->l_type == F_WRLCK) {
                code = EBADF;
            }
            goto done;
        }

        /* Java VMs ask for l_len=(long)-1 regardless of OS/CPU */
        if ((sizeof(af->l_len) == 8) && (af->l_len == 0x7fffffffffffffffLL))
            af->l_len = 0;
        /* next line makes byte range locks always succeed,
         * even when they should block (see the note after this function) */
        if (af->l_whence != 0 || af->l_start != 0 || af->l_len != 0) {
            DoLockWarning(avc, acred);
            code = 0;
            goto done;
        }
        /* otherwise we can turn this into a whole-file flock */
        if (af->l_type == F_RDLCK)
            code = LOCK_SH;
        else if (af->l_type == F_WRLCK)
            code = LOCK_EX;
        else if (af->l_type == F_UNLCK)
            code = LOCK_UN;
        else {
            code = EINVAL;	/* unknown lock type */
            goto done;
        }
        if (((acmd == F_SETLK)
#if defined(AFS_SGI_ENV)
             || (acmd == F_RSETLK)
#endif
            ) && code != LOCK_UN)
            code |= LOCK_NB;	/* non-blocking, s.v.p. */
#if defined(AFS_DARWIN_ENV)
        code = HandleFlock(avc, code, treq, clid, 0 /*!onlymine */ );
#elif defined(AFS_SGI_ENV)
        AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
        code = HandleFlock(avc, code, treq, clid, 0 /*!onlymine */ );
        AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#else
        code = HandleFlock(avc, code, treq, 0, 0 /*!onlymine */ );
#endif
        code = afs_CheckCode(code, treq, 3);	/* defeat AIX -O bug */
        goto done;
    }
    code = EINVAL;		/* unknown lock command */

  done:
    afs_PutFakeStat(&fakestate);
    AFS_DISCON_UNLOCK();
    afs_DestroyReq(treq);
    return code;
}
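
/*
 * Summary of the byte-range case above: requests with a non-zero
 * l_whence/l_start/l_len are only warned about via DoLockWarning() and then
 * reported as successful, so byte-range locks are never sent to the
 * fileserver from here; only whole-file locks reach HandleFlock().
 */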
/*
 * Get a description of the first lock which would
 * block the lock specified.  If the specified lock
 * would succeed, fill in the lock structure with 'F_UNLCK'.
 *
 * To do that, we have to ask the server for the lock
 * count if:
 *    1. The file is not locked by this machine.
 *    2. Asking for write lock, and only the current
 *       PID has the file read locked.
 */
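
/*
 * Orientation sketch (an assumption about the usual caller, not stated
 * here): a query such as
 *
 *     struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *     fcntl(fd, F_GETLK, &fl);
 *
 * is routed through afs_lockctl() to this routine, which rewrites
 * fl.l_type to F_UNLCK if the request would succeed, or fills in the
 * type (and pid, where known) of a conflicting lock.
 */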
static int
HandleGetLock(struct vcache *avc, struct AFS_FLOCK *af,
              struct vrequest *areq, int clid)
{
    afs_int32 code;
    struct AFS_FLOCK flock;

    lockIdSet(&flock, NULL, clid);

    ObtainWriteLock(&avc->lock, 122);
    if (avc->flockCount == 0) {
        /*
         * We don't know ourselves, so ask the server. Unfortunately, we
         * don't know the pid.  Not even the server knows the pid.  Besides,
         * the process with the lock is on another machine
         */
        code = GetFlockCount(avc, areq);
        if (code == 0 || (af->l_type == F_RDLCK && code > 0)) {
            af->l_type = F_UNLCK;
            goto unlck_leave;
        }
        if (code > 0)
            af->l_type = F_RDLCK;
        else
            af->l_type = F_WRLCK;

        af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
        af->l_sysid = 0;
#endif
        goto done;
    }
    if (af->l_type == F_RDLCK) {
        /*
         * We want a read lock.  If there are only
         * read locks, or we are the one with the
         * write lock, say it is unlocked.
         */
        if (avc->flockCount > 0 ||	/* only read locks */
            !lockIdcmp2(&flock, avc, NULL, 1, clid)) {
            af->l_type = F_UNLCK;
            goto unlck_leave;
        }

        /* one write lock, but who? */
        af->l_type = F_WRLCK;	/* not us, so lock would block */
        if (avc->slocks) {	/* we know who, so tell */
            af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
            af->l_sysid = avc->slocks->sysid;
#endif
        } else {
            af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
            af->l_sysid = 0;
#endif
        }
        goto done;
    }
    /*
     * Ok, we want a write lock.  If there is a write lock
     * already, and it is not this process, we fail.
     */
    if (avc->flockCount < 0) {
        if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
            af->l_type = F_WRLCK;
            if (avc->slocks) {
                af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
                af->l_sysid = avc->slocks->sysid;
#endif
            } else {
                af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
                af->l_sysid = 0;
#endif
            }
            goto done;
        }
        /* we are the one with the write lock */
        af->l_type = F_UNLCK;
        goto unlck_leave;
    }

    /*
     * Want a write lock, and we know there are read locks.
     * If there is more than one, or it isn't us, we cannot lock.
     */
    if ((avc->flockCount > 1)
        || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
        struct SimpleLocks *slp;

        af->l_type = F_RDLCK;
        af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
        af->l_sysid = 0;
#endif
        /* find a pid that isn't our own */
        for (slp = avc->slocks; slp; slp = slp->next) {
            if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
                af->l_pid = slp->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
                af->l_sysid = avc->slocks->sysid;
#endif
                break;
            }
        }
        goto done;
    }
    /*
     * Ok, we want a write lock.  If there is a write lock
     * already, and it is not this process, we fail.
     */
    if (avc->flockCount < 0) {
        if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
            af->l_type = F_WRLCK;
            if (avc->slocks) {
                af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
                af->l_sysid = avc->slocks->sysid;
#endif
            } else {
                af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
                af->l_sysid = 0;
#endif
            }
            goto done;
        }
        /* we are the one with the write lock */
        af->l_type = F_UNLCK;
        goto unlck_leave;
    }

    /*
     * Want a write lock, and we know there are read locks.
     * If there is more than one, or it isn't us, we cannot lock.
     */
    if ((avc->flockCount > 1)
        || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
        struct SimpleLocks *slp;
        af->l_type = F_RDLCK;
        af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
        af->l_sysid = 0;
#endif
        /* find a pid that isn't our own */
        for (slp = avc->slocks; slp; slp = slp->next) {
            if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
                af->l_pid = slp->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
                af->l_sysid = avc->slocks->sysid;
#endif
                break;
            }
        }
        goto done;
    }
    /*
     * Want a write lock, and there is just one read lock, and it
     * is this process with a read lock.  Ask the server if there
     * are any more processes with the file locked.
     */
    code = GetFlockCount(avc, areq);
    if (code == 0 || code == 1) {
        af->l_type = F_UNLCK;
        goto unlck_leave;
    }
    if (code > 0)
        af->l_type = F_RDLCK;
    else
        af->l_type = F_WRLCK;
    af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
    af->l_sysid = 0;
#endif

  done:
    af->l_whence = 0;
    af->l_start = 0;
    af->l_len = 0;		/* to end of file */

  unlck_leave:
    ReleaseWriteLock(&avc->lock);
    return 0;
}
/* Get the 'flock' count from the server.  This comes back in a 'spare'
 * field from a GetStatus RPC.  If we have any problems with the RPC,
 * we lie and say the file is unlocked.  If we ask any 'old' fileservers,
 * the spare field will be a zero, saying the file is unlocked.  This is
 * OK, as a further 'lock' request will do the right thing.
 */
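/*
 * As HandleGetLock() interprets it, the returned count appears to follow
 * the usual AFS lockCount convention: a positive value is the number of
 * read locks, a negative value means the file is write locked, and zero
 * means unlocked (which is also what we claim on any RPC failure).
 */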
static int
GetFlockCount(struct vcache *avc, struct vrequest *areq)
{
    struct afs_conn *tc;
    afs_int32 code;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct rx_connection *rxconn;
    int temp;

    XSTATS_DECLS;
    temp = areq->flags & O_NONBLOCK;
    areq->flags |= O_NONBLOCK;

    /* If we're disconnected, lie and say that we've got no locks. Ick */
    if (AFS_IS_DISCONNECTED)
        return 0;

    do {
        tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
        if (tc) {
            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
            RX_AFS_GUNLOCK();
            code =
                RXAFS_FetchStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
                                  &OutStatus, &CallBack, &tsync);
            RX_AFS_GLOCK();
            XSTATS_END_TIME;
        } else
            code = -1;
    } while (afs_Analyze
             (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
              SHARED_LOCK, NULL));

    if (temp)
        areq->flags &= ~O_NONBLOCK;

    if (code) {
        return (0);		/* failed, say it is 'unlocked' */
    }

    return ((int)OutStatus.lockCount);
}