Merge 1.8.0~pre4 packaging into master
[pkg-k5-afs_openafs.git] / src / afs / VNOPS / afs_vnop_flock.c
blob9efeeeb7830c19ee7c000be2791456c0f5cca2fb
1 /*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
/*
 * Implements:
 */
15 #include <afsconfig.h>
16 #include "afs/param.h"
19 #include "afs/sysincludes.h" /* Standard vendor system headers */
20 #include "afsincludes.h" /* Afs-based standard headers */
21 #include "afs/afs_stats.h" /* statistics */
22 #include "afs/afs_cbqueue.h"
23 #include "afs/nfsclient.h"
24 #include "afs/afs_osidnlc.h"
25 #include "afs/unified_afs.h"
31 /* Static prototypes */
32 static int HandleGetLock(struct vcache *avc,
33 struct AFS_FLOCK *af,
34 struct vrequest *areq, int clid);
35 static int GetFlockCount(struct vcache *avc, struct vrequest *areq);
36 static int lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
37 struct SimpleLocks *alp, int onlymine,
38 int clid);
40 /* int clid; * non-zero on SGI, OSF, SunOS, Darwin, xBSD ** XXX ptr type */
42 #if defined(AFS_SUN5_ENV)
43 void
44 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
46 proc_t *procp = ttoproc(curthread);
48 if (slp) {
49 slp->sysid = 0;
50 slp->pid = procp->p_pid;
51 } else {
52 flock->l_sysid = 0;
53 flock->l_pid = procp->p_pid;
56 #elif defined(AFS_SGI_ENV)
57 void
58 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
60 # if defined(AFS_SGI65_ENV)
61 flid_t flid;
62 get_current_flid(&flid);
63 # else
64 afs_proc_t *procp = OSI_GET_CURRENT_PROCP();
65 # endif
67 if (slp) {
68 # ifdef AFS_SGI65_ENV
69 slp->sysid = flid.fl_sysid;
70 # else
71 slp->sysid = OSI_GET_CURRENT_SYSID();
72 # endif
73 slp->pid = clid;
74 } else {
75 # ifdef AFS_SGI65_ENV
76 flock->l_sysid = flid.fl_sysid;
77 # else
78 flock->l_sysid = OSI_GET_CURRENT_SYSID();
79 # endif
80 flock->l_pid = clid;
83 #elif defined(AFS_AIX_ENV)
84 void
85 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
87 # if !defined(AFS_AIX32_ENV)
88 afs_proc_t *procp = u.u_procp;
89 # endif
91 if (slp) {
92 # if defined(AFS_AIX41_ENV)
93 slp->sysid = 0;
94 slp->pid = getpid();
95 # elif defined(AFS_AIX32_ENV)
96 slp->sysid = u.u_sysid;
97 slp->pid = u.u_epid;
98 # else
99 slp->sysid = procp->p_sysid;
100 slp->pid = prcop->p_epid;
101 # endif
102 } else {
103 # if defined(AFS_AIX41_ENV)
104 flock->l_sysid = 0;
105 flock->l_pid = getpid();
106 # elif defined(AFS_AIX32_ENV)
107 flock->l_sysid = u.u_sysid;
108 flock->l_pid = u.u_epid;
109 # else
110 flock->l_sysid = procp->p_sysid;
111 flock->l_pid = procp->p_epid;
112 # endif
115 #elif defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
116 void
117 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
119 if (slp) {
120 slp->pid = clid;
121 } else {
122 flock->l_pid = clid;
125 #elif defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
126 void
127 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
129 if (slp) {
130 slp->pid = getpid();
131 } else {
132 flock->l_pid = getpid();
135 #elif defined(UKERNEL)
136 void
137 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
139 if (slp) {
140 slp->pid = get_user_struct()->u_procp->p_pid;
141 } else {
142 flock->l_pid = get_user_struct()->u_procp->p_pid;
145 #else
146 void
147 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
149 if (slp) {
150 slp->pid = u.u_procp->p_pid;
151 } else {
152 flock->l_pid = u.u_procp->p_pid;
155 #endif
/* return 1 (true) if specified flock does not match alp (if
 * specified), or any of the slp structs (if alp == 0)
 */
/* I'm not sure that the comparison of flock->pid to p_ppid
 * is correct.  Should that be a comparison of alp (or slp) ->pid
 * to p_ppid?  Especially in the context of the lower loop, where
 * the repeated comparison doesn't make much sense...
 */
/* onlymine - don't match any locks which are held by my parent */
/* clid - only irix 6.5 */
/*
 * lockIdcmp2 -- compare the lock-holder identity recorded in 'flock1'
 * against either the single entry 'alp' (if non-NULL) or every entry
 * on vp->slocks.
 *
 * Returns 0 when a matching holder is found, 1 when none matches.
 * onlymine - when clear, a lock held by our parent process also counts
 *            as a match (parent id source is platform-dependent:
 *            getppid() / clid / procp->p_ppid).
 * clid     - holder id; consulted only on SGI 6.5 / Darwin / xBSD.
 */
static int
lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
	   struct SimpleLocks *alp, int onlymine, int clid)
{
    struct SimpleLocks *slp;
#if defined(AFS_SUN5_ENV)
    proc_t *procp = ttoproc(curthread);
#else
#if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
#ifdef AFS_SGI64_ENV
    afs_proc_t *procp = curprocp;
#elif defined(UKERNEL)
    afs_proc_t *procp = get_user_struct()->u_procp;
#else
    afs_proc_t *procp = u.u_procp;
#endif /* AFS_SGI64_ENV */
#endif
#endif

    if (alp) {
#if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
	/* on platforms with a sysid, a different system never matches */
	if (flock1->l_sysid != alp->sysid) {
	    return 1;
	}
#endif
	if ((flock1->l_pid == alp->pid) ||
#if defined(AFS_AIX41_ENV) || defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
	    (!onlymine && (flock1->l_pid == getppid()))
#else
#if defined(AFS_SGI65_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
	    /* XXX check this. used to be *only* irix for some reason. */
	    (!onlymine && (flock1->l_pid == clid))
#else
	    (!onlymine && (flock1->l_pid == procp->p_ppid))
#endif
#endif
	    ) {
	    return 0;
	}
	return 1;
    }

    /* no single entry supplied: scan the vnode's whole lock list */
    for (slp = vp->slocks; slp; slp = slp->next) {
#if defined(AFS_HAVE_FLOCK_SYSID)
	if (flock1->l_sysid != slp->sysid) {
	    continue;
	}
#endif
	if (flock1->l_pid == slp->pid) {
	    return 0;
	}
    }
    return (1);			/* failure */
}
/* we don't send multiple read flocks to the server, but rather just count
    them up ourselves.  Of course, multiple write locks are incompatible.

    Note that we should always try to release a lock, even if we have
    a network problem sending the release command through, since often
    a lock is released on a close call, when the user can't retry anyway.

    After we remove it from our structure, the lock will no longer be
    kept alive, and the server should time it out within a few minutes.

    94.04.13 add "force" parameter.  If a child explicitly unlocks a
    file, I guess we'll permit it.  however, we don't want simple,
    innocent closes by children to unlock files in the parent process.

    If called when disconnected support is enabled, the discon_lock must
    be held
*/
/* clid - nonzero on sgi sunos osf1 only */
/*
 * HandleFlock
 *
 * Apply or release a whole-file advisory lock (acom is LOCK_SH /
 * LOCK_EX / LOCK_UN, optionally with LOCK_NB) on vcache 'avc'.
 * Lock state is tracked locally:
 *   avc->flockCount > 0  : that many shared (read) locks held here
 *   avc->flockCount == -1: one exclusive (write) lock held here
 *   avc->flockCount == 0 : no locks held by this client
 * The fileserver is contacted (RXAFS_SetLock / RXAFS_ReleaseLock)
 * only when this client's first lock is taken or its last one drops.
 *
 * onlymine - when unlocking, only release locks held by this process
 *            (not its parent); see lockIdcmp2.
 * Returns 0 or an errno-style code filtered through afs_CheckCode.
 */
int
HandleFlock(struct vcache *avc, int acom, struct vrequest *areq,
	    pid_t clid, int onlymine)
{
    struct afs_conn *tc;
    struct SimpleLocks *slp, *tlp, **slpp;
    afs_int32 code;
    struct AFSVolSync tsync;
    afs_int32 lockType;
    struct AFS_FLOCK flock;

    XSTATS_DECLS;
    AFS_STATCNT(HandleFlock);
    code = 0;			/* default when we don't make any network calls */
    lockIdSet(&flock, NULL, clid);	/* identity of the caller, for matching */

#if defined(AFS_SGI_ENV)
    osi_Assert(valusema(&avc->vc_rwlock) <= 0);
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
#endif
    ObtainWriteLock(&avc->lock, 118);
    if (acom & LOCK_UN) {
	int stored_segments = 0;
     retry_unlock:

/* defect 3083 */

#ifdef AFS_AIX_ENV
	/* If the lock is held exclusive, then only the owning process
	 * or a child can unlock it. Use pid and ppid because they are
	 * unique identifiers.
	 */
	if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
#ifdef AFS_AIX41_ENV
	    if (onlymine || (getppid() != avc->ownslock)) {
#else
	    if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
#endif
		ReleaseWriteLock(&avc->lock);
		return 0;
	    }
	}
#endif
	/* silently ignore unlocks from a process that holds nothing here */
	if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
	    ReleaseWriteLock(&avc->lock);
	    return 0;
	}
#ifdef AFS_AIX_ENV
	avc->ownslock = 0;
#endif
	if (avc->flockCount == 0) {
	    ReleaseWriteLock(&avc->lock);
	    return 0 /*ENOTTY*/;
	    /* no lock held */
	}
	/* unlock the lock */
	if (avc->flockCount > 0) {
	    /* shared locks: drop every list entry that matches the caller */
	    slpp = &avc->slocks;
	    for (slp = *slpp; slp;) {
		if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
		    avc->flockCount--;
		    tlp = *slpp = slp->next;
		    osi_FreeSmallSpace(slp);
		    slp = tlp;
		} else {
		    slpp = &slp->next;
		    slp = *slpp;
		}
	    }
	} else if (avc->flockCount == -1) {
	    if (!stored_segments) {
		afs_StoreAllSegments(avc, areq, AFS_SYNC | AFS_VMSYNC);	/* fsync file early */
		/* afs_StoreAllSegments can drop and reacquire the write lock
		 * on avc and GLOCK, so the flocks may be completely different
		 * now. Go back and perform all checks again. */
		stored_segments = 1;
		goto retry_unlock;
	    }
	    avc->flockCount = 0;
	    /* And remove the (only) exclusive lock entry from the list... */
	    osi_FreeSmallSpace(avc->slocks);
	    avc->slocks = 0;
	}
	if (avc->flockCount == 0) {
	    /* last local lock gone: tell the fileserver (unless offline) */
	    if (!AFS_IS_DISCONNECTED) {
		struct rx_connection *rxconn;
		do {
		    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
		    if (tc) {
			XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
			RX_AFS_GUNLOCK();
			code = RXAFS_ReleaseLock(rxconn, (struct AFSFid *)
						 &avc->f.fid.Fid, &tsync);
			RX_AFS_GLOCK();
			XSTATS_END_TIME;
		    } else
			code = -1;
		} while (afs_Analyze
			 (tc, rxconn, code, &avc->f.fid, areq,
			  AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
	    } else {
		/*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
		code = ENETDOWN;
	    }
	}
    } else {
	while (1) {		/* set a new lock */
	    /*
	     * Upgrading from shared locks to Exclusive and vice versa
	     * is a bit tricky and we don't really support it yet. But
	     * we try to support the common used one which is upgrade
	     * a shared lock to an exclusive for the same process...
	     */
	    if ((avc->flockCount > 0 && (acom & LOCK_EX))
		|| (avc->flockCount == -1 && (acom & LOCK_SH))) {
		/*
		 * Upgrading from shared locks to an exclusive one:
		 * For now if all the shared locks belong to the
		 * same process then we unlock them on the server
		 * and proceed with the upgrade.  Unless we change the
		 * server's locking interface impl we prohibit from
		 * unlocking other processes's shared locks...
		 * Upgrading from an exclusive lock to a shared one:
		 * Again only allowed to be done by the same process.
		 */
		slpp = &avc->slocks;
		for (slp = *slpp; slp;) {
		    if (!lockIdcmp2
			(&flock, avc, slp, 1 /*!onlymine */ , clid)) {
			if (acom & LOCK_EX)
			    avc->flockCount--;
			else
			    avc->flockCount = 0;
			tlp = *slpp = slp->next;
			osi_FreeSmallSpace(slp);
			slp = tlp;
		    } else {
			/* another process holds one of the locks: refuse */
			code = EWOULDBLOCK;
			slpp = &slp->next;
			slp = *slpp;
		    }
		}
		if (!code && avc->flockCount == 0) {
		    if (!AFS_IS_DISCONNECTED) {
			struct rx_connection *rxconn;
			do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
				XSTATS_START_TIME
				    (AFS_STATS_FS_RPCIDX_RELEASELOCK);
				RX_AFS_GUNLOCK();
				code =
				    RXAFS_ReleaseLock(rxconn,
						      (struct AFSFid *)&avc->
						      f.fid.Fid, &tsync);
				RX_AFS_GLOCK();
				XSTATS_END_TIME;
			    } else
				code = -1;
			} while (afs_Analyze
				 (tc, rxconn, code, &avc->f.fid, areq,
				  AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
				  NULL));
		    }
		}
	    } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
		if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
		    code = EWOULDBLOCK;
		} else {
		    code = 0;
		    /* We've just re-grabbed an exclusive lock, so we don't
		     * need to contact the fileserver, and we don't need to
		     * add the lock to avc->slocks (since we already have a
		     * lock there). So, we are done. */
		    break;
		}
	    }
	    if (code == 0) {
		/* compatible here, decide if needs to go to file server.  If
		 * we've already got the file locked (and thus read-locked, since
		 * we've already checked for compatibility), we shouldn't send
		 * the call through to the server again */
		if (avc->flockCount == 0) {
		    struct rx_connection *rxconn;
		    /* we're the first on our block, send the call through */
		    lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
		    if (!AFS_IS_DISCONNECTED) {
			do {
			    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			    if (tc) {
				XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
				RX_AFS_GUNLOCK();
				code = RXAFS_SetLock(rxconn, (struct AFSFid *)
						     &avc->f.fid.Fid, lockType,
						     &tsync);
				RX_AFS_GLOCK();
				XSTATS_END_TIME;
			    } else
				code = -1;
			} while (afs_Analyze
				 (tc, rxconn, code, &avc->f.fid, areq,
				  AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
				  NULL));
			if ((lockType == LockWrite) && (code == VREADONLY))
			    code = EBADF; /* per POSIX; VREADONLY == EROFS */
		    } else
			/* XXX - Should probably try and log this when we're
			 * XXX - running with logging enabled. But it's horrid
			 */
			code = 0; /* pretend we worked - ick!!! */
		} else
		    code = 0;	/* otherwise, pretend things worked */
	    }
	    if (code == 0) {
		/* lock granted: record it in the local list */
		slp = (struct SimpleLocks *)
		    osi_AllocSmallSpace(sizeof(struct SimpleLocks));
		if (acom & LOCK_EX) {

/* defect 3083 */

#ifdef AFS_AIX_ENV
		    /* Record unique id of process owning exclusive lock. */
		    avc->ownslock = getpid();
#endif

		    slp->type = LockWrite;
		    slp->next = NULL;
		    avc->slocks = slp;
		    avc->flockCount = -1;
		} else {
		    slp->type = LockRead;
		    slp->next = avc->slocks;
		    avc->slocks = slp;
		    avc->flockCount++;
		}

		lockIdSet(&flock, slp, clid);
		break;
	    }
	    /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
	    if (((code == EWOULDBLOCK) || (code == EAGAIN) ||
		 (code == UAEWOULDBLOCK) || (code == UAEAGAIN))
		&& !(acom & LOCK_NB)) {
		/* sleep for a second, allowing interrupts */
		ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
		AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		code = afs_osi_Wait(1000, NULL, 1);
#if defined(AFS_SGI_ENV)
		AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
		ObtainWriteLock(&avc->lock, 120);
		if (code) {
		    code = EINTR;	/* return this if ^C typed */
		    break;
		}
	    } else
		break;
	}			/* while loop */
    }
    ReleaseWriteLock(&avc->lock);
    code = afs_CheckCode(code, areq, 1);	/* defeat a buggy AIX optimization */
    return code;
}
/* warn a user that a lock has been ignored */
/*
 * DoLockWarning
 *
 * Emit a rate-limited afs_warnuser() message telling the user that a
 * byte-range lock request was not enforced.  Suppressed when the same
 * pid warned within the last 120 seconds (module-wide statics), or
 * when this vcache warned within the last 120 seconds.
 */
static void
DoLockWarning(struct vcache *avc, afs_ucred_t * acred)
{
    static afs_uint32 lastWarnTime;	/* last warning, any vcache */
    static pid_t lastWarnPid;		/* pid that triggered it */

    afs_uint32 now;
    pid_t pid = MyPidxx2Pid(MyPidxx);
    char *procname;
    const char *message;

    now = osi_Time();

    AFS_STATCNT(DoLockWarning);

    /* check if we've already warned this user recently */
    if ((now < lastWarnTime + 120) && (lastWarnPid == pid)) {
	return;
    }
    if (now < avc->lastBRLWarnTime + 120) {
	return;
    }

    procname = afs_osi_Alloc(256);

    if (!procname)
	return;		/* allocation failed; skip the warning entirely */

    /* Copies process name to allocated procname, see osi_machdeps for details of macro */
    osi_procname(procname, 256);
    procname[255] = '\0';

    lastWarnTime = avc->lastBRLWarnTime = now;
    lastWarnPid = pid;

#ifdef AFS_LINUX26_ENV
    message = "byte-range locks only enforced for processes on this machine";
#else
    message = "byte-range lock/unlock ignored; make sure no one else is running this program";
#endif

    afs_warnuser("afs: %s (pid %d (%s), user %ld, fid %lu.%lu.%lu).\n",
		 message, pid, procname, (long)afs_cr_uid(acred),
		 (unsigned long)avc->f.fid.Fid.Volume,
		 (unsigned long)avc->f.fid.Fid.Vnode,
		 (unsigned long)avc->f.fid.Fid.Unique);

    afs_osi_Free(procname, 256);
}
/*
 * afs_lockctl
 *
 * Entry point for fcntl-style record locking (F_GETLK / F_SETLK /
 * F_SETLKW, plus the SGI F_R* remote variants).  Whole-file requests
 * (whence/start/len all zero) are mapped onto flock-style locks via
 * HandleFlock / HandleGetLock; true byte-range requests are NOT
 * enforced -- they "succeed" after DoLockWarning.  Returns 0 or an
 * errno-style code.
 */
#if defined(AFS_SGI_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
		afs_ucred_t * acred, pid_t clid)
#else
u_int clid = 0;		/* file-scope stand-in: no clid param on these platforms */
int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
		afs_ucred_t * acred)
#endif
{
    struct vrequest *treq = NULL;
    afs_int32 code;
    struct afs_fakestat_state fakestate;

    AFS_STATCNT(afs_lockctl);
    if ((code = afs_CreateReq(&treq, acred)))
	return code;
    afs_InitFakeStat(&fakestate);

    AFS_DISCON_LOCK();

    /* resolve mount-point fakestat vnodes to the real target vcache */
    code = afs_EvalFakeStat(&avc, &fakestate, treq);
    if (code) {
	goto done;
    }
#if defined(AFS_SGI_ENV)
    if ((acmd == F_GETLK) || (acmd == F_RGETLK)) {
#else
    if (acmd == F_GETLK) {
#endif
	if (af->l_type == F_UNLCK) {
	    code = 0;
	    goto done;
	}
	code = HandleGetLock(avc, af, treq, clid);
	code = afs_CheckCode(code, treq, 2);	/* defeat buggy AIX optimz */
	goto done;
    } else if ((acmd == F_SETLK) || (acmd == F_SETLKW)
#if defined(AFS_SGI_ENV)
	       || (acmd == F_RSETLK) || (acmd == F_RSETLKW)) {
#else
	) {
#endif
	if ((avc->f.states & CRO)) {
	    /* for RO volumes, don't do anything for locks; the fileserver doesn't
	     * even track them. A write lock should not be possible, though. */
	    if (af->l_type == F_WRLCK) {
		code = EBADF;
	    } else {
		code = 0;
	    }
	    goto done;
	}

	/* Java VMs ask for l_len=(long)-1 regardless of OS/CPU */
	if ((sizeof(af->l_len) == 8) && (af->l_len == 0x7fffffffffffffffLL))
	    af->l_len = 0;
	/* next line makes byte range locks always succeed,
	 * even when they should block */
	if (af->l_whence != 0 || af->l_start != 0 || af->l_len != 0) {
	    DoLockWarning(avc, acred);
	    code = 0;
	    goto done;
	}
	/* otherwise we can turn this into a whole-file flock */
	if (af->l_type == F_RDLCK)
	    code = LOCK_SH;
	else if (af->l_type == F_WRLCK)
	    code = LOCK_EX;
	else if (af->l_type == F_UNLCK)
	    code = LOCK_UN;
	else {
	    code = EINVAL;	/* unknown lock type */
	    goto done;
	}
	/* F_SETLK (not F_SETLKW) means don't block waiting for the lock */
	if (((acmd == F_SETLK)
#if defined(AFS_SGI_ENV)
	     || (acmd == F_RSETLK)
#endif
	    ) && code != LOCK_UN)
	    code |= LOCK_NB;	/* non-blocking, s.v.p. */
#if defined(AFS_DARWIN_ENV)
	code = HandleFlock(avc, code, treq, clid, 0 /*!onlymine */ );
#elif defined(AFS_SGI_ENV)
	AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
	code = HandleFlock(avc, code, treq, clid, 0 /*!onlymine */ );
	AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#else
	code = HandleFlock(avc, code, treq, 0, 0 /*!onlymine */ );
#endif
	code = afs_CheckCode(code, treq, 3);	/* defeat AIX -O bug */
	goto done;
    }
    code = EINVAL;
  done:
    afs_PutFakeStat(&fakestate);
    AFS_DISCON_UNLOCK();
    afs_DestroyReq(treq);
    return code;
}
/*
 * Get a description of the first lock which would
 * block the lock specified.  If the specified lock
 * would succeed, fill in the lock structure with 'F_UNLCK'.
 *
 * To do that, we have to ask the server for the lock
 * count if:
 *    1. The file is not locked by this machine.
 *    2. Asking for write lock, and only the current
 *       PID has the file read locked.
 */
static int
HandleGetLock(struct vcache *avc, struct AFS_FLOCK *af,
	      struct vrequest *areq, int clid)
{
    afs_int32 code;
    struct AFS_FLOCK flock;

    lockIdSet(&flock, NULL, clid);	/* our own holder identity */

    ObtainWriteLock(&avc->lock, 122);
    if (avc->flockCount == 0) {
	/*
	 * We don't know ourselves, so ask the server. Unfortunately, we
	 * don't know the pid.  Not even the server knows the pid.  Besides,
	 * the process with the lock is on another machine
	 */
	code = GetFlockCount(avc, areq);
	if (code == 0 || (af->l_type == F_RDLCK && code > 0)) {
	    af->l_type = F_UNLCK;
	    goto unlck_leave;
	}
	if (code > 0)
	    af->l_type = F_RDLCK;
	else
	    af->l_type = F_WRLCK;

	af->l_pid = 0;		/* remote holder: pid unknowable */
#if defined(AFS_HAVE_FLOCK_SYSID)
	af->l_sysid = 0;
#endif
	goto done;
    }

    if (af->l_type == F_RDLCK) {
	/*
	 * We want a read lock.  If there are only
	 * read locks, or we are the one with the
	 * write lock, say it is unlocked.
	 */
	if (avc->flockCount > 0 ||	/* only read locks */
	    !lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    af->l_type = F_UNLCK;
	    goto unlck_leave;
	}

	/* one write lock, but who? */
	af->l_type = F_WRLCK;	/* not us, so lock would block */
	if (avc->slocks) {	/* we know who, so tell */
	    af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
	    af->l_sysid = avc->slocks->sysid;
#endif
	} else {
	    af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
	    af->l_sysid = 0;
#endif
	}
	goto done;
    }

    /*
     * Ok, we want a write lock.  If there is a write lock
     * already, and it is not this process, we fail.
     */
    if (avc->flockCount < 0) {
	if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    af->l_type = F_WRLCK;
	    if (avc->slocks) {
		af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = avc->slocks->sysid;
#endif
	    } else {
		af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = 0;
#endif
	    }
	    goto done;
	}
	/* we are the one with the write lock */
	af->l_type = F_UNLCK;
	goto unlck_leave;
    }

    /*
     * Want a write lock, and we know there are read locks.
     * If there is more than one, or it isn't us, we cannot lock.
     */
    if ((avc->flockCount > 1)
	|| lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	struct SimpleLocks *slp;

	af->l_type = F_RDLCK;
	af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
	af->l_sysid = 0;
#endif
	/* find a pid that isn't our own */
	for (slp = avc->slocks; slp; slp = slp->next) {
	    if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
		af->l_pid = slp->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = avc->slocks->sysid;
#endif
		break;
	    }
	}
	goto done;
    }

    /* NOTE(review): the two passages below repeat the two checks just
     * above verbatim.  As written they are reachable but redundant
     * (both conditions were already false to get here) -- looks like a
     * historical duplication; confirm against upstream before removing. */
    /*
     * Ok, we want a write lock.  If there is a write lock
     * already, and it is not this process, we fail.
     */
    if (avc->flockCount < 0) {
	if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    af->l_type = F_WRLCK;
	    if (avc->slocks) {
		af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = avc->slocks->sysid;
#endif
	    } else {
		af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = 0;
#endif
	    }
	    goto done;
	}
	/* we are the one with the write lock */
	af->l_type = F_UNLCK;
	goto unlck_leave;
    }

    /*
     * Want a write lock, and we know there are read locks.
     * If there is more than one, or it isn't us, we cannot lock.
     */
    if ((avc->flockCount > 1)
	|| lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	struct SimpleLocks *slp;
	af->l_type = F_RDLCK;
	af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
	af->l_sysid = 0;
#endif
	/* find a pid that isn't our own */
	for (slp = avc->slocks; slp; slp = slp->next) {
	    if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
		af->l_pid = slp->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = avc->slocks->sysid;
#endif
		break;
	    }
	}
	goto done;
    }

    /*
     * Want a write lock, and there is just one read lock, and it
     * is this process with a read lock.  Ask the server if there
     * are any more processes with the file locked.
     */
    code = GetFlockCount(avc, areq);
    if (code == 0 || code == 1) {
	af->l_type = F_UNLCK;
	goto unlck_leave;
    }
    if (code > 0)
	af->l_type = F_RDLCK;
    else
	af->l_type = F_WRLCK;
    af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
    af->l_sysid = 0;
#endif

  done:
    af->l_whence = 0;
    af->l_start = 0;
    af->l_len = 0;		/* to end of file */

  unlck_leave:
    ReleaseWriteLock(&avc->lock);
    return 0;
}
/* Get the 'flock' count from the server.  This comes back in a 'spare'
 * field from a GetStatus RPC. If we have any problems with the RPC,
 * we lie and say the file is unlocked.  If we ask any 'old' fileservers,
 * the spare field will be a zero, saying the file is unlocked.  This is
 * OK, as a further 'lock' request will do the right thing.
 */
static int
GetFlockCount(struct vcache *avc, struct vrequest *areq)
{
    struct afs_conn *tc;
    afs_int32 code;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct rx_connection *rxconn;
    int temp;
    XSTATS_DECLS;
    /* remember whether O_NONBLOCK was already set, then force it on for
     * the duration of this RPC */
    temp = areq->flags & O_NONBLOCK;
    areq->flags |= O_NONBLOCK;

    /* If we're disconnected, lie and say that we've got no locks. Ick */
    /* NOTE(review): this early return leaves the O_NONBLOCK bit set on
     * areq; presumably harmless, but confirm against callers. */
    if (AFS_IS_DISCONNECTED)
	return 0;

    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
	if (tc) {
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_FetchStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
				  &OutStatus, &CallBack, &tsync);
	    RX_AFS_GLOCK();
	    XSTATS_END_TIME;
	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
	      SHARED_LOCK, NULL));

    /* NOTE(review): restoring only when 'temp' was set looks inverted
     * (one would expect to clear the bit when it was NOT set on entry);
     * kept as-is -- verify against upstream history before changing. */
    if (temp)
	areq->flags &= ~O_NONBLOCK;

    if (code) {
	return (0);		/* failed, say it is 'unlocked' */
    } else {
	return ((int)OutStatus.lockCount);
    }
}