2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
19 #include <afsconfig.h>
20 #include "afs/param.h"
23 #include "afs/sysincludes.h" /* Standard vendor system headers */
24 #include "afsincludes.h" /* Afs-based standard headers */
25 #include "afs/afs_stats.h" /* statistics */
26 #include "afs/afs_cbqueue.h"
27 #include "afs/nfsclient.h"
28 #include "afs/afs_osidnlc.h"
31 extern unsigned char *afs_indexFlags
;
/* Called by all write-on-close routines: regular afs_close,
 * store via background daemon and store via the
 * afs_FlushActiveVCaches routine (when CCORE is on).
 * avc->lock must be write-locked.
 *
 * NOTE(review): this extract is incomplete -- the function's return type,
 * opening brace, local declarations (code, cred), the matching #else/#endif
 * lines and the final return are missing from the visible text; confirm
 * against the full source before relying on the structure shown here.
 */
afs_StoreOnLastReference(struct vcache *avc,
                         struct vrequest *treq)
    AFS_STATCNT(afs_StoreOnLastReference);

    /* if CCore flag is set, we clear it and do the extra decrement
     * ourselves now. If we're called by the CCore clearer, the CCore
     * flag will already be clear, so we don't have to worry about
     * clearing it twice. */
    if (avc->f.states & CCore) {
        avc->f.states &= ~CCore;
#if defined(AFS_SGI_ENV)
        osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
        /* WARNING: Our linux cm code treats the execsOrWriters counter differently
         * depending on the flags the file was opened with. So, if you make any
         * changes to the way the execsOrWriters flag is handled check with the
         * NOTE(review): rest of this warning comment is missing from the
         * extract; closed here so the following statements stay code. */
        avc->execsOrWriters--;
        AFS_RELE(AFSTOV(avc));  /* VN_HOLD at set CCore(afs_FakeClose) */
        cred = (afs_ucred_t *)avc->linkData;    /* "crheld" in afs_FakeClose */

    if (!AFS_IS_DISCONNECTED) {
        /* Now, send the file back. Used to require 0 writers left, but now do
         * it on every close for write, since two closes in a row are harmless
         * since first will clean all chunks, and second will be noop. Note that
         * this will also save confusion when someone keeps a file open
         * inadvertently, since with old system, writes to the server would never
         * NOTE(review): tail of this comment is missing from the extract. */
        code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
        /*
         * We have to do these after the above store in done: in some systems
         * like aix they'll need to flush all the vm dirty pages to the disk via
         * the strategy routine. During that all procedure (done under no avc
         * locks) opens, refcounts would be zero, since it didn't reach the
         * afs_{rd,wr} routines which means the vcache is a perfect candidate
         * NOTE(review): tail of this comment is missing from the extract.
         */
    } else if (AFS_IS_DISCON_RW) {
        /* disconnected read-write mode: remember the dirty close for replay */
        afs_DisconAddDirty(avc, VDisconWriteClose, 0);
    }                           /* if not disconnected */

#if defined(AFS_SGI_ENV)
    /* sanity: a write-on-close implies at least one opener and one writer */
    osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);

    avc->execsOrWriters--;
/*
 * Write the data described by tuiop into the cache file identified by
 * inode, using the platform's native vnode/file write primitive.  Each
 * preprocessor arm below is one platform's spelling of
 * "lock vnode, write, unlock vnode".
 *
 * NOTE(review): this extract is incomplete -- the return type, braces,
 * local declarations (code, and the offset/tuio names referenced in the
 * AIX and default arms), several #else/#endif lines and the osi_UFSOpen
 * failure handling are missing from the visible text.
 */
afs_UFSWriteUIO(struct vcache *avc, afs_dcache_id_t *inode, struct uio *tuiop)
    struct osi_file *tfile;

    /* open the backing cache file for this chunk */
    tfile = (struct osi_file *)osi_UFSOpen(inode);

#if defined(AFS_AIX41_ENV)
    code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, tuiop, NULL, NULL,
                     NULL, afs_osi_credp);
#elif defined(AFS_AIX32_ENV)
    code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, tuiop, NULL, NULL);
#elif defined(AFS_AIX_ENV)
    code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) &offset,
                     tuiop, NULL, NULL, -1);
#elif defined(AFS_SUN5_ENV)
# ifdef AFS_SUN510_ENV
    VOP_RWLOCK(tfile->vnode, 1, NULL);
    code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp, NULL);
    VOP_RWUNLOCK(tfile->vnode, 1, NULL);
    /* pre-Solaris-10 variant; NOTE(review): the enclosing # else is
     * missing from the extract */
    VOP_RWLOCK(tfile->vnode, 1);
    code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_RWUNLOCK(tfile->vnode, 1);
    /* NOTE(review): the next line is the argument of a warning call whose
     * name and surrounding ENOSPC check are missing from the extract */
        ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
#elif defined(AFS_SGI_ENV)
    /* flag the vcache while the UFS write is in flight */
    avc->f.states |= CWritingUFS;
    AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
    AFS_VOP_WRITE(tfile->vnode, tuiop, IO_ISLOCKED, afs_osi_credp, code);
    AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
    avc->f.states &= ~CWritingUFS;
#elif defined(AFS_HPUX100_ENV)
    code = VOP_RDWR(tfile->vnode, tuiop, UIO_WRITE, 0, afs_osi_credp);
#elif defined(AFS_LINUX20_ENV)
    code = osi_rdwr(tfile, tuiop, UIO_WRITE);
#elif defined(AFS_DARWIN80_ENV)
    code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
#elif defined(AFS_DARWIN_ENV)
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
    code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_UNLOCK(tfile->vnode, 0, current_proc());
#elif defined(AFS_FBSD80_ENV)
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
    code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_UNLOCK(tfile->vnode, 0);
#elif defined(AFS_FBSD_ENV)
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
    code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_UNLOCK(tfile->vnode, 0, curthread);
#elif defined(AFS_NBSD_ENV)
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
    code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
#if defined(AFS_NBSD60_ENV)
    /* NetBSD >= 6.0: VOP_UNLOCK takes no flags argument */
    VOP_UNLOCK(tfile->vnode);
    /* pre-6.0 variant; NOTE(review): the enclosing #else is missing from
     * the extract */
    VOP_UNLOCK(tfile->vnode, 0);
#elif defined(AFS_XBSD_ENV)
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
    code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_UNLOCK(tfile->vnode, 0, curproc);
    /* default (non-BSD) arm; NOTE(review): the preceding #else is missing
     * from the extract */
    tuio.uio_fpflags &= ~FSYNCIO;       /* don't do sync io */
    code = VOP_RDWR(tfile->vnode, tuiop, UIO_WRITE, 0, afs_osi_credp);
/* called on writes */
/*
 * Core write path: copy the data described by auio into cache chunks,
 * chunk by chunk, updating the cached file length and scheduling a
 * partial store when too many dirty chunks accumulate.
 *
 * NOTE(review): this extract is incomplete -- the return type, braces,
 * several local declarations (code, error, filePos, startDate, tdc, max,
 * trimlen, tlen, cfile, diff), parts of some statements (notably the tail
 * of the afs_ObtainDCacheForWriting call) and several #else/#endif lines
 * are missing from the visible text.
 */
afs_write(struct vcache *avc, struct uio *auio, int aio,
          afs_ucred_t *acred, int noLock)
    afs_size_t totalLength;     /* bytes requested by the caller */
    afs_size_t transferLength;  /* bytes actually transferred so far */
    afs_size_t offset, len;
#if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
    struct vnode *vp = AFSTOV(avc);
    struct uio *tuiop = NULL;
    struct vrequest *treq = NULL;

    AFS_STATCNT(afs_write);

    /* fail fast if the vcache already carries a sticky error */
    return avc->vc_error;

    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW)

    startDate = osi_Time();
    if ((code = afs_CreateReq(&treq, acred)))

    /* otherwise we read */
    totalLength = AFS_UIO_RESID(auio);
    filePos = AFS_UIO_OFFSET(auio);
    afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
               ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
               ICL_HANDLE_OFFSET(avc->f.m.Length));

    afs_MaybeWakeupTruncateDaemon();
    ObtainWriteLock(&avc->lock, 556);
#if defined(AFS_SGI_ENV)
    /*
     * afs_xwrite handles setting m.Length
     * and handles APPEND mode.
     * Since we are called via strategy, we need to trim the write to
     * the actual size of the file
     */
    osi_Assert(filePos <= avc->f.m.Length);
    diff = avc->f.m.Length - filePos;
    AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
    totalLength = AFS_UIO_RESID(auio);

    if (aio & IO_APPEND) {
        /* append mode, start it at the right spot */
#if defined(AFS_SUN5_ENV)
        auio->uio_loffset = 0;
        filePos = avc->f.m.Length;
        AFS_UIO_SETOFFSET(auio, avc->f.m.Length);

    /*
     * Note that we use startDate rather than calling osi_Time() here.
     * This is to avoid counting lock-waiting time in file date (for ranlib).
     */
    avc->f.m.Date = startDate;

#if defined(AFS_HPUX_ENV)
#if defined(AFS_HPUX101_ENV)
    /* enforce the process file-size resource limit (HP-UX variants) */
    if ((totalLength + filePos) >> 9 >
        p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
    if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
        ReleaseWriteLock(&avc->lock);
        afs_DestroyReq(treq);

#if defined(AFS_VM_RDWR_ENV) && !defined(AFS_FAKEOPEN_ENV)
    /*
     * If write is implemented via VM, afs_FakeOpen() is called from the
     * high-level write op.
     * NOTE(review): tail of this comment is missing from the extract. */
    if (avc->execsOrWriters <= 0) {
        afs_warn("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
                 avc->execsOrWriters);

    avc->f.states |= CDirty;

    /* copy one chunk's worth per iteration until the request is drained */
    while (totalLength > 0) {
        /* NOTE(review): the remaining arguments of this call are missing
         * from the extract */
        tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, treq,
        len = totalLength;      /* write this amount by default */
        offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
        max = AFS_CHUNKTOSIZE(tdc->f.chunk);    /* max size of this chunk */
        if (max <= len + offset) {      /*if we'd go past the end of this chunk */
            /* it won't all fit in this chunk, so write as much
             * NOTE(review): tail of this comment is missing from the extract. */

        tuiop = afsio_partialcopy(auio, trimlen);
        AFS_UIO_SETOFFSET(tuiop, offset);

        /* hand the copy off to the active cache backend's writer */
        code = (*(afs_cacheType->vwriteUIO))(avc, &tdc->f.inode, tuiop);

            ZapDCE(tdc);        /* bad data */
            cfile = afs_CFileOpen(&tdc->f.inode);
            afs_CFileTruncate(cfile, 0);
            afs_CFileClose(cfile);
            afs_AdjustSize(tdc, 0);     /* sets f.chunkSize to 0 */

            afs_stats_cmperf.cacheCurrDirtyChunks--;
            afs_indexFlags[tdc->index] &= ~IFDataMod;   /* so it does disappear */
            ReleaseWriteLock(&tdc->lock);

        /* otherwise we've written some, fixup length, etc and continue with next seg */
        len = len - AFS_UIO_RESID(tuiop);       /* compute amount really transferred */
        afsio_skip(auio, tlen); /* advance auio over data written */
        /* compute new file size */
        if (offset + len > tdc->f.chunkBytes) {
            afs_int32 tlength = offset + len;
            afs_AdjustSize(tdc, tlength);
            if (tdc->validPos < filePos + len)
                tdc->validPos = filePos + len;

        transferLength += len;
#if defined(AFS_SGI_ENV)
        /* afs_xwrite handles setting m.Length */
        osi_Assert(filePos <= avc->f.m.Length);
        if (filePos > avc->f.m.Length) {
            if (AFS_IS_DISCON_RW)
                afs_PopulateDCache(avc, filePos, treq);
            afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
                       __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
                       ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
                       ICL_HANDLE_OFFSET(filePos));
            avc->f.m.Length = filePos;
#if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
            /* keep the VM pager's notion of the file size in sync */
            vnode_pager_setsize(vp, filePos);

        ReleaseWriteLock(&tdc->lock);

#if !defined(AFS_VM_RDWR_ENV)
        /*
         * If write is implemented via VM, afs_DoPartialWrite() is called from
         * the high-level write op.
         * NOTE(review): tail of this comment is missing from the extract. */
            code = afs_DoPartialWrite(avc, treq);

#if !defined(AFS_VM_RDWR_ENV) || defined(AFS_FAKEOPEN_ENV)
    afs_FakeClose(avc, acred);
    error = afs_CheckCode(error, treq, 7);
    /* This set is here so we get the CheckCode. */
    if (error && !avc->vc_error)
        avc->vc_error = error;
    ReleaseWriteLock(&avc->lock);

#ifndef AFS_VM_RDWR_ENV
    /*
     * If write is implemented via VM, afs_fsync() is called from the high-level
     * NOTE(review): tail of this comment is missing from the extract. */
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
    if (noLock && (aio & IO_SYNC)) {
    /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
     * we're doing them because the file was opened with O_SYNCIO specified,
     * we have to look in the u area. No single mechanism here!!
     * NOTE(review): comment closed here; original tail missing. */
    if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
    if (noLock && (aio & FSYNC)) {
        if (!AFS_NFSXLATORREQ(acred))
            afs_fsync(avc, acred);

    afs_DestroyReq(treq);
/* do partial write if we're low on unmodified chunks */
/*
 * If the cache holds more dirty chunks than the configured maximum,
 * asynchronously store the file's segments back to the server to free
 * some up.  No-op while disconnected.
 *
 * NOTE(review): this extract is incomplete -- the return type, braces,
 * the declaration of code, the #else/#endif around the Solaris arm and
 * the final return are missing from the visible text.
 */
afs_DoPartialWrite(struct vcache *avc, struct vrequest *areq)
    if (afs_stats_cmperf.cacheCurrDirtyChunks <=
        afs_stats_cmperf.cacheMaxDirtyChunks
        || AFS_IS_DISCONNECTED)
        return 0;               /* nothing to do */
    /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
    afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));

#if defined(AFS_SUN5_ENV)
    /* Solaris also invalidates the VM pages it synced */
    code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
    code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
/* handle any closing cleanup stuff */
/*
 * Vnode close operation: release any file locks held by the closer and,
 * when the file was open for write/truncate, store the dirty data back
 * to the server -- either synchronously (daemons busy, NFS translator
 * request, or disconnected) or by queueing a BOP_STORE for a background
 * daemon and waiting for its result.
 *
 * NOTE(review): this extract is incomplete -- the return type, braces,
 * several local declarations (code, tb, flid), parts of some platform
 * signatures and many #else/#endif lines are missing from the visible
 * text; structure below follows the visible tokens only.
 */
#if defined(AFS_SGI65_ENV)
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
#elif defined(AFS_SGI64_ENV)
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
          off_t offset, afs_ucred_t *acred, struct flid *flp)
#elif defined(AFS_SGI_ENV)
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose
          off_t offset, afs_ucred_t *acred)
#elif defined(AFS_SUN5_ENV)
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, int count, offset_t offset,
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, afs_ucred_t *acred)
    afs_int32 code_checkcode = 0;
    struct vrequest *treq = NULL;
    struct afs_fakestat_state fakestat;

    AFS_STATCNT(afs_close);
    afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, aflags);
    code = afs_CreateReq(&treq, acred);

    afs_InitFakeStat(&fakestat);
    code = afs_EvalFakeStat(&avc, &fakestat, treq);
        afs_PutFakeStat(&fakestat);
        afs_DestroyReq(treq);

    if (avc->flockCount) {
        HandleFlock(avc, LOCK_UN, treq, 0, 1 /*onlymine */ );

#if defined(AFS_SGI_ENV)
        afs_PutFakeStat(&fakestat);
        afs_DestroyReq(treq);

    /* unlock any locks for pid - could be wrong for child .. */
    AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
# ifdef AFS_SGI65_ENV
    get_current_flid(&flid);
    cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
    HandleFlock(avc, LOCK_UN, treq, flid.fl_pid, 1 /*onlymine */ );
# ifdef AFS_SGI64_ENV
    cleanlocks((vnode_t *) avc, flp);
# else /* AFS_SGI64_ENV */
    cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
# endif /* AFS_SGI64_ENV */
    HandleFlock(avc, LOCK_UN, treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
# endif /* AFS_SGI65_ENV */
    /* afs_chkpgoob will drop and re-acquire the global lock. */
    afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
#elif defined(AFS_SUN5_ENV)
    /* The vfs layer may call this repeatedly with higher "count"; only
     * on the last close (i.e. count = 1) we should actually proceed
     * NOTE(review): tail of this comment is missing from the extract. */
        afs_PutFakeStat(&fakestat);
        afs_DestroyReq(treq);

    if (avc->flockCount) {      /* Release Lock */
        HandleFlock(avc, LOCK_UN, treq, 0, 1 /*onlymine */ );

    if (aflags & (FWRITE | FTRUNC)) {
        if (afs_BBusy() || (AFS_NFSXLATORREQ(acred)) || AFS_IS_DISCONNECTED) {
            /* do it yourself if daemons are all busy */
            ObtainWriteLock(&avc->lock, 124);
            code = afs_StoreOnLastReference(avc, treq);
            ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
            AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#if defined(AFS_SGI_ENV)
            AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
            /* at least one daemon is idle, so ask it to do the store.
             * Also, note that we don't lock it any more... */
            tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
                            (afs_size_t) afs_cr_uid(acred), (afs_size_t) 0,
                            (void *)0, (void *)0, (void *)0);
            /* sleep waiting for the store to start, then retrieve error code */
            while ((tb->flags & BUVALID) == 0) {

            code_checkcode = tb->code_checkcode;

        /* VNOVNODE is "acceptable" error code from close, since
         * may happen when deleting a file on another machine while
         * it is open here. */
        if (code == VNOVNODE)

        /* Ensure last closer gets the error. If another thread caused
         * DoPartialWrite and this thread does not actually store the data,
         * it may not see the quota error.
         * NOTE(review): comment closed here; original tail missing. */
            ObtainWriteLock(&avc->lock, 406);

            osi_ReleaseVM(avc, acred);

            /* We don't know what the original raw error code was, so set
             * 'code' to 0. But we have the afs_CheckCode-translated error
             * code, so put that in code_checkcode. We cannot just set code
             * to avc->vc_error, since vc_error is a checkcode-translated
             * error code, and 'code' is supposed to be a raw error code. */
            code_checkcode = avc->vc_error;
            ReleaseWriteLock(&avc->lock);

        /* some codes merit specific complaint */
            afs_warnuser("afs: failed to store file (network problems)\n");
        else if (code == ENOSPC || code_checkcode == ENOSPC) {
            /* NOTE(review): the warnuser call name preceding this argument
             * is missing from the extract */
                ("afs: failed to store file (over quota or partition full)\n");
        else if (code == ENOSPC || code_checkcode == ENOSPC) {
            afs_warnuser("afs: failed to store file (partition full)\n");
        } else if (code == EDQUOT || code_checkcode == EDQUOT) {
            afs_warnuser("afs: failed to store file (over quota)\n");
        else if (code || code_checkcode)
            afs_warnuser("afs: failed to store file (%d/%d)\n", code, code_checkcode);

        /* finally, we flush any text pages lying around here */
#if defined(AFS_SGI_ENV)
        AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
        osi_Assert(avc->opens > 0);
        /* file open for read */
        ObtainWriteLock(&avc->lock, 411);
            osi_ReleaseVM(avc, acred);
            code_checkcode = avc->vc_error;
#if defined(AFS_FBSD80_ENV)
        /* FreeBSD 8 debug aid: snapshot vnode state at close */
        afs_int32 opens, is_free, is_gone, is_doomed, iflag;
        struct vnode *vp = AFSTOV(avc);
        is_doomed = vp->v_iflag & VI_DOOMED;
        is_free = vp->v_iflag & VI_FREE;
        is_gone = vp->v_iflag & VI_DOINGINACT;
        afs_warn("afs_close avc %p vp %p opens %d free %d doinginact %d doomed %d iflag %d\n",
                 avc, vp, opens, is_free, is_gone, is_doomed, iflag);
        ReleaseWriteLock(&avc->lock);

    afs_PutFakeStat(&fakestat);

    /* a stored checkcode-translated error wins over the raw code */
    if (code_checkcode) {
        code = code_checkcode;
        code = afs_CheckCode(code, treq, 5);
    afs_DestroyReq(treq);
/*
 * Vnode fsync operation: if anyone still has the file open for write,
 * synchronously store all dirty segments back to the server (or record
 * a dirty-flush entry while disconnected read-write).
 *
 * NOTE(review): this extract is incomplete AND the function runs past the
 * end of the visible text -- the return type, braces, declaration of
 * code, several #else/#endif lines and the trailing return are not
 * visible; only the visible portion is annotated here.
 */
#if defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)
afs_fsync(OSI_VC_DECL(avc), int flag, afs_ucred_t *acred
# ifdef AFS_SGI65_ENV
          , off_t start, off_t stop
# endif /* AFS_SGI65_ENV */
#else /* !SUN5 && !SGI */
afs_fsync(OSI_VC_DECL(avc), afs_ucred_t *acred)
    struct vrequest *treq = NULL;

    /* fail fast if the vcache already carries a sticky error */
    return avc->vc_error;

#if defined(AFS_SUN5_ENV)
    /* back out if called from NFS server */
    if (curthread->t_flag & T_DONTPEND)

    AFS_STATCNT(afs_fsync);
    afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
    if ((code = afs_CreateReq(&treq, acred)))

#if defined(AFS_SGI_ENV)
    AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
    if (flag & FSYNC_INVAL)
        osi_VM_FSyncInval(avc);
#endif /* AFS_SGI_ENV */

    ObtainSharedLock(&avc->lock, 18);
    if (avc->execsOrWriters > 0) {
        if (!AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
            /* Your average flush. */

            /* put the file back */
            UpgradeSToWLock(&avc->lock, 41);
            code = afs_StoreAllSegments(avc, treq, AFS_SYNC);
            ConvertWToSLock(&avc->lock);
            UpgradeSToWLock(&avc->lock, 711);
            afs_DisconAddDirty(avc, VDisconWriteFlush, 1);
            ConvertWToSLock(&avc->lock);
        }                       /* if not disconnected */
    }                           /* if (avc->execsOrWriters > 0) */

#if defined(AFS_SGI_ENV)
    AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
    if (code == VNOVNODE) {
        /* syncing an unlinked file! - non-informative to pass an errno
         * 102 (== VNOVNODE) to user
         * NOTE(review): comment closed here; original tail missing. */
    code = afs_CheckCode(code, treq, 33);
    afs_DestroyReq(treq);
    ReleaseSharedLock(&avc->lock);