Merge 1.8.0~pre4 packaging into master
[pkg-k5-afs_openafs.git] / src / afs / VNOPS / afs_vnop_write.c
blobbe6c63adf6abf47214c3806da78147f8048d873e
/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
/*
 * Implements:
 * afs_write
 * afs_UFSWriteUIO
 * afs_StoreOnLastReference
 * afs_close
 * afs_fsync
 */
#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* Afs-based standard headers */
#include "afs/afs_stats.h"	/* statistics */
#include "afs/afs_cbqueue.h"
#include "afs/nfsclient.h"
#include "afs/afs_osidnlc.h"

extern unsigned char *afs_indexFlags;
33 /* Called by all write-on-close routines: regular afs_close,
34 * store via background daemon and store via the
35 * afs_FlushActiveVCaches routine (when CCORE is on).
36 * avc->lock must be write-locked.
38 int
39 afs_StoreOnLastReference(struct vcache *avc,
40 struct vrequest *treq)
42 int code = 0;
44 AFS_STATCNT(afs_StoreOnLastReference);
45 /* if CCore flag is set, we clear it and do the extra decrement
46 * ourselves now. If we're called by the CCore clearer, the CCore
47 * flag will already be clear, so we don't have to worry about
48 * clearing it twice. */
49 if (avc->f.states & CCore) {
50 afs_ucred_t *cred;
52 avc->f.states &= ~CCore;
53 #if defined(AFS_SGI_ENV)
54 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
55 #endif
56 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
57 * depending on the flags the file was opened with. So, if you make any
58 * changes to the way the execsOrWriters flag is handled check with the
59 * top level code. */
60 avc->opens--;
61 avc->execsOrWriters--;
62 AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose) */
63 cred = (afs_ucred_t *)avc->linkData; /* "crheld" in afs_FakeClose */
64 crfree(cred);
65 avc->linkData = NULL;
68 if (!AFS_IS_DISCONNECTED) {
69 /* Connected. */
71 /* Now, send the file back. Used to require 0 writers left, but now do
72 * it on every close for write, since two closes in a row are harmless
73 * since first will clean all chunks, and second will be noop. Note that
74 * this will also save confusion when someone keeps a file open
75 * inadvertently, since with old system, writes to the server would never
76 * happen again.
78 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
80 * We have to do these after the above store in done: in some systems
81 * like aix they'll need to flush all the vm dirty pages to the disk via
82 * the strategy routine. During that all procedure (done under no avc
83 * locks) opens, refcounts would be zero, since it didn't reach the
84 * afs_{rd,wr} routines which means the vcache is a perfect candidate
85 * for flushing!
87 } else if (AFS_IS_DISCON_RW) {
88 afs_DisconAddDirty(avc, VDisconWriteClose, 0);
89 } /* if not disconnected */
91 #if defined(AFS_SGI_ENV)
92 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
93 #endif
95 avc->opens--;
96 avc->execsOrWriters--;
97 return code;
101 afs_UFSWriteUIO(struct vcache *avc, afs_dcache_id_t *inode, struct uio *tuiop)
103 struct osi_file *tfile;
104 int code;
106 tfile = (struct osi_file *)osi_UFSOpen(inode);
107 if (!tfile)
108 return -1;
110 #if defined(AFS_AIX41_ENV)
111 AFS_GUNLOCK();
112 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, tuiop, NULL, NULL,
113 NULL, afs_osi_credp);
114 AFS_GLOCK();
115 #elif defined(AFS_AIX32_ENV)
116 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, tuiop, NULL, NULL);
117 #elif defined(AFS_AIX_ENV)
118 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) &offset,
119 tuiop, NULL, NULL, -1);
120 #elif defined(AFS_SUN5_ENV)
121 AFS_GUNLOCK();
122 # ifdef AFS_SUN510_ENV
123 VOP_RWLOCK(tfile->vnode, 1, NULL);
124 code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp, NULL);
125 VOP_RWUNLOCK(tfile->vnode, 1, NULL);
126 # else
127 VOP_RWLOCK(tfile->vnode, 1);
128 code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
129 VOP_RWUNLOCK(tfile->vnode, 1);
130 # endif
131 AFS_GLOCK();
132 if (code == ENOSPC)
133 afs_warnuser
134 ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
135 #elif defined(AFS_SGI_ENV)
136 AFS_GUNLOCK();
137 avc->f.states |= CWritingUFS;
138 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
139 AFS_VOP_WRITE(tfile->vnode, tuiop, IO_ISLOCKED, afs_osi_credp, code);
140 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
141 avc->f.states &= ~CWritingUFS;
142 AFS_GLOCK();
143 #elif defined(AFS_HPUX100_ENV)
145 AFS_GUNLOCK();
146 code = VOP_RDWR(tfile->vnode, tuiop, UIO_WRITE, 0, afs_osi_credp);
147 AFS_GLOCK();
149 #elif defined(AFS_LINUX20_ENV)
150 AFS_GUNLOCK();
151 code = osi_rdwr(tfile, tuiop, UIO_WRITE);
152 AFS_GLOCK();
153 #elif defined(AFS_DARWIN80_ENV)
154 AFS_GUNLOCK();
155 code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
156 AFS_GLOCK();
157 #elif defined(AFS_DARWIN_ENV)
158 AFS_GUNLOCK();
159 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
160 code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
161 VOP_UNLOCK(tfile->vnode, 0, current_proc());
162 AFS_GLOCK();
163 #elif defined(AFS_FBSD80_ENV)
164 AFS_GUNLOCK();
165 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
166 code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
167 VOP_UNLOCK(tfile->vnode, 0);
168 AFS_GLOCK();
169 #elif defined(AFS_FBSD_ENV)
170 AFS_GUNLOCK();
171 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
172 code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
173 VOP_UNLOCK(tfile->vnode, 0, curthread);
174 AFS_GLOCK();
175 #elif defined(AFS_NBSD_ENV)
176 AFS_GUNLOCK();
177 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
178 code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
179 #if defined(AFS_NBSD60_ENV)
180 VOP_UNLOCK(tfile->vnode);
181 #else
182 VOP_UNLOCK(tfile->vnode, 0);
183 #endif
184 AFS_GLOCK();
185 #elif defined(AFS_XBSD_ENV)
186 AFS_GUNLOCK();
187 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
188 code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
189 VOP_UNLOCK(tfile->vnode, 0, curproc);
190 AFS_GLOCK();
191 #else
192 # ifdef AFS_HPUX_ENV
193 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
194 # endif
195 code = VOP_RDWR(tfile->vnode, tuiop, UIO_WRITE, 0, afs_osi_credp);
196 #endif
197 osi_UFSClose(tfile);
199 return code;
202 /* called on writes */
204 afs_write(struct vcache *avc, struct uio *auio, int aio,
205 afs_ucred_t *acred, int noLock)
207 afs_size_t totalLength;
208 afs_size_t transferLength;
209 afs_size_t filePos;
210 afs_size_t offset, len;
211 afs_int32 tlen;
212 afs_int32 trimlen;
213 afs_int32 startDate;
214 afs_int32 max;
215 struct dcache *tdc;
216 #ifdef _HIGHC_
217 volatile
218 #endif
219 afs_int32 error;
220 #if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
221 struct vnode *vp = AFSTOV(avc);
222 #endif
223 struct uio *tuiop = NULL;
224 afs_int32 code;
225 struct vrequest *treq = NULL;
227 AFS_STATCNT(afs_write);
229 if (avc->vc_error)
230 return avc->vc_error;
232 if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW)
233 return ENETDOWN;
235 startDate = osi_Time();
236 if ((code = afs_CreateReq(&treq, acred)))
237 return code;
238 /* otherwise we read */
239 totalLength = AFS_UIO_RESID(auio);
240 filePos = AFS_UIO_OFFSET(auio);
241 error = 0;
242 transferLength = 0;
243 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
244 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
245 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
246 ICL_HANDLE_OFFSET(avc->f.m.Length));
247 if (!noLock) {
248 afs_MaybeWakeupTruncateDaemon();
249 ObtainWriteLock(&avc->lock, 556);
251 #if defined(AFS_SGI_ENV)
253 off_t diff;
255 * afs_xwrite handles setting m.Length
256 * and handles APPEND mode.
257 * Since we are called via strategy, we need to trim the write to
258 * the actual size of the file
260 osi_Assert(filePos <= avc->f.m.Length);
261 diff = avc->f.m.Length - filePos;
262 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
263 totalLength = AFS_UIO_RESID(auio);
265 #else
266 if (aio & IO_APPEND) {
267 /* append mode, start it at the right spot */
268 #if defined(AFS_SUN5_ENV)
269 auio->uio_loffset = 0;
270 #endif
271 filePos = avc->f.m.Length;
272 AFS_UIO_SETOFFSET(auio, avc->f.m.Length);
274 #endif
276 * Note that we use startDate rather than calling osi_Time() here.
277 * This is to avoid counting lock-waiting time in file date (for ranlib).
279 avc->f.m.Date = startDate;
281 #if defined(AFS_HPUX_ENV)
282 #if defined(AFS_HPUX101_ENV)
283 if ((totalLength + filePos) >> 9 >
284 p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
285 #else
286 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
287 #endif
288 if (!noLock)
289 ReleaseWriteLock(&avc->lock);
290 afs_DestroyReq(treq);
291 return (EFBIG);
293 #endif
294 #if defined(AFS_VM_RDWR_ENV) && !defined(AFS_FAKEOPEN_ENV)
296 * If write is implemented via VM, afs_FakeOpen() is called from the
297 * high-level write op.
299 if (avc->execsOrWriters <= 0) {
300 afs_warn("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
301 avc->execsOrWriters);
303 #else
304 afs_FakeOpen(avc);
305 #endif
306 avc->f.states |= CDirty;
308 while (totalLength > 0) {
309 tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, treq,
310 noLock);
311 if (!tdc) {
312 error = EIO;
313 break;
315 len = totalLength; /* write this amount by default */
316 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
317 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
318 if (max <= len + offset) { /*if we'd go past the end of this chunk */
319 /* it won't all fit in this chunk, so write as much
320 * as will fit */
321 len = max - offset;
324 if (tuiop)
325 afsio_free(tuiop);
326 trimlen = len;
327 tuiop = afsio_partialcopy(auio, trimlen);
328 AFS_UIO_SETOFFSET(tuiop, offset);
330 code = (*(afs_cacheType->vwriteUIO))(avc, &tdc->f.inode, tuiop);
332 if (code) {
333 void *cfile;
335 error = code;
336 ZapDCE(tdc); /* bad data */
337 cfile = afs_CFileOpen(&tdc->f.inode);
338 afs_CFileTruncate(cfile, 0);
339 afs_CFileClose(cfile);
340 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
342 afs_stats_cmperf.cacheCurrDirtyChunks--;
343 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
344 ReleaseWriteLock(&tdc->lock);
345 afs_PutDCache(tdc);
346 break;
348 /* otherwise we've written some, fixup length, etc and continue with next seg */
349 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
350 tlen = len;
351 afsio_skip(auio, tlen); /* advance auio over data written */
352 /* compute new file size */
353 if (offset + len > tdc->f.chunkBytes) {
354 afs_int32 tlength = offset + len;
355 afs_AdjustSize(tdc, tlength);
356 if (tdc->validPos < filePos + len)
357 tdc->validPos = filePos + len;
359 totalLength -= len;
360 transferLength += len;
361 filePos += len;
362 #if defined(AFS_SGI_ENV)
363 /* afs_xwrite handles setting m.Length */
364 osi_Assert(filePos <= avc->f.m.Length);
365 #else
366 if (filePos > avc->f.m.Length) {
367 if (AFS_IS_DISCON_RW)
368 afs_PopulateDCache(avc, filePos, treq);
369 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
370 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
371 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
372 ICL_HANDLE_OFFSET(filePos));
373 avc->f.m.Length = filePos;
374 #if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
375 vnode_pager_setsize(vp, filePos);
376 #endif
378 #endif
379 ReleaseWriteLock(&tdc->lock);
380 afs_PutDCache(tdc);
381 #if !defined(AFS_VM_RDWR_ENV)
383 * If write is implemented via VM, afs_DoPartialWrite() is called from
384 * the high-level write op.
386 if (!noLock) {
387 code = afs_DoPartialWrite(avc, treq);
388 if (code) {
389 error = code;
390 break;
393 #endif
395 #if !defined(AFS_VM_RDWR_ENV) || defined(AFS_FAKEOPEN_ENV)
396 afs_FakeClose(avc, acred);
397 #endif
398 error = afs_CheckCode(error, treq, 7);
399 /* This set is here so we get the CheckCode. */
400 if (error && !avc->vc_error)
401 avc->vc_error = error;
402 if (!noLock)
403 ReleaseWriteLock(&avc->lock);
404 if (tuiop)
405 afsio_free(tuiop);
407 #ifndef AFS_VM_RDWR_ENV
409 * If write is implemented via VM, afs_fsync() is called from the high-level
410 * write op.
412 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
413 if (noLock && (aio & IO_SYNC)) {
414 #else
415 #ifdef AFS_HPUX_ENV
416 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
417 * we're doing them because the file was opened with O_SYNCIO specified,
418 * we have to look in the u area. No single mechanism here!!
420 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
421 #else
422 if (noLock && (aio & FSYNC)) {
423 #endif
424 #endif
425 if (!AFS_NFSXLATORREQ(acred))
426 afs_fsync(avc, acred);
428 #endif
429 afs_DestroyReq(treq);
430 return error;
433 /* do partial write if we're low on unmodified chunks */
435 afs_DoPartialWrite(struct vcache *avc, struct vrequest *areq)
437 afs_int32 code;
439 if (afs_stats_cmperf.cacheCurrDirtyChunks <=
440 afs_stats_cmperf.cacheMaxDirtyChunks
441 || AFS_IS_DISCONNECTED)
442 return 0; /* nothing to do */
443 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
444 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
445 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
447 #if defined(AFS_SUN5_ENV)
448 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
449 #else
450 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
451 #endif
452 return code;
455 /* handle any closing cleanup stuff */
457 #if defined(AFS_SGI65_ENV)
458 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
459 afs_ucred_t *acred)
460 #elif defined(AFS_SGI64_ENV)
461 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
462 off_t offset, afs_ucred_t *acred, struct flid *flp)
463 #elif defined(AFS_SGI_ENV)
464 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose
465 off_t offset, afs_ucred_t *acred)
466 #elif defined(AFS_SUN5_ENV)
467 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, int count, offset_t offset,
468 afs_ucred_t *acred)
469 #else
470 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, afs_ucred_t *acred)
471 #endif
473 afs_int32 code;
474 afs_int32 code_checkcode = 0;
475 struct brequest *tb;
476 struct vrequest *treq = NULL;
477 #ifdef AFS_SGI65_ENV
478 struct flid flid;
479 #endif
480 struct afs_fakestat_state fakestat;
481 OSI_VC_CONVERT(avc);
483 AFS_STATCNT(afs_close);
484 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
485 ICL_TYPE_INT32, aflags);
486 code = afs_CreateReq(&treq, acred);
487 if (code)
488 return code;
489 afs_InitFakeStat(&fakestat);
490 code = afs_EvalFakeStat(&avc, &fakestat, treq);
491 if (code) {
492 afs_PutFakeStat(&fakestat);
493 afs_DestroyReq(treq);
494 return code;
496 AFS_DISCON_LOCK();
497 #ifdef AFS_SUN5_ENV
498 if (avc->flockCount) {
499 HandleFlock(avc, LOCK_UN, treq, 0, 1 /*onlymine */ );
501 #endif
502 #if defined(AFS_SGI_ENV)
503 if (!lastclose) {
504 afs_PutFakeStat(&fakestat);
505 AFS_DISCON_UNLOCK();
506 afs_DestroyReq(treq);
507 return 0;
509 /* unlock any locks for pid - could be wrong for child .. */
510 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
511 # ifdef AFS_SGI65_ENV
512 get_current_flid(&flid);
513 cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
514 HandleFlock(avc, LOCK_UN, treq, flid.fl_pid, 1 /*onlymine */ );
515 # else
516 # ifdef AFS_SGI64_ENV
517 cleanlocks((vnode_t *) avc, flp);
518 # else /* AFS_SGI64_ENV */
519 cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
520 # endif /* AFS_SGI64_ENV */
521 HandleFlock(avc, LOCK_UN, treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
522 # endif /* AFS_SGI65_ENV */
523 /* afs_chkpgoob will drop and re-acquire the global lock. */
524 afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
525 #elif defined(AFS_SUN5_ENV)
526 if (count > 1) {
527 /* The vfs layer may call this repeatedly with higher "count"; only
528 * on the last close (i.e. count = 1) we should actually proceed
529 * with the close. */
530 afs_PutFakeStat(&fakestat);
531 AFS_DISCON_UNLOCK();
532 afs_DestroyReq(treq);
533 return 0;
535 #else
536 if (avc->flockCount) { /* Release Lock */
537 HandleFlock(avc, LOCK_UN, treq, 0, 1 /*onlymine */ );
539 #endif
540 if (aflags & (FWRITE | FTRUNC)) {
541 if (afs_BBusy() || (AFS_NFSXLATORREQ(acred)) || AFS_IS_DISCONNECTED) {
542 /* do it yourself if daemons are all busy */
543 ObtainWriteLock(&avc->lock, 124);
544 code = afs_StoreOnLastReference(avc, treq);
545 ReleaseWriteLock(&avc->lock);
546 #if defined(AFS_SGI_ENV)
547 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
548 #endif
549 } else {
550 #if defined(AFS_SGI_ENV)
551 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
552 #endif
553 /* at least one daemon is idle, so ask it to do the store.
554 * Also, note that we don't lock it any more... */
555 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
556 (afs_size_t) afs_cr_uid(acred), (afs_size_t) 0,
557 (void *)0, (void *)0, (void *)0);
558 /* sleep waiting for the store to start, then retrieve error code */
559 while ((tb->flags & BUVALID) == 0) {
560 tb->flags |= BUWAIT;
561 afs_osi_Sleep(tb);
563 code = tb->code_raw;
564 code_checkcode = tb->code_checkcode;
565 afs_BRelease(tb);
568 /* VNOVNODE is "acceptable" error code from close, since
569 * may happen when deleting a file on another machine while
570 * it is open here. */
571 if (code == VNOVNODE)
572 code = 0;
574 /* Ensure last closer gets the error. If another thread caused
575 * DoPartialWrite and this thread does not actually store the data,
576 * it may not see the quota error.
578 ObtainWriteLock(&avc->lock, 406);
579 if (avc->vc_error) {
580 #ifdef AFS_AIX32_ENV
581 osi_ReleaseVM(avc, acred);
582 #endif
583 /* We don't know what the original raw error code was, so set
584 * 'code' to 0. But we have the afs_CheckCode-translated error
585 * code, so put that in code_checkcode. We cannot just set code
586 * to avc->vc_error, since vc_error is a checkcode-translated
587 * error code, and 'code' is supposed to be a raw error code. */
588 code = 0;
589 code_checkcode = avc->vc_error;
590 avc->vc_error = 0;
592 ReleaseWriteLock(&avc->lock);
594 /* some codes merit specific complaint */
595 if (code < 0) {
596 afs_warnuser("afs: failed to store file (network problems)\n");
598 #ifdef AFS_SUN5_ENV
599 else if (code == ENOSPC || code_checkcode == ENOSPC) {
600 afs_warnuser
601 ("afs: failed to store file (over quota or partition full)\n");
603 #else
604 else if (code == ENOSPC || code_checkcode == ENOSPC) {
605 afs_warnuser("afs: failed to store file (partition full)\n");
606 } else if (code == EDQUOT || code_checkcode == EDQUOT) {
607 afs_warnuser("afs: failed to store file (over quota)\n");
609 #endif
610 else if (code || code_checkcode)
611 afs_warnuser("afs: failed to store file (%d/%d)\n", code, code_checkcode);
613 /* finally, we flush any text pages lying around here */
614 hzero(avc->flushDV);
615 osi_FlushText(avc);
616 } else {
617 #if defined(AFS_SGI_ENV)
618 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
619 osi_Assert(avc->opens > 0);
620 #endif
621 /* file open for read */
622 ObtainWriteLock(&avc->lock, 411);
623 if (avc->vc_error) {
624 #ifdef AFS_AIX32_ENV
625 osi_ReleaseVM(avc, acred);
626 #endif
627 code = 0;
628 code_checkcode = avc->vc_error;
629 avc->vc_error = 0;
631 #if defined(AFS_FBSD80_ENV)
632 /* XXX */
633 if (!avc->opens) {
634 afs_int32 opens, is_free, is_gone, is_doomed, iflag;
635 struct vnode *vp = AFSTOV(avc);
636 VI_LOCK(vp);
637 is_doomed = vp->v_iflag & VI_DOOMED;
638 is_free = vp->v_iflag & VI_FREE;
639 is_gone = vp->v_iflag & VI_DOINGINACT;
640 iflag = vp->v_iflag;
641 VI_UNLOCK(vp);
642 opens = avc->opens;
643 afs_warn("afs_close avc %p vp %p opens %d free %d doinginact %d doomed %d iflag %d\n",
644 avc, vp, opens, is_free, is_gone, is_doomed, iflag);
646 #endif
647 avc->opens--;
648 ReleaseWriteLock(&avc->lock);
650 AFS_DISCON_UNLOCK();
651 afs_PutFakeStat(&fakestat);
653 if (code_checkcode) {
654 code = code_checkcode;
655 } else {
656 code = afs_CheckCode(code, treq, 5);
658 afs_DestroyReq(treq);
659 return code;
664 #if defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)
665 afs_fsync(OSI_VC_DECL(avc), int flag, afs_ucred_t *acred
666 # ifdef AFS_SGI65_ENV
667 , off_t start, off_t stop
668 # endif /* AFS_SGI65_ENV */
670 #else /* !SUN5 && !SGI */
671 afs_fsync(OSI_VC_DECL(avc), afs_ucred_t *acred)
672 #endif
674 afs_int32 code;
675 struct vrequest *treq = NULL;
676 OSI_VC_CONVERT(avc);
678 if (avc->vc_error)
679 return avc->vc_error;
681 #if defined(AFS_SUN5_ENV)
682 /* back out if called from NFS server */
683 if (curthread->t_flag & T_DONTPEND)
684 return 0;
685 #endif
687 AFS_STATCNT(afs_fsync);
688 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
689 if ((code = afs_CreateReq(&treq, acred)))
690 return code;
691 AFS_DISCON_LOCK();
692 #if defined(AFS_SGI_ENV)
693 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
694 if (flag & FSYNC_INVAL)
695 osi_VM_FSyncInval(avc);
696 #endif /* AFS_SGI_ENV */
698 ObtainSharedLock(&avc->lock, 18);
699 code = 0;
700 if (avc->execsOrWriters > 0) {
701 if (!AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
702 /* Your average flush. */
704 /* put the file back */
705 UpgradeSToWLock(&avc->lock, 41);
706 code = afs_StoreAllSegments(avc, treq, AFS_SYNC);
707 ConvertWToSLock(&avc->lock);
708 } else {
709 UpgradeSToWLock(&avc->lock, 711);
710 afs_DisconAddDirty(avc, VDisconWriteFlush, 1);
711 ConvertWToSLock(&avc->lock);
712 } /* if not disconnected */
713 } /* if (avc->execsOrWriters > 0) */
715 #if defined(AFS_SGI_ENV)
716 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
717 if (code == VNOVNODE) {
718 /* syncing an unlinked file! - non-informative to pass an errno
719 * 102 (== VNOVNODE) to user
721 code = ENOENT;
723 #endif
724 AFS_DISCON_UNLOCK();
725 code = afs_CheckCode(code, treq, 33);
726 afs_DestroyReq(treq);
727 ReleaseSharedLock(&avc->lock);
728 return code;