/*
 * Copyright 2000, International Business Machines Corporation and others.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/*
 * SGI specific vnodeops + other misc interface glue
 */
#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"    /* Standard vendor system headers */
#include "afsincludes.h"        /* Afs-based standard headers */
#include "afs/afs_stats.h"      /* statistics */
#include "sys/flock.h"
#include "afs/nfsclient.h"

/* AFSBSIZE must be at least the size of a page, else the client will hang.
 * For 64 bit platforms, the page size is more than 8K.
 */
#define AFSBSIZE    _PAGESZ
extern struct afs_exporter *root_exported;
extern void afs_chkpgoob(vnode_t *, pgno_t);
static void afs_strategy();
static int afs_xread(), afs_xwrite();
static int afs_xbmap(), afs_map(), afs_reclaim();
static int afs_addmap(), afs_delmap();
extern int afs_open(), afs_close(), afs_ioctl(), afs_getattr(), afs_setattr();
extern int afs_access(), afs_lookup();
extern int afs_create(), afs_remove(), afs_link(), afs_rename();
extern int afs_mkdir(), afs_rmdir(), afs_readdir();
extern int afs_symlink(), afs_readlink(), afs_fsync(), afs_fid(),
static int afs_seek(OSI_VC_DECL(a), off_t b, off_t * c);
extern int afs_xinactive();
extern void afs_xinactive();
extern void afs_rwlock(OSI_VN_DECL(vp), AFS_RWLOCK_T b);
extern void afs_rwunlock(OSI_VN_DECL(vp), AFS_RWLOCK_T b);
extern int afs_fid2();

static int afsrwvp(struct vcache *avc, struct uio *uio,
                   enum uio_rw rw, int ioflag,
                   struct cred *cr, struct flid *flp);
static void mp_afs_rwlock(OSI_VN_DECL(a), AFS_RWLOCK_T b);
static void mp_afs_rwunlock(OSI_VN_DECL(a), AFS_RWLOCK_T b);
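/*
 * Two vnodeops tables are built in this file: afs_lockedvnodeops holds the
 * real AFS entry points, while Afs_vnodeops (exported through afs_ops) is,
 * on AFS_SGI64_ENV builds, filled in near the end of this file with the
 * mp_* wrappers that dispatch into afs_lockedvnodeops (presumably taking
 * the global AFS lock around each call).
 */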
#ifdef AFS_SGI64_ENV
struct vnodeops afs_lockedvnodeops =
#else
struct vnodeops Afs_vnodeops =
#endif
{
    BHV_IDENTITY_INIT_POSITION(VNODE_POSITION_BASE),
    fs_nosys,           /* realvp */
    fs_noerr,           /* addmap - devices only */
    fs_noerr,           /* delmap - devices only */
    fs_nosys,           /* allocstore */
    fs_nosys,           /* fcntl */
    afs_reclaim,        /* reclaim */
    fs_nosys,           /* attr_get */
    fs_nosys,           /* attr_set */
    fs_nosys,           /* attr_remove */
    fs_nosys,           /* attr_list */
    (vop_link_removed_t) fs_noval,
    (vop_commit_t) fs_nosys,
    (vop_readbuf_t) fs_nosys,

struct vnodeops *afs_ops = &Afs_vnodeops;
afs_frlock(OSI_VN_DECL(vp), int cmd, struct flock *lfp, int flag,
    get_current_flid(&flid);
    /*
     * Since AFS doesn't support byte-wise locks (and simply says yes!),
     * we handle byte locking locally only.
     * This makes lots of things work much better.
     * XXX This doesn't properly handle moving from a
     * byte-wise lock up to a full file lock (we should
     * remove the byte locks...).  Of course, neither did the
     * regular AFS way...
     *
     * For GETLK we do a bit more - we first check any byte-wise
     * locks - if none, then check for full AFS file locks.
     */
    if (cmd == F_GETLK || lfp->l_whence != 0 || lfp->l_start != 0
        || (lfp->l_len != MAXEND && lfp->l_len != 0)) {
        AFS_RWLOCK(vp, VRWLOCK_WRITE);
            fs_frlock(OSI_VN_ARG(vp), cmd, lfp, flag, offset, vrwlock, cr);
        error = fs_frlock(vp, cmd, lfp, flag, offset, cr);
        AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
        if (error || cmd != F_GETLK)
        if (lfp->l_type != F_UNLCK)
            /* found some blocking lock */
        /* fall through to check for full AFS file locks */
    /* map BSD style to plain - we don't call reclock()
     * and it's only there that the difference is important
     */
    error = convoff(vp, lfp, 0, offset, SEEKLIMIT
                    , OSI_GET_CURRENT_CRED()
#endif /* AFS_SGI64_ENV */
    error = afs_lockctl(vp, lfp, cmd, cr, pid);
    error = afs_lockctl(vp, lfp, cmd, cr, OSI_GET_CURRENT_PID());
/*
 * We need to get the cache hierarchy right.
 * First comes the page cache - pages are hashed based on afs
 * vnode and offset.  It is important to have things hashed here
 * for the VM/paging system to work.
 * Note that the paging system calls VOP_READ with UIO_NOSPACE -
 * it simply requires that somehow the page is hashed
 * upon successful return.
 * This means in afs_read we
 * must call the 'chunk' code that handles page insertion.  In order
 * to actually get the data, 'chunk' calls the VOP_STRATEGY routine.
 * This is basically the standard afs_read routine - validating and
 * getting the info into the Dcache, then calling VOP_READ.
 * The only bad thing here is that by calling VOP_READ (and VOP_WRITE
 * to fill the cache) we will get 2 copies of these pages in the
 * page cache - one hashed on the afs vnode and one on the efs vnode.  This
 * is wasteful but does no harm.  A potential solution involves
 * causing an ASYNC flush of the newly fetched cache data and
 * doing direct I/O on the read side....
 */
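/*
 * In outline, the read path through the routines below is roughly:
 *
 *   afs_xread() -> afsrwvp(UIO_READ) -> chunkread()  (pages get hashed on
 *   the AFS vnode) -> VOP_STRATEGY == afs_strategy() -> afs_read()  (fills
 *   the data from the Dcache).
 *
 * The write path is symmetric: afs_xwrite() -> afsrwvp(UIO_WRITE) ->
 * getchunk()/chunkread() -> afs_strategy() -> afs_write().
 */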
afs_xread(OSI_VC_ARG(avc), uiop, ioflag, cr, flp)
afs_xread(OSI_VC_ARG(avc), uiop, ioflag, cr)
    osi_Assert(avc->v.v_count > 0);
    if (avc->v.v_type != VREG)
    if (!(ioflag & IO_ISLOCKED))
        AFS_RWLOCK((vnode_t *) avc, VRWLOCK_READ);
    code = afsrwvp(avc, uiop, UIO_READ, ioflag, cr, flp);
    if (!(ioflag & IO_ISLOCKED))
        AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_READ);
    code = afsrwvp(avc, uiop, UIO_READ, ioflag, cr);

afs_xwrite(OSI_VC_ARG(avc), uiop, ioflag, cr, flp)
afs_xwrite(OSI_VC_ARG(avc), uiop, ioflag, cr)
    osi_Assert(avc->v.v_count > 0);
    if (avc->v.v_type != VREG)
    if (ioflag & IO_APPEND)
        uiop->uio_offset = avc->f.m.Length;
    if (!(ioflag & IO_ISLOCKED))
        AFS_RWLOCK(((vnode_t *) avc), VRWLOCK_WRITE);
    code = afsrwvp(avc, uiop, UIO_WRITE, ioflag, cr, flp);
    if (!(ioflag & IO_ISLOCKED))
        AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
    code = afsrwvp(avc, uiop, UIO_WRITE, ioflag, cr);
static int prnra = 0;
static int acchk = 0;
static int acdrop = 0;
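/*
 * Debugging knobs, presumably settable from a kernel debugger: acchk makes
 * buffer I/O errors noisier via the cmn_err() calls below, prnra appears to
 * gate the "NRA:" read-ahead trace output, and acdrop is a related debug
 * switch.
 */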
afsrwvp(struct vcache *avc, struct uio *uio, enum uio_rw rw,
        struct cred *cr, struct flid *flp)
    struct vnode *vp = AFSTOV(avc);
    off_t bsize, rem, len;
    struct bmapval bmv[2];
    int nmaps, didFakeOpen = 0;
    struct vrequest treq;

    osi_Assert((valusema(&avc->vc_rwlock) <= 0)
               && (OSI_GET_LOCKID() == avc->vc_rwlockid));
    newoff = uio->uio_resid + uio->uio_offset;
    if (uio->uio_resid <= 0) {
    if (uio->uio_offset < 0 || newoff < 0) {
    if (ioflag & IO_DIRECT)
    if (rw == UIO_WRITE && vp->v_type == VREG && newoff > uio->uio_limit) {
    afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, ioflag, ICL_TYPE_INT32, rw, ICL_TYPE_INT32, 0);
    /* get a validated vcache entry */
    error = afs_InitReq(&treq, cr);
        return afs_CheckCode(error, NULL, 63);
    error = afs_VerifyVCache(avc, &treq);
        return afs_CheckCode(error, &treq, 51);
    /*
     * flush any stale pages - this will unmap
     * and invalidate all pages for vp (NOT writing them back!)
     */
    osi_FlushPages(avc, cr);
    if (cr && AFS_NFSXLATORREQ(cr) && rw == UIO_READ) {
            (avc, PRSFS_READ, &treq,
             CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ))
    /*
     * To handle anonymous calls to VOP_STRATEGY from afs_sync/sync/bdflush
     * we need better than the caller's credentials.  So we squirrel away
     * the last writer's credentials.
     */
    if (rw == UIO_WRITE || (rw == UIO_READ && avc->cred == NULL)) {
        ObtainWriteLock(&avc->lock, 92);
        ReleaseWriteLock(&avc->lock);
    /*
     * We have to bump the open/exwriters field here,
     * courtesy of the nfs xlator,
     * because there are no open/close nfs rpc's to call our afs_open/close.
     */
    if (root_exported && rw == UIO_WRITE) {
        ObtainWriteLock(&avc->lock, 234);
        ReleaseWriteLock(&avc->lock);
    if (rw == UIO_WRITE) {
        ObtainWriteLock(&avc->lock, 330);
        avc->f.states |= CDirty;
        ReleaseWriteLock(&avc->lock);
        /* If v_dpages is set SGI 5.3 will convert those pages to
         * B_DELWRI in chunkread and getchunk.  Write the pages out
         * before we trigger that behavior.  For 6.1, dirty pages stay
         * around too long and we should get rid of them as quickly
         * as possible.
         */
        while (VN_GET_DPAGES(vp))
    error = avc->vc_error;
    bsize = AFSBSIZE;       /* why not?? */
    off = uio->uio_offset % bsize;
    bn = BTOBBT(uio->uio_offset - off);
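    /*
     * At this point off is the byte offset into the current AFSBSIZE block
     * and bn is the corresponding address in IRIX "basic blocks" (BTOBBT
     * converts a byte count to 512-byte basic blocks, truncating).
     */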
    /*
     * decrease bsize - otherwise we will
     * get 'extra' pages in the cache for this
     * vnode that we would need to flush when
     * calling e.g. ptossvp.
     * So that we can use Length in ptossvp,
     * we make sure we never go further than the file size
     * rounded up to a page boundary.
     * That doesn't quite work, since we may get a page hashed to
     * the vnode w/o updating the length.  Thus we always use
     * MAXLONG in ptossvp to be safe.
     */
    if (rw == UIO_READ) {
        /*
         * read/paging in a normal file
         */
        rem = avc->f.m.Length - uio->uio_offset;
            /*
             * compute minimum of rest of block and rest of file
             */
            cnt = MIN(bsize - off, rem);
            osi_Assert((off + cnt) <= bsize);
            bsize = ctob(btoc(off + cnt));
            bmv[0].bn = bmv[0].offset = bn;
            bmv[0].bsize = bsize;
            bmv[0].pbsize = MIN(cnt, uio->uio_resid);
            bmv[0].pbdev = vp->v_rdev;
            bmv[0].pmp = uio->uio_pmp;
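            /*
             * bmv[0] now describes the chunk to read: bn/offset are the
             * starting basic-block address, bsize is the size of the whole
             * mapping in bytes (page-rounded above), and pbsize is the part
             * of it this request actually needs.
             */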
            /*
             * initiate read-ahead if it looks like
             * we are reading sequentially OR they want
             * more than one 'bsize' (==AFSBSIZE) worth
             * XXXHack - to avoid DELWRI buffers we can't
             * do read-ahead on any file that has potentially
             */
            if ((avc->lastr + BTOBB(AFSBSIZE) == bn
                 || uio->uio_resid > AFSBSIZE)
                && (!AFS_VN_MAPPED(vp))
#else /* AFS_SGI61_ENV */
                && ((vp->v_flag & VWASMAP) == 0)
#endif /* AFS_SGI61_ENV */
                bmv[1].bn = bmv[1].offset = bn + len;
                osi_Assert((BBTOB(bn + len) % bsize) == 0);
                acnt = MIN(bsize, rem);
                bsize = ctob(btoc(acnt));
                bmv[1].bsize = bsize;
                bmv[1].pbsize = acnt;
                bmv[1].pmp = uio->uio_pmp;
                bmv[1].pbdev = vp->v_rdev;
                        ("NRA:vp 0x%x lastr %d bn %d len %d cnt %d bsize %d rem %d resid %d\n",
                         vp, avc->lastr, bn, len, cnt, bsize, rem,
            bp = chunkread(vp, bmv, nmaps, cr);
            /*
             * If at a chunk boundary, start prefetch of next chunk.
             */
            if (counter == 0 || AFS_CHUNKOFFSET(off) == 0) {
                ObtainWriteLock(&avc->lock, 562);
                tdc = afs_FindDCache(avc, off);
                    if (!(tdc->mflags & DFNextStarted))
                        afs_PrefetchChunk(avc, tdc, cr, &treq);
                ReleaseWriteLock(&avc->lock);
        /*
         * writing a normal file
         */
        /*
         * Purge dirty chunks of file if there are too many dirty chunks.
         * Inside the write loop, we only do this at a chunk boundary.
         * Clean up partial chunk if necessary at end of loop.
         */
        if (counter > 0 && AFS_CHUNKOFFSET(uio->uio_offset) == 0) {
            ObtainWriteLock(&avc->lock, 90);
            error = afs_DoPartialWrite(avc, &treq);
                avc->f.states |= CDirty;
            ReleaseWriteLock(&avc->lock);
        cnt = MIN(bsize - off, uio->uio_resid);
        bsize = ctob(btoc(off + cnt));
        bmv[0].bsize = bsize;
        bmv[0].pmp = uio->uio_pmp;
            bp = getchunk(vp, bmv, cr);
            bp = chunkread(vp, bmv, 1, cr);
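        /*
         * Two ways to obtain the buffer on the write side: presumably
         * getchunk() when the request covers the whole mapping (no need to
         * read the old contents first) and chunkread() otherwise, so the
         * unmodified part of the chunk is filled in before we overwrite it
         * (a classic read-modify-write).
         */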
        avc->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
        if (bp->b_flags & B_ERROR) {
            /*
             * Since we compile -signed, b_error is a signed
             * char when it should be an unsigned char.
             * This can cause some error codes to be interpreted
             */
            error = (unsigned char)(bp->b_error);
            if (acchk && error) {
                cmn_err(CE_WARN, "bp 0x%x has error %d\n", bp, error);
        osi_Assert(bp->b_error == 0);
        if (uio->uio_segflg != UIO_NOSPACE)
            AFS_UIOMOVE(bp->b_un.b_addr + bmv[0].pboff, cnt, rw, uio, error);
        if (rw == UIO_READ || error) {
            if (bp->b_flags & B_DELWRI) {
            /*
             * m.Length is the maximum number of bytes known to be in the file.
             * Make sure it is at least as high as the last byte we just wrote.
             */
            if (avc->f.m.Length < uio->uio_offset) {
                ObtainWriteLock(&avc->lock, 235);
                avc->f.m.Length = uio->uio_offset;
                ReleaseWriteLock(&avc->lock);
            if (uio->uio_fmode & FSYNC) {
            } else if (off + cnt < bsize) {
                bawrite(bp);    /* was bdwrite */
                bp->b_flags |= B_AGE;
            /*
             * Since EIO on an unlinked file is non-intuitive - give some
             */
            if (avc->f.m.LinkCount == 0)
                       "AFS: Process pid %d write error %d writing to unlinked file.",
                       OSI_GET_CURRENT_PID(), error);
    } while (!error && uio->uio_resid > 0);
    afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
    if (rw == UIO_WRITE && error == 0 && (avc->f.states & CDirty)) {
        ObtainWriteLock(&avc->lock, 405);
        error = afs_DoPartialWrite(avc, &treq);
        ReleaseWriteLock(&avc->lock);
    if (((ioflag & IO_SYNC) || (ioflag & IO_DSYNC)) && (rw == UIO_WRITE)
        && !AFS_NFSXLATORREQ(cr)) {
        error = afs_fsync(avc, 0, cr
#else /* AFS_SGI61_ENV */
    if ((ioflag & IO_SYNC) && (rw == UIO_WRITE) && !AFS_NFSXLATORREQ(cr)) {
        error = afs_fsync(avc, 0, cr);
#endif /* AFS_SGI61_ENV */
        ObtainWriteLock(&avc->lock, 236);
        afs_FakeClose(avc, cr); /* XXXX For nfs trans XXXX */
        ReleaseWriteLock(&avc->lock);
    afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, ioflag, ICL_TYPE_INT32, rw, ICL_TYPE_INT32,
afs_xbmap(OSI_VC_ARG(avc), offset, count, flag, cr, bmv, nbmv)
    int bsize;          /* server's block size in bytes */
    off = offset % bsize;   /* offset into block */
    bmv->bn = BTOBBT(offset - off);
    bmv->offset = bmv->bn;
    rem = avc->f.m.Length - offset;
        cnt = MIN(bsize - off, rem);
    /*
     * It is benign to ignore *nbmv > 1, since it is only for requesting
     */
    /*
     * Don't map more than up to next page if at end of file
     * See comment in afsrwvp
     */
    osi_Assert((off + cnt) <= bsize);
    bsize = ctob(btoc(off + cnt));
    bmv->pbsize = MIN(cnt, count);
    bmv->pbdev = avc->v.v_rdev;
    bmv->length = BTOBBT(bsize);

/*
 * called out of chunkread from afs_xread & clusterwrite to push dirty
 * pages back - this routine
 * actually does the reading/writing by calling afs_read/afs_write.
 * bp points to a set of pages that have been inserted into
 * the page cache hashed on the afs vp.
 */
afs_strategy(OSI_VC_ARG(avc), bp)
    vnode_t *vp = (vnode_t *) avc;
    /*
     * We can't afford DELWRI buffers for 2 reasons:
     * 1) Since we can call underlying EFS, we can require a
     *    buffer to flush a buffer.  This leads to 2 potential
     *    recursions/deadlocks
     *      a) if all buffers are DELWRI afs buffers, then
     *         ngeteblk -> bwrite -> afs_strategy -> afs_write ->
     *         UFS_Write -> efs_write -> ngeteblk .... could
     *         recurse a long ways!
     *      b) brelse -> chunkhold which can call dchunkpush
     *         will look for any DELWRI buffers and call strategy
     *         on them.  This can then end up via UFS_Write
     *
     *   a) We never do bdwrite(s) on AFS buffers.
     *   b) We call pdflush with B_ASYNC
     *   c) in chunkhold where it can set a buffer DELWRI
     *      we immediately do a clusterwrite for AFS vp's
     * XXX Alas, 'c' got dropped in 5.1 so it's possible to get DELWRI
     * buffers if someone has mmap'ed the file and dirtied it, then
     * reads/faults it again.
     * Instead - wherever we call chunkread/getchunk we check for a
     * returned bp with DELWRI set, and write it out immediately.
     */
    if (CheckLock(&avc->lock) && VN_GET_DBUF(vp)) {
        printf("WARN: afs_strategy vp=%x, v_dbuf=%x bp=%x\n", vp,
               VN_GET_DBUF(vp), bp);
        bp->b_flags |= B_ERROR;
    if (bp->b_error != 0)
        printf("WARNING: afs_strategy3 vp=%x, bp=%x, err=%x\n", vp, bp,
    /*
     * To get credentials somewhat correct (we may be called from bdflush/
     * sync) we use saved credentials in Vcache.
     * We must hold them since someone else could change them.
     */
    ObtainReadLock(&avc->lock);
    if (bp->b_flags & B_READ) {
        if (BBTOB(bp->b_blkno) >= avc->f.m.Length) {
            /* we are responsible for zero'ing the page */
            memset(c, 0, bp->b_bcount);
            ReleaseReadLock(&avc->lock);
    } else if ((avc->f.states & CWritingUFS) && (bp->b_flags & B_DELWRI)) {
        ReleaseReadLock(&avc->lock);
    ReleaseReadLock(&avc->lock);
    aiovec.iov_base = bp_mapin(bp);
    uio->uio_iov = &aiovec;
    uio->uio_resid = aiovec.iov_len = bp->b_bcount;
    uio->uio_offset = BBTOB(bp->b_blkno);
    uio->uio_segflg = UIO_SYSSPACE;
    uio->uio_limit = RLIM_INFINITY; /* we checked the limit earlier */
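    /*
     * The buffer's pages are mapped into kernel address space (bp_mapin)
     * and a single-iovec, UIO_SYSSPACE uio is built over them, so the
     * regular afs_read/afs_write paths below can be reused to fill or
     * flush the pages.
     */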
    if (bp->b_flags & B_READ) {
        uio->uio_fmode = FREAD;
        error = afs_read(vp, uio, cr, 0);
        uio->uio_fmode = FWRITE;
        error = afs_write(vp, uio, 0, cr, 0);
    if (acchk && error) {
        cmn_err(CE_WARN, "vp 0x%x has error %d\n", vp, error);
        bp->b_flags |= B_ERROR;
        if ((uio->uio_fmode == FWRITE) && !avc->vc_error)
            avc->vc_error = error;

afs_seek(OSI_VC_ARG(avc), ooff, noffp)
    return *noffp < 0 ? EINVAL : 0;
#if !defined(AFS_SGI65_ENV)
/* Irix 6.5 uses addmap/delmap only for devices. */
afs_addmap(OSI_VC_ARG(avc), off, prp, addr, len, prot, maxprot, flags, cr)
    struct vnode *vp = AFSTOV(avc);
    if (vp->v_flag & VNOMAP)
    AFS_RWLOCK(vp, VRWLOCK_WRITE);
    if (avc->mapcnt == 0) {
        /* on first mapping add an open reference */
        ObtainWriteLock(&avc->lock, 237);
        avc->execsOrWriters++;
        ReleaseWriteLock(&avc->lock);
    avc->mapcnt += btoc(len);
    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
/*ARGSUSED*/ static int
afs_delmap(OSI_VC_ARG(avc), off, prp, addr, len, prot, maxprot, flags, acred)
    struct vnode *vp = AFSTOV(avc);
    struct vrequest treq;
    if (vp->v_flag & VNOMAP)
    AFS_RWLOCK(vp, VRWLOCK_WRITE);
    osi_Assert(avc->mapcnt > 0);
    avc->mapcnt -= btoc(len);
    osi_Assert(avc->mapcnt >= 0);
    if (avc->mapcnt == 0) {
        /* on last mapping push back and remove our reference */
        osi_Assert(avc->execsOrWriters > 0);
        osi_Assert(avc->opens > 0);
        if (avc->f.m.LinkCount == 0) {
            ObtainWriteLock(&avc->lock, 238);
            PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
            ReleaseWriteLock(&avc->lock);
        code = afs_InitReq(&treq, acred);
            code = afs_CheckCode(code, NULL, 64);
            AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
        } else if (afs_BBusy()) {
            /* do it yourself if daemons are all busy */
            ObtainWriteLock(&avc->lock, 239);
            code = afs_StoreOnLastReference(avc, &treq);
            ReleaseWriteLock(&avc->lock);
            /* BStore does CheckCode so we should also */
            /* VNOVNODE is an "acceptable" error code from close, since it
             * may happen when deleting a file on another machine while
             * it is open here. */
            if (code == VNOVNODE)
                afs_StoreWarn(code, avc->f.fid.Fid.Volume,  /* /dev/console */
            code = afs_CheckCode(code, &treq, 52);
            AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
            AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
            /* at least one daemon is idle, so ask it to do the store.
             * Also, note that we don't lock it any more... */
            tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
                            (afs_size_t) afs_cr_uid(acred), 0L, (void *)0,
                            (void *)0, (void *)0);
            /* sleep waiting for the store to start, then retrieve error code */
            while ((tb->flags & BUVALID) == 0) {
    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
#endif /* ! AFS_SGI65_ENV */
/*
 * Note - if mapping in an ELF interpreter, one can get called without vp
 * ever having been 'opened'.
 */
#ifdef AFS_SGI65_ENV
afs_map(OSI_VC_ARG(avc), off, len, prot, flags, cr, vpp)
afs_map(OSI_VC_ARG(avc), off, prp, addrp, len, prot, maxprot, flags, cr)
    struct pregion *prp;
    u_int prot, maxprot;
    OSI_VC_CONVERT(avc);
    struct vnode *vp = AFSTOV(avc);
    struct vrequest treq;

    /* get a validated vcache entry */
    error = afs_InitReq(&treq, cr);
        return afs_CheckCode(error, NULL, 65);
    error = afs_VerifyVCache(avc, &treq);
        return afs_CheckCode(error, &treq, 53);
    osi_FlushPages(avc, cr);    /* ensure old pages are gone */
#ifdef AFS_SGI65_ENV
    /* If the vnode is currently opened for write, there's the potential
     * that this mapping might (now or in the future) have PROT_WRITE.
     * So assume it does and we'll have to call afs_StoreOnLastReference.
     */
    AFS_RWLOCK(vp, VRWLOCK_WRITE);
    ObtainWriteLock(&avc->lock, 501);
    if (avc->execsOrWriters > 0) {
        avc->execsOrWriters++;
        avc->mapcnt++;      /* count eow's due to mappings. */
    ReleaseWriteLock(&avc->lock);
    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
    AFS_RWLOCK(vp, VRWLOCK_WRITE);
    fs_map_subr(vp, (off_t) avc->f.m.Length, (u_int) avc->f.m.Mode, off, prp,
                *addrp, len, prot, maxprot, flags, cr);
    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
#endif /* AFS_SGI65_ENV */
    afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vp,
#ifdef AFS_SGI65_ENV
               ICL_TYPE_POINTER, NULL,
               ICL_TYPE_POINTER, *addrp,
               ICL_TYPE_INT32, len, ICL_TYPE_INT32, off);
extern afs_rwlock_t afs_xvcache;
extern afs_lock_t afs_xdcache;
#ifdef AFS_SGI64_ENV

afs_xinactive(OSI_VC_ARG(avc), acred)
    struct ucred *acred;
    OSI_VC_CONVERT(avc);
    vnode_t *vp = (vnode_t *) avc;
    int mapcnt = avc->mapcnt;   /* We just clear off this many. */

    AFS_STATCNT(afs_inactive);
    if (!(vp->v_flag & VINACT) || (vp->v_count > 0)) {
        /* inactive was already done, or someone did a VN_HOLD; just return */
        vp->v_flag &= ~VINACT;
#ifdef AFS_SGI64_ENV
        return VN_INACTIVE_CACHE;
    osi_Assert((vp->v_flag & VSHARE) == 0);
    vp->v_flag &= ~VINACT;
    /* Removed broadcast to waiters, since no one ever will.  Only for vnodes
#ifdef AFS_SGI65_ENV
    /* In Irix 6.5, the last unmap of a dirty mmap'd file does not
     * get an explicit vnode op.  Instead we only find out at VOP_INACTIVE.
     */
    if (!afs_rwlock_nowait((vnode_t *) avc, VRWLOCK_WRITE)) {
        return VN_INACTIVE_CACHE;
    if (NBObtainWriteLock(&avc->lock, 502)) {
        AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
        return VN_INACTIVE_CACHE;
    if (avc->f.states & CUnlinked) {
        if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
            avc->f.states |= CUnlinkedDel;
            ReleaseWriteLock(&avc->lock);
            AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
            ReleaseWriteLock(&avc->lock);
            AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
            afs_remunlink(avc, 1);  /* ignore any return code */
        return VN_INACTIVE_CACHE;
    if ((avc->f.states & CDirty) || (avc->execsOrWriters > 0)) {
        /* File either already has dirty chunks (CDirty) or was mapped at
         * some time in its life with the potential for being written into.
         * Note that afs_close defers storebacks if the vnode's ref count
         */
        struct vrequest treq;
        if (!afs_InitReq(&treq, acred)) {
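            /*
             * Fold all the outstanding mapping references into a single
             * open/execsOrWriters reference, so afs_StoreOnLastReference()
             * below sees this as the last reference and pushes the dirty
             * data back to the server.
             */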
            avc->execsOrWriters -= mapcnt - 1;
            avc->opens -= mapcnt - 1;
            avc->mapcnt -= mapcnt;
            code = afs_StoreOnLastReference(avc, &treq);
            /* The following behavior mimics the behavior in afs_close. */
            if (code == VNOVNODE)
                          "AFS: Failed to store FID (%x:%lu.%lu.%lu) in VOP_INACTIVE, error = %d\n",
                          (int)(avc->f.fid.Cell) & 0xffffffff,
                          avc->f.fid.Fid.Volume, avc->f.fid.Fid.Vnode,
                          avc->f.fid.Fid.Unique, code);
                afs_InvalidateAllSegments(avc);
    code = (vp->v_count == 0);

    /* If the vnode is now in use by someone else, return early. */
        ReleaseWriteLock(&avc->lock);
        AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
        return VN_INACTIVE_CACHE;
    osi_Assert((avc->f.states & (CCore | CMAPPED)) == 0);
    ReleaseWriteLock(&avc->lock);
    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
    /*
     * If someone unlinked a file and this is the last hurrah -
     * nuke all the pages.
     */
    if (avc->f.m.LinkCount == 0) {
        PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
#ifndef AFS_SGI65_ENV
    osi_Assert(avc->mapcnt == 0);
    afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
    avc->f.states &= ~CDirty;   /* Give up on store-backs */
    if (avc->f.states & CUnlinked) {
        if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
            avc->f.states |= CUnlinkedDel;
            afs_remunlink(avc, 1);  /* ignore any return code */
#ifdef AFS_SGI64_ENV
    return VN_INACTIVE_CACHE;
afs_reclaim(OSI_VC_DECL(avc), int flag)
#ifdef AFS_SGI64_ENV
    /* Gets called via VOP_RECLAIM in afs_FlushVCache to clear repl_vnodeops */
    panic("afs_reclaim");
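/*
 * The per-vcache vc_rwlock semaphore used below is made recursive by hand:
 * the current holder is recorded in vc_rwlockid and re-entries by the same
 * lock holder are just counted in vc_locktrips, so only the final
 * afs_rwunlock() actually releases the semaphore.
 */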
afs_rwlock(OSI_VN_DECL(vp), AFS_RWLOCK_T flag)
    struct vcache *avc = VTOAFS(vp);

    if (OSI_GET_LOCKID() == avc->vc_rwlockid) {
        avc->vc_locktrips++;
    psema(&avc->vc_rwlock, PINOD);
    avc->vc_rwlockid = OSI_GET_LOCKID();

afs_rwunlock(OSI_VN_DECL(vp), AFS_RWLOCK_T flag)
    struct vcache *avc = VTOAFS(vp);

    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
    if (avc->vc_locktrips > 0) {
        --avc->vc_locktrips;
    avc->vc_rwlockid = OSI_NO_LOCKID;
    vsema(&avc->vc_rwlock);

/* The flag argument is for symmetry with the afs_rwlock and afs_rwunlock
 * calls.  SGI currently only uses the flag to assert if the unlock flag
 * does not match the corresponding lock flag.  But they may start using this
 * flag for a real rw lock at some time.
 */
afs_rwlock_nowait(vnode_t * vp, AFS_RWLOCK_T flag)
    struct vcache *avc = VTOAFS(vp);

    if (OSI_GET_LOCKID() == avc->vc_rwlockid) {
        avc->vc_locktrips++;
    if (cpsema(&avc->vc_rwlock)) {
        avc->vc_rwlockid = OSI_GET_LOCKID();
#if defined(AFS_SGI64_ENV) && defined(CKPT) && !defined(_R5000_CVT_WAR)
afs_fid2(OSI_VC_DECL(avc), struct fid *fidp)
    afs_fid2_t *afid = (afs_fid2_t *) fidp;
    OSI_VC_CONVERT(avc);

    osi_Assert(sizeof(fid_t) >= sizeof(afs_fid2_t));
    afid->af_len = sizeof(afs_fid2_t) - sizeof(afid->af_len);

    tcell = afs_GetCell(avc->f.fid.Cell, READ_LOCK);
    afid->af_cell = tcell->cellIndex & 0xffff;
    afs_PutCell(tcell, READ_LOCK);

    afid->af_volid = avc->f.fid.Fid.Volume;
    afid->af_vno = avc->f.fid.Fid.Vnode;
    afid->af_uniq = avc->f.fid.Fid.Unique;
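    /*
     * The afs_fid2_t packs the full AFS FID into a fixed-size fid for the
     * checkpoint/restart code: af_len excludes its own length field (the
     * usual fid convention), the cell is reduced to a 16-bit local cell
     * index, and the volume/vnode/uniquifier are copied through unchanged.
     */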
/* Only use so far is in checkpoint/restart for IRIX 6.4.  In ckpt_fid, a
 * return of ENOSYS would make the code fail over to VOP_FID.  We can't let
 * that happen, since we do a VN_HOLD there in the expectation that
 * posthandle will be called to release the vnode.
 *
 * afs_fid2 is used to support the R5000 workarounds (_R5000_CVT_WAR)
 */
afs_fid2(OSI_VC_DECL(avc), struct fid *fidp)
#if defined(_R5000_CVT_WAR)
    extern int R5000_cvt_war;
#endif /* AFS_SGI64_ENV && CKPT */

/*
 * check for any pages hashed that shouldn't be!
 * Only valid if PGCACHEDEBUG is set in os/page.c
 * Drop the global lock here, since we may not actually do the call.
 */
afs_chkpgoob(vnode_t * vp, pgno_t pgno)
    pfindanyoob(vp, pgno);

#ifdef AFS_SGI64_ENV
#define AFS_MP_VC_ARG(A) bhv_desc_t A
#define AFS_MP_VC_ARG(A) vnode_t A
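/*
 * The mp_* routines below are thin wrappers installed in the Afs_vnodeops
 * table at the end of this file: each forwards its arguments to the
 * corresponding entry in afs_lockedvnodeops, presumably bracketing the call
 * with the global AFS lock so the single-threaded AFS code is safe on
 * multiprocessor IRIX.
 */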
#ifdef AFS_SGI64_ENV
mp_afs_open(bhv_desc_t * bhp, vnode_t ** a, mode_t b, struct cred *c)
mp_afs_open(vnode_t ** a, mode_t b, struct cred *c)
#ifdef AFS_SGI64_ENV
    rv = afs_lockedvnodeops.vop_open(bhp, a, b, c);
    rv = afs_lockedvnodeops.vop_open(a, b, c);

#if defined(AFS_SGI64_ENV)
#if defined(AFS_SGI65_ENV)
mp_afs_close(AFS_MP_VC_ARG(*a), int b, lastclose_t c, struct cred *d)
mp_afs_close(AFS_MP_VC_ARG(*a), int b, lastclose_t c, off_t d, struct cred *e,
mp_afs_close(AFS_MP_VC_ARG(*a), int b, lastclose_t c, off_t d, struct cred *e)
    rv = afs_lockedvnodeops.vop_close(a, b, c, d
#if !defined(AFS_SGI65_ENV)
#if defined(AFS_SGI64_ENV)

#ifdef AFS_SGI64_ENV
mp_afs_read(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d,
mp_afs_read(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d)
#ifdef AFS_SGI64_ENV
    rv = afs_lockedvnodeops.vop_read(a, b, c, d, f);
    rv = afs_lockedvnodeops.vop_read(a, b, c, d);

#ifdef AFS_SGI64_ENV
mp_afs_write(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d,
mp_afs_write(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d)
#ifdef AFS_SGI64_ENV
    rv = afs_lockedvnodeops.vop_write(a, b, c, d, f);
    rv = afs_lockedvnodeops.vop_write(a, b, c, d);

mp_afs_ioctl(AFS_MP_VC_ARG(*a), int b, void *c, int d, struct cred *e, int *f
#ifdef AFS_SGI65_ENV
             , struct vopbd *vbds
    rv = afs_lockedvnodeops.vop_ioctl(a, b, c, d, e, f
#ifdef AFS_SGI65_ENV
mp_fs_setfl(AFS_MP_VC_ARG(*a), int b, int c, struct cred *d)
    rv = afs_lockedvnodeops.vop_setfl(a, b, c, d);

mp_afs_getattr(AFS_MP_VC_ARG(*a), struct vattr *b, int c, struct cred *d)
    rv = afs_lockedvnodeops.vop_getattr(a, b, c, d);

mp_afs_setattr(AFS_MP_VC_ARG(*a), struct vattr *b, int c, struct cred *d)
    rv = afs_lockedvnodeops.vop_setattr(a, b, c, d);

mp_afs_access(AFS_MP_VC_ARG(*a), int b,
#ifndef AFS_SGI65_ENV
    rv = afs_lockedvnodeops.vop_access(a, b,
#ifndef AFS_SGI65_ENV

mp_afs_lookup(AFS_MP_VC_ARG(*a), char *b, vnode_t ** c, struct pathname *d,
              int e, vnode_t * f, struct cred *g)
    rv = afs_lockedvnodeops.vop_lookup(a, b, c, d, e, f, g);

#ifdef AFS_SGI64_ENV
mp_afs_create(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, int d, int e,
              vnode_t ** f, struct cred *g)
mp_afs_create(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, enum vcexcl d,
              int e, vnode_t ** f, struct cred *g)
    rv = afs_lockedvnodeops.vop_create(a, b, c, d, e, f, g);
mp_afs_remove(AFS_MP_VC_ARG(*a), char *b, struct cred *c)
    rv = afs_lockedvnodeops.vop_remove(a, b, c);

mp_afs_link(AFS_MP_VC_ARG(*a), vnode_t * b, char *c, struct cred *d)
    rv = afs_lockedvnodeops.vop_link(a, b, c, d);

mp_afs_rename(AFS_MP_VC_ARG(*a), char *b, vnode_t * c, char *d,
              struct pathname *e, struct cred *f)
    rv = afs_lockedvnodeops.vop_rename(a, b, c, d, e, f);

mp_afs_mkdir(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, vnode_t ** d,
    rv = afs_lockedvnodeops.vop_mkdir(a, b, c, d, e);

mp_afs_rmdir(AFS_MP_VC_ARG(*a), char *b, vnode_t * c, struct cred *d)
    rv = afs_lockedvnodeops.vop_rmdir(a, b, c, d);

mp_afs_readdir(AFS_MP_VC_ARG(*a), struct uio *b, struct cred *c, int *d)
    rv = afs_lockedvnodeops.vop_readdir(a, b, c, d);

mp_afs_symlink(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, char *d,
    rv = afs_lockedvnodeops.vop_symlink(a, b, c, d, e);

mp_afs_readlink(AFS_MP_VC_ARG(*a), struct uio *b, struct cred *c)
    rv = afs_lockedvnodeops.vop_readlink(a, b, c);

mp_afs_fsync(AFS_MP_VC_ARG(*a), int b, struct cred *c
#ifdef AFS_SGI65_ENV
             , off_t start, off_t stop
    rv = afs_lockedvnodeops.vop_fsync(a, b, c
#ifdef AFS_SGI65_ENV
mp_afs_inactive(AFS_MP_VC_ARG(*a), struct cred *b)
    afs_lockedvnodeops.vop_inactive(a, b);

mp_afs_fid(AFS_MP_VC_ARG(*a), struct fid **b)
    rv = afs_lockedvnodeops.vop_fid(a, b);

mp_afs_fid2(AFS_MP_VC_ARG(*a), struct fid *b)
    rv = afs_lockedvnodeops.vop_fid2(a, b);

mp_afs_rwlock(AFS_MP_VC_ARG(*a), AFS_RWLOCK_T b)
    afs_rwlock(a, VRWLOCK_WRITE);

mp_afs_rwunlock(AFS_MP_VC_ARG(*a), AFS_RWLOCK_T b)
    afs_rwunlock(a, VRWLOCK_WRITE);

mp_afs_seek(AFS_MP_VC_ARG(*a), off_t b, off_t * c)
    rv = afs_lockedvnodeops.vop_seek(a, b, c);

mp_fs_cmp(AFS_MP_VC_ARG(*a), vnode_t * b)
    rv = afs_lockedvnodeops.vop_cmp(a, b);

mp_afs_frlock(AFS_MP_VC_ARG(*a), int b, struct flock *c, int d, off_t e,
#ifdef AFS_SGI65_ENV
    rv = afs_lockedvnodeops.vop_frlock(a, b, c, d, e,
#ifdef AFS_SGI65_ENV

mp_afs_realvp(AFS_MP_VC_ARG(*a), vnode_t ** b)
    rv = afs_lockedvnodeops.vop_realvp(a, b);

mp_afs_bmap(AFS_MP_VC_ARG(*a), off_t b, ssize_t c, int d, struct cred *e,
            struct bmapval *f, int *g)
    rv = afs_lockedvnodeops.vop_bmap(a, b, c, d, e, f, g);

mp_afs_strategy(AFS_MP_VC_ARG(*a), struct buf *b)
    afs_lockedvnodeops.vop_strategy(a, b);
#ifdef AFS_SGI65_ENV
mp_afs_map(AFS_MP_VC_ARG(*a), off_t b, size_t c, mprot_t d, u_int e,
           struct cred *f, vnode_t ** g)
mp_afs_map(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, char **d, size_t e,
           u_int f, u_int g, u_int h, struct cred *i)
    rv = afs_lockedvnodeops.vop_map(a, b, c, d, e, f, g
#ifndef AFS_SGI65_ENV

#ifndef AFS_SGI65_ENV
/* As of Irix 6.5, addmap and delmap are only for devices */
mp_afs_addmap(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, addr_t d,
              size_t e, u_int f, u_int g, u_int h, struct cred *i)
    rv = afs_lockedvnodeops.vop_addmap(a, b, c, d, e, f, g, h, i);

mp_afs_delmap(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, addr_t d,
              size_t e, u_int f, u_int g, u_int h, struct cred *i)
    rv = afs_lockedvnodeops.vop_delmap(a, b, c, d, e, f, g, h, i);
#endif /* ! AFS_SGI65_ENV */

mp_fs_poll(AFS_MP_VC_ARG(*a), short b, int c, short *d, struct pollhead **e
#ifdef AFS_SGI65_ENV
    rv = afs_lockedvnodeops.vop_poll(a, b, c, d, e
#ifdef AFS_SGI65_ENV
struct vnodeops Afs_vnodeops = {
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    BHV_IDENTITY_INIT_POSITION(VNODE_POSITION_BASE),
    VNODE_POSITION_BASE,
    fs_nosys,           /* realvp */
#ifdef AFS_SGI65_ENV
    fs_noerr,           /* addmap - devices only */
    fs_noerr,           /* delmap - devices only */
    mp_fs_poll,         /* poll */
    fs_nosys,           /* dump */
    fs_nosys,           /* allocstore */
    fs_nosys,           /* fcntl */
    afs_reclaim,        /* reclaim */
    fs_nosys,           /* attr_get */
    fs_nosys,           /* attr_set */
    fs_nosys,           /* attr_remove */
    fs_nosys,           /* attr_list */
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    (vop_link_removed_t) fs_noval,
    fs_flushinval_pages,
    (vop_commit_t) fs_nosys,
    (vop_readbuf_t) fs_nosys,

struct vnodeops *afs_ops = &Afs_vnodeops;

/* Support for XFS caches.  The assumption here is that the size of
 * a cache file also does not exceed 32 bits.
 */

/* Initialized in osi_InitCacheFSType().  Used to determine inode type. */
vnodeops_t *afs_xfs_vnodeopsp;

extern afs_lock_t afs_xosi; /* lock is for tvattr */
VnodeToIno(vnode_t * vp)
    ObtainWriteLock(&afs_xosi, 579);
    vattr.va_mask = AT_FSID | AT_NODEID;    /* quick return using this mask. */
    AFS_VOP_GETATTR(vp, &vattr, 0, OSI_GET_CURRENT_CRED(), code);
        osi_Panic("VnodeToIno");
    ReleaseWriteLock(&afs_xosi);
    return vattr.va_nodeid;

VnodeToDev(vnode_t * vp)
    ObtainWriteLock(&afs_xosi, 580);
    vattr.va_mask = AT_FSID | AT_NODEID;    /* quick return using this mask. */
    AFS_VOP_GETATTR(vp, &vattr, 0, OSI_GET_CURRENT_CRED(), code);
        osi_Panic("VnodeToDev");
    ReleaseWriteLock(&afs_xosi);
    return (dev_t) vattr.va_fsid;
VnodeToSize(vnode_t * vp)
    ObtainWriteLock(&afs_xosi, 581);
    vattr.va_mask = AT_SIZE;
    AFS_VOP_GETATTR(vp, &vattr, 0, OSI_GET_CURRENT_CRED(), code);
        osi_Panic("VnodeToSize");
    ReleaseWriteLock(&afs_xosi);
    return vattr.va_size;
#endif /* AFS_SGI62_ENV */
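/*
 * A minimal usage sketch for the three helpers above, under the assumption
 * of a hypothetical caller and record type (neither "note_cache_file" nor
 * "struct my_cache_entry" exists in the real cache code).  It only shows
 * that each helper issues a VOP_GETATTR under afs_xosi and returns one
 * attribute of a cache-file vnode.
 */
#if 0
struct my_cache_entry {     /* hypothetical record, for illustration only */
    ino_t inum;
    dev_t dev;
    off_t size;
};

static void
note_cache_file(vnode_t * cacheVp, struct my_cache_entry *ce)
{
    ce->inum = VnodeToIno(cacheVp);     /* inode number of the cache file */
    ce->dev = VnodeToDev(cacheVp);      /* filesystem id it lives on */
    ce->size = VnodeToSize(cacheVp);    /* current size in bytes */
}
#endif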