/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
10 #include <afsconfig.h>
11 #include "afs/param.h"
15 * SOLARIS/osi_vnodeops.c
19 * Functions: AFS_TRYUP, _init, _info, _fini, afs_addmap, afs_delmap,
20 * afs_vmread, afs_vmwrite, afs_getpage, afs_GetOnePage, afs_putpage,
21 * afs_putapage, afs_nfsrdwr, afs_map, afs_PageLeft, afs_pathconf/afs_cntl,
22 * afs_ioctl, afs_rwlock, afs_rwunlock, afs_seek, afs_space, afs_dump,
23 * afs_cmp, afs_realvp, afs_pageio, afs_dumpctl, afs_dispose, afs_setsecattr,
24 * afs_getsecattr, gafs_open, gafs_close, gafs_getattr, gafs_setattr,
25 * gafs_access, gafs_lookup, gafs_create, gafs_remove, gafs_link,
26 * gafs_rename, gafs_mkdir, gafs_rmdir, gafs_readdir, gafs_symlink,
27 * gafs_readlink, gafs_fsync, afs_inactive, gafs_inactive, gafs_fid
30 * Variables: Afs_vnodeops
33 #include "afs/sysincludes.h" /* Standard vendor system headers */
34 #include "afsincludes.h" /* Afs-based standard headers */
35 #include "afs/afs_stats.h" /* statistics */
36 #include "afs/nfsclient.h"
45 #include <vm/seg_map.h>
46 #include <vm/seg_vn.h>
48 #if defined(AFS_SUN511_ENV)
49 #include <sys/vfs_opreg.h>
51 #include <sys/modctl.h>
52 #include <sys/syscall.h>
53 #include <sys/debug.h>
54 #include <sys/fs_subr.h>
56 /* Translate a faultcode_t as returned by some of the vm routines
57 * into a suitable errno value.
60 afs_fc2errno(faultcode_t fc
)
62 switch (FC_CODE(fc
)) {
75 extern struct as kas
; /* kernel addr space */
76 extern unsigned char *afs_indexFlags
;
77 extern afs_lock_t afs_xdcache
;
79 /* Additional vnodeops for SunOS 4.0.x */
80 int afs_nfsrdwr(), afs_getpage(), afs_putpage(), afs_map();
81 int afs_dump(), afs_cmp(), afs_realvp(), afs_GetOnePage();
86 afs_addmap(struct vnode
*avp
, offset_t offset
, struct as
*asp
,
87 caddr_t addr
, int length
, int prot
, int maxprot
, int flags
,
90 /* XXX What should we do here?? XXX */
95 afs_delmap(struct vnode
*avp
, offset_t offset
, struct as
*asp
,
96 caddr_t addr
, int length
, int prot
, int maxprot
, int flags
,
99 /* XXX What should we do here?? XXX */
103 #ifdef AFS_SUN510_ENV
105 afs_vmread(struct vnode
*avp
, struct uio
*auio
, int ioflag
,
106 afs_ucred_t
*acred
, caller_context_t
*ct
)
109 afs_vmread(struct vnode
*avp
, struct uio
*auio
, int ioflag
,
115 if (!RW_READ_HELD(&(VTOAFS(avp
))->rwlock
))
116 osi_Panic("afs_vmread: !rwlock");
118 code
= afs_nfsrdwr(VTOAFS(avp
), auio
, UIO_READ
, ioflag
, acred
);
124 #ifdef AFS_SUN510_ENV
126 afs_vmwrite(struct vnode
*avp
, struct uio
*auio
, int ioflag
,
127 afs_ucred_t
*acred
, caller_context_t
*ct
)
130 afs_vmwrite(struct vnode
*avp
, struct uio
*auio
, int ioflag
,
136 if (!RW_WRITE_HELD(&(VTOAFS(avp
))->rwlock
))
137 osi_Panic("afs_vmwrite: !rwlock");
139 code
= afs_nfsrdwr(VTOAFS(avp
), auio
, UIO_WRITE
, ioflag
, acred
);
145 afs_getpage(struct vnode
*vp
, offset_t off
, u_int len
, u_int
*protp
,
146 struct page
*pl
[], u_int plsz
, struct seg
*seg
, caddr_t addr
,
147 enum seg_rw rw
, afs_ucred_t
*acred
)
150 AFS_STATCNT(afs_getpage
);
152 if (vp
->v_flag
& VNOMAP
) /* File doesn't allow mapping */
159 afs_GetOnePage(vp
, off
, len
, protp
, pl
, plsz
, seg
, addr
, rw
, acred
);
161 struct multiPage_range range
;
162 struct vcache
*vcp
= VTOAFS(vp
);
164 /* We've been asked to get more than one page. We must return all
165 * requested pages at once, all of them locked, which means all of
166 * these dcache entries cannot be kicked out of the cache before we
167 * return (since their pages cannot be invalidated).
169 * afs_GetOnePage will be called multiple times by pvn_getpages in
170 * order to get all of the requested pages. One of the later
171 * afs_GetOnePage calls may need to evict some cache entries in order
172 * to perform its read. If we try to kick out one of the entries an
173 * earlier afs_GetOnePage call used, we will deadlock since we have
174 * the page locked. So, to tell afs_GetDownD that it should skip over
175 * any entries we've read in due to this afs_getpage call, record the
176 * offset and length in avc->multiPage.
178 * Ideally we would just set something in each dcache as we get it,
179 * but that is rather difficult, since pvn_getpages doesn't let us
180 * retain any information between calls to afs_GetOnePage. So instead
181 * just record the offset and length, and let afs_GetDownD calculate
182 * which dcache entries should be skipped. */
187 ObtainWriteLock(&vcp
->vlock
, 548);
188 QAdd(&vcp
->multiPage
, &range
.q
);
189 ReleaseWriteLock(&vcp
->vlock
);
191 pvn_getpages(afs_GetOnePage
, vp
, off
, len
, protp
, pl
, plsz
, seg
, addr
, rw
, acred
);
192 ObtainWriteLock(&vcp
->vlock
, 549);
194 ReleaseWriteLock(&vcp
->vlock
);
200 /* Return all the pages from [off..off+len) in file */
202 afs_GetOnePage(struct vnode
*vp
, u_offset_t off
, u_int alen
, u_int
*protp
,
203 struct page
*pl
[], u_int plsz
, struct seg
*seg
, caddr_t addr
,
204 enum seg_rw rw
, afs_ucred_t
*acred
)
215 afs_size_t offset
, nlen
= 0;
216 struct vrequest treq
;
217 afs_int32 mapForRead
= 0, Code
= 0;
221 osi_Panic("GetOnePage: !acred");
223 avc
= VTOAFS(vp
); /* cast to afs vnode */
225 if (avc
->credp
/*&& AFS_NFSXLATORREQ(acred) */
226 && AFS_NFSXLATORREQ(avc
->credp
)) {
229 if (code
= afs_InitReq(&treq
, acred
))
233 /* This is a read-ahead request, e.g. due to madvise. */
235 ObtainReadLock(&avc
->lock
);
237 while (plen
> 0 && !afs_BBusy()) {
238 /* Obtain a dcache entry at off. 2 means don't fetch data. */
240 afs_GetDCache(avc
, (afs_offs_t
) off
, &treq
, &offset
, &nlen
,
245 /* Write-lock the dcache entry, if we don't succeed, just go on */
246 if (0 != NBObtainWriteLock(&tdc
->lock
, 642)) {
251 /* If we aren't already fetching this dcache entry, queue it */
252 if (!(tdc
->mflags
& DFFetchReq
)) {
255 tdc
->mflags
|= DFFetchReq
;
256 bp
= afs_BQueue(BOP_FETCH
, avc
, B_DONTWAIT
, 0, acred
,
257 (afs_size_t
) off
, (afs_size_t
) 1, tdc
,
258 (void *)0, (void *)0);
260 /* Unable to start background fetch; might as well stop */
261 tdc
->mflags
&= ~DFFetchReq
;
262 ReleaseWriteLock(&tdc
->lock
);
266 ReleaseWriteLock(&tdc
->lock
);
268 ReleaseWriteLock(&tdc
->lock
);
273 /* Adjust our offset and remaining length values */
277 /* If we aren't making progress for some reason, bail out */
282 ReleaseReadLock(&avc
->lock
);
287 pl
[0] = NULL
; /* Make sure it's empty */
289 /* first, obtain the proper lock for the VM system */
291 /* if this is a read request, map the page in read-only. This will
292 * allow us to swap out the dcache entry if there are only read-only
293 * pages created for the chunk, which helps a *lot* when dealing
294 * with small caches. Otherwise, we have to invalidate the vm
295 * pages for the range covered by a chunk when we swap out the
298 if (rw
== S_READ
|| rw
== S_EXEC
)
305 if (rw
== S_WRITE
|| rw
== S_CREATE
)
306 tdc
= afs_GetDCache(avc
, (afs_offs_t
) off
, &treq
, &offset
, &nlen
, 5);
308 tdc
= afs_GetDCache(avc
, (afs_offs_t
) off
, &treq
, &offset
, &nlen
, 1);
310 return afs_CheckCode(EINVAL
, &treq
, 62);
311 code
= afs_VerifyVCache(avc
, &treq
);
314 return afs_CheckCode(code
, &treq
, 44); /* failed to get it */
317 ObtainReadLock(&avc
->lock
);
319 afs_Trace4(afs_iclSetp
, CM_TRACE_PAGEIN
, ICL_TYPE_POINTER
, (afs_int32
) vp
,
320 ICL_TYPE_OFFSET
, ICL_HANDLE_OFFSET(off
), ICL_TYPE_LONG
, len
,
321 ICL_TYPE_LONG
, (int)rw
);
326 /* Check to see if we're in the middle of a VM purge, and if we are, release
327 * the locks and try again when the VM purge is done. */
328 ObtainWriteLock(&avc
->vlock
, 550);
330 ReleaseReadLock(&avc
->lock
);
331 ReleaseWriteLock(&avc
->vlock
);
333 /* Check activeV again, it may have been turned off
334 * while we were waiting for a lock in afs_PutDCache */
335 ObtainWriteLock(&avc
->vlock
, 574);
337 avc
->vstates
|= VRevokeWait
;
338 ReleaseWriteLock(&avc
->vlock
);
339 afs_osi_Sleep(&avc
->vstates
);
341 ReleaseWriteLock(&avc
->vlock
);
345 ReleaseWriteLock(&avc
->vlock
);
347 /* We're about to do stuff with our dcache entry.. Lock it. */
348 ObtainReadLock(&tdc
->lock
);
350 /* Check to see whether the cache entry is still valid */
351 if (!(avc
->f
.states
& CStatd
)
352 || !hsame(avc
->f
.m
.DataVersion
, tdc
->f
.versionNo
)) {
353 ReleaseReadLock(&tdc
->lock
);
354 ReleaseReadLock(&avc
->lock
);
360 while (1) { /* loop over all pages */
361 /* now, try to find the page in memory (it may already be intransit or laying
362 * around the free list */
364 page_lookup(vp
, toffset
, (rw
== S_CREATE
? SE_EXCL
: SE_SHARED
));
368 /* if we make it here, we can't find the page in memory. Do a real disk read
369 * from the cache to get the data */
370 Code
|= 0x200; /* XXX */
371 /* use PG_EXCL because we know the page does not exist already. If it
372 * actually does exist, we have somehow raced between lookup and create.
373 * As of 4/98, that shouldn't be possible, but we'll be defensive here
374 * in case someone tries to relax all the serialization of read and write
375 * operations with harmless things like stat. */
377 page_create_va(vp
, toffset
, PAGESIZE
, PG_WAIT
| PG_EXCL
, seg
,
383 pagezero(page
, alen
, PAGESIZE
- alen
);
385 if (rw
== S_CREATE
) {
386 /* XXX Don't read from AFS in write only cases XXX */
387 page_io_unlock(page
);
390 /* now it is time to start I/O operation */
391 buf
= pageio_setup(page
, PAGESIZE
, vp
, B_READ
); /* allocate a buf structure */
394 buf
->b_lblkno
= lbtodb(toffset
);
395 bp_mapin(buf
); /* map it in to our address space */
398 /* afs_ustrategy will want to lock the dcache entry */
399 ReleaseReadLock(&tdc
->lock
);
400 code
= afs_ustrategy(buf
, acred
); /* do the I/O */
401 ObtainReadLock(&tdc
->lock
);
404 /* Before freeing unmap the buffer */
410 page_io_unlock(page
);
413 /* come here when we have another page (already held) to enter */
415 /* put page in array and continue */
416 /* The p_selock must be downgraded to a shared lock after the page is read */
417 if ((rw
!= S_CREATE
) && !(PAGE_SHARED(page
))) {
418 page_downgrade(page
);
421 code
= page_iolock_assert(page
);
427 break; /* done all the pages */
428 } /* while (1) ... */
432 ReleaseReadLock(&tdc
->lock
);
434 /* Prefetch next chunk if we're at a chunk boundary */
435 if (AFS_CHUNKOFFSET(off
) == 0) {
436 if (!(tdc
->mflags
& DFNextStarted
))
437 afs_PrefetchChunk(avc
, tdc
, acred
, &treq
);
440 ReleaseReadLock(&avc
->lock
);
441 ObtainWriteLock(&afs_xdcache
, 246);
443 /* track that we have dirty (or dirty-able) pages for this chunk. */
444 afs_indexFlags
[tdc
->index
] |= IFDirtyPages
;
446 afs_indexFlags
[tdc
->index
] |= IFAnyPages
;
447 ReleaseWriteLock(&afs_xdcache
);
449 afs_Trace3(afs_iclSetp
, CM_TRACE_PAGEINDONE
, ICL_TYPE_LONG
, code
,
450 ICL_TYPE_LONG
, (int)page
, ICL_TYPE_LONG
, Code
);
455 afs_Trace3(afs_iclSetp
, CM_TRACE_PAGEINDONE
, ICL_TYPE_LONG
, code
,
456 ICL_TYPE_LONG
, (int)page
, ICL_TYPE_LONG
, Code
);
457 /* release all pages, drop locks, return code */
459 pvn_read_done(page
, B_ERROR
);
460 ReleaseReadLock(&avc
->lock
);
461 ReleaseReadLock(&tdc
->lock
);
467 afs_putpage(struct vnode
*vp
, offset_t off
, u_int len
, int flags
,
475 afs_int32 NPages
= 0;
476 u_offset_t toff
= off
;
479 AFS_STATCNT(afs_putpage
);
480 if (vp
->v_flag
& VNOMAP
) /* file doesn't allow mapping */
484 * Putpage (ASYNC) is called every sec to flush out dirty vm pages
487 afs_Trace4(afs_iclSetp
, CM_TRACE_PAGEOUT
, ICL_TYPE_POINTER
,
488 (afs_int32
) vp
, ICL_TYPE_OFFSET
, ICL_HANDLE_OFFSET(off
),
489 ICL_TYPE_INT32
, (afs_int32
) len
, ICL_TYPE_LONG
, (int)flags
);
491 ObtainSharedLock(&avc
->lock
, 247);
494 /* Get a list of modified (or whatever) pages */
496 endPos
= (afs_offs_t
) off
+ len
; /* position we're supposed to write up to */
497 while ((afs_offs_t
) toff
< endPos
498 && (afs_offs_t
) toff
< avc
->f
.m
.Length
) {
499 /* If not invalidating pages use page_lookup_nowait to avoid reclaiming
500 * them from the free list
503 if (flags
& (B_FREE
| B_INVAL
))
504 pages
= page_lookup(vp
, toff
, SE_EXCL
);
506 pages
= page_lookup_nowait(vp
, toff
, SE_SHARED
);
507 if (!pages
|| !pvn_getdirty(pages
, flags
))
513 UpgradeSToWLock(&avc
->lock
, 671);
517 code
= afs_putapage(vp
, pages
, &toff
, &tlen
, flags
, cred
);
528 UpgradeSToWLock(&avc
->lock
, 670);
533 code
= pvn_vplist_dirty(vp
, toff
, afs_putapage
, flags
, cred
);
537 if (code
&& !avc
->vc_error
) {
539 UpgradeSToWLock(&avc
->lock
, 669);
542 avc
->vc_error
= code
;
546 ReleaseWriteLock(&avc
->lock
);
548 ReleaseSharedLock(&avc
->lock
);
549 afs_Trace2(afs_iclSetp
, CM_TRACE_PAGEOUTDONE
, ICL_TYPE_LONG
, code
,
550 ICL_TYPE_LONG
, NPages
);
557 afs_putapage(struct vnode
*vp
, struct page
*pages
, u_offset_t
* offp
,
558 size_t * lenp
, int flags
, afs_ucred_t
*credp
)
561 struct vcache
*avc
= VTOAFS(vp
);
563 u_int tlen
= PAGESIZE
;
564 afs_offs_t off
= (pages
->p_offset
/ PAGESIZE
) * PAGESIZE
;
567 * Now we've got the modified pages. All pages are locked and held
568 * XXX Find a kluster that fits in one block (or page). We also
569 * adjust the i/o if the file space is less than a while page. XXX
571 if (off
+ tlen
> avc
->f
.m
.Length
) {
572 tlen
= avc
->f
.m
.Length
- off
;
574 /* can't call mapout with 0 length buffers (rmfree panics) */
575 if (((tlen
>> 24) & 0xff) == 0xff) {
580 * Can't call mapout with 0 length buffers since we'll get rmfree panics
582 tbuf
= pageio_setup(pages
, tlen
, vp
, B_WRITE
| flags
);
587 tbuf
->b_lblkno
= lbtodb(pages
->p_offset
);
590 afs_Trace4(afs_iclSetp
, CM_TRACE_PAGEOUTONE
, ICL_TYPE_LONG
, avc
,
591 ICL_TYPE_LONG
, pages
, ICL_TYPE_LONG
, tlen
, ICL_TYPE_OFFSET
,
592 ICL_HANDLE_OFFSET(off
));
593 code
= afs_ustrategy(tbuf
, credp
); /* unlocks page */
597 pvn_write_done(pages
, ((code
) ? B_ERROR
: 0) | B_WRITE
| flags
);
608 afs_nfsrdwr(struct vcache
*avc
, struct uio
*auio
, enum uio_rw arw
,
609 int ioflag
, afs_ucred_t
*acred
)
613 afs_int32 code_checkcode
= 0;
615 afs_int32 mode
, sflags
;
617 struct dcache
*dcp
, *dcp_newpage
;
618 afs_size_t fileBase
, size
;
621 afs_int32 pageOffset
, extraResid
= 0;
622 afs_size_t origLength
; /* length when reading/writing started */
623 long appendLength
; /* length when this call will finish */
624 int created
; /* created pages instead of faulting them */
626 int didFakeOpen
, eof
;
627 struct vrequest treq
;
631 AFS_STATCNT(afs_nfsrdwr
);
633 /* can't read or write other things */
634 if (vType(avc
) != VREG
)
637 if (auio
->uio_resid
== 0)
640 afs_Trace4(afs_iclSetp
, CM_TRACE_VMRW
, ICL_TYPE_POINTER
, (afs_int32
) avc
,
641 ICL_TYPE_LONG
, (arw
== UIO_WRITE
? 1 : 0), ICL_TYPE_OFFSET
,
642 ICL_HANDLE_OFFSET(auio
->uio_loffset
), ICL_TYPE_OFFSET
,
643 ICL_HANDLE_OFFSET(auio
->uio_resid
));
645 #ifndef AFS_64BIT_CLIENT
646 if (AfsLargeFileUio(auio
)) /* file is larger than 2 GB */
651 osi_Panic("rdwr: !acred");
653 if (code
= afs_InitReq(&treq
, acred
))
656 /* It's not really possible to know if a write cause a growth in the
657 * cache size, we we wait for a cache drain for any write.
659 afs_MaybeWakeupTruncateDaemon();
660 while ((arw
== UIO_WRITE
)
661 && (afs_blocksUsed
> PERCENT(CM_WAITFORDRAINPCT
, afs_cacheBlocks
))) {
662 if (afs_blocksUsed
- afs_blocksDiscarded
>
663 PERCENT(CM_WAITFORDRAINPCT
, afs_cacheBlocks
)) {
664 afs_WaitForCacheDrain
= 1;
665 afs_osi_Sleep(&afs_WaitForCacheDrain
);
667 afs_MaybeFreeDiscardedDCache();
668 afs_MaybeWakeupTruncateDaemon();
670 code
= afs_VerifyVCache(avc
, &treq
);
672 return afs_CheckCode(code
, &treq
, 45);
674 osi_FlushPages(avc
, acred
);
676 ObtainWriteLock(&avc
->lock
, 250);
678 /* adjust parameters when appending files */
679 if ((ioflag
& IO_APPEND
) && arw
== UIO_WRITE
) {
680 auio
->uio_loffset
= avc
->f
.m
.Length
; /* write at EOF position */
682 if (auio
->afsio_offset
< 0 || (auio
->afsio_offset
+ auio
->uio_resid
) < 0) {
683 ReleaseWriteLock(&avc
->lock
);
686 #ifndef AFS_64BIT_CLIENT
687 /* file is larger than 2GB */
688 if (AfsLargeFileSize(auio
->uio_offset
, auio
->uio_resid
)) {
689 ReleaseWriteLock(&avc
->lock
);
694 didFakeOpen
= 0; /* keep track of open so we can do close */
695 if (arw
== UIO_WRITE
) {
696 /* do ulimit processing; shrink resid or fail */
697 if (auio
->uio_loffset
+ auio
->afsio_resid
> auio
->uio_llimit
) {
698 if (auio
->uio_loffset
>= auio
->uio_llimit
) {
699 ReleaseWriteLock(&avc
->lock
);
702 /* track # of bytes we should write, but won't because of
703 * ulimit; we must add this into the final resid value
704 * so caller knows we punted some data.
706 extraResid
= auio
->uio_resid
;
707 auio
->uio_resid
= auio
->uio_llimit
- auio
->uio_loffset
;
708 extraResid
-= auio
->uio_resid
;
711 mode
= S_WRITE
; /* segment map-in mode */
712 afs_FakeOpen(avc
); /* do this for writes, so data gets put back
713 * when we want it to be put back */
714 didFakeOpen
= 1; /* we'll be doing a fake open */
715 /* before starting any I/O, we must ensure that the file is big enough
716 * to hold the results (since afs_putpage will be called to force the I/O */
717 size
= auio
->afsio_resid
+ auio
->afsio_offset
; /* new file size */
719 origLength
= avc
->f
.m
.Length
;
720 if (size
> avc
->f
.m
.Length
) {
721 afs_Trace4(afs_iclSetp
, CM_TRACE_SETLENGTH
, ICL_TYPE_STRING
,
722 __FILE__
, ICL_TYPE_LONG
, __LINE__
, ICL_TYPE_OFFSET
,
723 ICL_HANDLE_OFFSET(avc
->f
.m
.Length
), ICL_TYPE_OFFSET
,
724 ICL_HANDLE_OFFSET(size
));
725 avc
->f
.m
.Length
= size
; /* file grew */
727 avc
->f
.states
|= CDirty
; /* Set the dirty bit */
728 avc
->f
.m
.Date
= osi_Time(); /* Set file date (for ranlib) */
730 mode
= S_READ
; /* map-in read-only */
731 origLength
= avc
->f
.m
.Length
;
734 if (acred
&& AFS_NFSXLATORREQ(acred
)) {
735 if (arw
== UIO_READ
) {
737 (avc
, PRSFS_READ
, &treq
,
738 CHECK_MODE_BITS
| CMB_ALLOW_EXEC_AS_READ
)) {
739 ReleaseWriteLock(&avc
->lock
);
749 counter
= 0; /* don't call afs_DoPartialWrite first time through. */
751 /* compute the amount of data to move into this block,
752 * based on auio->afsio_resid. Note that we copy data in units of
753 * MAXBSIZE, not PAGESIZE. This is because segmap_getmap panics if you
754 * call it with an offset based on blocks smaller than MAXBSIZE
755 * (implying that it should be named BSIZE, since it is clearly both a
757 size
= auio
->afsio_resid
; /* transfer size */
758 fileBase
= ((arw
== UIO_READ
) && (origLength
< auio
->uio_offset
)) ?
759 origLength
: auio
->afsio_offset
; /* start file position for xfr */
760 pageBase
= fileBase
& ~(MAXBSIZE
- 1); /* file position of the page */
761 pageOffset
= fileBase
& (MAXBSIZE
- 1); /* xfr start's offset within page */
762 tsize
= MAXBSIZE
- pageOffset
; /* how much more fits in this page */
763 /* we'll read tsize bytes, but first must make sure tsize isn't too big */
765 tsize
= size
; /* don't read past end of request */
766 eof
= 0; /* flag telling us if we hit the EOF on the read */
767 if (arw
== UIO_READ
) { /* we're doing a read operation */
768 /* don't read past EOF */
769 if (fileBase
+ tsize
> origLength
) {
770 tsize
= origLength
- fileBase
;
771 eof
= 1; /* we did hit the EOF */
773 tsize
= 0; /* better safe than sorry */
777 /* Purge dirty chunks of file if there are too many dirty
778 * chunks. Inside the write loop, we only do this at a chunk
779 * boundary. Clean up partial chunk if necessary at end of loop.
781 if (counter
> 0 && code
== 0 && AFS_CHUNKOFFSET(fileBase
) == 0) {
782 code
= afs_DoPartialWrite(avc
, &treq
);
786 /* write case, we ask segmap_release to call putpage. Really, we
787 * don't have to do this on every page mapin, but for now we're
788 * lazy, and don't modify the rest of AFS to scan for modified
789 * pages on a close or other "synchronize with file server"
790 * operation. This makes things a little cleaner, but probably
791 * hurts performance. */
796 break; /* nothing to transfer, we're done */
798 if (arw
== UIO_WRITE
)
799 avc
->f
.states
|= CDirty
; /* may have been cleared by DoPartialWrite */
801 /* Before dropping lock, hold the chunk (create it if necessary). This
802 * serves two purposes: (1) Ensure Cache Truncate Daemon doesn't try
803 * to purge the chunk's pages while we have them locked. This would
804 * cause deadlock because we might be waiting for the CTD to free up
805 * a chunk. (2) If we're writing past the original EOF, and we're
806 * at the base of the chunk, then make sure it exists online
807 * before we do the uiomove, since the segmap_release will
808 * write out to the chunk, causing it to get fetched if it hasn't
809 * been created yet. The code that would otherwise notice that
810 * we're fetching a chunk past EOF won't work, since we've
811 * already adjusted the file size above.
813 ObtainWriteLock(&avc
->vlock
, 551);
814 while (avc
->vstates
& VPageCleaning
) {
815 ReleaseWriteLock(&avc
->vlock
);
816 ReleaseWriteLock(&avc
->lock
);
817 afs_osi_Sleep(&avc
->vstates
);
818 ObtainWriteLock(&avc
->lock
, 334);
819 ObtainWriteLock(&avc
->vlock
, 552);
821 ReleaseWriteLock(&avc
->vlock
);
823 afs_size_t toff
, tlen
;
824 dcp
= afs_GetDCache(avc
, fileBase
, &treq
, &toff
, &tlen
, 2);
830 ReleaseWriteLock(&avc
->lock
); /* uiomove may page fault */
832 data
= segmap_getmap(segkmap
, AFSTOV(avc
), (u_offset_t
) pageBase
);
833 raddr
= (caddr_t
) (((uintptr_t) data
+ pageOffset
) & PAGEMASK
);
835 (((u_int
) data
+ pageOffset
+ tsize
+ PAGEOFFSET
) & PAGEMASK
) -
838 /* if we're doing a write, and we're starting at the rounded
839 * down page base, and we're writing enough data to cover all
840 * created pages, then we must be writing all of the pages
841 * in this MAXBSIZE window that we're creating.
844 if (arw
== UIO_WRITE
&& ((long)raddr
== (long)data
+ pageOffset
)
846 /* probably the dcache backing this guy is around, but if
847 * not, we can't do this optimization, since we're creating
848 * writable pages, which must be backed by a chunk.
851 dcp_newpage
= afs_FindDCache(avc
, pageBase
);
853 && hsame(avc
->f
.m
.DataVersion
, dcp_newpage
->f
.versionNo
)) {
854 ObtainWriteLock(&avc
->lock
, 251);
855 ObtainWriteLock(&avc
->vlock
, 576);
856 ObtainReadLock(&dcp_newpage
->lock
);
857 if ((avc
->activeV
== 0)
858 && hsame(avc
->f
.m
.DataVersion
, dcp_newpage
->f
.versionNo
)
859 && !(dcp_newpage
->dflags
& (DFFetching
))) {
861 segmap_pagecreate(segkmap
, raddr
, rsize
, 1);
863 ObtainWriteLock(&afs_xdcache
, 252);
864 /* Mark the pages as created and dirty */
865 afs_indexFlags
[dcp_newpage
->index
]
866 |= (IFAnyPages
| IFDirtyPages
);
867 ReleaseWriteLock(&afs_xdcache
);
870 ReleaseReadLock(&dcp_newpage
->lock
);
871 afs_PutDCache(dcp_newpage
);
872 ReleaseWriteLock(&avc
->vlock
);
873 ReleaseWriteLock(&avc
->lock
);
874 } else if (dcp_newpage
)
875 afs_PutDCache(dcp_newpage
);
880 afs_fc2errno(segmap_fault
881 (kas
.a_hat
, segkmap
, raddr
, rsize
,
885 AFS_UIOMOVE(data
+ pageOffset
, tsize
, arw
, auio
, code
);
886 segmap_fault(kas
.a_hat
, segkmap
, raddr
, rsize
, F_SOFTUNLOCK
,
890 code
= segmap_release(segkmap
, data
, sflags
);
892 (void)segmap_release(segkmap
, data
, 0);
895 ObtainWriteLock(&avc
->lock
, 253);
903 afs_FakeClose(avc
, acred
);
905 if (arw
== UIO_WRITE
&& (avc
->f
.states
& CDirty
)) {
906 code2
= afs_DoPartialWrite(avc
, &treq
);
911 if (!code
&& avc
->vc_error
) {
912 code
= code_checkcode
= avc
->vc_error
;
914 ReleaseWriteLock(&avc
->lock
);
916 if ((ioflag
& FSYNC
) && (arw
== UIO_WRITE
)
917 && !AFS_NFSXLATORREQ(acred
))
918 code
= afs_fsync(avc
, 0, acred
);
921 * If things worked, add in as remaining in request any bytes
922 * we didn't write due to file size ulimit.
924 if (code
== 0 && extraResid
> 0)
925 auio
->uio_resid
+= extraResid
;
926 if (code_checkcode
) {
927 return code_checkcode
;
929 return afs_CheckCode(code
, &treq
, 46);
934 afs_map(struct vnode
*vp
, offset_t off
, struct as
*as
, caddr_t
*addr
, size_t len
, u_char prot
, u_char maxprot
, u_int flags
, afs_ucred_t
*cred
)
936 struct segvn_crargs crargs
;
938 struct vrequest treq
;
939 struct vcache
*avc
= VTOAFS(vp
);
941 AFS_STATCNT(afs_map
);
944 /* check for reasonableness on segment bounds; apparently len can be < 0 */
945 if (off
< 0 || off
+ len
< 0) {
948 #ifndef AFS_64BIT_CLIENT
949 if (AfsLargeFileSize(off
, len
)) { /* file is larger than 2 GB */
955 if (vp
->v_flag
& VNOMAP
) /* File isn't allowed to be mapped */
958 if (vp
->v_filocks
) /* if locked, disallow mapping */
962 if (code
= afs_InitReq(&treq
, cred
))
965 if (vp
->v_type
!= VREG
) {
970 code
= afs_VerifyVCache(avc
, &treq
);
974 osi_FlushPages(avc
, cred
); /* ensure old pages are gone */
975 avc
->f
.states
|= CMAPPED
; /* flag cleared at afs_inactive */
979 if ((flags
& MAP_FIXED
) == 0) {
980 map_addr(addr
, len
, off
, 1, flags
);
987 (void)as_unmap(as
, *addr
, len
); /* unmap old address space use */
988 /* setup the create parameter block for the call */
989 crargs
.vp
= AFSTOV(avc
);
990 crargs
.offset
= (u_offset_t
)off
;
992 crargs
.type
= flags
& MAP_TYPE
;
994 crargs
.maxprot
= maxprot
;
995 crargs
.amp
= (struct anon_map
*)0;
996 crargs
.flags
= flags
& ~MAP_TYPE
;
998 code
= as_map(as
, *addr
, len
, segvn_create
, (char *)&crargs
);
1002 code
= afs_CheckCode(code
, &treq
, 47);
1006 code
= afs_CheckCode(code
, &treq
, 48);
1013 * For Now We use standard local kernel params for AFS system values. Change this
1017 #ifdef AFS_SUN511_ENV
1018 afs_pathconf(struct vnode
*vp
, int cmd
, u_long
*outdatap
,
1019 afs_ucred_t
*credp
, caller_context_t
*ct
)
1021 afs_pathconf(struct vnode
*vp
, int cmd
, u_long
*outdatap
,
1023 #endif /* AFS_SUN511_ENV */
1025 AFS_STATCNT(afs_cntl
);
1028 *outdatap
= MAXLINK
;
1031 *outdatap
= MAXNAMLEN
;
1034 *outdatap
= MAXPATHLEN
;
1036 case _PC_CHOWN_RESTRICTED
:
1042 case _PC_FILESIZEBITS
:
1043 #ifdef AFS_64BIT_CLIENT
1050 #ifdef AFS_SUN511_ENV
1051 return fs_pathconf(vp
, cmd
, outdatap
, credp
, ct
);
1053 return fs_pathconf(vp
, cmd
, outdatap
, credp
);
1054 #endif /* AFS_SUN511_ENV */
1060 afs_ioctl(struct vnode
*vnp
, int com
, int arg
, int flag
, cred_t
*credp
,
1067 afs_rwlock(struct vnode
*vnp
, int wlock
)
1069 rw_enter(&(VTOAFS(vnp
))->rwlock
, (wlock
? RW_WRITER
: RW_READER
));
1074 afs_rwunlock(struct vnode
*vnp
, int wlock
)
1076 rw_exit(&(VTOAFS(vnp
))->rwlock
);
1082 afs_seek(struct vnode
*vnp
, offset_t ooff
, offset_t
*noffp
)
1086 #ifndef AFS_64BIT_CLIENT
1087 # define __MAXOFF_T MAXOFF_T
1089 # define __MAXOFF_T MAXOFFSET_T
1092 if ((*noffp
< 0 || *noffp
> __MAXOFF_T
))
1098 #ifdef AFS_SUN59_ENV
1099 afs_frlock(struct vnode
*vnp
, int cmd
, struct flock64
*ap
, int flag
,
1100 offset_t off
, struct flk_callback
*flkcb
, afs_ucred_t
*credp
)
1102 afs_frlock(struct vnode
*vnp
, int cmd
, struct flock64
*ap
, int flag
,
1103 offset_t off
, afs_ucred_t
*credp
)
1108 * Implement based on afs_lockctl
1111 #ifdef AFS_SUN59_ENV
1113 afs_warn("Don't know how to deal with flk_callback's!\n");
1115 if ((cmd
== F_GETLK
) || (cmd
== F_O_GETLK
) || (cmd
== F_SETLK
)
1116 || (cmd
== F_SETLKW
)) {
1117 ap
->l_pid
= ttoproc(curthread
)->p_pid
;
1121 code
= convoff(vnp
, ap
, 0, off
);
1127 code
= afs_lockctl(VTOAFS(vnp
), ap
, cmd
, credp
);
1134 afs_space(struct vnode
*vnp
, int cmd
, struct flock64
*ap
, int flag
,
1135 offset_t off
, afs_ucred_t
*credp
)
1137 afs_int32 code
= EINVAL
;
1140 if ((cmd
== F_FREESP
)
1141 && ((code
= convoff(vnp
, ap
, 0, off
)) == 0)) {
1144 vattr
.va_mask
= AT_SIZE
;
1145 vattr
.va_size
= ap
->l_start
;
1146 code
= afs_setattr(VTOAFS(vnp
), &vattr
, 0, credp
);
1154 afs_dump(struct vnode
*vp
, caddr_t addr
, int i1
, int i2
)
1156 AFS_STATCNT(afs_dump
);
1157 afs_warn("AFS_DUMP. MUST IMPLEMENT THIS!!!\n");
/* Nothing fancy here; just compare if vnodes are identical ones */
int
afs_cmp(struct vnode *vp1, struct vnode *vp2)
{
    AFS_STATCNT(afs_cmp);
    return (vp1 == vp2);
}
1172 afs_realvp(struct vnode
*vp
, struct vnode
**vpp
)
1174 AFS_STATCNT(afs_realvp
);
1180 afs_pageio(struct vnode
*vp
, struct page
*pp
, u_int ui1
, u_int ui2
, int i1
,
1183 afs_warn("afs_pageio: Not implemented\n");
1188 #ifdef AFS_SUN59_ENV
1189 afs_dumpctl(struct vnode
*vp
, int i
, int *blkp
)
1191 afs_dumpctl(struct vnode
*vp
, int i
)
1194 afs_warn("afs_dumpctl: Not implemented\n");
1198 #ifdef AFS_SUN511_ENV
1200 afs_dispose(struct vnode
*vp
, struct page
*p
, int fl
, int dn
, struct cred
*cr
, struct caller_context_t
*ct
)
1202 fs_dispose(vp
, p
, fl
, dn
, cr
,ct
);
1206 afs_setsecattr(struct vnode
*vp
, vsecattr_t
*vsecattr
, int flag
, struct cred
*creds
, struct caller_context_t
*ct
)
1212 afs_getsecattr(struct vnode
*vp
, vsecattr_t
*vsecattr
, int flag
, struct cred
*creds
, struct caller_context_t
*ct
)
1214 return fs_fab_acl(vp
, vsecattr
, flag
, creds
,ct
);
1218 afs_dispose(struct vnode
*vp
, struct page
*p
, int fl
, int dn
, struct cred
*cr
)
1220 fs_dispose(vp
, p
, fl
, dn
, cr
);
1224 afs_setsecattr(struct vnode
*vp
, vsecattr_t
*vsecattr
, int flag
,
1231 afs_getsecattr(struct vnode
*vp
, vsecattr_t
*vsecattr
, int flag
, struct cred
*creds
)
1233 return fs_fab_acl(vp
, vsecattr
, flag
, creds
);
1237 #ifdef AFS_GLOBAL_SUNLOCK
1238 extern int gafs_open(struct vcache
**avcp
, afs_int32 aflags
,
1239 afs_ucred_t
*acred
);
1240 extern int gafs_close(struct vcache
*avc
, afs_int32 aflags
,
1241 int count
, offset_t offset
, afs_ucred_t
*acred
);
1242 extern int afs_ioctl(struct vnode
*vnp
, int com
, int arg
, int flag
,
1243 cred_t
*credp
, int *rvalp
);
1244 extern int gafs_access(struct vcache
*avc
, afs_int32 amode
,
1245 int flags
, afs_ucred_t
*acred
);
1246 extern int gafs_getattr(struct vcache
*avc
,
1247 struct vattr
*attrs
, int flags
,
1248 afs_ucred_t
*acred
);
1249 extern int gafs_setattr(struct vcache
*avc
,
1250 struct vattr
*attrs
, int flags
,
1251 afs_ucred_t
*acred
);
1252 extern int gafs_lookup(struct vcache
*adp
, char *aname
,
1253 struct vcache
**avcp
, struct pathname
*pnp
,
1254 int flags
, struct vnode
*rdir
, afs_ucred_t
*acred
);
1255 extern int gafs_remove(struct vcache
*adp
, char *aname
,
1256 afs_ucred_t
*acred
);
1257 extern int gafs_link(struct vcache
*adp
, struct vcache
*avc
,
1258 char *aname
, afs_ucred_t
*acred
);
1259 extern int gafs_rename(struct vcache
*aodp
, char *aname1
,
1260 struct vcache
*andp
, char *aname2
,
1261 afs_ucred_t
*acred
);
1262 extern int gafs_symlink(struct vcache
*adp
, char *aname
,
1263 struct vattr
*attrs
, char *atargetName
,
1264 afs_ucred_t
*acred
);
1265 extern int gafs_rmdir(struct vcache
*adp
, char *aname
,
1266 struct vnode
*cdirp
, afs_ucred_t
*acred
);
1267 extern int gafs_mkdir(struct vcache
*adp
, char *aname
,
1268 struct vattr
*attrs
, struct vcache
**avcp
,
1269 afs_ucred_t
*acred
);
1270 extern int gafs_fsync(struct vcache
*avc
, int flag
, afs_ucred_t
*acred
);
1271 extern int gafs_readlink(struct vcache
*avc
, struct uio
*auio
,
1272 afs_ucred_t
*acred
);
1273 extern int gafs_readdir(struct vcache
*avc
, struct uio
*auio
,
1274 afs_ucred_t
*acred
, int *eofp
);
1275 extern void gafs_inactive(struct vcache
*avc
,
1276 afs_ucred_t
*acred
);
1277 extern int gafs_fid(struct vcache
*avc
, struct fid
**fidpp
);
1278 extern int gafs_create(struct vcache
*adp
, char *aname
,
1279 struct vattr
*attrs
, enum vcexcl aexcl
, int amode
,
1280 struct vcache
**avcp
, afs_ucred_t
*acred
);
1281 #ifdef AFS_SUN511_ENV
1282 extern int afs_pathconf(struct vnode
*vp
, int cmd
, u_long
*outdatap
,
1283 afs_ucred_t
*credp
, caller_context_t
*ct
);
1285 extern int afs_pathconf(struct vnode
*vp
, int cmd
, u_long
*outdatap
,
1286 afs_ucred_t
*credp
);
1287 #endif /* AFS_SUN511_ENV */
1289 #if defined(AFS_SUN511_ENV)
1290 /* The following list must always be NULL-terminated */
1291 const fs_operation_def_t afs_vnodeops_template
[] = {
1292 VOPNAME_OPEN
, { .vop_open
= gafs_open
},
1293 VOPNAME_CLOSE
, { .vop_close
= gafs_close
},
1294 VOPNAME_READ
, { .vop_read
= afs_vmread
},
1295 VOPNAME_WRITE
, { .vop_write
= afs_vmwrite
},
1296 VOPNAME_IOCTL
, { .vop_ioctl
= afs_ioctl
},
1297 VOPNAME_SETFL
, { .vop_setfl
= fs_setfl
},
1298 VOPNAME_GETATTR
, { .vop_getattr
= gafs_getattr
},
1299 VOPNAME_SETATTR
, { .vop_setattr
= gafs_setattr
},
1300 VOPNAME_ACCESS
, { .vop_access
= gafs_access
},
1301 VOPNAME_LOOKUP
, { .vop_lookup
= gafs_lookup
},
1302 VOPNAME_CREATE
, { .vop_create
= gafs_create
},
1303 VOPNAME_REMOVE
, { .vop_remove
= gafs_remove
},
1304 VOPNAME_LINK
, { .vop_link
= gafs_link
},
1305 VOPNAME_RENAME
, { .vop_rename
= gafs_rename
},
1306 VOPNAME_MKDIR
, { .vop_mkdir
= gafs_mkdir
},
1307 VOPNAME_RMDIR
, { .vop_rmdir
= gafs_rmdir
},
1308 VOPNAME_READDIR
, { .vop_readdir
= gafs_readdir
},
1309 VOPNAME_SYMLINK
, { .vop_symlink
= gafs_symlink
},
1310 VOPNAME_READLINK
, { .vop_readlink
= gafs_readlink
},
1311 VOPNAME_FSYNC
, { .vop_fsync
= gafs_fsync
},
1312 VOPNAME_INACTIVE
, { .vop_inactive
= gafs_inactive
},
1313 VOPNAME_FID
, { .vop_fid
= gafs_fid
},
1314 VOPNAME_RWLOCK
, { .vop_rwlock
= afs_rwlock
},
1315 VOPNAME_RWUNLOCK
, { .vop_rwunlock
= afs_rwunlock
},
1316 VOPNAME_SEEK
, { .vop_seek
= afs_seek
},
1317 VOPNAME_CMP
, { .vop_cmp
= afs_cmp
},
1318 VOPNAME_FRLOCK
, { .vop_frlock
= afs_frlock
},
1319 VOPNAME_SPACE
, { .vop_space
= afs_space
},
1320 VOPNAME_REALVP
, { .vop_realvp
= afs_realvp
},
1321 VOPNAME_GETPAGE
, { .vop_getpage
= afs_getpage
},
1322 VOPNAME_PUTPAGE
, { .vop_putpage
= afs_putpage
},
1323 VOPNAME_MAP
, { .vop_map
= afs_map
},
1324 VOPNAME_ADDMAP
, { .vop_addmap
= afs_addmap
},
1325 VOPNAME_DELMAP
, { .vop_delmap
= afs_delmap
},
1326 VOPNAME_POLL
, { .vop_poll
= fs_poll
},
1327 VOPNAME_PATHCONF
, { .vop_pathconf
= afs_pathconf
},
1328 VOPNAME_PAGEIO
, { .vop_pageio
= afs_pageio
},
1329 VOPNAME_DUMP
, { .vop_dump
= afs_dump
},
1330 VOPNAME_DUMPCTL
, { .vop_dumpctl
= afs_dumpctl
},
1331 VOPNAME_DISPOSE
, { .vop_dispose
= afs_dispose
},
1332 VOPNAME_GETSECATTR
, { .vop_getsecattr
= afs_getsecattr
},
1333 VOPNAME_SETSECATTR
, { .vop_setsecattr
= afs_setsecattr
},
1334 VOPNAME_SHRLOCK
, { .vop_shrlock
= fs_shrlock
},
1337 vnodeops_t
*afs_ops
;
1338 #elif defined(AFS_SUN510_ENV)
1339 /* The following list must always be NULL-terminated */
1340 const fs_operation_def_t afs_vnodeops_template
[] = {
1341 VOPNAME_OPEN
, gafs_open
,
1342 VOPNAME_CLOSE
, gafs_close
,
1343 VOPNAME_READ
, afs_vmread
,
1344 VOPNAME_WRITE
, afs_vmwrite
,
1345 VOPNAME_IOCTL
, afs_ioctl
,
1346 VOPNAME_SETFL
, fs_setfl
,
1347 VOPNAME_GETATTR
, gafs_getattr
,
1348 VOPNAME_SETATTR
, gafs_setattr
,
1349 VOPNAME_ACCESS
, gafs_access
,
1350 VOPNAME_LOOKUP
, gafs_lookup
,
1351 VOPNAME_CREATE
, gafs_create
,
1352 VOPNAME_REMOVE
, gafs_remove
,
1353 VOPNAME_LINK
, gafs_link
,
1354 VOPNAME_RENAME
, gafs_rename
,
1355 VOPNAME_MKDIR
, gafs_mkdir
,
1356 VOPNAME_RMDIR
, gafs_rmdir
,
1357 VOPNAME_READDIR
, gafs_readdir
,
1358 VOPNAME_SYMLINK
, gafs_symlink
,
1359 VOPNAME_READLINK
, gafs_readlink
,
1360 VOPNAME_FSYNC
, gafs_fsync
,
1361 VOPNAME_INACTIVE
, gafs_inactive
,
1362 VOPNAME_FID
, gafs_fid
,
1363 VOPNAME_RWLOCK
, afs_rwlock
,
1364 VOPNAME_RWUNLOCK
, afs_rwunlock
,
1365 VOPNAME_SEEK
, afs_seek
,
1366 VOPNAME_CMP
, afs_cmp
,
1367 VOPNAME_FRLOCK
, afs_frlock
,
1368 VOPNAME_SPACE
, afs_space
,
1369 VOPNAME_REALVP
, afs_realvp
,
1370 VOPNAME_GETPAGE
, afs_getpage
,
1371 VOPNAME_PUTPAGE
, afs_putpage
,
1372 VOPNAME_MAP
, afs_map
,
1373 VOPNAME_ADDMAP
, afs_addmap
,
1374 VOPNAME_DELMAP
, afs_delmap
,
1375 VOPNAME_POLL
, fs_poll
,
1376 VOPNAME_DUMP
, afs_dump
,
1377 VOPNAME_PATHCONF
, afs_pathconf
,
1378 VOPNAME_PAGEIO
, afs_pageio
,
1379 VOPNAME_DUMPCTL
, afs_dumpctl
,
1380 VOPNAME_DISPOSE
, afs_dispose
,
1381 VOPNAME_GETSECATTR
, afs_getsecattr
,
1382 VOPNAME_SETSECATTR
, afs_setsecattr
,
1383 VOPNAME_SHRLOCK
, fs_shrlock
,
1386 struct vnodeops
*afs_ops
;
1388 struct vnodeops Afs_vnodeops
= {
1433 struct vnodeops
*afs_ops
= &Afs_vnodeops
;
1437 gafs_open(struct vcache
**avcp
, afs_int32 aflags
,
1443 code
= afs_open(avcp
, aflags
, acred
);
1449 gafs_close(struct vcache
*avc
, afs_int32 aflags
, int count
,
1450 offset_t offset
, afs_ucred_t
*acred
)
1455 code
= afs_close(avc
, aflags
, count
, offset
, acred
);
1461 gafs_getattr(struct vcache
*avc
, struct vattr
*attrs
,
1462 int flags
, afs_ucred_t
*acred
)
1467 code
= afs_getattr(avc
, attrs
, flags
, acred
);
1474 gafs_setattr(struct vcache
*avc
, struct vattr
*attrs
,
1475 int flags
, afs_ucred_t
*acred
)
1480 code
= afs_setattr(avc
, attrs
, flags
, acred
);
1487 gafs_access(struct vcache
*avc
, afs_int32 amode
, int flags
,
1493 code
= afs_access(avc
, amode
, flags
, acred
);
1500 gafs_lookup(struct vcache
*adp
, char *aname
,
1501 struct vcache
**avcp
, struct pathname
*pnp
, int flags
,
1502 struct vnode
*rdir
, afs_ucred_t
*acred
)
1507 code
= afs_lookup(adp
, aname
, avcp
, pnp
, flags
, rdir
, acred
);
1514 gafs_create(struct vcache
*adp
, char *aname
, struct vattr
*attrs
,
1515 enum vcexcl aexcl
, int amode
, struct vcache
**avcp
,
1521 code
= afs_create(adp
, aname
, attrs
, aexcl
, amode
, avcp
, acred
);
1527 gafs_remove(struct vcache
*adp
, char *aname
, afs_ucred_t
*acred
)
1532 code
= afs_remove(adp
, aname
, acred
);
1538 gafs_link(struct vcache
*adp
, struct vcache
*avc
,
1539 char *aname
, afs_ucred_t
*acred
)
1544 code
= afs_link(adp
, avc
, aname
, acred
);
1550 gafs_rename(struct vcache
*aodp
, char *aname1
,
1551 struct vcache
*andp
, char *aname2
,
1557 code
= afs_rename(aodp
, aname1
, andp
, aname2
, acred
);
1558 #ifdef AFS_SUN510_ENV
1560 struct vcache
*avcp
= NULL
;
1562 (void) afs_lookup(andp
, aname2
, &avcp
, NULL
, 0, NULL
, acred
);
1564 struct vnode
*vp
= AFSTOV(avcp
), *pvp
= AFSTOV(andp
);
1566 # ifdef HAVE_VN_RENAMEPATH
1567 vn_renamepath(pvp
, vp
, aname2
, strlen(aname2
));
1569 mutex_enter(&vp
->v_lock
);
1570 if (vp
->v_path
!= NULL
) {
1571 kmem_free(vp
->v_path
, strlen(vp
->v_path
) + 1);
1574 mutex_exit(&vp
->v_lock
);
1575 vn_setpath(afs_globalVp
, pvp
, vp
, aname2
, strlen(aname2
));
1576 # endif /* !HAVE_VN_RENAMEPATH */
1587 gafs_mkdir(struct vcache
*adp
, char *aname
, struct vattr
*attrs
,
1588 struct vcache
**avcp
, afs_ucred_t
*acred
)
1593 code
= afs_mkdir(adp
, aname
, attrs
, avcp
, acred
);
1599 gafs_rmdir(struct vcache
*adp
, char *aname
, struct vnode
*cdirp
,
1605 code
= afs_rmdir(adp
, aname
, cdirp
, acred
);
1612 gafs_readdir(struct vcache
*avc
, struct uio
*auio
,
1613 afs_ucred_t
*acred
, int *eofp
)
1618 code
= afs_readdir(avc
, auio
, acred
, eofp
);
1624 gafs_symlink(struct vcache
*adp
, char *aname
, struct vattr
*attrs
,
1625 char *atargetName
, afs_ucred_t
*acred
)
1630 code
= afs_symlink(adp
, aname
, attrs
, atargetName
, NULL
, acred
);
1637 gafs_readlink(struct vcache
*avc
, struct uio
*auio
, afs_ucred_t
*acred
)
1642 code
= afs_readlink(avc
, auio
, acred
);
1648 gafs_fsync(struct vcache
*avc
, int flag
, afs_ucred_t
*acred
)
1653 code
= afs_fsync(avc
, flag
, acred
);
1659 afs_inactive(struct vcache
*avc
, afs_ucred_t
*acred
)
1661 struct vnode
*vp
= AFSTOV(avc
);
1662 if (afs_shuttingdown
!= AFS_RUNNING
)
1666 * In Solaris and HPUX s800 and HP-UX10.0 they actually call us with
1667 * v_count 1 on last reference!
1669 mutex_enter(&vp
->v_lock
);
1670 if (avc
->vrefCount
<= 0)
1671 osi_Panic("afs_inactive : v_count <=0\n");
1674 * If more than 1 don't unmap the vnode but do decrement the ref count
1677 if (vp
->v_count
> 0) {
1678 mutex_exit(&vp
->v_lock
);
1681 mutex_exit(&vp
->v_lock
);
1683 #ifndef AFS_SUN511_ENV
1685 * Solaris calls VOP_OPEN on exec, but doesn't call VOP_CLOSE when
1686 * the executable exits. So we clean up the open count here.
1688 * Only do this for AFS_MVSTAT_FILE vnodes: when using fakestat, we can't
1689 * lose the open count for volume roots (AFS_MVSTAT_ROOT), even though they
1690 * will get VOP_INACTIVE'd when released by afs_PutFakeStat().
1692 if (avc
->opens
> 0 && avc
->mvstat
== AFS_MVSTAT_FILE
&& !(avc
->f
.states
& CCore
))
1693 avc
->opens
= avc
->execsOrWriters
= 0;
1696 afs_InactiveVCache(avc
, acred
);
1699 /* VFS_RELE must be called outside of GLOCK, since it can potentially
1700 * call afs_freevfs, which acquires GLOCK */
1701 VFS_RELE(afs_globalVFS
);
1708 gafs_inactive(struct vcache
*avc
, afs_ucred_t
*acred
)
1711 (void)afs_inactive(avc
, acred
);
1717 gafs_fid(struct vcache
*avc
, struct fid
**fidpp
)
1722 code
= afs_fid(avc
, fidpp
);
1727 #endif /* AFS_GLOBAL_SUNLOCK */