/* (gitweb listing header) Merge 1.8.0~pre4 packaging into master
 * pkg-k5-afs_openafs.git / src/afs/SOLARIS/osi_vnodeops.c
 * blob 7a8277a1ff39e4f0a1e2098835d8fccccea86ed9 */
1 /*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
10 #include <afsconfig.h>
11 #include "afs/param.h"
/*
 * SOLARIS/osi_vnodeops.c
 *
 * Implements:
 *
 * Functions: AFS_TRYUP, _init, _info, _fini, afs_addmap, afs_delmap,
 * afs_vmread, afs_vmwrite, afs_getpage, afs_GetOnePage, afs_putpage,
 * afs_putapage, afs_nfsrdwr, afs_map, afs_PageLeft, afs_pathconf/afs_cntl,
 * afs_ioctl, afs_rwlock, afs_rwunlock, afs_seek, afs_space, afs_dump,
 * afs_cmp, afs_realvp, afs_pageio, afs_dumpctl, afs_dispose, afs_setsecattr,
 * afs_getsecattr, gafs_open, gafs_close, gafs_getattr, gafs_setattr,
 * gafs_access, gafs_lookup, gafs_create, gafs_remove, gafs_link,
 * gafs_rename, gafs_mkdir, gafs_rmdir, gafs_readdir, gafs_symlink,
 * gafs_readlink, gafs_fsync, afs_inactive, gafs_inactive, gafs_fid
 *
 * Variables: Afs_vnodeops
 */
33 #include "afs/sysincludes.h" /* Standard vendor system headers */
34 #include "afsincludes.h" /* Afs-based standard headers */
35 #include "afs/afs_stats.h" /* statistics */
36 #include "afs/nfsclient.h"
39 #include <sys/mman.h>
40 #include <vm/hat.h>
41 #include <vm/as.h>
42 #include <vm/page.h>
43 #include <vm/pvn.h>
44 #include <vm/seg.h>
45 #include <vm/seg_map.h>
46 #include <vm/seg_vn.h>
47 #include <vm/rm.h>
48 #if defined(AFS_SUN511_ENV)
49 #include <sys/vfs_opreg.h>
50 #endif
51 #include <sys/modctl.h>
52 #include <sys/syscall.h>
53 #include <sys/debug.h>
54 #include <sys/fs_subr.h>
56 /* Translate a faultcode_t as returned by some of the vm routines
57 * into a suitable errno value.
59 static int
60 afs_fc2errno(faultcode_t fc)
62 switch (FC_CODE(fc)) {
63 case 0:
64 return 0;
66 case FC_OBJERR:
67 return FC_ERRNO(fc);
69 default:
70 return EIO;
75 extern struct as kas; /* kernel addr space */
76 extern unsigned char *afs_indexFlags;
77 extern afs_lock_t afs_xdcache;
79 /* Additional vnodeops for SunOS 4.0.x */
80 int afs_nfsrdwr(), afs_getpage(), afs_putpage(), afs_map();
81 int afs_dump(), afs_cmp(), afs_realvp(), afs_GetOnePage();
83 int afs_pvn_vptrunc;
85 int
86 afs_addmap(struct vnode *avp, offset_t offset, struct as *asp,
87 caddr_t addr, int length, int prot, int maxprot, int flags,
88 afs_ucred_t *credp)
90 /* XXX What should we do here?? XXX */
91 return (0);
94 int
95 afs_delmap(struct vnode *avp, offset_t offset, struct as *asp,
96 caddr_t addr, int length, int prot, int maxprot, int flags,
97 afs_ucred_t *credp)
99 /* XXX What should we do here?? XXX */
100 return (0);
103 #ifdef AFS_SUN510_ENV
105 afs_vmread(struct vnode *avp, struct uio *auio, int ioflag,
106 afs_ucred_t *acred, caller_context_t *ct)
107 #else
109 afs_vmread(struct vnode *avp, struct uio *auio, int ioflag,
110 afs_ucred_t *acred)
111 #endif
113 int code;
115 if (!RW_READ_HELD(&(VTOAFS(avp))->rwlock))
116 osi_Panic("afs_vmread: !rwlock");
117 AFS_GLOCK();
118 code = afs_nfsrdwr(VTOAFS(avp), auio, UIO_READ, ioflag, acred);
119 AFS_GUNLOCK();
120 return code;
124 #ifdef AFS_SUN510_ENV
126 afs_vmwrite(struct vnode *avp, struct uio *auio, int ioflag,
127 afs_ucred_t *acred, caller_context_t *ct)
128 #else
130 afs_vmwrite(struct vnode *avp, struct uio *auio, int ioflag,
131 afs_ucred_t *acred)
132 #endif
134 int code;
136 if (!RW_WRITE_HELD(&(VTOAFS(avp))->rwlock))
137 osi_Panic("afs_vmwrite: !rwlock");
138 AFS_GLOCK();
139 code = afs_nfsrdwr(VTOAFS(avp), auio, UIO_WRITE, ioflag, acred);
140 AFS_GUNLOCK();
141 return code;
145 afs_getpage(struct vnode *vp, offset_t off, u_int len, u_int *protp,
146 struct page *pl[], u_int plsz, struct seg *seg, caddr_t addr,
147 enum seg_rw rw, afs_ucred_t *acred)
149 afs_int32 code = 0;
150 AFS_STATCNT(afs_getpage);
152 if (vp->v_flag & VNOMAP) /* File doesn't allow mapping */
153 return (ENOSYS);
155 AFS_GLOCK();
157 if (len <= PAGESIZE)
158 code =
159 afs_GetOnePage(vp, off, len, protp, pl, plsz, seg, addr, rw, acred);
160 else {
161 struct multiPage_range range;
162 struct vcache *vcp = VTOAFS(vp);
164 /* We've been asked to get more than one page. We must return all
165 * requested pages at once, all of them locked, which means all of
166 * these dcache entries cannot be kicked out of the cache before we
167 * return (since their pages cannot be invalidated).
169 * afs_GetOnePage will be called multiple times by pvn_getpages in
170 * order to get all of the requested pages. One of the later
171 * afs_GetOnePage calls may need to evict some cache entries in order
172 * to perform its read. If we try to kick out one of the entries an
173 * earlier afs_GetOnePage call used, we will deadlock since we have
174 * the page locked. So, to tell afs_GetDownD that it should skip over
175 * any entries we've read in due to this afs_getpage call, record the
176 * offset and length in avc->multiPage.
178 * Ideally we would just set something in each dcache as we get it,
179 * but that is rather difficult, since pvn_getpages doesn't let us
180 * retain any information between calls to afs_GetOnePage. So instead
181 * just record the offset and length, and let afs_GetDownD calculate
182 * which dcache entries should be skipped. */
184 range.off = off;
185 range.len = len;
187 ObtainWriteLock(&vcp->vlock, 548);
188 QAdd(&vcp->multiPage, &range.q);
189 ReleaseWriteLock(&vcp->vlock);
190 code =
191 pvn_getpages(afs_GetOnePage, vp, off, len, protp, pl, plsz, seg, addr, rw, acred);
192 ObtainWriteLock(&vcp->vlock, 549);
193 QRemove(&range.q);
194 ReleaseWriteLock(&vcp->vlock);
196 AFS_GUNLOCK();
197 return code;
/* NOTE(review): this listing is extraction-damaged — the original source
 * line numbers are embedded at the start of each line, and brace-only,
 * blank, and a few continuation lines were dropped by the extractor.  The
 * surviving text is preserved byte-for-byte below; only comments have been
 * added.  Restore from the upstream OpenAFS repository before compiling.
 *
 * afs_GetOnePage: the single-page worker behind afs_getpage.  Looks up (or
 * creates) the VM page for [off, off+PAGESIZE), reading its contents from
 * the AFS disk cache via afs_ustrategy when the page is not already in
 * memory.  A NULL 'pl' means a read-ahead (madvise-style) request, handled
 * by queueing background fetches instead of returning pages.  Returns 0 on
 * success or an errno from afs_CheckCode.  Called with the AFS global lock
 * held; drops and retakes it around the page I/O. */
200 /* Return all the pages from [off..off+len) in file */
202 afs_GetOnePage(struct vnode *vp, u_offset_t off, u_int alen, u_int *protp,
203 struct page *pl[], u_int plsz, struct seg *seg, caddr_t addr,
204 enum seg_rw rw, afs_ucred_t *acred)
206 struct page *page;
207 afs_int32 code = 0;
208 u_int len;
209 struct buf *buf;
210 afs_int32 tlen;
211 struct vcache *avc;
212 struct dcache *tdc;
213 int i, s, pexists;
214 int slot;
215 afs_size_t offset, nlen = 0;
216 struct vrequest treq;
217 afs_int32 mapForRead = 0, Code = 0;
218 u_offset_t toffset;
220 if (!acred)
221 osi_Panic("GetOnePage: !acred");
223 avc = VTOAFS(vp); /* cast to afs vnode */
/* NFS translator requests substitute the vnode's saved credentials. */
225 if (avc->credp /*&& AFS_NFSXLATORREQ(acred) */
226 && AFS_NFSXLATORREQ(avc->credp)) {
227 acred = avc->credp;
229 if (code = afs_InitReq(&treq, acred))
230 return code;
/* pl == NULL: pure read-ahead.  Queue background fetches for each chunk
 * in [off, off+alen) and return without producing any pages. */
232 if (!pl) {
233 /* This is a read-ahead request, e.g. due to madvise. */
234 int plen = alen;
235 ObtainReadLock(&avc->lock);
237 while (plen > 0 && !afs_BBusy()) {
238 /* Obtain a dcache entry at off. 2 means don't fetch data. */
239 tdc =
240 afs_GetDCache(avc, (afs_offs_t) off, &treq, &offset, &nlen,
242 if (!tdc)
243 break;
245 /* Write-lock the dcache entry, if we don't succeed, just go on */
246 if (0 != NBObtainWriteLock(&tdc->lock, 642)) {
247 afs_PutDCache(tdc);
248 goto next_prefetch;
251 /* If we aren't already fetching this dcache entry, queue it */
252 if (!(tdc->mflags & DFFetchReq)) {
253 struct brequest *bp;
255 tdc->mflags |= DFFetchReq;
256 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
257 (afs_size_t) off, (afs_size_t) 1, tdc,
258 (void *)0, (void *)0);
259 if (!bp) {
260 /* Unable to start background fetch; might as well stop */
261 tdc->mflags &= ~DFFetchReq;
262 ReleaseWriteLock(&tdc->lock);
263 afs_PutDCache(tdc);
264 break;
266 ReleaseWriteLock(&tdc->lock);
267 } else {
268 ReleaseWriteLock(&tdc->lock);
269 afs_PutDCache(tdc);
272 next_prefetch:
273 /* Adjust our offset and remaining length values */
274 off += nlen;
275 plen -= nlen;
277 /* If we aren't making progress for some reason, bail out */
278 if (nlen <= 0)
279 break;
282 ReleaseReadLock(&avc->lock);
283 return 0;
286 len = PAGESIZE;
287 pl[0] = NULL; /* Make sure it's empty */
289 /* first, obtain the proper lock for the VM system */
291 /* if this is a read request, map the page in read-only. This will
292 * allow us to swap out the dcache entry if there are only read-only
293 * pages created for the chunk, which helps a *lot* when dealing
294 * with small caches. Otherwise, we have to invalidate the vm
295 * pages for the range covered by a chunk when we swap out the
296 * chunk.
298 if (rw == S_READ || rw == S_EXEC)
299 mapForRead = 1;
301 if (protp)
302 *protp = PROT_ALL;
/* retry: restart the whole lookup after a VM purge or a dcache entry
 * that went stale between fetch and lock. */
304 retry:
305 if (rw == S_WRITE || rw == S_CREATE)
306 tdc = afs_GetDCache(avc, (afs_offs_t) off, &treq, &offset, &nlen, 5);
307 else
308 tdc = afs_GetDCache(avc, (afs_offs_t) off, &treq, &offset, &nlen, 1);
309 if (!tdc)
310 return afs_CheckCode(EINVAL, &treq, 62);
311 code = afs_VerifyVCache(avc, &treq);
312 if (code) {
313 afs_PutDCache(tdc);
314 return afs_CheckCode(code, &treq, 44); /* failed to get it */
317 ObtainReadLock(&avc->lock);
319 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEIN, ICL_TYPE_POINTER, (afs_int32) vp,
320 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(off), ICL_TYPE_LONG, len,
321 ICL_TYPE_LONG, (int)rw);
323 tlen = len;
324 slot = 0;
325 toffset = off;
326 /* Check to see if we're in the middle of a VM purge, and if we are, release
327 * the locks and try again when the VM purge is done. */
328 ObtainWriteLock(&avc->vlock, 550);
329 if (avc->activeV) {
330 ReleaseReadLock(&avc->lock);
331 ReleaseWriteLock(&avc->vlock);
332 afs_PutDCache(tdc);
333 /* Check activeV again, it may have been turned off
334 * while we were waiting for a lock in afs_PutDCache */
335 ObtainWriteLock(&avc->vlock, 574);
336 if (avc->activeV) {
337 avc->vstates |= VRevokeWait;
338 ReleaseWriteLock(&avc->vlock);
339 afs_osi_Sleep(&avc->vstates);
340 } else {
341 ReleaseWriteLock(&avc->vlock);
343 goto retry;
345 ReleaseWriteLock(&avc->vlock);
347 /* We're about to do stuff with our dcache entry.. Lock it. */
348 ObtainReadLock(&tdc->lock);
350 /* Check to see whether the cache entry is still valid */
351 if (!(avc->f.states & CStatd)
352 || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
353 ReleaseReadLock(&tdc->lock);
354 ReleaseReadLock(&avc->lock);
355 afs_PutDCache(tdc);
356 goto retry;
/* Main loop: for each page in the request, find it in memory or create it
 * and fill it from the disk cache.  Runs without the AFS global lock; it
 * is retaken only around afs_ustrategy. */
359 AFS_GUNLOCK();
360 while (1) { /* loop over all pages */
361 /* now, try to find the page in memory (it may already be intransit or laying
362 * around the free list */
363 page =
364 page_lookup(vp, toffset, (rw == S_CREATE ? SE_EXCL : SE_SHARED));
365 if (page)
366 goto nextpage;
368 /* if we make it here, we can't find the page in memory. Do a real disk read
369 * from the cache to get the data */
370 Code |= 0x200; /* XXX */
371 /* use PG_EXCL because we know the page does not exist already. If it
372 * actually does exist, we have somehow raced between lookup and create.
373 * As of 4/98, that shouldn't be possible, but we'll be defensive here
374 * in case someone tries to relax all the serialization of read and write
375 * operations with harmless things like stat. */
376 page =
377 page_create_va(vp, toffset, PAGESIZE, PG_WAIT | PG_EXCL, seg,
378 addr);
379 if (!page) {
380 continue;
382 if (alen < PAGESIZE)
383 pagezero(page, alen, PAGESIZE - alen);
385 if (rw == S_CREATE) {
386 /* XXX Don't read from AFS in write only cases XXX */
387 page_io_unlock(page);
388 } else
390 /* now it is time to start I/O operation */
391 buf = pageio_setup(page, PAGESIZE, vp, B_READ); /* allocate a buf structure */
392 buf->b_edev = 0;
393 buf->b_dev = 0;
394 buf->b_lblkno = lbtodb(toffset);
395 bp_mapin(buf); /* map it in to our address space */
397 AFS_GLOCK();
398 /* afs_ustrategy will want to lock the dcache entry */
399 ReleaseReadLock(&tdc->lock);
400 code = afs_ustrategy(buf, acred); /* do the I/O */
401 ObtainReadLock(&tdc->lock);
402 AFS_GUNLOCK();
404 /* Before freeing unmap the buffer */
405 bp_mapout(buf);
406 pageio_done(buf);
407 if (code) {
408 goto bad;
410 page_io_unlock(page);
413 /* come here when we have another page (already held) to enter */
414 nextpage:
415 /* put page in array and continue */
416 /* The p_selock must be downgraded to a shared lock after the page is read */
417 if ((rw != S_CREATE) && !(PAGE_SHARED(page))) {
418 page_downgrade(page);
420 pl[slot++] = page;
421 code = page_iolock_assert(page);
422 code = 0;
423 toffset += PAGESIZE;
424 addr += PAGESIZE;
425 tlen -= PAGESIZE;
426 if (tlen <= 0)
427 break; /* done all the pages */
428 } /* while (1) ... */
430 AFS_GLOCK();
431 pl[slot] = NULL;
432 ReleaseReadLock(&tdc->lock);
434 /* Prefetch next chunk if we're at a chunk boundary */
435 if (AFS_CHUNKOFFSET(off) == 0) {
436 if (!(tdc->mflags & DFNextStarted))
437 afs_PrefetchChunk(avc, tdc, acred, &treq);
440 ReleaseReadLock(&avc->lock);
/* Record page state on the chunk so eviction knows about VM pages. */
441 ObtainWriteLock(&afs_xdcache, 246);
442 if (!mapForRead) {
443 /* track that we have dirty (or dirty-able) pages for this chunk. */
444 afs_indexFlags[tdc->index] |= IFDirtyPages;
446 afs_indexFlags[tdc->index] |= IFAnyPages;
447 ReleaseWriteLock(&afs_xdcache);
448 afs_PutDCache(tdc);
449 afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_LONG, code,
450 ICL_TYPE_LONG, (int)page, ICL_TYPE_LONG, Code);
451 return 0;
/* bad: an I/O error occurred; release everything gathered so far. */
453 bad:
454 AFS_GLOCK();
455 afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_LONG, code,
456 ICL_TYPE_LONG, (int)page, ICL_TYPE_LONG, Code);
457 /* release all pages, drop locks, return code */
458 if (page)
459 pvn_read_done(page, B_ERROR);
460 ReleaseReadLock(&avc->lock);
461 ReleaseReadLock(&tdc->lock);
462 afs_PutDCache(tdc);
463 return code;
467 afs_putpage(struct vnode *vp, offset_t off, u_int len, int flags,
468 afs_ucred_t *cred)
470 struct vcache *avc;
471 struct page *pages;
472 afs_int32 code = 0;
473 size_t tlen;
474 afs_offs_t endPos;
475 afs_int32 NPages = 0;
476 u_offset_t toff = off;
477 int didWriteLock;
479 AFS_STATCNT(afs_putpage);
480 if (vp->v_flag & VNOMAP) /* file doesn't allow mapping */
481 return (ENOSYS);
484 * Putpage (ASYNC) is called every sec to flush out dirty vm pages
486 AFS_GLOCK();
487 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUT, ICL_TYPE_POINTER,
488 (afs_int32) vp, ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(off),
489 ICL_TYPE_INT32, (afs_int32) len, ICL_TYPE_LONG, (int)flags);
490 avc = VTOAFS(vp);
491 ObtainSharedLock(&avc->lock, 247);
492 didWriteLock = 0;
494 /* Get a list of modified (or whatever) pages */
495 if (len) {
496 endPos = (afs_offs_t) off + len; /* position we're supposed to write up to */
497 while ((afs_offs_t) toff < endPos
498 && (afs_offs_t) toff < avc->f.m.Length) {
499 /* If not invalidating pages use page_lookup_nowait to avoid reclaiming
500 * them from the free list
502 AFS_GUNLOCK();
503 if (flags & (B_FREE | B_INVAL))
504 pages = page_lookup(vp, toff, SE_EXCL);
505 else
506 pages = page_lookup_nowait(vp, toff, SE_SHARED);
507 if (!pages || !pvn_getdirty(pages, flags))
508 tlen = PAGESIZE;
509 else {
510 if (!didWriteLock) {
511 AFS_GLOCK();
512 didWriteLock = 1;
513 UpgradeSToWLock(&avc->lock, 671);
514 AFS_GUNLOCK();
516 NPages++;
517 code = afs_putapage(vp, pages, &toff, &tlen, flags, cred);
518 if (code) {
519 AFS_GLOCK();
520 break;
523 toff += tlen;
524 AFS_GLOCK();
526 } else {
527 if (!didWriteLock) {
528 UpgradeSToWLock(&avc->lock, 670);
529 didWriteLock = 1;
532 AFS_GUNLOCK();
533 code = pvn_vplist_dirty(vp, toff, afs_putapage, flags, cred);
534 AFS_GLOCK();
537 if (code && !avc->vc_error) {
538 if (!didWriteLock) {
539 UpgradeSToWLock(&avc->lock, 669);
540 didWriteLock = 1;
542 avc->vc_error = code;
545 if (didWriteLock)
546 ReleaseWriteLock(&avc->lock);
547 else
548 ReleaseSharedLock(&avc->lock);
549 afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_LONG, code,
550 ICL_TYPE_LONG, NPages);
551 AFS_GUNLOCK();
552 return (code);
557 afs_putapage(struct vnode *vp, struct page *pages, u_offset_t * offp,
558 size_t * lenp, int flags, afs_ucred_t *credp)
560 struct buf *tbuf;
561 struct vcache *avc = VTOAFS(vp);
562 afs_int32 code = 0;
563 u_int tlen = PAGESIZE;
564 afs_offs_t off = (pages->p_offset / PAGESIZE) * PAGESIZE;
567 * Now we've got the modified pages. All pages are locked and held
568 * XXX Find a kluster that fits in one block (or page). We also
569 * adjust the i/o if the file space is less than a while page. XXX
571 if (off + tlen > avc->f.m.Length) {
572 tlen = avc->f.m.Length - off;
574 /* can't call mapout with 0 length buffers (rmfree panics) */
575 if (((tlen >> 24) & 0xff) == 0xff) {
576 tlen = 0;
578 if ((int)tlen > 0) {
580 * Can't call mapout with 0 length buffers since we'll get rmfree panics
582 tbuf = pageio_setup(pages, tlen, vp, B_WRITE | flags);
583 if (!tbuf)
584 return (ENOMEM);
586 tbuf->b_dev = 0;
587 tbuf->b_lblkno = lbtodb(pages->p_offset);
588 bp_mapin(tbuf);
589 AFS_GLOCK();
590 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUTONE, ICL_TYPE_LONG, avc,
591 ICL_TYPE_LONG, pages, ICL_TYPE_LONG, tlen, ICL_TYPE_OFFSET,
592 ICL_HANDLE_OFFSET(off));
593 code = afs_ustrategy(tbuf, credp); /* unlocks page */
594 AFS_GUNLOCK();
595 bp_mapout(tbuf);
597 pvn_write_done(pages, ((code) ? B_ERROR : 0) | B_WRITE | flags);
598 if ((int)tlen > 0)
599 pageio_done(tbuf);
600 if (offp)
601 *offp = off;
602 if (lenp)
603 *lenp = tlen;
604 return code;
/* NOTE(review): this listing is extraction-damaged — the original source
 * line numbers are embedded at the start of each line, and brace-only,
 * blank, `int` return-type and comment-closer lines were dropped by the
 * extractor.  The surviving text is preserved byte-for-byte below; only
 * comments have been added.  Restore from the upstream OpenAFS repository
 * before compiling.
 *
 * afs_nfsrdwr: common read/write engine for this vnodeops layer (called by
 * afs_vmread/afs_vmwrite).  Moves auio->uio_resid bytes between the uio
 * and the file, in MAXBSIZE windows, by mapping file windows through the
 * segkmap segment driver and letting page faults / segmap_release perform
 * the actual cache I/O.  Handles O_APPEND, ulimit truncation of writes,
 * file growth, NFS-translator credentials, and FSYNC-on-write.  Returns 0
 * or an errno.  Expects the AFS global lock held; drops and retakes it
 * around uiomove and segmap operations. */
608 afs_nfsrdwr(struct vcache *avc, struct uio *auio, enum uio_rw arw,
609 int ioflag, afs_ucred_t *acred)
611 afs_int32 code;
612 afs_int32 code2;
613 afs_int32 code_checkcode = 0;
614 int counter;
615 afs_int32 mode, sflags;
616 char *data;
617 struct dcache *dcp, *dcp_newpage;
618 afs_size_t fileBase, size;
619 afs_size_t pageBase;
620 afs_int32 tsize;
621 afs_int32 pageOffset, extraResid = 0;
622 afs_size_t origLength; /* length when reading/writing started */
623 long appendLength; /* length when this call will finish */
624 int created; /* created pages instead of faulting them */
625 int lockCode;
626 int didFakeOpen, eof;
627 struct vrequest treq;
628 caddr_t raddr;
629 u_int rsize;
631 AFS_STATCNT(afs_nfsrdwr);
633 /* can't read or write other things */
634 if (vType(avc) != VREG)
635 return EISDIR;
637 if (auio->uio_resid == 0)
638 return (0);
640 afs_Trace4(afs_iclSetp, CM_TRACE_VMRW, ICL_TYPE_POINTER, (afs_int32) avc,
641 ICL_TYPE_LONG, (arw == UIO_WRITE ? 1 : 0), ICL_TYPE_OFFSET,
642 ICL_HANDLE_OFFSET(auio->uio_loffset), ICL_TYPE_OFFSET,
643 ICL_HANDLE_OFFSET(auio->uio_resid));
645 #ifndef AFS_64BIT_CLIENT
646 if (AfsLargeFileUio(auio)) /* file is larger than 2 GB */
647 return (EFBIG);
648 #endif
650 if (!acred)
651 osi_Panic("rdwr: !acred");
653 if (code = afs_InitReq(&treq, acred))
654 return code;
656 /* It's not really possible to know if a write cause a growth in the
657 * cache size, we we wait for a cache drain for any write.
659 afs_MaybeWakeupTruncateDaemon();
660 while ((arw == UIO_WRITE)
661 && (afs_blocksUsed > PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks))) {
662 if (afs_blocksUsed - afs_blocksDiscarded >
663 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
664 afs_WaitForCacheDrain = 1;
665 afs_osi_Sleep(&afs_WaitForCacheDrain);
667 afs_MaybeFreeDiscardedDCache();
668 afs_MaybeWakeupTruncateDaemon();
670 code = afs_VerifyVCache(avc, &treq);
671 if (code)
672 return afs_CheckCode(code, &treq, 45);
674 osi_FlushPages(avc, acred);
676 ObtainWriteLock(&avc->lock, 250);
678 /* adjust parameters when appending files */
679 if ((ioflag & IO_APPEND) && arw == UIO_WRITE) {
680 auio->uio_loffset = avc->f.m.Length; /* write at EOF position */
682 if (auio->afsio_offset < 0 || (auio->afsio_offset + auio->uio_resid) < 0) {
683 ReleaseWriteLock(&avc->lock);
684 return EINVAL;
686 #ifndef AFS_64BIT_CLIENT
687 /* file is larger than 2GB */
688 if (AfsLargeFileSize(auio->uio_offset, auio->uio_resid)) {
689 ReleaseWriteLock(&avc->lock);
690 return EFBIG;
692 #endif
694 didFakeOpen = 0; /* keep track of open so we can do close */
695 if (arw == UIO_WRITE) {
696 /* do ulimit processing; shrink resid or fail */
697 if (auio->uio_loffset + auio->afsio_resid > auio->uio_llimit) {
698 if (auio->uio_loffset >= auio->uio_llimit) {
699 ReleaseWriteLock(&avc->lock);
700 return EFBIG;
701 } else {
702 /* track # of bytes we should write, but won't because of
703 * ulimit; we must add this into the final resid value
704 * so caller knows we punted some data.
706 extraResid = auio->uio_resid;
707 auio->uio_resid = auio->uio_llimit - auio->uio_loffset;
708 extraResid -= auio->uio_resid;
711 mode = S_WRITE; /* segment map-in mode */
712 afs_FakeOpen(avc); /* do this for writes, so data gets put back
713 * when we want it to be put back */
714 didFakeOpen = 1; /* we'll be doing a fake open */
715 /* before starting any I/O, we must ensure that the file is big enough
716 * to hold the results (since afs_putpage will be called to force the I/O */
717 size = auio->afsio_resid + auio->afsio_offset; /* new file size */
718 appendLength = size;
719 origLength = avc->f.m.Length;
720 if (size > avc->f.m.Length) {
721 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
722 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
723 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
724 ICL_HANDLE_OFFSET(size));
725 avc->f.m.Length = size; /* file grew */
727 avc->f.states |= CDirty; /* Set the dirty bit */
728 avc->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
729 } else {
730 mode = S_READ; /* map-in read-only */
731 origLength = avc->f.m.Length;
/* NFS translator reads: verify access, then stash the caller's
 * credentials on the vcache for later page operations. */
734 if (acred && AFS_NFSXLATORREQ(acred)) {
735 if (arw == UIO_READ) {
736 if (!afs_AccessOK
737 (avc, PRSFS_READ, &treq,
738 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
739 ReleaseWriteLock(&avc->lock);
740 return EACCES;
743 crhold(acred);
744 if (avc->credp) {
745 crfree(avc->credp);
747 avc->credp = acred;
749 counter = 0; /* don't call afs_DoPartialWrite first time through. */
/* Main transfer loop: one MAXBSIZE window per iteration. */
750 while (1) {
751 /* compute the amount of data to move into this block,
752 * based on auio->afsio_resid. Note that we copy data in units of
753 * MAXBSIZE, not PAGESIZE. This is because segmap_getmap panics if you
754 * call it with an offset based on blocks smaller than MAXBSIZE
755 * (implying that it should be named BSIZE, since it is clearly both a
756 * max and a min). */
757 size = auio->afsio_resid; /* transfer size */
758 fileBase = ((arw == UIO_READ) && (origLength < auio->uio_offset)) ?
759 origLength : auio->afsio_offset; /* start file position for xfr */
760 pageBase = fileBase & ~(MAXBSIZE - 1); /* file position of the page */
761 pageOffset = fileBase & (MAXBSIZE - 1); /* xfr start's offset within page */
762 tsize = MAXBSIZE - pageOffset; /* how much more fits in this page */
763 /* we'll read tsize bytes, but first must make sure tsize isn't too big */
764 if (tsize > size)
765 tsize = size; /* don't read past end of request */
766 eof = 0; /* flag telling us if we hit the EOF on the read */
767 if (arw == UIO_READ) { /* we're doing a read operation */
768 /* don't read past EOF */
769 if (fileBase + tsize > origLength) {
770 tsize = origLength - fileBase;
771 eof = 1; /* we did hit the EOF */
772 if (tsize < 0)
773 tsize = 0; /* better safe than sorry */
775 sflags = 0;
776 } else {
777 /* Purge dirty chunks of file if there are too many dirty
778 * chunks. Inside the write loop, we only do this at a chunk
779 * boundary. Clean up partial chunk if necessary at end of loop.
781 if (counter > 0 && code == 0 && AFS_CHUNKOFFSET(fileBase) == 0) {
782 code = afs_DoPartialWrite(avc, &treq);
783 if (code)
784 break;
786 /* write case, we ask segmap_release to call putpage. Really, we
787 * don't have to do this on every page mapin, but for now we're
788 * lazy, and don't modify the rest of AFS to scan for modified
789 * pages on a close or other "synchronize with file server"
790 * operation. This makes things a little cleaner, but probably
791 * hurts performance. */
792 sflags = SM_WRITE;
794 if (tsize <= 0) {
795 code = 0;
796 break; /* nothing to transfer, we're done */
798 if (arw == UIO_WRITE)
799 avc->f.states |= CDirty; /* may have been cleared by DoPartialWrite */
801 /* Before dropping lock, hold the chunk (create it if necessary). This
802 * serves two purposes: (1) Ensure Cache Truncate Daemon doesn't try
803 * to purge the chunk's pages while we have them locked. This would
804 * cause deadlock because we might be waiting for the CTD to free up
805 * a chunk. (2) If we're writing past the original EOF, and we're
806 * at the base of the chunk, then make sure it exists online
807 * before we do the uiomove, since the segmap_release will
808 * write out to the chunk, causing it to get fetched if it hasn't
809 * been created yet. The code that would otherwise notice that
810 * we're fetching a chunk past EOF won't work, since we've
811 * already adjusted the file size above.
813 ObtainWriteLock(&avc->vlock, 551);
814 while (avc->vstates & VPageCleaning) {
815 ReleaseWriteLock(&avc->vlock);
816 ReleaseWriteLock(&avc->lock);
817 afs_osi_Sleep(&avc->vstates);
818 ObtainWriteLock(&avc->lock, 334);
819 ObtainWriteLock(&avc->vlock, 552);
821 ReleaseWriteLock(&avc->vlock);
823 afs_size_t toff, tlen;
824 dcp = afs_GetDCache(avc, fileBase, &treq, &toff, &tlen, 2);
825 if (!dcp) {
826 code = EIO;
827 break;
830 ReleaseWriteLock(&avc->lock); /* uiomove may page fault */
831 AFS_GUNLOCK();
832 data = segmap_getmap(segkmap, AFSTOV(avc), (u_offset_t) pageBase);
833 raddr = (caddr_t) (((uintptr_t) data + pageOffset) & PAGEMASK);
834 rsize =
835 (((u_int) data + pageOffset + tsize + PAGEOFFSET) & PAGEMASK) -
836 (u_int) raddr;
837 if (code == 0) {
838 /* if we're doing a write, and we're starting at the rounded
839 * down page base, and we're writing enough data to cover all
840 * created pages, then we must be writing all of the pages
841 * in this MAXBSIZE window that we're creating.
843 created = 0;
844 if (arw == UIO_WRITE && ((long)raddr == (long)data + pageOffset)
845 && tsize >= rsize) {
846 /* probably the dcache backing this guy is around, but if
847 * not, we can't do this optimization, since we're creating
848 * writable pages, which must be backed by a chunk.
850 AFS_GLOCK();
851 dcp_newpage = afs_FindDCache(avc, pageBase);
852 if (dcp_newpage
853 && hsame(avc->f.m.DataVersion, dcp_newpage->f.versionNo)) {
854 ObtainWriteLock(&avc->lock, 251);
855 ObtainWriteLock(&avc->vlock, 576);
856 ObtainReadLock(&dcp_newpage->lock);
857 if ((avc->activeV == 0)
858 && hsame(avc->f.m.DataVersion, dcp_newpage->f.versionNo)
859 && !(dcp_newpage->dflags & (DFFetching))) {
860 AFS_GUNLOCK();
861 segmap_pagecreate(segkmap, raddr, rsize, 1);
862 AFS_GLOCK();
863 ObtainWriteLock(&afs_xdcache, 252);
864 /* Mark the pages as created and dirty */
865 afs_indexFlags[dcp_newpage->index]
866 |= (IFAnyPages | IFDirtyPages);
867 ReleaseWriteLock(&afs_xdcache);
868 created = 1;
870 ReleaseReadLock(&dcp_newpage->lock);
871 afs_PutDCache(dcp_newpage);
872 ReleaseWriteLock(&avc->vlock);
873 ReleaseWriteLock(&avc->lock);
874 } else if (dcp_newpage)
875 afs_PutDCache(dcp_newpage);
876 AFS_GUNLOCK();
878 if (!created)
879 code =
880 afs_fc2errno(segmap_fault
881 (kas.a_hat, segkmap, raddr, rsize,
882 F_SOFTLOCK, mode));
884 if (code == 0) {
885 AFS_UIOMOVE(data + pageOffset, tsize, arw, auio, code);
886 segmap_fault(kas.a_hat, segkmap, raddr, rsize, F_SOFTUNLOCK,
887 mode);
889 if (code == 0) {
890 code = segmap_release(segkmap, data, sflags);
891 } else {
892 (void)segmap_release(segkmap, data, 0);
894 AFS_GLOCK();
895 ObtainWriteLock(&avc->lock, 253);
896 counter++;
897 if (dcp)
898 afs_PutDCache(dcp);
899 if (code)
900 break;
/* Post-loop cleanup: undo the fake open, flush any partial chunk, and
 * fold any recorded vcache error into the result. */
902 if (didFakeOpen) {
903 afs_FakeClose(avc, acred);
905 if (arw == UIO_WRITE && (avc->f.states & CDirty)) {
906 code2 = afs_DoPartialWrite(avc, &treq);
907 if (!code)
908 code = code2;
911 if (!code && avc->vc_error) {
912 code = code_checkcode = avc->vc_error;
914 ReleaseWriteLock(&avc->lock);
915 if (!code) {
916 if ((ioflag & FSYNC) && (arw == UIO_WRITE)
917 && !AFS_NFSXLATORREQ(acred))
918 code = afs_fsync(avc, 0, acred);
921 * If things worked, add in as remaining in request any bytes
922 * we didn't write due to file size ulimit.
924 if (code == 0 && extraResid > 0)
925 auio->uio_resid += extraResid;
926 if (code_checkcode) {
927 return code_checkcode;
928 } else {
929 return afs_CheckCode(code, &treq, 46);
934 afs_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addr, size_t len, u_char prot, u_char maxprot, u_int flags, afs_ucred_t *cred)
936 struct segvn_crargs crargs;
937 afs_int32 code;
938 struct vrequest treq;
939 struct vcache *avc = VTOAFS(vp);
941 AFS_STATCNT(afs_map);
944 /* check for reasonableness on segment bounds; apparently len can be < 0 */
945 if (off < 0 || off + len < 0) {
946 return (EINVAL);
948 #ifndef AFS_64BIT_CLIENT
949 if (AfsLargeFileSize(off, len)) { /* file is larger than 2 GB */
950 code = EFBIG;
951 goto out;
953 #endif
955 if (vp->v_flag & VNOMAP) /* File isn't allowed to be mapped */
956 return (ENOSYS);
958 if (vp->v_filocks) /* if locked, disallow mapping */
959 return (EAGAIN);
961 AFS_GLOCK();
962 if (code = afs_InitReq(&treq, cred))
963 goto out;
965 if (vp->v_type != VREG) {
966 code = ENODEV;
967 goto out;
970 code = afs_VerifyVCache(avc, &treq);
971 if (code) {
972 goto out;
974 osi_FlushPages(avc, cred); /* ensure old pages are gone */
975 avc->f.states |= CMAPPED; /* flag cleared at afs_inactive */
977 AFS_GUNLOCK();
978 as_rangelock(as);
979 if ((flags & MAP_FIXED) == 0) {
980 map_addr(addr, len, off, 1, flags);
981 if (*addr == NULL) {
982 as_rangeunlock(as);
983 code = ENOMEM;
984 goto out1;
986 } else
987 (void)as_unmap(as, *addr, len); /* unmap old address space use */
988 /* setup the create parameter block for the call */
989 crargs.vp = AFSTOV(avc);
990 crargs.offset = (u_offset_t)off;
991 crargs.cred = cred;
992 crargs.type = flags & MAP_TYPE;
993 crargs.prot = prot;
994 crargs.maxprot = maxprot;
995 crargs.amp = (struct anon_map *)0;
996 crargs.flags = flags & ~MAP_TYPE;
998 code = as_map(as, *addr, len, segvn_create, (char *)&crargs);
999 as_rangeunlock(as);
1000 out1:
1001 AFS_GLOCK();
1002 code = afs_CheckCode(code, &treq, 47);
1003 AFS_GUNLOCK();
1004 return code;
1005 out:
1006 code = afs_CheckCode(code, &treq, 48);
1007 AFS_GUNLOCK();
1008 return code;
1013 * For Now We use standard local kernel params for AFS system values. Change this
1014 * at some point.
1017 #ifdef AFS_SUN511_ENV
1018 afs_pathconf(struct vnode *vp, int cmd, u_long *outdatap,
1019 afs_ucred_t *credp, caller_context_t *ct)
1020 #else
1021 afs_pathconf(struct vnode *vp, int cmd, u_long *outdatap,
1022 afs_ucred_t *credp)
1023 #endif /* AFS_SUN511_ENV */
1025 AFS_STATCNT(afs_cntl);
1026 switch (cmd) {
1027 case _PC_LINK_MAX:
1028 *outdatap = MAXLINK;
1029 break;
1030 case _PC_NAME_MAX:
1031 *outdatap = MAXNAMLEN;
1032 break;
1033 case _PC_PATH_MAX:
1034 *outdatap = MAXPATHLEN;
1035 break;
1036 case _PC_CHOWN_RESTRICTED:
1037 *outdatap = 1;
1038 break;
1039 case _PC_NO_TRUNC:
1040 *outdatap = 1;
1041 break;
1042 case _PC_FILESIZEBITS:
1043 #ifdef AFS_64BIT_CLIENT
1044 *outdatap = 64;
1045 #else
1046 *outdatap = 32;
1047 #endif
1048 break;
1049 default:
1050 #ifdef AFS_SUN511_ENV
1051 return fs_pathconf(vp, cmd, outdatap, credp, ct);
1052 #else
1053 return fs_pathconf(vp, cmd, outdatap, credp);
1054 #endif /* AFS_SUN511_ENV */
1056 return 0;
1060 afs_ioctl(struct vnode *vnp, int com, int arg, int flag, cred_t *credp,
1061 int *rvalp)
1063 return (ENOTTY);
1066 void
1067 afs_rwlock(struct vnode *vnp, int wlock)
1069 rw_enter(&(VTOAFS(vnp))->rwlock, (wlock ? RW_WRITER : RW_READER));
1073 void
1074 afs_rwunlock(struct vnode *vnp, int wlock)
1076 rw_exit(&(VTOAFS(vnp))->rwlock);
1080 /* NOT SUPPORTED */
1082 afs_seek(struct vnode *vnp, offset_t ooff, offset_t *noffp)
1084 int code = 0;
1086 #ifndef AFS_64BIT_CLIENT
1087 # define __MAXOFF_T MAXOFF_T
1088 #else
1089 # define __MAXOFF_T MAXOFFSET_T
1090 #endif
1092 if ((*noffp < 0 || *noffp > __MAXOFF_T))
1093 code = EINVAL;
1094 return code;
/*
 * VOP_FRLOCK handler: fcntl-style advisory file locking, implemented on
 * top of afs_lockctl().
 */
int
#ifdef AFS_SUN59_ENV
afs_frlock(struct vnode *vnp, int cmd, struct flock64 *ap, int flag,
	   offset_t off, struct flk_callback *flkcb, afs_ucred_t *credp)
#else
afs_frlock(struct vnode *vnp, int cmd, struct flock64 *ap, int flag,
	   offset_t off, afs_ucred_t *credp)
#endif
{
    afs_int32 code = 0;
    /*
     * Implement based on afs_lockctl
     */
    AFS_GLOCK();
#ifdef AFS_SUN59_ENV
    /* blocking-lock callbacks are not supported; warn and proceed */
    if (flkcb)
	afs_warn("Don't know how to deal with flk_callback's!\n");
#endif
    if ((cmd == F_GETLK) || (cmd == F_O_GETLK) || (cmd == F_SETLK)
	|| (cmd == F_SETLKW)) {
	/* tag the lock request with the local process identity */
	ap->l_pid = ttoproc(curthread)->p_pid;
	ap->l_sysid = 0;

	/* convoff() may sleep; drop the global lock around it.  On
	 * failure we return with GLOCK already released. */
	AFS_GUNLOCK();
	code = convoff(vnp, ap, 0, off);
	if (code)
	    return code;
	AFS_GLOCK();
    }

    code = afs_lockctl(VTOAFS(vnp), ap, cmd, credp);
    AFS_GUNLOCK();
    return code;
}
/*
 * VOP_SPACE handler: only F_FREESP with l_len == 0 is supported, which
 * by convention means "set the file size to l_start"; this is mapped to
 * afs_setattr(AT_SIZE).
 */
int
afs_space(struct vnode *vnp, int cmd, struct flock64 *ap, int flag,
	  offset_t off, afs_ucred_t *credp)
{
    afs_int32 code = EINVAL;
    struct vattr vattr;

    if ((cmd == F_FREESP)
	&& ((code = convoff(vnp, ap, 0, off)) == 0)) {
	AFS_GLOCK();
	if (!ap->l_len) {
	    vattr.va_mask = AT_SIZE;
	    vattr.va_size = ap->l_start;
	    code = afs_setattr(VTOAFS(vnp), &vattr, 0, credp);
	}
	/* NOTE(review): a nonzero l_len falls through and returns the
	 * convoff result (0) without freeing anything — confirm this
	 * silent success is intended. */
	AFS_GUNLOCK();
    }
    return (code);
}
/* VOP_DUMP handler: kernel crash-dump to an AFS file is unimplemented. */
int
afs_dump(struct vnode *vp, caddr_t addr, int i1, int i2)
{
    AFS_STATCNT(afs_dump);
    afs_warn("AFS_DUMP. MUST IMPLEMENT THIS!!!\n");
    return EINVAL;
}
/* Nothing fancy here; just compare if vnodes are identical ones */
/* VOP_CMP handler: pointer identity is the equality test. */
int
afs_cmp(struct vnode *vp1, struct vnode *vp2)
{
    AFS_STATCNT(afs_cmp);
    return (vp1 == vp2);
}
/* VOP_REALVP handler: AFS vnodes are not stacked over another
 * filesystem's vnodes, so there is no underlying vnode to report. */
int
afs_realvp(struct vnode *vp, struct vnode **vpp)
{
    AFS_STATCNT(afs_realvp);
    return EINVAL;
}
/* VOP_PAGEIO handler: direct page I/O is not implemented for AFS. */
int
afs_pageio(struct vnode *vp, struct page *pp, u_int ui1, u_int ui2, int i1,
	   struct cred *credp)
{
    afs_warn("afs_pageio: Not implemented\n");
    return EINVAL;
}
/* VOP_DUMPCTL handler: dump-device control is not supported on AFS. */
int
#ifdef AFS_SUN59_ENV
afs_dumpctl(struct vnode *vp, int i, int *blkp)
#else
afs_dumpctl(struct vnode *vp, int i)
#endif
{
    afs_warn("afs_dumpctl: Not implemented\n");
    return EINVAL;
}
#ifdef AFS_SUN511_ENV
/* VOP_DISPOSE: delegate page disposal to the generic fs_dispose(). */
extern void
afs_dispose(struct vnode *vp, struct page *p, int fl, int dn, struct cred *cr, struct caller_context_t *ct)
{
    fs_dispose(vp, p, fl, dn, cr,ct);
}

/* VOP_SETSECATTR: setting ACLs on AFS vnodes is not supported. */
int
afs_setsecattr(struct vnode *vp, vsecattr_t *vsecattr, int flag, struct cred *creds, struct caller_context_t *ct)
{
    return ENOSYS;
}

/* VOP_GETSECATTR: delegate to the generic fs_fab_acl() helper. */
int
afs_getsecattr(struct vnode *vp, vsecattr_t *vsecattr, int flag, struct cred *creds, struct caller_context_t *ct)
{
    return fs_fab_acl(vp, vsecattr, flag, creds,ct);
}
#else
/* Pre-Solaris-11 variants of the same three entry points, without the
 * caller_context_t argument. */
extern void
afs_dispose(struct vnode *vp, struct page *p, int fl, int dn, struct cred *cr)
{
    fs_dispose(vp, p, fl, dn, cr);
}

int
afs_setsecattr(struct vnode *vp, vsecattr_t *vsecattr, int flag,
	       struct cred *creds)
{
    return ENOSYS;
}

int
afs_getsecattr(struct vnode *vp, vsecattr_t *vsecattr, int flag, struct cred *creds)
{
    return fs_fab_acl(vp, vsecattr, flag, creds);
}
#endif
#ifdef AFS_GLOBAL_SUNLOCK
/*
 * Forward declarations for the glock-wrapper entry points (gafs_*) and
 * the afs_* routines referenced by the vnodeops tables below.
 */
extern int gafs_open(struct vcache **avcp, afs_int32 aflags,
		     afs_ucred_t *acred);
extern int gafs_close(struct vcache *avc, afs_int32 aflags,
		      int count, offset_t offset, afs_ucred_t *acred);
extern int afs_ioctl(struct vnode *vnp, int com, int arg, int flag,
		     cred_t *credp, int *rvalp);
extern int gafs_access(struct vcache *avc, afs_int32 amode,
		       int flags, afs_ucred_t *acred);
extern int gafs_getattr(struct vcache *avc,
			struct vattr *attrs, int flags,
			afs_ucred_t *acred);
extern int gafs_setattr(struct vcache *avc,
			struct vattr *attrs, int flags,
			afs_ucred_t *acred);
extern int gafs_lookup(struct vcache *adp, char *aname,
		       struct vcache **avcp, struct pathname *pnp,
		       int flags, struct vnode *rdir, afs_ucred_t *acred);
extern int gafs_remove(struct vcache *adp, char *aname,
		       afs_ucred_t *acred);
extern int gafs_link(struct vcache *adp, struct vcache *avc,
		     char *aname, afs_ucred_t *acred);
extern int gafs_rename(struct vcache *aodp, char *aname1,
		       struct vcache *andp, char *aname2,
		       afs_ucred_t *acred);
extern int gafs_symlink(struct vcache *adp, char *aname,
			struct vattr *attrs, char *atargetName,
			afs_ucred_t *acred);
extern int gafs_rmdir(struct vcache *adp, char *aname,
		      struct vnode *cdirp, afs_ucred_t *acred);
extern int gafs_mkdir(struct vcache *adp, char *aname,
		      struct vattr *attrs, struct vcache **avcp,
		      afs_ucred_t *acred);
extern int gafs_fsync(struct vcache *avc, int flag, afs_ucred_t *acred);
extern int gafs_readlink(struct vcache *avc, struct uio *auio,
			 afs_ucred_t *acred);
extern int gafs_readdir(struct vcache *avc, struct uio *auio,
			afs_ucred_t *acred, int *eofp);
extern void gafs_inactive(struct vcache *avc,
			  afs_ucred_t *acred);
extern int gafs_fid(struct vcache *avc, struct fid **fidpp);
extern int gafs_create(struct vcache *adp, char *aname,
		       struct vattr *attrs, enum vcexcl aexcl, int amode,
		       struct vcache **avcp, afs_ucred_t *acred);
#ifdef AFS_SUN511_ENV
extern int afs_pathconf(struct vnode *vp, int cmd, u_long *outdatap,
			afs_ucred_t *credp, caller_context_t *ct);
#else
extern int afs_pathconf(struct vnode *vp, int cmd, u_long *outdatap,
			afs_ucred_t *credp);
#endif /* AFS_SUN511_ENV */
#if defined(AFS_SUN511_ENV)
/* The following list must always be NULL-terminated */
/* Solaris 11: name/designated-union pairs consumed by vn_make_ops(). */
const fs_operation_def_t afs_vnodeops_template[] = {
    VOPNAME_OPEN, { .vop_open = gafs_open },
    VOPNAME_CLOSE, { .vop_close = gafs_close },
    VOPNAME_READ, { .vop_read = afs_vmread },
    VOPNAME_WRITE, { .vop_write = afs_vmwrite },
    VOPNAME_IOCTL, { .vop_ioctl = afs_ioctl },
    VOPNAME_SETFL, { .vop_setfl = fs_setfl },
    VOPNAME_GETATTR, { .vop_getattr = gafs_getattr },
    VOPNAME_SETATTR, { .vop_setattr = gafs_setattr },
    VOPNAME_ACCESS, { .vop_access = gafs_access },
    VOPNAME_LOOKUP, { .vop_lookup = gafs_lookup },
    VOPNAME_CREATE, { .vop_create = gafs_create },
    VOPNAME_REMOVE, { .vop_remove = gafs_remove },
    VOPNAME_LINK, { .vop_link = gafs_link },
    VOPNAME_RENAME, { .vop_rename = gafs_rename },
    VOPNAME_MKDIR, { .vop_mkdir = gafs_mkdir },
    VOPNAME_RMDIR, { .vop_rmdir = gafs_rmdir },
    VOPNAME_READDIR, { .vop_readdir = gafs_readdir },
    VOPNAME_SYMLINK, { .vop_symlink = gafs_symlink },
    VOPNAME_READLINK, { .vop_readlink = gafs_readlink },
    VOPNAME_FSYNC, { .vop_fsync = gafs_fsync },
    VOPNAME_INACTIVE, { .vop_inactive = gafs_inactive },
    VOPNAME_FID, { .vop_fid = gafs_fid },
    VOPNAME_RWLOCK, { .vop_rwlock = afs_rwlock },
    VOPNAME_RWUNLOCK, { .vop_rwunlock = afs_rwunlock },
    VOPNAME_SEEK, { .vop_seek = afs_seek },
    VOPNAME_CMP, { .vop_cmp = afs_cmp },
    VOPNAME_FRLOCK, { .vop_frlock = afs_frlock },
    VOPNAME_SPACE, { .vop_space = afs_space },
    VOPNAME_REALVP, { .vop_realvp = afs_realvp },
    VOPNAME_GETPAGE, { .vop_getpage = afs_getpage },
    VOPNAME_PUTPAGE, { .vop_putpage = afs_putpage },
    VOPNAME_MAP, { .vop_map = afs_map },
    VOPNAME_ADDMAP, { .vop_addmap = afs_addmap },
    VOPNAME_DELMAP, { .vop_delmap = afs_delmap },
    VOPNAME_POLL, { .vop_poll = fs_poll },
    VOPNAME_PATHCONF, { .vop_pathconf = afs_pathconf },
    VOPNAME_PAGEIO, { .vop_pageio = afs_pageio },
    VOPNAME_DUMP, { .vop_dump = afs_dump },
    VOPNAME_DUMPCTL, { .vop_dumpctl = afs_dumpctl },
    VOPNAME_DISPOSE, { .vop_dispose = afs_dispose },
    VOPNAME_GETSECATTR, { .vop_getsecattr = afs_getsecattr },
    VOPNAME_SETSECATTR, { .vop_setsecattr = afs_setsecattr },
    VOPNAME_SHRLOCK, { .vop_shrlock = fs_shrlock },
    NULL, NULL
};
vnodeops_t *afs_ops;
#elif defined(AFS_SUN510_ENV)
/* The following list must always be NULL-terminated */
/* Solaris 10: name/function-pointer pairs consumed by vn_make_ops(). */
const fs_operation_def_t afs_vnodeops_template[] = {
    VOPNAME_OPEN, gafs_open,
    VOPNAME_CLOSE, gafs_close,
    VOPNAME_READ, afs_vmread,
    VOPNAME_WRITE, afs_vmwrite,
    VOPNAME_IOCTL, afs_ioctl,
    VOPNAME_SETFL, fs_setfl,
    VOPNAME_GETATTR, gafs_getattr,
    VOPNAME_SETATTR, gafs_setattr,
    VOPNAME_ACCESS, gafs_access,
    VOPNAME_LOOKUP, gafs_lookup,
    VOPNAME_CREATE, gafs_create,
    VOPNAME_REMOVE, gafs_remove,
    VOPNAME_LINK, gafs_link,
    VOPNAME_RENAME, gafs_rename,
    VOPNAME_MKDIR, gafs_mkdir,
    VOPNAME_RMDIR, gafs_rmdir,
    VOPNAME_READDIR, gafs_readdir,
    VOPNAME_SYMLINK, gafs_symlink,
    VOPNAME_READLINK, gafs_readlink,
    VOPNAME_FSYNC, gafs_fsync,
    VOPNAME_INACTIVE, gafs_inactive,
    VOPNAME_FID, gafs_fid,
    VOPNAME_RWLOCK, afs_rwlock,
    VOPNAME_RWUNLOCK, afs_rwunlock,
    VOPNAME_SEEK, afs_seek,
    VOPNAME_CMP, afs_cmp,
    VOPNAME_FRLOCK, afs_frlock,
    VOPNAME_SPACE, afs_space,
    VOPNAME_REALVP, afs_realvp,
    VOPNAME_GETPAGE, afs_getpage,
    VOPNAME_PUTPAGE, afs_putpage,
    VOPNAME_MAP, afs_map,
    VOPNAME_ADDMAP, afs_addmap,
    VOPNAME_DELMAP, afs_delmap,
    VOPNAME_POLL, fs_poll,
    VOPNAME_DUMP, afs_dump,
    VOPNAME_PATHCONF, afs_pathconf,
    VOPNAME_PAGEIO, afs_pageio,
    VOPNAME_DUMPCTL, afs_dumpctl,
    VOPNAME_DISPOSE, afs_dispose,
    VOPNAME_GETSECATTR, afs_getsecattr,
    VOPNAME_SETSECATTR, afs_setsecattr,
    VOPNAME_SHRLOCK, fs_shrlock,
    NULL, NULL
};
struct vnodeops *afs_ops;
#else
/* Pre-Solaris-10: positional initializer; the order of entries must
 * match the struct vnodeops member order exactly. */
struct vnodeops Afs_vnodeops = {
    gafs_open,
    gafs_close,
    afs_vmread,
    afs_vmwrite,
    afs_ioctl,
    fs_setfl,
    gafs_getattr,
    gafs_setattr,
    gafs_access,
    gafs_lookup,
    gafs_create,
    gafs_remove,
    gafs_link,
    gafs_rename,
    gafs_mkdir,
    gafs_rmdir,
    gafs_readdir,
    gafs_symlink,
    gafs_readlink,
    gafs_fsync,
    gafs_inactive,
    gafs_fid,
    afs_rwlock,
    afs_rwunlock,
    afs_seek,
    afs_cmp,
    afs_frlock,
    afs_space,
    afs_realvp,
    afs_getpage,
    afs_putpage,
    afs_map,
    afs_addmap,
    afs_delmap,
    fs_poll,
    afs_dump,
    afs_pathconf,
    afs_pageio,
    afs_dumpctl,
    afs_dispose,
    afs_setsecattr,
    afs_getsecattr,
    fs_shrlock,
};
struct vnodeops *afs_ops = &Afs_vnodeops;
#endif
1437 gafs_open(struct vcache **avcp, afs_int32 aflags,
1438 afs_ucred_t *acred)
1440 int code;
1442 AFS_GLOCK();
1443 code = afs_open(avcp, aflags, acred);
1444 AFS_GUNLOCK();
1445 return (code);
1449 gafs_close(struct vcache *avc, afs_int32 aflags, int count,
1450 offset_t offset, afs_ucred_t *acred)
1452 int code;
1454 AFS_GLOCK();
1455 code = afs_close(avc, aflags, count, offset, acred);
1456 AFS_GUNLOCK();
1457 return (code);
1461 gafs_getattr(struct vcache *avc, struct vattr *attrs,
1462 int flags, afs_ucred_t *acred)
1464 int code;
1466 AFS_GLOCK();
1467 code = afs_getattr(avc, attrs, flags, acred);
1468 AFS_GUNLOCK();
1469 return (code);
1474 gafs_setattr(struct vcache *avc, struct vattr *attrs,
1475 int flags, afs_ucred_t *acred)
1477 int code;
1479 AFS_GLOCK();
1480 code = afs_setattr(avc, attrs, flags, acred);
1481 AFS_GUNLOCK();
1482 return (code);
1487 gafs_access(struct vcache *avc, afs_int32 amode, int flags,
1488 afs_ucred_t *acred)
1490 int code;
1492 AFS_GLOCK();
1493 code = afs_access(avc, amode, flags, acred);
1494 AFS_GUNLOCK();
1495 return (code);
1500 gafs_lookup(struct vcache *adp, char *aname,
1501 struct vcache **avcp, struct pathname *pnp, int flags,
1502 struct vnode *rdir, afs_ucred_t *acred)
1504 int code;
1506 AFS_GLOCK();
1507 code = afs_lookup(adp, aname, avcp, pnp, flags, rdir, acred);
1508 AFS_GUNLOCK();
1509 return (code);
1514 gafs_create(struct vcache *adp, char *aname, struct vattr *attrs,
1515 enum vcexcl aexcl, int amode, struct vcache **avcp,
1516 afs_ucred_t *acred)
1518 int code;
1520 AFS_GLOCK();
1521 code = afs_create(adp, aname, attrs, aexcl, amode, avcp, acred);
1522 AFS_GUNLOCK();
1523 return (code);
1527 gafs_remove(struct vcache *adp, char *aname, afs_ucred_t *acred)
1529 int code;
1531 AFS_GLOCK();
1532 code = afs_remove(adp, aname, acred);
1533 AFS_GUNLOCK();
1534 return (code);
1538 gafs_link(struct vcache *adp, struct vcache *avc,
1539 char *aname, afs_ucred_t *acred)
1541 int code;
1543 AFS_GLOCK();
1544 code = afs_link(adp, avc, aname, acred);
1545 AFS_GUNLOCK();
1546 return (code);
/*
 * Glock wrapper for afs_rename().  On Solaris 10+ it also refreshes the
 * kernel's cached vnode path (v_path) for the renamed object, so that
 * /proc and getcwd-style lookups see the new name.
 */
int
gafs_rename(struct vcache *aodp, char *aname1,
	    struct vcache *andp, char *aname2,
	    afs_ucred_t *acred)
{
    int code;

    AFS_GLOCK();
    code = afs_rename(aodp, aname1, andp, aname2, acred);
#ifdef AFS_SUN510_ENV
    if (code == 0) {
	struct vcache *avcp = NULL;

	/* re-resolve the new name to find the renamed vcache; failure is
	 * ignored (avcp stays NULL and no path fixup happens) */
	(void) afs_lookup(andp, aname2, &avcp, NULL, 0, NULL, acred);
	if (avcp) {
	    struct vnode *vp = AFSTOV(avcp), *pvp = AFSTOV(andp);

# ifdef HAVE_VN_RENAMEPATH
	    vn_renamepath(pvp, vp, aname2, strlen(aname2));
# else
	    /* no vn_renamepath(): drop the stale v_path by hand under
	     * v_lock, then rebuild it from the new parent and name */
	    mutex_enter(&vp->v_lock);
	    if (vp->v_path != NULL) {
		kmem_free(vp->v_path, strlen(vp->v_path) + 1);
		vp->v_path = NULL;
	    }
	    mutex_exit(&vp->v_lock);
	    vn_setpath(afs_globalVp, pvp, vp, aname2, strlen(aname2));
# endif /* !HAVE_VN_RENAMEPATH */

	    /* release the reference taken by afs_lookup() */
	    AFS_RELE(avcp);
	}
    }
#endif
    AFS_GUNLOCK();
    return (code);
}
1587 gafs_mkdir(struct vcache *adp, char *aname, struct vattr *attrs,
1588 struct vcache **avcp, afs_ucred_t *acred)
1590 int code;
1592 AFS_GLOCK();
1593 code = afs_mkdir(adp, aname, attrs, avcp, acred);
1594 AFS_GUNLOCK();
1595 return (code);
1599 gafs_rmdir(struct vcache *adp, char *aname, struct vnode *cdirp,
1600 afs_ucred_t *acred)
1602 int code;
1604 AFS_GLOCK();
1605 code = afs_rmdir(adp, aname, cdirp, acred);
1606 AFS_GUNLOCK();
1607 return (code);
1612 gafs_readdir(struct vcache *avc, struct uio *auio,
1613 afs_ucred_t *acred, int *eofp)
1615 int code;
1617 AFS_GLOCK();
1618 code = afs_readdir(avc, auio, acred, eofp);
1619 AFS_GUNLOCK();
1620 return (code);
1624 gafs_symlink(struct vcache *adp, char *aname, struct vattr *attrs,
1625 char *atargetName, afs_ucred_t *acred)
1627 int code;
1629 AFS_GLOCK();
1630 code = afs_symlink(adp, aname, attrs, atargetName, NULL, acred);
1631 AFS_GUNLOCK();
1632 return (code);
1637 gafs_readlink(struct vcache *avc, struct uio *auio, afs_ucred_t *acred)
1639 int code;
1641 AFS_GLOCK();
1642 code = afs_readlink(avc, auio, acred);
1643 AFS_GUNLOCK();
1644 return (code);
1648 gafs_fsync(struct vcache *avc, int flag, afs_ucred_t *acred)
1650 int code;
1652 AFS_GLOCK();
1653 code = afs_fsync(avc, flag, acred);
1654 AFS_GUNLOCK();
1655 return (code);
/*
 * Core of VOP_INACTIVE: called when the last reference to an AFS vnode
 * is being released.  Decrements v_count under v_lock and, when the
 * count reaches zero, hands the vcache to afs_InactiveVCache() and drops
 * the per-vnode hold on the global VFS.  Caller holds GLOCK.
 */
int
afs_inactive(struct vcache *avc, afs_ucred_t *acred)
{
    struct vnode *vp = AFSTOV(avc);
    /* during shutdown, skip all cleanup work */
    if (afs_shuttingdown != AFS_RUNNING)
	return 0;

    /*
     * In Solaris and HPUX s800 and HP-UX10.0 they actually call us with
     * v_count 1 on last reference!
     */
    mutex_enter(&vp->v_lock);
    if (avc->vrefCount <= 0)
	osi_Panic("afs_inactive : v_count <=0\n");

    /*
     * If more than 1 don't unmap the vnode but do decrement the ref count
     */
    vp->v_count--;
    if (vp->v_count > 0) {
	mutex_exit(&vp->v_lock);
	return 0;
    }
    mutex_exit(&vp->v_lock);

#ifndef AFS_SUN511_ENV
    /*
     * Solaris calls VOP_OPEN on exec, but doesn't call VOP_CLOSE when
     * the executable exits. So we clean up the open count here.
     *
     * Only do this for AFS_MVSTAT_FILE vnodes: when using fakestat, we can't
     * lose the open count for volume roots (AFS_MVSTAT_ROOT), even though they
     * will get VOP_INACTIVE'd when released by afs_PutFakeStat().
     */
    if (avc->opens > 0 && avc->mvstat == AFS_MVSTAT_FILE && !(avc->f.states & CCore))
	avc->opens = avc->execsOrWriters = 0;
#endif

    afs_InactiveVCache(avc, acred);

    AFS_GUNLOCK();
    /* VFS_RELE must be called outside of GLOCK, since it can potentially
     * call afs_freevfs, which acquires GLOCK */
    VFS_RELE(afs_globalVFS);
    AFS_GLOCK();

    return 0;
}
/* VOP_INACTIVE wrapper: take the global lock and discard afs_inactive()'s
 * return value, since the vnode framework expects a void entry point. */
void
gafs_inactive(struct vcache *avc, afs_ucred_t *acred)
{
    AFS_GLOCK();
    (void)afs_inactive(avc, acred);
    AFS_GUNLOCK();
}
/* Glock wrapper: run afs_fid() under the global AFS lock. */
int
gafs_fid(struct vcache *avc, struct fid **fidpp)
{
    int ret;

    AFS_GLOCK();
    ret = afs_fid(avc, fidpp);
    AFS_GUNLOCK();
    return ret;
}
1727 #endif /* AFS_GLOBAL_SUNLOCK */