src/afs/IRIX/osi_vnodeops.c
/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
/*
 * SGI specific vnodeops + other misc interface glue
 */
#include <afsconfig.h>
#include "afs/param.h"

#ifdef AFS_SGI62_ENV
#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* Afs-based standard headers */
#include "afs/afs_stats.h"	/* statistics */
#include "sys/flock.h"
#include "afs/nfsclient.h"
/* AFSBSIZE must be at least the size of a page, else the client will hang.
 * For 64 bit platforms, the page size is more than 8K.
 */
#define AFSBSIZE	_PAGESZ
extern struct afs_exporter *root_exported;
extern void afs_chkpgoob(vnode_t *, pgno_t);
static void afs_strategy();
static int afs_xread(), afs_xwrite();
static int afs_xbmap(), afs_map(), afs_reclaim();
#ifndef AFS_SGI65_ENV
static int afs_addmap(), afs_delmap();
#endif
extern int afs_open(), afs_close(), afs_ioctl(), afs_getattr(), afs_setattr();
extern int afs_access(), afs_lookup();
extern int afs_create(), afs_remove(), afs_link(), afs_rename();
extern int afs_mkdir(), afs_rmdir(), afs_readdir();
extern int afs_symlink(), afs_readlink(), afs_fsync(), afs_fid(),
    afs_frlock();
static int afs_seek(OSI_VC_DECL(a), off_t b, off_t * c);
#ifdef AFS_SGI64_ENV
extern int afs_xinactive();
#else
extern void afs_xinactive();
#endif

extern void afs_rwlock(OSI_VN_DECL(vp), AFS_RWLOCK_T b);
extern void afs_rwunlock(OSI_VN_DECL(vp), AFS_RWLOCK_T b);

extern int afs_fid2();
static int afsrwvp(struct vcache *avc, struct uio *uio,
		   enum uio_rw rw, int ioflag,
#ifdef AFS_SGI64_ENV
		   struct cred *cr, struct flid *flp);
#else
		   struct cred *cr);
#endif
#ifdef MP
static void mp_afs_rwlock(OSI_VN_DECL(a), AFS_RWLOCK_T b);
static void mp_afs_rwunlock(OSI_VN_DECL(a), AFS_RWLOCK_T b);
struct vnodeops afs_lockedvnodeops =
#else
struct vnodeops Afs_vnodeops =
#endif
{
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    BHV_IDENTITY_INIT_POSITION(VNODE_POSITION_BASE),
#else
    VNODE_POSITION_BASE,
#endif
#endif
    afs_open,
    afs_close,
    afs_xread,
    afs_xwrite,
    afs_ioctl,
    fs_setfl,
    afs_getattr,
    afs_setattr,
    afs_access,
    afs_lookup,
    afs_create,
    afs_remove,
    afs_link,
    afs_rename,
    afs_mkdir,
    afs_rmdir,
    afs_readdir,
    afs_symlink,
    afs_readlink,
    afs_fsync,
    afs_xinactive,
    afs_fid,
    afs_fid2,
    afs_rwlock,
    afs_rwunlock,
    afs_seek,
    fs_cmp,
    afs_frlock,
    fs_nosys,			/* realvp */
    afs_xbmap,
    afs_strategy,
    afs_map,
#ifdef AFS_SGI65_ENV
    fs_noerr,			/* addmap - devices only */
    fs_noerr,			/* delmap - devices only */
#else
    afs_addmap,
    afs_delmap,
#endif
    fs_poll,			/* poll */
    fs_nosys,			/* dump */
    fs_pathconf,
    fs_nosys,			/* allocstore */
    fs_nosys,			/* fcntl */
    afs_reclaim,		/* reclaim */
    fs_nosys,			/* attr_get */
    fs_nosys,			/* attr_set */
    fs_nosys,			/* attr_remove */
    fs_nosys,			/* attr_list */
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    fs_cover,
    (vop_link_removed_t) fs_noval,
    fs_vnode_change,
    fs_tosspages,
    fs_flushinval_pages,
    fs_flush_pages,
    fs_invalfree_pages,
    fs_pages_sethole,
    (vop_commit_t) fs_nosys,
    (vop_readbuf_t) fs_nosys,
    fs_strgetmsg,
    fs_strputmsg,
#else
    fs_mount,
#endif
#endif
};
#ifndef MP
struct vnodeops *afs_ops = &Afs_vnodeops;
#endif
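/*
 * Note (added commentary): on non-MP kernels the table above is installed
 * directly as afs_ops; under MP it is named afs_lockedvnodeops and is only
 * reached through the mp_afs_* wrappers near the end of this file, which
 * take the AFS global lock around each operation.
 */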
int
afs_frlock(OSI_VN_DECL(vp), int cmd, struct flock *lfp, int flag,
	   off_t offset,
#ifdef AFS_SGI65_ENV
	   vrwlock_t vrwlock,
#endif
	   cred_t * cr)
{
    int error;
    OSI_VN_CONVERT(vp);
#ifdef AFS_SGI65_ENV
    struct flid flid;
    int pid;
    get_current_flid(&flid);
    pid = flid.fl_pid;
#endif
    /*
     * Since AFS doesn't support byte-wise locks (and simply
     * says yes to them!), we handle byte locking locally only.
     * This makes lots of things work much better.
     * XXX This doesn't properly handle moving from a
     * byte-wise lock up to a full file lock (we should
     * remove the byte locks ..) Of course neither did the
     * regular AFS way ...
     *
     * For GETLK we do a bit more - we first check any byte-wise
     * locks - if none then check for full AFS file locks
     */
    if (cmd == F_GETLK || lfp->l_whence != 0 || lfp->l_start != 0
	|| (lfp->l_len != MAXEND && lfp->l_len != 0)) {
	AFS_RWLOCK(vp, VRWLOCK_WRITE);
	AFS_GUNLOCK();
#ifdef AFS_SGI65_ENV
	error =
	    fs_frlock(OSI_VN_ARG(vp), cmd, lfp, flag, offset, vrwlock, cr);
#else
	error = fs_frlock(vp, cmd, lfp, flag, offset, cr);
#endif
	AFS_GLOCK();
	AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
	if (error || cmd != F_GETLK)
	    return error;
	if (lfp->l_type != F_UNLCK)
	    /* found some blocking lock */
	    return 0;
	/* fall through to check for full AFS file locks */
    }
    /* map BSD style to plain - we don't call reclock()
     * and it's only there that the difference is important
     */
    switch (cmd) {
    case F_GETLK:
    case F_RGETLK:
	break;
    case F_SETLK:
    case F_RSETLK:
	break;
    case F_SETBSDLK:
	cmd = F_SETLK;
	break;
    case F_SETLKW:
    case F_RSETLKW:
	break;
    case F_SETBSDLKW:
	cmd = F_SETLKW;
	break;
    default:
	return EINVAL;
    }
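    /*
     * Illustrative example (added commentary, not part of the kernel path):
     * a caller locking the whole file, e.g.
     *
     *     struct flock fl;
     *     fl.l_type = F_WRLCK;
     *     fl.l_whence = 0;
     *     fl.l_start = 0;
     *     fl.l_len = 0;
     *     fcntl(fd, F_SETLK, &fl);
     *
     * passes the range check above and reaches afs_lockctl() below, while
     * any narrower byte range was already handled locally by fs_frlock().
     */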
    AFS_GUNLOCK();

    error = convoff(vp, lfp, 0, offset, SEEKLIMIT
#ifdef AFS_SGI64_ENV
		    , OSI_GET_CURRENT_CRED()
#endif /* AFS_SGI64_ENV */
	);

    AFS_GLOCK();
    if (!error) {
#ifdef AFS_SGI65_ENV
	error = afs_lockctl(vp, lfp, cmd, cr, pid);
#else
	error = afs_lockctl(vp, lfp, cmd, cr, OSI_GET_CURRENT_PID());
#endif
    }
    return error;
}
/*
 * We need to get the cache hierarchy right.
 * First comes the page cache - pages are hashed based on afs
 * vnode and offset. It is important to have things hashed here
 * for the VM/paging system to work.
 * Note that the paging system calls VOP_READ with the UIO_NOSPACE -
 * it simply requires that somehow the page is hashed
 * upon successful return.
 * This means in afs_read we
 * must call the 'chunk' code that handles page insertion. In order
 * to actually get the data, 'chunk' calls the VOP_STRATEGY routine.
 * This is basically the std afs_read routine - validating and
 * getting the info into the Dcache, then calling VOP_READ.
 * The only bad thing here is that by calling VOP_READ (and VOP_WRITE
 * to fill the cache) we will get 2 copies of these pages into the
 * page cache - one hashed on afs vnode and one on efs vnode. This
 * is wasteful but does no harm. A potential solution involves
 * causing an ASYNC flush of the newly fetched cache data and
 * doing direct I/O on the read side....
 */
/* ARGSUSED */
#ifdef AFS_SGI64_ENV
static int
afs_xread(OSI_VC_ARG(avc), uiop, ioflag, cr, flp)
     struct flid *flp;
#else
static int
afs_xread(OSI_VC_ARG(avc), uiop, ioflag, cr)
#endif
     OSI_VC_DECL(avc);
     struct uio *uiop;
     int ioflag;
     struct cred *cr;
{
    int code;
    OSI_VC_CONVERT(avc);

    osi_Assert(avc->v.v_count > 0);
    if (avc->v.v_type != VREG)
	return EISDIR;
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    if (!(ioflag & IO_ISLOCKED))
	AFS_RWLOCK((vnode_t *) avc, VRWLOCK_READ);
#endif
    code = afsrwvp(avc, uiop, UIO_READ, ioflag, cr, flp);
#ifdef AFS_SGI65_ENV
    if (!(ioflag & IO_ISLOCKED))
	AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_READ);
#endif
#else
    code = afsrwvp(avc, uiop, UIO_READ, ioflag, cr);
#endif
    return code;
}
/* ARGSUSED */
#ifdef AFS_SGI64_ENV
static int
afs_xwrite(OSI_VC_ARG(avc), uiop, ioflag, cr, flp)
     struct flid *flp;
#else
static int
afs_xwrite(OSI_VC_ARG(avc), uiop, ioflag, cr)
#endif
     OSI_VC_DECL(avc);
     struct uio *uiop;
     int ioflag;
     struct cred *cr;
{
    int code;
    OSI_VC_CONVERT(avc);

    osi_Assert(avc->v.v_count > 0);
    if (avc->v.v_type != VREG)
	return EISDIR;

    if (ioflag & IO_APPEND)
	uiop->uio_offset = avc->f.m.Length;
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    if (!(ioflag & IO_ISLOCKED))
	AFS_RWLOCK(((vnode_t *) avc), VRWLOCK_WRITE);
#endif
    code = afsrwvp(avc, uiop, UIO_WRITE, ioflag, cr, flp);
#ifdef AFS_SGI65_ENV
    if (!(ioflag & IO_ISLOCKED))
	AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
#else
    code = afsrwvp(avc, uiop, UIO_WRITE, ioflag, cr);
#endif
    return code;
}
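/*
 * Debugging knobs (added commentary; apparently meant to be set from a
 * kernel debugger): prnra prints a note when no read-ahead is initiated
 * (see the DEBUG printf in afsrwvp), acchk enables cmn_err reports on
 * buffer errors, and acdrop additionally drops into the kernel debugger
 * when such an error occurs. prra appears unused in this file.
 */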
static int prra = 0;
static int prnra = 0;
static int acchk = 0;
static int acdrop = 0;
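/*
 * afsrwvp does the real work for both afs_xread and afs_xwrite (added
 * commentary): it walks the request in AFSBSIZE-sized pieces, builds
 * bmapval entries describing each block, and moves the data through
 * buffers hashed on the AFS vnode via chunkread/getchunk.
 */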
static int
afsrwvp(struct vcache *avc, struct uio *uio, enum uio_rw rw,
	int ioflag,
#ifdef AFS_SGI64_ENV
	struct cred *cr, struct flid *flp)
#else
	struct cred *cr)
#endif
{
    struct vnode *vp = AFSTOV(avc);
    struct buf *bp;
    daddr_t bn;
    off_t acnt, cnt;
    off_t off, newoff;
    off_t bsize, rem, len;
    int error;
    struct bmapval bmv[2];
    int nmaps, didFakeOpen = 0;
    struct vrequest treq;
    struct dcache *tdc;
    int counter = 0;

    osi_Assert((valusema(&avc->vc_rwlock) <= 0)
	       && (OSI_GET_LOCKID() == avc->vc_rwlockid));
    newoff = uio->uio_resid + uio->uio_offset;
    if (uio->uio_resid <= 0) {
	return (0);
    }
    if (uio->uio_offset < 0 || newoff < 0) {
	return (EINVAL);
    }
    if (ioflag & IO_DIRECT)
	return EINVAL;

    if (rw == UIO_WRITE && vp->v_type == VREG && newoff > uio->uio_limit) {
	return (EFBIG);
    }

    afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, ioflag, ICL_TYPE_INT32, rw, ICL_TYPE_INT32, 0);
    /* get a validated vcache entry */
    error = afs_InitReq(&treq, cr);
    if (error)
	return afs_CheckCode(error, NULL, 63);

    error = afs_VerifyVCache(avc, &treq);
    if (error)
	return afs_CheckCode(error, &treq, 51);

    /*
     * flush any stale pages - this will unmap
     * and invalidate all pages for vp (NOT writing them back!)
     */
    osi_FlushPages(avc, cr);
    if (cr && AFS_NFSXLATORREQ(cr) && rw == UIO_READ) {
	if (!afs_AccessOK
	    (avc, PRSFS_READ, &treq,
	     CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ))
	    return EACCES;
    }
    /*
     * To handle anonymous calls to VOP_STRATEGY from afs_sync/sync/bdflush
     * we need better than the caller's credentials. So we squirrel away
     * the last writer's credentials.
     */
    if (rw == UIO_WRITE || (rw == UIO_READ && avc->cred == NULL)) {
	ObtainWriteLock(&avc->lock, 92);
	if (avc->cred)
	    crfree(avc->cred);
	crhold(cr);
	avc->cred = cr;
	ReleaseWriteLock(&avc->lock);
    }
    /*
     * We have to bump the open/exwriters field here
     * courtesy of the NFS translator,
     * because there are no open/close NFS RPCs to call our afs_open/close.
     */
    if (root_exported && rw == UIO_WRITE) {
	ObtainWriteLock(&avc->lock, 234);
	if (root_exported) {
	    didFakeOpen = 1;
	    afs_FakeOpen(avc);
	}
	ReleaseWriteLock(&avc->lock);
    }
    error = 0;
    if (rw == UIO_WRITE) {
	ObtainWriteLock(&avc->lock, 330);
	avc->f.states |= CDirty;
	ReleaseWriteLock(&avc->lock);
    }

    AFS_GUNLOCK();

    do {
	/* If v_dpages is set SGI 5.3 will convert those pages to
	 * B_DELWRI in chunkread and getchunk. Write the pages out
	 * before we trigger that behavior. For 6.1, dirty pages stay
	 * around too long and we should get rid of them as quickly
	 * as possible.
	 */
	while (VN_GET_DPAGES(vp))
	    pdflush(vp, 0);

	if (avc->vc_error) {
	    error = avc->vc_error;
	    break;
	}
	bsize = AFSBSIZE;	/* why not?? */
	off = uio->uio_offset % bsize;
	bn = BTOBBT(uio->uio_offset - off);
	/*
	 * decrease bsize - otherwise we will
	 * get 'extra' pages in the cache for this
	 * vnode that we would need to flush when
	 * calling e.g. ptossvp.
	 * So we can use Length in ptossvp,
	 * we make sure we never go more than to the file size
	 * rounded up to a page boundary.
	 * That doesn't quite work, since we may get a page hashed to
	 * the vnode w/o updating the length. Thus we always use
	 * MAXLONG in ptossvp to be safe.
	 */
	if (rw == UIO_READ) {
	    /*
	     * read/paging in a normal file
	     */
	    rem = avc->f.m.Length - uio->uio_offset;
	    if (rem <= 0)
		/* EOF */
		break;
	    /*
	     * compute minimum of rest of block and rest of file
	     */
	    cnt = MIN(bsize - off, rem);
	    osi_Assert((off + cnt) <= bsize);
	    bsize = ctob(btoc(off + cnt));
	    len = BTOBBT(bsize);
	    nmaps = 1;
	    bmv[0].bn = bmv[0].offset = bn;
	    bmv[0].length = len;
	    bmv[0].bsize = bsize;
	    bmv[0].pboff = off;
	    bmv[0].pbsize = MIN(cnt, uio->uio_resid);
	    bmv[0].eof = 0;
#ifdef AFS_SGI64_ENV
	    bmv[0].pbdev = vp->v_rdev;
	    bmv[0].pmp = uio->uio_pmp;
#endif
	    osi_Assert(cnt > 0);
	    /*
	     * initiate read-ahead if it looks like
	     * we are reading sequentially OR they want
	     * more than one 'bsize' (==AFSBSIZE) worth
	     * XXXHack - to avoid DELWRI buffers we can't
	     * do read-ahead on any file that has potentially
	     * dirty mmap pages.
	     */
	    if ((avc->lastr + BTOBB(AFSBSIZE) == bn
		 || uio->uio_resid > AFSBSIZE)
#ifdef AFS_SGI61_ENV
		&& (!AFS_VN_MAPPED(vp))
#else /* AFS_SGI61_ENV */
		&& ((vp->v_flag & VWASMAP) == 0)
#endif /* AFS_SGI61_ENV */
		) {
		rem -= cnt;
		if (rem > 0) {
		    bsize = AFSBSIZE;
		    bmv[1].bn = bmv[1].offset = bn + len;
		    osi_Assert((BBTOB(bn + len) % bsize) == 0);
		    acnt = MIN(bsize, rem);
		    bsize = ctob(btoc(acnt));
		    len = BTOBBT(bsize);
		    nmaps = 2;
		    bmv[1].length = len;
		    bmv[1].eof = 0;
		    bmv[1].bsize = bsize;
		    bmv[1].pboff = 0;
		    bmv[1].pbsize = acnt;
#ifdef AFS_SGI64_ENV
		    bmv[1].pmp = uio->uio_pmp;
		    bmv[1].pbdev = vp->v_rdev;
#endif
		}
	    }
#ifdef DEBUG
	    else if (prnra)
		printf
		    ("NRA:vp 0x%x lastr %d bn %d len %d cnt %d bsize %d rem %d resid %d\n",
		     vp, avc->lastr, bn, len, cnt, bsize, rem,
		     uio->uio_resid);
#endif

	    avc->lastr = bn;
	    bp = chunkread(vp, bmv, nmaps, cr);
	    /*
	     * If at a chunk boundary, start prefetch of next chunk.
	     */
	    if (counter == 0 || AFS_CHUNKOFFSET(off) == 0) {
		AFS_GLOCK();
		ObtainWriteLock(&avc->lock, 562);
		tdc = afs_FindDCache(avc, off);
		if (tdc) {
		    if (!(tdc->mflags & DFNextStarted))
			afs_PrefetchChunk(avc, tdc, cr, &treq);
		    afs_PutDCache(tdc);
		}
		ReleaseWriteLock(&avc->lock);
		AFS_GUNLOCK();
	    }
	    counter++;
	} else {
	    /*
	     * writing a normal file
	     */
	    /*
	     * Purge dirty chunks of file if there are too many dirty chunks.
	     * Inside the write loop, we only do this at a chunk boundary.
	     * Clean up partial chunk if necessary at end of loop.
	     */
	    if (counter > 0 && AFS_CHUNKOFFSET(uio->uio_offset) == 0) {
		AFS_GLOCK();
		ObtainWriteLock(&avc->lock, 90);
		error = afs_DoPartialWrite(avc, &treq);
		if (error == 0)
		    avc->f.states |= CDirty;
		ReleaseWriteLock(&avc->lock);
		AFS_GUNLOCK();
		if (error)
		    break;
	    }
	    counter++;
	    cnt = MIN(bsize - off, uio->uio_resid);
	    bsize = ctob(btoc(off + cnt));
	    len = BTOBBT(bsize);
	    bmv[0].bn = bn;
	    bmv[0].offset = bn;
	    bmv[0].length = len;
	    bmv[0].eof = 0;
	    bmv[0].bsize = bsize;
	    bmv[0].pboff = off;
	    bmv[0].pbsize = cnt;
#ifdef AFS_SGI64_ENV
	    bmv[0].pmp = uio->uio_pmp;
#endif

	    if (cnt == bsize)
		bp = getchunk(vp, bmv, cr);
	    else
		bp = chunkread(vp, bmv, 1, cr);

	    avc->f.m.Date = osi_Time();	/* Set file date (for ranlib) */
	}
	if (bp->b_flags & B_ERROR) {
	    /*
	     * Since we compile -signed, b_error is a signed
	     * char when it should be an unsigned char.
	     * This can cause some error codes to be interpreted
	     * as negative #s
	     */
	    error = (unsigned char)(bp->b_error);
	    if (!error)
		error = EIO;
#ifdef DEBUG
	    if (acchk && error) {
		cmn_err(CE_WARN, "bp 0x%x has error %d\n", bp, error);
		if (acdrop)
		    debug("AFS");
	    }
#endif
	    brelse(bp);
	    break;
	}

	osi_Assert(bp->b_error == 0);
	if (uio->uio_segflg != UIO_NOSPACE)
	    (void)bp_mapin(bp);
	AFS_UIOMOVE(bp->b_un.b_addr + bmv[0].pboff, cnt, rw, uio, error);
	if (rw == UIO_READ || error) {
	    if (bp->b_flags & B_DELWRI) {
		bawrite(bp);
	    } else
		brelse(bp);
	} else {
636 } else {
638 * m.Length is the maximum number of bytes known to be in the file.
639 * Make sure it is at least as high as the last byte we just wrote
640 * into the buffer.
642 if (avc->f.m.Length < uio->uio_offset) {
643 AFS_GLOCK();
644 ObtainWriteLock(&avc->lock, 235);
645 avc->f.m.Length = uio->uio_offset;
646 ReleaseWriteLock(&avc->lock);
647 AFS_GUNLOCK();
649 if (uio->uio_fmode & FSYNC) {
650 error = bwrite(bp);
651 } else if (off + cnt < bsize) {
652 bawrite(bp); /* was bdwrite */
653 } else {
654 bp->b_flags |= B_AGE;
655 bawrite(bp);
	/*
	 * Since EIO on an unlinked file is non-intuitive - give some
	 * explanation
	 */
	if (error) {
	    if (avc->f.m.LinkCount == 0)
		cmn_err(CE_WARN,
			"AFS: Process pid %d write error %d writing to unlinked file.",
			OSI_GET_CURRENT_PID(), error);
	}
    } while (!error && uio->uio_resid > 0);
    afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
    AFS_GLOCK();

    if (rw == UIO_WRITE && error == 0 && (avc->f.states & CDirty)) {
	ObtainWriteLock(&avc->lock, 405);
	error = afs_DoPartialWrite(avc, &treq);
	ReleaseWriteLock(&avc->lock);
    }
    if (!error) {
#ifdef AFS_SGI61_ENV
	if (((ioflag & IO_SYNC) || (ioflag & IO_DSYNC)) && (rw == UIO_WRITE)
	    && !AFS_NFSXLATORREQ(cr)) {
	    error = afs_fsync(avc, 0, cr
#ifdef AFS_SGI65_ENV
			      , 0, 0
#endif
		);
	}
#else /* AFS_SGI61_ENV */
	if ((ioflag & IO_SYNC) && (rw == UIO_WRITE) && !AFS_NFSXLATORREQ(cr)) {
	    error = afs_fsync(avc, 0, cr);
	}
#endif /* AFS_SGI61_ENV */
    }
    if (didFakeOpen) {
	ObtainWriteLock(&avc->lock, 236);
	afs_FakeClose(avc, cr);	/* XXXX For nfs trans XXXX */
	ReleaseWriteLock(&avc->lock);
    }
    afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, ioflag, ICL_TYPE_INT32, rw, ICL_TYPE_INT32,
	       error);

    return (error);
}
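/*
 * VOP_BMAP (added commentary): describe the single AFSBSIZE-aligned block
 * containing the requested range. Readahead requests (*nbmv > 1) are
 * deliberately ignored here, as the comment inside notes.
 */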
int
afs_xbmap(OSI_VC_ARG(avc), offset, count, flag, cr, bmv, nbmv)
     OSI_VC_DECL(avc);
     off_t offset;
     ssize_t count;
     int flag;
     struct cred *cr;
     struct bmapval *bmv;
     int *nbmv;
{
    int bsize;			/* server's block size in bytes */
    off_t off;
    size_t rem, cnt;
    OSI_VC_CONVERT(avc);

    bsize = AFSBSIZE;
    off = offset % bsize;	/* offset into block */
    bmv->bn = BTOBBT(offset - off);
    bmv->offset = bmv->bn;
    bmv->pboff = off;
    rem = avc->f.m.Length - offset;
    if (rem <= 0)
	cnt = 0;		/* EOF */
    else
	cnt = MIN(bsize - off, rem);
    /*
     * It is benign to ignore *nbmv > 1, since it is only for requesting
     * readahead.
     */

    /*
     * Don't map more than up to next page if at end of file
     * See comment in afsrwvp
     */
    osi_Assert((off + cnt) <= bsize);
    bsize = ctob(btoc(off + cnt));
    bmv->pbsize = MIN(cnt, count);
    bmv->eof = 0;
#ifdef AFS_SGI64_ENV
    bmv->pmp = NULL;
    bmv->pbdev = avc->v.v_rdev;
#endif
    bmv->bsize = bsize;
    bmv->length = BTOBBT(bsize);
    *nbmv = 1;
    return (0);
}
/*
 * called out of chunkread from afs_xread & clusterwrite to push dirty
 * pages back - this routine
 * actually does the reading/writing by calling afs_read/afs_write
 * bp points to a set of pages that have been inserted into
 * the page cache hashed on afs vp.
 */
static void
afs_strategy(OSI_VC_ARG(avc), bp)
     OSI_VC_DECL(avc);
     struct buf *bp;
{
    uio_t auio;
    uio_t *uio = &auio;
    iovec_t aiovec;
    int error;
    struct cred *cr;
    OSI_VC_CONVERT(avc);
    vnode_t *vp = (vnode_t *) avc;
    /*
     * We can't afford DELWRI buffers for 2 reasons:
     * 1) Since we can call underlying EFS, we can require a
     *    buffer to flush a buffer. This leads to 2 potential
     *    recursions/deadlocks
     *          a) if all buffers are DELWRI afs buffers, then
     *             ngeteblk -> bwrite -> afs_strategy -> afs_write ->
     *             UFS_Write -> efs_write -> ngeteblk .... could
     *             recurse a long ways!
     *          b) brelse -> chunkhold which can call dchunkpush
     *             will look for any DELWRI buffers and call strategy
     *             on them. This can then end up via UFS_Write
     *             recursing
     * Current hack:
     *  a) We never do bdwrite(s) on AFS buffers.
     *  b) We call pdflush with B_ASYNC
     *  c) in chunkhold where it can set a buffer DELWRI
     *     we immediately do a clusterwrite for AFS vp's
     * XXX Alas, 'c' got dropped in 5.1 so it's possible to get DELWRI
     * buffers if someone has mmaped the file and dirtied it then
     * reads/faults it again.
     * Instead - wherever we call chunkread/getchunk we check for a
     * returned bp with DELWRI set, and write it out immediately
     */
    if (CheckLock(&avc->lock) && VN_GET_DBUF(vp)) {
	printf("WARN: afs_strategy vp=%x, v_dbuf=%x bp=%x\n", vp,
	       VN_GET_DBUF(vp), bp);
	bp->b_error = EIO;
	bp->b_flags |= B_ERROR;
	iodone(bp);
	return;
    }
    if (bp->b_error != 0)
	printf("WARNING: afs_strategy3 vp=%x, bp=%x, err=%x\n", vp, bp,
	       bp->b_error);
    /*
     * To get credentials somewhat correct (we may be called from bdflush/
     * sync) we use saved credentials in Vcache.
     * We must hold them since someone else could change them
     */
    ObtainReadLock(&avc->lock);
    if (bp->b_flags & B_READ) {
	if (BBTOB(bp->b_blkno) >= avc->f.m.Length) {
	    /* we are responsible for zero'ing the page */
	    caddr_t c;
	    c = bp_mapin(bp);
	    memset(c, 0, bp->b_bcount);
	    iodone(bp);
	    ReleaseReadLock(&avc->lock);
	    return;
	}
    } else if ((avc->f.states & CWritingUFS) && (bp->b_flags & B_DELWRI)) {
	bp->b_ref = 3;
	ReleaseReadLock(&avc->lock);
	iodone(bp);
	return;
    }
    cr = avc->cred;
    osi_Assert(cr);
    crhold(cr);
    ReleaseReadLock(&avc->lock);
    aiovec.iov_base = bp_mapin(bp);
    uio->uio_iov = &aiovec;
    uio->uio_iovcnt = 1;
    uio->uio_resid = aiovec.iov_len = bp->b_bcount;
    uio->uio_offset = BBTOB(bp->b_blkno);
    uio->uio_segflg = UIO_SYSSPACE;
    uio->uio_limit = RLIM_INFINITY;	/* we checked the limit earlier */
#ifdef AFS_SGI64_ENV
    uio->uio_pmp = NULL;
#endif
    if (bp->b_flags & B_READ) {
	uio->uio_fmode = FREAD;
	error = afs_read(vp, uio, cr, 0);
    } else {
	uio->uio_fmode = FWRITE;
	error = afs_write(vp, uio, 0, cr, 0);
    }
    crfree(cr);
#ifdef DEBUG
    if (acchk && error) {
	cmn_err(CE_WARN, "vp 0x%x has error %d\n", vp, error);
	if (acdrop)
	    debug("AFS");
    }
#endif
    if (error) {
	bp->b_error = error;
	bp->b_flags |= B_ERROR;
	if ((uio->uio_fmode == FWRITE) && !avc->vc_error)
	    avc->vc_error = error;
    }
    iodone(bp);
    return;
}
/* ARGSUSED */
static int
afs_seek(OSI_VC_ARG(avc), ooff, noffp)
     OSI_VC_DECL(avc);
     off_t ooff;
     off_t *noffp;
{
    return *noffp < 0 ? EINVAL : 0;
}
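/*
 * Added commentary: addmap/delmap below keep the mapping reference counts
 * in step with opens. The first mapping of a vnode adds an open reference
 * and bumps execsOrWriters; the last unmapping stores the file back,
 * mimicking afs_open/afs_close for mmap'd files.
 */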
#if !defined(AFS_SGI65_ENV)
/* Irix 6.5 uses addmap/delmap only for devices. */
/* ARGSUSED */
static int
afs_addmap(OSI_VC_ARG(avc), off, prp, addr, len, prot, maxprot, flags, cr)
     off_t off;
     OSI_VC_DECL(avc);
     struct pregion *prp;
     addr_t addr;
     size_t len;
     u_int prot, maxprot;
     u_int flags;
     struct cred *cr;
{
    OSI_VC_CONVERT(avc);
    struct vnode *vp = AFSTOV(avc);

    if (vp->v_flag & VNOMAP)
	return ENOSYS;
    if (len == 0)
	return 0;
    AFS_RWLOCK(vp, VRWLOCK_WRITE);
    if (avc->mapcnt == 0) {
	/* on first mapping add an open reference */
	ObtainWriteLock(&avc->lock, 237);
	avc->execsOrWriters++;
	avc->opens++;
	ReleaseWriteLock(&avc->lock);
    }
    avc->mapcnt += btoc(len);
    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
    return 0;
}
/*ARGSUSED*/ static int
afs_delmap(OSI_VC_ARG(avc), off, prp, addr, len, prot, maxprot, flags, acred)
     off_t off;
     OSI_VC_DECL(avc);
     struct pregion *prp;
     addr_t addr;
     size_t len;
     u_int prot, maxprot;
     u_int flags;
     struct cred *acred;
{
    OSI_VC_CONVERT(avc);
    struct vnode *vp = AFSTOV(avc);
    struct brequest *tb;
    struct vrequest treq;
    afs_int32 code;

    if (vp->v_flag & VNOMAP)
	return ENOSYS;
    if (len == 0)
	return 0;
    AFS_RWLOCK(vp, VRWLOCK_WRITE);
    osi_Assert(avc->mapcnt > 0);
    avc->mapcnt -= btoc(len);
    osi_Assert(avc->mapcnt >= 0);
    if (avc->mapcnt == 0) {
	/* on last mapping push back and remove our reference */
	osi_Assert(avc->execsOrWriters > 0);
	osi_Assert(avc->opens > 0);
	if (avc->f.m.LinkCount == 0) {
	    ObtainWriteLock(&avc->lock, 238);
	    AFS_GUNLOCK();
	    PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
	    AFS_GLOCK();
	    ReleaseWriteLock(&avc->lock);
	}
	/*
	 * mimic afs_close
	 */
	code = afs_InitReq(&treq, acred);
	if (code) {
	    code = afs_CheckCode(code, NULL, 64);
	    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
	} else if (afs_BBusy()) {
	    /* do it yourself if daemons are all busy */
	    ObtainWriteLock(&avc->lock, 239);
	    code = afs_StoreOnLastReference(avc, &treq);
	    ReleaseWriteLock(&avc->lock);
	    /* BStore does CheckCode so we should also */
	    /* VNOVNODE is "acceptable" error code from close, since
	     * may happen when deleting a file on another machine while
	     * it is open here. */
	    if (code == VNOVNODE)
		code = 0;
	    if (code) {
		afs_StoreWarn(code, avc->f.fid.Fid.Volume,	/* /dev/console */
			      1);
	    }
	    code = afs_CheckCode(code, &treq, 52);
	    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
	} else {
	    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
	    /* at least one daemon is idle, so ask it to do the store.
	     * Also, note that we don't lock it any more... */
	    tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
			    (afs_size_t) afs_cr_uid(acred), 0L, (void *)0,
			    (void *)0, (void *)0);
	    /* sleep waiting for the store to start, then retrieve error code */
	    while ((tb->flags & BUVALID) == 0) {
		tb->flags |= BUWAIT;
		afs_osi_Sleep(tb);
	    }
	    afs_BRelease(tb);
	}
    } else {
	AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
    }
    return 0;
}
#endif /* ! AFS_SGI65_ENV */
/* ARGSUSED */
/*
 * Note - if mapping in an ELF interpreter, one can get called without vp
 * ever having been 'opened'
 */
#ifdef AFS_SGI65_ENV
static int
afs_map(OSI_VC_ARG(avc), off, len, prot, flags, cr, vpp)
     off_t off;
     OSI_VC_DECL(avc);
     size_t len;
     mprot_t prot;
     u_int flags;
     struct cred *cr;
     vnode_t **vpp;
#else
static int
afs_map(OSI_VC_ARG(avc), off, prp, addrp, len, prot, maxprot, flags, cr)
     off_t off;
     OSI_VC_DECL(avc);
     struct pregion *prp;
     addr_t *addrp;
     size_t len;
     u_int prot, maxprot;
     u_int flags;
     struct cred *cr;
#endif
{
    OSI_VC_CONVERT(avc);
    struct vnode *vp = AFSTOV(avc);
    struct vrequest treq;
    int error;

    /* get a validated vcache entry */
    error = afs_InitReq(&treq, cr);
    if (error)
	return afs_CheckCode(error, NULL, 65);

    error = afs_VerifyVCache(avc, &treq);
    if (error)
	return afs_CheckCode(error, &treq, 53);
    osi_FlushPages(avc, cr);	/* ensure old pages are gone */
#ifdef AFS_SGI65_ENV
    /* If the vnode is currently opened for write, there's the potential
     * that this mapping might (now or in the future) have PROT_WRITE.
     * So assume it does and we'll have to call afs_StoreOnLastReference.
     */
    AFS_RWLOCK(vp, VRWLOCK_WRITE);
    ObtainWriteLock(&avc->lock, 501);
    if (avc->execsOrWriters > 0) {
	avc->execsOrWriters++;
	avc->opens++;
	avc->mapcnt++;		/* count eow's due to mappings. */
    }
    ReleaseWriteLock(&avc->lock);
    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
#else
    AFS_RWLOCK(vp, VRWLOCK_WRITE);
    AFS_GUNLOCK();
    error =
	fs_map_subr(vp, (off_t) avc->f.m.Length, (u_int) avc->f.m.Mode, off, prp,
		    *addrp, len, prot, maxprot, flags, cr);
    AFS_GLOCK();
    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
#endif /* AFS_SGI65_ENV */
    afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vp,
#ifdef AFS_SGI65_ENV
	       ICL_TYPE_POINTER, NULL,
#else
	       ICL_TYPE_POINTER, *addrp,
#endif
	       ICL_TYPE_INT32, len, ICL_TYPE_INT32, off);
    return error;
}
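/*
 * VOP_INACTIVE (added commentary): called when the last reference to the
 * vnode is released. Storebacks deferred by afs_close or by dirty mappings
 * are pushed back here, and unlinked files get their pages tossed and the
 * unlink finished via afs_remunlink.
 */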
extern afs_rwlock_t afs_xvcache;
extern afs_lock_t afs_xdcache;
#ifdef AFS_SGI64_ENV
int
#else
void
#endif
afs_xinactive(OSI_VC_ARG(avc), acred)
     OSI_VC_DECL(avc);
     struct ucred *acred;
{
    int s;
    OSI_VC_CONVERT(avc);
    vnode_t *vp = (vnode_t *) avc;
    int mapcnt = avc->mapcnt;	/* We just clear off this many. */

    AFS_STATCNT(afs_inactive);
    s = VN_LOCK(vp);
    if (!(vp->v_flag & VINACT) || (vp->v_count > 0)) {
	/* inactive was already done, or someone did a VN_HOLD; just return */
	vp->v_flag &= ~VINACT;
	VN_UNLOCK(vp, s);
#ifdef AFS_SGI64_ENV
	return VN_INACTIVE_CACHE;
#else
	return;
#endif
    }
    osi_Assert((vp->v_flag & VSHARE) == 0);
    vp->v_flag &= ~VINACT;
    /* Removed broadcast to waiters, since no one ever will. Only for vnodes
     * in common pool.
     */
    VN_UNLOCK(vp, s);
#ifdef AFS_SGI65_ENV
    /* In Irix 6.5, the last unmap of a dirty mmap'd file does not
     * get an explicit vnode op. Instead we only find out at VOP_INACTIVE.
     */
    if (!afs_rwlock_nowait((vnode_t *) avc, VRWLOCK_WRITE)) {
	return VN_INACTIVE_CACHE;
    }
    if (NBObtainWriteLock(&avc->lock, 502)) {
	AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
	return VN_INACTIVE_CACHE;
    }
    if (avc->f.states & CUnlinked) {
	if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
	    avc->f.states |= CUnlinkedDel;
	    ReleaseWriteLock(&avc->lock);
	    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
	} else {
	    ReleaseWriteLock(&avc->lock);
	    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
	    afs_remunlink(avc, 1);	/* ignore any return code */
	}
	return VN_INACTIVE_CACHE;
    }
    if ((avc->f.states & CDirty) || (avc->execsOrWriters > 0)) {
	/* File either already has dirty chunks (CDirty) or was mapped at
	 * some time in its life with the potential for being written into.
	 * Note that afs_close defers storebacks if the vnode's ref count
	 * is more than 1.
	 */
	int code;
	struct vrequest treq;
	if (!afs_InitReq(&treq, acred)) {
	    int s;
	    VN_HOLD(vp);
	    avc->execsOrWriters -= mapcnt - 1;
	    avc->opens -= mapcnt - 1;
	    avc->mapcnt -= mapcnt;
	    code = afs_StoreOnLastReference(avc, &treq);
	    /* The following behavior mimics the behavior in afs_close. */
	    if (code == VNOVNODE)
		code = 0;
	    if (code) {
		if (mapcnt) {
		    cmn_err(CE_WARN,
			    "AFS: Failed to store FID (%x:%lu.%lu.%lu) in VOP_INACTIVE, error = %d\n",
			    (int)(avc->f.fid.Cell) & 0xffffffff,
			    avc->f.fid.Fid.Volume, avc->f.fid.Fid.Vnode,
			    avc->f.fid.Fid.Unique, code);
		}
		afs_InvalidateAllSegments(avc);
	    }
	    s = VN_LOCK(vp);
	    vp->v_count--;
	    code = (vp->v_count == 0);
	    VN_UNLOCK(vp, s);
	    /* If the vnode is now in use by someone else, return early. */
	    if (!code) {
		ReleaseWriteLock(&avc->lock);
		AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
		return VN_INACTIVE_CACHE;
	    }
	}
    }
#endif
    osi_Assert((avc->f.states & (CCore | CMAPPED)) == 0);

    if (avc->cred) {
	crfree(avc->cred);
	avc->cred = NULL;
    }
    ReleaseWriteLock(&avc->lock);
    AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
    /*
     * If someone unlinked a file and this is the last hurrah -
     * nuke all the pages.
     */
    if (avc->f.m.LinkCount == 0) {
	AFS_GUNLOCK();
	PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
	AFS_GLOCK();
    }
#ifndef AFS_SGI65_ENV
    osi_Assert(avc->mapcnt == 0);
    afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));

    avc->f.states &= ~CDirty;	/* Give up on store-backs */
    if (avc->f.states & CUnlinked) {
	if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
	    avc->f.states |= CUnlinkedDel;
	} else {
	    afs_remunlink(avc, 1);	/* ignore any return code */
	}
    }
#endif
#ifdef AFS_SGI64_ENV
    return VN_INACTIVE_CACHE;
#endif
}
static int
afs_reclaim(OSI_VC_DECL(avc), int flag)
{
#ifdef AFS_SGI64_ENV
    /* Gets called via VOP_RECLAIM in afs_FlushVCache to clear repl_vnodeops */
    return 0;
#else
    panic("afs_reclaim");
#endif
}
void
afs_rwlock(OSI_VN_DECL(vp), AFS_RWLOCK_T flag)
{
    OSI_VN_CONVERT(vp);
    struct vcache *avc = VTOAFS(vp);

    if (OSI_GET_LOCKID() == avc->vc_rwlockid) {
	avc->vc_locktrips++;
	return;
    }
    AFS_GUNLOCK();
    psema(&avc->vc_rwlock, PINOD);
    AFS_GLOCK();
    avc->vc_rwlockid = OSI_GET_LOCKID();
}
void
afs_rwunlock(OSI_VN_DECL(vp), AFS_RWLOCK_T flag)
{
    OSI_VN_CONVERT(vp);
    struct vcache *avc = VTOAFS(vp);

    AFS_ASSERT_GLOCK();
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
    if (avc->vc_locktrips > 0) {
	--avc->vc_locktrips;
	return;
    }
    avc->vc_rwlockid = OSI_NO_LOCKID;
    vsema(&avc->vc_rwlock);
}
/* The flag argument is for symmetry with the afs_rwlock and afs_rwunlock
 * calls. SGI currently only uses the flag to assert if the unlock flag
 * does not match the corresponding lock flag. But they may start using this
 * flag for a real rw lock at some time.
 */
int
afs_rwlock_nowait(vnode_t * vp, AFS_RWLOCK_T flag)
{
    struct vcache *avc = VTOAFS(vp);

    AFS_ASSERT_GLOCK();
    if (OSI_GET_LOCKID() == avc->vc_rwlockid) {
	avc->vc_locktrips++;
	return 1;
    }
    if (cpsema(&avc->vc_rwlock)) {
	avc->vc_rwlockid = OSI_GET_LOCKID();
	return 1;
    }
    return 0;
}
#if defined(AFS_SGI64_ENV) && defined(CKPT) && !defined(_R5000_CVT_WAR)
int
afs_fid2(OSI_VC_DECL(avc), struct fid *fidp)
{
    struct cell *tcell;
    afs_fid2_t *afid = (afs_fid2_t *) fidp;
    OSI_VC_CONVERT(avc);

    osi_Assert(sizeof(fid_t) >= sizeof(afs_fid2_t));
    afid->af_len = sizeof(afs_fid2_t) - sizeof(afid->af_len);

    tcell = afs_GetCell(avc->f.fid.Cell, READ_LOCK);
    afid->af_cell = tcell->cellIndex & 0xffff;
    afs_PutCell(tcell, READ_LOCK);

    afid->af_volid = avc->f.fid.Fid.Volume;
    afid->af_vno = avc->f.fid.Fid.Vnode;
    afid->af_uniq = avc->f.fid.Fid.Unique;

    return 0;
}
#else
/* Only use so far is in checkpoint/restart for IRIX 6.4. In ckpt_fid, a
 * return of ENOSYS would make the code fail over to VOP_FID. We can't let
 * that happen, since we do a VN_HOLD there in the expectation that
 * posthandle will be called to release the vnode.
 *
 * afs_fid2 is used to support the R5000 workarounds (_R5000_CVT_WAR)
 */
int
afs_fid2(OSI_VC_DECL(avc), struct fid *fidp)
{
#if defined(_R5000_CVT_WAR)
    extern int R5000_cvt_war;

    if (R5000_cvt_war)
	return ENOSYS;
    else
	return EINVAL;
#else
    return EINVAL;
#endif
}
#endif /* AFS_SGI64_ENV && CKPT */
/*
 * check for any pages hashed that shouldn't be!
 * Only valid if PGCACHEDEBUG is set in os/page.c
 * Drop the global lock here, since we may not actually do the call.
 */
void
afs_chkpgoob(vnode_t * vp, pgno_t pgno)
{
#undef PGDEBUG
#ifdef PGDEBUG
    AFS_GUNLOCK();
    pfindanyoob(vp, pgno);
    AFS_GLOCK();
#endif
}
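/*
 * MP wrappers (added commentary): each mp_afs_* routine below simply takes
 * the AFS global lock, forwards to the corresponding entry in
 * afs_lockedvnodeops, and releases the lock. The Afs_vnodeops table at the
 * end of this file dispatches to these wrappers.
 */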
#ifdef MP

#ifdef AFS_SGI64_ENV
#define AFS_MP_VC_ARG(A) bhv_desc_t A
#else
#define AFS_MP_VC_ARG(A) vnode_t A
#endif
#ifdef AFS_SGI64_ENV
int
mp_afs_open(bhv_desc_t * bhp, vnode_t ** a, mode_t b, struct cred *c)
#else
int
mp_afs_open(vnode_t ** a, mode_t b, struct cred *c)
#endif
{
    int rv;
    AFS_GLOCK();
#ifdef AFS_SGI64_ENV
    rv = afs_lockedvnodeops.vop_open(bhp, a, b, c);
#else
    rv = afs_lockedvnodeops.vop_open(a, b, c);
#endif
    AFS_GUNLOCK();
    return rv;
}
#if defined(AFS_SGI64_ENV)
#if defined(AFS_SGI65_ENV)
int
mp_afs_close(AFS_MP_VC_ARG(*a), int b, lastclose_t c, struct cred *d)
#else
int
mp_afs_close(AFS_MP_VC_ARG(*a), int b, lastclose_t c, off_t d, struct cred *e,
	     struct flid *f)
#endif
#else
int
mp_afs_close(AFS_MP_VC_ARG(*a), int b, lastclose_t c, off_t d, struct cred *e)
#endif
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_close(a, b, c, d
#if !defined(AFS_SGI65_ENV)
				      , e
#if defined(AFS_SGI64_ENV)
				      , f
#endif
#endif
	);

    AFS_GUNLOCK();
    return rv;
}
#ifdef AFS_SGI64_ENV
int
mp_afs_read(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d,
	    struct flid *f)
#else
int
mp_afs_read(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d)
#endif
{
    int rv;
    AFS_GLOCK();
#ifdef AFS_SGI64_ENV
    rv = afs_lockedvnodeops.vop_read(a, b, c, d, f);
#else
    rv = afs_lockedvnodeops.vop_read(a, b, c, d);
#endif
    AFS_GUNLOCK();
    return rv;
}
#ifdef AFS_SGI64_ENV
int
mp_afs_write(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d,
	     struct flid *f)
#else
int
mp_afs_write(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d)
#endif
{
    int rv;
    AFS_GLOCK();
#ifdef AFS_SGI64_ENV
    rv = afs_lockedvnodeops.vop_write(a, b, c, d, f);
#else
    rv = afs_lockedvnodeops.vop_write(a, b, c, d);
#endif
    AFS_GUNLOCK();
    return rv;
}
int
mp_afs_ioctl(AFS_MP_VC_ARG(*a), int b, void *c, int d, struct cred *e, int *f
#ifdef AFS_SGI65_ENV
	     , struct vopbd *vbds
#endif
    )
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_ioctl(a, b, c, d, e, f
#ifdef AFS_SGI65_ENV
				      , vbds
#endif
	);
    AFS_GUNLOCK();
    return rv;
}
int
mp_fs_setfl(AFS_MP_VC_ARG(*a), int b, int c, struct cred *d)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_setfl(a, b, c, d);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_getattr(AFS_MP_VC_ARG(*a), struct vattr *b, int c, struct cred *d)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_getattr(a, b, c, d);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_setattr(AFS_MP_VC_ARG(*a), struct vattr *b, int c, struct cred *d)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_setattr(a, b, c, d);
    AFS_GUNLOCK();
    return rv;
}
int
mp_afs_access(AFS_MP_VC_ARG(*a), int b,
#ifndef AFS_SGI65_ENV
	      int c,
#endif
	      struct cred *d)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_access(a, b,
#ifndef AFS_SGI65_ENV
				       c,
#endif
				       d);
    AFS_GUNLOCK();
    return rv;
}
int
mp_afs_lookup(AFS_MP_VC_ARG(*a), char *b, vnode_t ** c, struct pathname *d,
	      int e, vnode_t * f, struct cred *g)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_lookup(a, b, c, d, e, f, g);
    AFS_GUNLOCK();
    return rv;
}

#ifdef AFS_SGI64_ENV
int
mp_afs_create(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, int d, int e,
	      vnode_t ** f, struct cred *g)
#else
int
mp_afs_create(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, enum vcexcl d,
	      int e, vnode_t ** f, struct cred *g)
#endif
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_create(a, b, c, d, e, f, g);
    AFS_GUNLOCK();
    return rv;
}
int
mp_afs_remove(AFS_MP_VC_ARG(*a), char *b, struct cred *c)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_remove(a, b, c);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_link(AFS_MP_VC_ARG(*a), vnode_t * b, char *c, struct cred *d)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_link(a, b, c, d);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_rename(AFS_MP_VC_ARG(*a), char *b, vnode_t * c, char *d,
	      struct pathname *e, struct cred *f)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_rename(a, b, c, d, e, f);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_mkdir(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, vnode_t ** d,
	     struct cred *e)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_mkdir(a, b, c, d, e);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_rmdir(AFS_MP_VC_ARG(*a), char *b, vnode_t * c, struct cred *d)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_rmdir(a, b, c, d);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_readdir(AFS_MP_VC_ARG(*a), struct uio *b, struct cred *c, int *d)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_readdir(a, b, c, d);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_symlink(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, char *d,
	       struct cred *e)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_symlink(a, b, c, d, e);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_readlink(AFS_MP_VC_ARG(*a), struct uio *b, struct cred *c)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_readlink(a, b, c);
    AFS_GUNLOCK();
    return rv;
}
int
mp_afs_fsync(AFS_MP_VC_ARG(*a), int b, struct cred *c
#ifdef AFS_SGI65_ENV
	     , off_t start, off_t stop
#endif
    )
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_fsync(a, b, c
#ifdef AFS_SGI65_ENV
				      , start, stop
#endif
	);
    AFS_GUNLOCK();
    return rv;
}
void
mp_afs_inactive(AFS_MP_VC_ARG(*a), struct cred *b)
{
    AFS_GLOCK();
    afs_lockedvnodeops.vop_inactive(a, b);
    AFS_GUNLOCK();
    return;
}

int
mp_afs_fid(AFS_MP_VC_ARG(*a), struct fid **b)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_fid(a, b);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_fid2(AFS_MP_VC_ARG(*a), struct fid *b)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_fid2(a, b);
    AFS_GUNLOCK();
    return rv;
}
void
mp_afs_rwlock(AFS_MP_VC_ARG(*a), AFS_RWLOCK_T b)
{
    AFS_GLOCK();
    afs_rwlock(a, VRWLOCK_WRITE);
    AFS_GUNLOCK();
}

void
mp_afs_rwunlock(AFS_MP_VC_ARG(*a), AFS_RWLOCK_T b)
{
    AFS_GLOCK();
    afs_rwunlock(a, VRWLOCK_WRITE);
    AFS_GUNLOCK();
}

int
mp_afs_seek(AFS_MP_VC_ARG(*a), off_t b, off_t * c)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_seek(a, b, c);
    AFS_GUNLOCK();
    return rv;
}
int
mp_fs_cmp(AFS_MP_VC_ARG(*a), vnode_t * b)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_cmp(a, b);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_frlock(AFS_MP_VC_ARG(*a), int b, struct flock *c, int d, off_t e,
#ifdef AFS_SGI65_ENV
	      vrwlock_t vrwlock,
#endif
	      struct cred *f)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_frlock(a, b, c, d, e,
#ifdef AFS_SGI65_ENV
				       vrwlock,
#endif
				       f);
    AFS_GUNLOCK();
    return rv;
}
int
mp_afs_realvp(AFS_MP_VC_ARG(*a), vnode_t ** b)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_realvp(a, b);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_bmap(AFS_MP_VC_ARG(*a), off_t b, ssize_t c, int d, struct cred *e,
	    struct bmapval *f, int *g)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_bmap(a, b, c, d, e, f, g);
    AFS_GUNLOCK();
    return rv;
}
void
mp_afs_strategy(AFS_MP_VC_ARG(*a), struct buf *b)
{
    AFS_GLOCK();
    afs_lockedvnodeops.vop_strategy(a, b);
    AFS_GUNLOCK();
    return;
}
#ifdef AFS_SGI65_ENV
int
mp_afs_map(AFS_MP_VC_ARG(*a), off_t b, size_t c, mprot_t d, u_int e,
	   struct cred *f, vnode_t ** g)
#else
int
mp_afs_map(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, char **d, size_t e,
	   u_int f, u_int g, u_int h, struct cred *i)
#endif
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_map(a, b, c, d, e, f, g
#ifndef AFS_SGI65_ENV
				    , h, i
#endif
	);
    AFS_GUNLOCK();
    return rv;
}
#ifndef AFS_SGI65_ENV
/* As of Irix 6.5, addmap and delmap are only for devices */
int
mp_afs_addmap(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, addr_t d,
	      size_t e, u_int f, u_int g, u_int h, struct cred *i)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_addmap(a, b, c, d, e, f, g, h, i);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_delmap(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, addr_t d,
	      size_t e, u_int f, u_int g, u_int h, struct cred *i)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_delmap(a, b, c, d, e, f, g, h, i);
    AFS_GUNLOCK();
    return rv;
}
#endif /* ! AFS_SGI65_ENV */
int
mp_fs_poll(AFS_MP_VC_ARG(*a), short b, int c, short *d, struct pollhead **e
#ifdef AFS_SGI65_ENV
	   , unsigned int *f
#endif
    )
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvnodeops.vop_poll(a, b, c, d, e
#ifdef AFS_SGI65_ENV
				     , f
#endif
	);
    AFS_GUNLOCK();
    return rv;
}
struct vnodeops Afs_vnodeops = {
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    BHV_IDENTITY_INIT_POSITION(VNODE_POSITION_BASE),
#else
    VNODE_POSITION_BASE,
#endif
#endif
    mp_afs_open,
    mp_afs_close,
    mp_afs_read,
    mp_afs_write,
    mp_afs_ioctl,
    mp_fs_setfl,
    mp_afs_getattr,
    mp_afs_setattr,
    mp_afs_access,
    mp_afs_lookup,
    mp_afs_create,
    mp_afs_remove,
    mp_afs_link,
    mp_afs_rename,
    mp_afs_mkdir,
    mp_afs_rmdir,
    mp_afs_readdir,
    mp_afs_symlink,
    mp_afs_readlink,
    mp_afs_fsync,
    mp_afs_inactive,
    mp_afs_fid,
    mp_afs_fid2,
    mp_afs_rwlock,
    mp_afs_rwunlock,
    mp_afs_seek,
    mp_fs_cmp,
    mp_afs_frlock,
    fs_nosys,			/* realvp */
    mp_afs_bmap,
    mp_afs_strategy,
    mp_afs_map,
#ifdef AFS_SGI65_ENV
    fs_noerr,			/* addmap - devices only */
    fs_noerr,			/* delmap - devices only */
#else
    mp_afs_addmap,
    mp_afs_delmap,
#endif
    mp_fs_poll,			/* poll */
    fs_nosys,			/* dump */
    fs_pathconf,
    fs_nosys,			/* allocstore */
    fs_nosys,			/* fcntl */
    afs_reclaim,		/* reclaim */
    fs_nosys,			/* attr_get */
    fs_nosys,			/* attr_set */
    fs_nosys,			/* attr_remove */
    fs_nosys,			/* attr_list */
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    fs_cover,
    (vop_link_removed_t) fs_noval,
    fs_vnode_change,
    fs_tosspages,
    fs_flushinval_pages,
    fs_flush_pages,
    fs_invalfree_pages,
    fs_pages_sethole,
    (vop_commit_t) fs_nosys,
    (vop_readbuf_t) fs_nosys,
    fs_strgetmsg,
    fs_strputmsg,
#else
    fs_mount,
#endif
#endif
};
struct vnodeops *afs_ops = &Afs_vnodeops;
#endif /* MP */
/* Support for XFS caches. The assumption here is that the size of
 * a cache file also does not exceed 32 bits.
 */

/* Initialized in osi_InitCacheFSType(). Used to determine inode type. */
vnodeops_t *afs_xfs_vnodeopsp;

extern afs_lock_t afs_xosi;	/* lock is for tvattr */
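/*
 * Added commentary: the three helpers below share one pattern - fill a
 * vattr via AFS_VOP_GETATTR on the cache file's vnode, serialized by the
 * afs_xosi lock, and panic if the underlying getattr fails.
 */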
ino_t
VnodeToIno(vnode_t * vp)
{
    int code;
    struct vattr vattr;

    ObtainWriteLock(&afs_xosi, 579);
    vattr.va_mask = AT_FSID | AT_NODEID;	/* quick return using this mask. */
    AFS_GUNLOCK();
    AFS_VOP_GETATTR(vp, &vattr, 0, OSI_GET_CURRENT_CRED(), code);
    AFS_GLOCK();
    if (code) {
	osi_Panic("VnodeToIno");
    }
    ReleaseWriteLock(&afs_xosi);
    return vattr.va_nodeid;
}

dev_t
VnodeToDev(vnode_t * vp)
{
    int code;
    struct vattr vattr;

    ObtainWriteLock(&afs_xosi, 580);
    vattr.va_mask = AT_FSID | AT_NODEID;	/* quick return using this mask. */
    AFS_GUNLOCK();
    AFS_VOP_GETATTR(vp, &vattr, 0, OSI_GET_CURRENT_CRED(), code);
    AFS_GLOCK();
    if (code) {
	osi_Panic("VnodeToDev");
    }
    ReleaseWriteLock(&afs_xosi);
    return (dev_t) vattr.va_fsid;
}

off_t
VnodeToSize(vnode_t * vp)
{
    int code;
    struct vattr vattr;

    ObtainWriteLock(&afs_xosi, 581);
    vattr.va_mask = AT_SIZE;
    AFS_GUNLOCK();
    AFS_VOP_GETATTR(vp, &vattr, 0, OSI_GET_CURRENT_CRED(), code);
    AFS_GLOCK();
    if (code) {
	osi_Panic("VnodeToSize");
    }
    ReleaseWriteLock(&afs_xosi);
    return vattr.va_size;
}
#endif /* AFS_SGI62_ENV */