sys/coda/coda_vnops.c (netbsd-mini2440.git)
1 /* $NetBSD: coda_vnops.c,v 1.70 2009/06/29 05:08:15 dholland Exp $ */
3 /*
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
43 * This code was written for the Coda file system at Carnegie Mellon
 44  * University. Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.70 2009/06/29 05:08:15 dholland Exp $");
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
66 #include <miscfs/genfs/genfs.h>
68 #include <coda/coda.h>
69 #include <coda/cnode.h>
70 #include <coda/coda_vnops.h>
71 #include <coda/coda_venus.h>
72 #include <coda/coda_opstats.h>
73 #include <coda/coda_subr.h>
74 #include <coda/coda_namecache.h>
75 #include <coda/coda_pioctl.h>
 78 /* These flags select various performance enhancements. */
80 int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
81 int coda_symlink_cache = 1; /* Set to cache symbolic link information */
82 int coda_access_cache = 1; /* Set to handle some access checks directly */
84 /* structure to keep track of vfs calls */
86 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
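/*
 * Counters updated by the MARK_* macros below: MARK_ENTRY counts calls
 * into an operation, MARK_INT_SAT and MARK_INT_FAIL record whether a
 * request was ultimately satisfied or failed, and MARK_INT_GEN counts
 * operations generated internally by the kernel, such as the implicit
 * open/close done by coda_rdwr and coda_readdir.
 */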
88 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
89 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
90 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
91 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
93 /* What we are delaying for in printf */
94 int coda_printf_delay = 0; /* in microseconds */
95 int coda_vnop_print_entry = 0;
96 static int coda_lockdebug = 0;
98 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
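/*
 * Debugging aids: ENTRY logs entry into a vnode op when
 * coda_vnop_print_entry is set, and the CODADEBUG(op, stmt) macro
 * (defined in the Coda headers) runs stmt only when the bit for op is
 * set in codadebug (cf. the explicit codadebug & CODADBGMSK(CODA_SETATTR)
 * test in coda_setattr below).  coda_printf_delay is the delay applied
 * to myprintf output so console messages can be read.
 */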
100 /* Definition of the vnode operation vector */
102 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
103 { &vop_default_desc, coda_vop_error },
104 { &vop_lookup_desc, coda_lookup }, /* lookup */
105 { &vop_create_desc, coda_create }, /* create */
106 { &vop_mknod_desc, coda_vop_error }, /* mknod */
107 { &vop_open_desc, coda_open }, /* open */
108 { &vop_close_desc, coda_close }, /* close */
109 { &vop_access_desc, coda_access }, /* access */
110 { &vop_getattr_desc, coda_getattr }, /* getattr */
111 { &vop_setattr_desc, coda_setattr }, /* setattr */
112 { &vop_read_desc, coda_read }, /* read */
113 { &vop_write_desc, coda_write }, /* write */
114 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
115 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
116 { &vop_mmap_desc, genfs_mmap }, /* mmap */
117 { &vop_fsync_desc, coda_fsync }, /* fsync */
118 { &vop_remove_desc, coda_remove }, /* remove */
119 { &vop_link_desc, coda_link }, /* link */
120 { &vop_rename_desc, coda_rename }, /* rename */
121 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
122 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
123 { &vop_symlink_desc, coda_symlink }, /* symlink */
124 { &vop_readdir_desc, coda_readdir }, /* readdir */
125 { &vop_readlink_desc, coda_readlink }, /* readlink */
126 { &vop_abortop_desc, coda_abortop }, /* abortop */
127 { &vop_inactive_desc, coda_inactive }, /* inactive */
128 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
129 { &vop_lock_desc, coda_lock }, /* lock */
130 { &vop_unlock_desc, coda_unlock }, /* unlock */
131 { &vop_bmap_desc, coda_bmap }, /* bmap */
132 { &vop_strategy_desc, coda_strategy }, /* strategy */
133 { &vop_print_desc, coda_vop_error }, /* print */
134 { &vop_islocked_desc, coda_islocked }, /* islocked */
135 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */
136 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
137 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
138 { &vop_seek_desc, genfs_seek }, /* seek */
139 { &vop_poll_desc, genfs_poll }, /* poll */
140 { &vop_getpages_desc, coda_getpages }, /* getpages */
141 { &vop_putpages_desc, coda_putpages }, /* putpages */
142 { NULL, NULL }
145 const struct vnodeopv_desc coda_vnodeop_opv_desc =
146 { &coda_vnodeop_p, coda_vnodeop_entries };
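/*
 * coda_vnodeop_p is the operation vector pointer that the VFS framework
 * fills in from the table above (normally via vfs_opv_init() when the
 * file system attaches); operations not listed fall back to
 * vop_default_desc, i.e. coda_vop_error.
 */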
148 /* Definitions of NetBSD vnodeop interfaces */
151 * A generic error routine. Return EIO without looking at arguments.
154 coda_vop_error(void *anon) {
155 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
157 if (codadebug) {
158 myprintf(("coda_vop_error: Vnode operation %s called (error).\n",
159 (*desc)->vdesc_name));
162 return EIO;
165 /* A generic do-nothing. */
167 coda_vop_nop(void *anon) {
168 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
170 if (codadebug) {
171 myprintf(("Vnode operation %s called, but unsupported\n",
172 (*desc)->vdesc_name));
174 return (0);
178 coda_vnodeopstats_init(void)
180 int i;
182 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
183 coda_vnodeopstats[i].opcode = i;
184 coda_vnodeopstats[i].entries = 0;
185 coda_vnodeopstats[i].sat_intrn = 0;
186 coda_vnodeopstats[i].unsat_intrn = 0;
187 coda_vnodeopstats[i].gen_intrn = 0;
190 return 0;
194 * XXX The entire relationship between VOP_OPEN and having a container
195 * file (via venus_open) needs to be reexamined. In particular, it's
196 * valid to open/mmap/close and then reference. Instead of doing
197 * VOP_OPEN when getpages needs a container, we should do the
198 * venus_open part, and record that the vnode has opened the container
199 * for getpages, and do the matching logical close on coda_inactive.
200 * Further, coda_rdwr needs a container file, and sometimes needs to
201 * do the equivalent of open (core dumps).
204 * coda_open calls Venus to return the device and inode of the
205 * container file, and then obtains a vnode for that file. The
206 * container vnode is stored in the coda vnode, and a reference is
207 * added for each open file.
210 coda_open(void *v)
213 * NetBSD can pass the O_EXCL flag in mode, even though the check
214 * has already happened. Venus defensively assumes that if open
 215 * is passed O_EXCL, it must be a bug. We strip the flag here.
217 /* true args */
218 struct vop_open_args *ap = v;
219 struct vnode *vp = ap->a_vp;
220 struct cnode *cp = VTOC(vp);
221 int flag = ap->a_mode & (~O_EXCL);
222 kauth_cred_t cred = ap->a_cred;
223 /* locals */
224 int error;
225 dev_t dev; /* container file device, inode, vnode */
226 ino_t inode;
227 struct vnode *container_vp;
229 MARK_ENTRY(CODA_OPEN_STATS);
231 /* Check for open of control file. */
232 if (IS_CTL_VP(vp)) {
233 /* if (WRITABLE(flag)) */
234 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
235 MARK_INT_FAIL(CODA_OPEN_STATS);
236 return(EACCES);
238 MARK_INT_SAT(CODA_OPEN_STATS);
239 return(0);
242 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
243 if (error)
244 return (error);
245 if (!error) {
246 CODADEBUG(CODA_OPEN,
247 myprintf(("open: dev 0x%llx inode %llu result %d\n",
248 (unsigned long long)dev, (unsigned long long)inode, error));)
252 * Obtain locked and referenced container vnode from container
253 * device/inode.
255 error = coda_grab_vnode(dev, inode, &container_vp);
256 if (error)
257 return (error);
259 /* Save the vnode pointer for the container file. */
260 if (cp->c_ovp == NULL) {
261 cp->c_ovp = container_vp;
262 } else {
263 if (cp->c_ovp != container_vp)
265 * Perhaps venus returned a different container, or
266 * something else went wrong.
268 panic("coda_open: cp->c_ovp != container_vp");
270 cp->c_ocount++;
272 /* Flush the attribute cache if writing the file. */
273 if (flag & FWRITE) {
274 cp->c_owrite++;
275 cp->c_flags &= ~C_VATTR;
279 * Save the <device, inode> pair for the container file to speed
280 * up subsequent reads while closed (mmap, program execution).
281 * This is perhaps safe because venus will invalidate the node
282 * before changing the container file mapping.
284 cp->c_device = dev;
285 cp->c_inode = inode;
287 /* Open the container file. */
288 error = VOP_OPEN(container_vp, flag, cred);
290 * Drop the lock on the container, after we have done VOP_OPEN
291 * (which requires a locked vnode).
293 VOP_UNLOCK(container_vp, 0);
294 return(error);
298 * Close the cache file used for I/O and notify Venus.
301 coda_close(void *v)
303 /* true args */
304 struct vop_close_args *ap = v;
305 struct vnode *vp = ap->a_vp;
306 struct cnode *cp = VTOC(vp);
307 int flag = ap->a_fflag;
308 kauth_cred_t cred = ap->a_cred;
309 /* locals */
310 int error;
312 MARK_ENTRY(CODA_CLOSE_STATS);
314 /* Check for close of control file. */
315 if (IS_CTL_VP(vp)) {
316 MARK_INT_SAT(CODA_CLOSE_STATS);
317 return(0);
321 * XXX The IS_UNMOUNTING part of this is very suspect.
323 if (IS_UNMOUNTING(cp)) {
324 if (cp->c_ovp) {
325 #ifdef CODA_VERBOSE
326 printf("coda_close: destroying container ref %d, ufs vp %p of vp %p/cp %p\n",
327 vp->v_usecount, cp->c_ovp, vp, cp);
328 #endif
329 #ifdef hmm
330 vgone(cp->c_ovp);
331 #else
332 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
333 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
334 vput(cp->c_ovp);
335 #endif
336 } else {
337 #ifdef CODA_VERBOSE
338 printf("coda_close: NO container vp %p/cp %p\n", vp, cp);
339 #endif
341 return ENODEV;
344 /* Lock the container node, and VOP_CLOSE it. */
345 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
346 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
348 * Drop the lock we just obtained, and vrele the container vnode.
349 * Decrement reference counts, and clear container vnode pointer on
350 * last close.
352 vput(cp->c_ovp);
353 if (flag & FWRITE)
354 --cp->c_owrite;
355 if (--cp->c_ocount == 0)
356 cp->c_ovp = NULL;
358 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
360 CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n",error)); )
361 return(error);
365 coda_read(void *v)
367 struct vop_read_args *ap = v;
369 ENTRY;
370 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
371 ap->a_ioflag, ap->a_cred, curlwp));
375 coda_write(void *v)
377 struct vop_write_args *ap = v;
379 ENTRY;
380 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
381 ap->a_ioflag, ap->a_cred, curlwp));
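/*
 * coda_rdwr does the work for both read and write: the I/O is
 * redirected to the container (cache) file.  If no container vnode is
 * attached to the cnode, one is either grabbed from the saved
 * <device, inode> pair or obtained via an internal VOP_OPEN that is
 * matched by an internal VOP_CLOSE when the transfer completes.
 */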
385 coda_rdwr(struct vnode *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
386 kauth_cred_t cred, struct lwp *l)
388 /* upcall decl */
389 /* NOTE: container file operation!!! */
390 /* locals */
391 struct cnode *cp = VTOC(vp);
392 struct vnode *cfvp = cp->c_ovp;
393 struct proc *p = l->l_proc;
394 int opened_internally = 0;
395 int error = 0;
397 MARK_ENTRY(CODA_RDWR_STATS);
399 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
400 uiop->uio_iov->iov_base,
401 (unsigned long) uiop->uio_resid,
402 (long long) uiop->uio_offset)); )
404 /* Check for rdwr of control object. */
405 if (IS_CTL_VP(vp)) {
406 MARK_INT_FAIL(CODA_RDWR_STATS);
407 return(EINVAL);
410 /* Redirect the request to UFS. */
413 * If file is not already open this must be a page
414 * {read,write} request. Iget the cache file's inode
415 * pointer if we still have its <device, inode> pair.
416 * Otherwise, we must do an internal open to derive the
417 * pair.
418 * XXX Integrate this into a coherent strategy for container
419 * file acquisition.
421 if (cfvp == NULL) {
423 * If we're dumping core, do the internal open. Otherwise
424 * venus won't have the correct size of the core when
425 * it's completely written.
427 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
428 printf("coda_rdwr: grabbing container vnode, losing reference\n");
429 /* Get locked and refed vnode. */
430 error = coda_grab_vnode(cp->c_device, cp->c_inode, &cfvp);
431 if (error) {
432 MARK_INT_FAIL(CODA_RDWR_STATS);
433 return(error);
436 * Drop lock.
 437 * XXX Where is the reference released?
439 VOP_UNLOCK(cfvp, 0);
441 else {
442 printf("coda_rdwr: internal VOP_OPEN\n");
443 opened_internally = 1;
444 MARK_INT_GEN(CODA_OPEN_STATS);
445 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
446 #ifdef CODA_VERBOSE
447 printf("coda_rdwr: Internally Opening %p\n", vp);
448 #endif
449 if (error) {
450 MARK_INT_FAIL(CODA_RDWR_STATS);
451 return(error);
453 cfvp = cp->c_ovp;
457 /* Have UFS handle the call. */
458 CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = %s, refcnt = %d\n",
459 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
461 if (rw == UIO_READ) {
462 error = VOP_READ(cfvp, uiop, ioflag, cred);
463 } else {
464 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
467 if (error)
468 MARK_INT_FAIL(CODA_RDWR_STATS);
469 else
470 MARK_INT_SAT(CODA_RDWR_STATS);
472 /* Do an internal close if necessary. */
473 if (opened_internally) {
474 MARK_INT_GEN(CODA_CLOSE_STATS);
475 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
478 /* Invalidate cached attributes if writing. */
479 if (rw == UIO_WRITE)
480 cp->c_flags &= ~C_VATTR;
481 return(error);
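/*
 * coda_ioctl implements the Coda "pioctl" interface.  The ioctl must be
 * issued on the control object; the argument is a PioctlData whose path
 * names the real target, which is looked up here, verified to be a Coda
 * vnode, and passed on to Venus via venus_ioctl.  Roughly (an
 * illustrative userland sketch, not part of this file):
 *
 *	struct PioctlData d;
 *	d.path = "/coda/some/file";
 *	d.follow = 1;
 *	... fill in d.vi (in/out buffers, d.vi.in_size <= VC_MAXDATASIZE) ...
 *	ioctl(fd_on_control_object, command, &d);
 */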
485 coda_ioctl(void *v)
487 /* true args */
488 struct vop_ioctl_args *ap = v;
489 struct vnode *vp = ap->a_vp;
490 int com = ap->a_command;
491 void *data = ap->a_data;
492 int flag = ap->a_fflag;
493 kauth_cred_t cred = ap->a_cred;
494 /* locals */
495 int error;
496 struct vnode *tvp;
497 struct PioctlData *iap = (struct PioctlData *)data;
498 namei_simple_flags_t sflags;
500 MARK_ENTRY(CODA_IOCTL_STATS);
502 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
504 /* Don't check for operation on a dying object, for ctlvp it
505 shouldn't matter */
507 /* Must be control object to succeed. */
508 if (!IS_CTL_VP(vp)) {
509 MARK_INT_FAIL(CODA_IOCTL_STATS);
510 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != ctlvp"));)
511 return (EOPNOTSUPP);
513 /* Look up the pathname. */
515 /* Should we use the name cache here? It would get it from
516 lookupname sooner or later anyway, right? */
518 sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
519 error = namei_simple_user(iap->path, sflags, &tvp);
521 if (error) {
522 MARK_INT_FAIL(CODA_IOCTL_STATS);
523 CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup returns %d\n",
524 error));)
525 return(error);
529 * Make sure this is a coda style cnode, but it may be a
530 * different vfsp
532 /* XXX: this totally violates the comment about vtagtype in vnode.h */
533 if (tvp->v_tag != VT_CODA) {
534 vrele(tvp);
535 MARK_INT_FAIL(CODA_IOCTL_STATS);
536 CODADEBUG(CODA_IOCTL,
537 myprintf(("coda_ioctl error: %s not a coda object\n",
538 iap->path));)
539 return(EINVAL);
542 if (iap->vi.in_size > VC_MAXDATASIZE) {
543 vrele(tvp);
544 return(EINVAL);
546 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
547 cred, curlwp);
549 if (error)
550 MARK_INT_FAIL(CODA_IOCTL_STATS);
551 else
552 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
554 vrele(tvp);
555 return(error);
 559 * To reduce the cost of a user-level venus, we cache attributes in
560 * the kernel. Each cnode has storage allocated for an attribute. If
561 * c_vattr is valid, return a reference to it. Otherwise, get the
562 * attributes from venus and store them in the cnode. There is some
563 * question if this method is a security leak. But I think that in
564 * order to make this call, the user must have done a lookup and
565 * opened the file, and therefore should already have access.
568 coda_getattr(void *v)
570 /* true args */
571 struct vop_getattr_args *ap = v;
572 struct vnode *vp = ap->a_vp;
573 struct cnode *cp = VTOC(vp);
574 struct vattr *vap = ap->a_vap;
575 kauth_cred_t cred = ap->a_cred;
576 /* locals */
577 int error;
579 MARK_ENTRY(CODA_GETATTR_STATS);
581 /* Check for getattr of control object. */
582 if (IS_CTL_VP(vp)) {
583 MARK_INT_FAIL(CODA_GETATTR_STATS);
584 return(ENOENT);
587 /* Check to see if the attributes have already been cached */
588 if (VALID_VATTR(cp)) {
589 CODADEBUG(CODA_GETATTR, { myprintf(("attr cache hit: %s\n",
590 coda_f2s(&cp->c_fid)));});
591 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
592 print_vattr(&cp->c_vattr); );
594 *vap = cp->c_vattr;
595 MARK_INT_SAT(CODA_GETATTR_STATS);
596 return(0);
599 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
601 if (!error) {
602 CODADEBUG(CODA_GETATTR, myprintf(("getattr miss %s: result %d\n",
603 coda_f2s(&cp->c_fid), error)); )
605 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
606 print_vattr(vap); );
608 /* If not open for write, store attributes in cnode */
609 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
610 cp->c_vattr = *vap;
611 cp->c_flags |= C_VATTR;
615 return(error);
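/*
 * coda_setattr hands the attribute changes to Venus and, on success,
 * clears C_VATTR so the next getattr refetches fresh attributes.
 */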
619 coda_setattr(void *v)
621 /* true args */
622 struct vop_setattr_args *ap = v;
623 struct vnode *vp = ap->a_vp;
624 struct cnode *cp = VTOC(vp);
625 struct vattr *vap = ap->a_vap;
626 kauth_cred_t cred = ap->a_cred;
627 /* locals */
628 int error;
630 MARK_ENTRY(CODA_SETATTR_STATS);
632 /* Check for setattr of control object. */
633 if (IS_CTL_VP(vp)) {
634 MARK_INT_FAIL(CODA_SETATTR_STATS);
635 return(ENOENT);
638 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
639 print_vattr(vap);
641 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
643 if (!error)
644 cp->c_flags &= ~C_VATTR;
646 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
647 return(error);
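/*
 * coda_access checks permissions.  When coda_access_cache is set,
 * directory lookup (VEXEC) access can be granted from the Coda name
 * cache; all other checks are forwarded to Venus.
 */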
651 coda_access(void *v)
653 /* true args */
654 struct vop_access_args *ap = v;
655 struct vnode *vp = ap->a_vp;
656 struct cnode *cp = VTOC(vp);
657 int mode = ap->a_mode;
658 kauth_cred_t cred = ap->a_cred;
659 /* locals */
660 int error;
662 MARK_ENTRY(CODA_ACCESS_STATS);
664 /* Check for access of control object. Only read access is
665 allowed on it. */
666 if (IS_CTL_VP(vp)) {
667 /* bogus hack - all will be marked as successes */
668 MARK_INT_SAT(CODA_ACCESS_STATS);
669 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
670 ? 0 : EACCES);
674 * if the file is a directory, and we are checking exec (eg lookup)
675 * access, and the file is in the namecache, then the user must have
676 * lookup access to it.
678 if (coda_access_cache) {
679 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
680 if (coda_nc_lookup(cp, ".", 1, cred)) {
681 MARK_INT_SAT(CODA_ACCESS_STATS);
682 return(0); /* it was in the cache */
687 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
689 return(error);
693 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
694 * done. If a buffer has been saved in anticipation of a coda_create or
695 * a coda_remove, delete it.
697 /* ARGSUSED */
699 coda_abortop(void *v)
701 /* true args */
702 struct vop_abortop_args /* {
703 struct vnode *a_dvp;
704 struct componentname *a_cnp;
705 } */ *ap = v;
706 /* upcall decl */
707 /* locals */
709 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
710 PNBUF_PUT(ap->a_cnp->cn_pnbuf);
711 return (0);
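/*
 * coda_readlink returns the target of a symbolic link, using the
 * per-cnode symlink cache (c_symlink/c_symlen) when coda_symlink_cache
 * is enabled; otherwise the text is fetched from Venus and optionally
 * cached.
 */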
715 coda_readlink(void *v)
717 /* true args */
718 struct vop_readlink_args *ap = v;
719 struct vnode *vp = ap->a_vp;
720 struct cnode *cp = VTOC(vp);
721 struct uio *uiop = ap->a_uio;
722 kauth_cred_t cred = ap->a_cred;
723 /* locals */
724 struct lwp *l = curlwp;
725 int error;
726 char *str;
727 int len;
729 MARK_ENTRY(CODA_READLINK_STATS);
731 /* Check for readlink of control object. */
732 if (IS_CTL_VP(vp)) {
733 MARK_INT_FAIL(CODA_READLINK_STATS);
734 return(ENOENT);
737 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
738 uiop->uio_rw = UIO_READ;
739 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
740 if (error)
741 MARK_INT_FAIL(CODA_READLINK_STATS);
742 else
743 MARK_INT_SAT(CODA_READLINK_STATS);
744 return(error);
747 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
749 if (!error) {
750 uiop->uio_rw = UIO_READ;
751 error = uiomove(str, len, uiop);
753 if (coda_symlink_cache) {
754 cp->c_symlink = str;
755 cp->c_symlen = len;
756 cp->c_flags |= C_SYMLINK;
757 } else
758 CODA_FREE(str, len);
761 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
762 return(error);
766 coda_fsync(void *v)
768 /* true args */
769 struct vop_fsync_args *ap = v;
770 struct vnode *vp = ap->a_vp;
771 struct cnode *cp = VTOC(vp);
772 kauth_cred_t cred = ap->a_cred;
773 /* locals */
774 struct vnode *convp = cp->c_ovp;
775 int error;
777 MARK_ENTRY(CODA_FSYNC_STATS);
779 /* Check for fsync on an unmounting object */
 780 /* The NetBSD kernel, in its infinite wisdom, can try to fsync
781 * after an unmount has been initiated. This is a Bad Thing,
782 * which we have to avoid. Not a legitimate failure for stats.
784 if (IS_UNMOUNTING(cp)) {
785 return(ENODEV);
788 /* Check for fsync of control object. */
789 if (IS_CTL_VP(vp)) {
790 MARK_INT_SAT(CODA_FSYNC_STATS);
791 return(0);
794 if (convp)
795 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
 798 * We can expect fsync on any vnode at all if venus is purging it.
799 * Venus can't very well answer the fsync request, now can it?
800 * Hopefully, it won't have to, because hopefully, venus preserves
801 * the (possibly untrue) invariant that it never purges an open
802 * vnode. Hopefully.
804 if (cp->c_flags & C_PURGING) {
805 return(0);
808 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
810 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); );
811 return(error);
815 * vp is locked on entry, and we must unlock it.
816 * XXX This routine is suspect and probably needs rewriting.
819 coda_inactive(void *v)
821 /* true args */
822 struct vop_inactive_args *ap = v;
823 struct vnode *vp = ap->a_vp;
824 struct cnode *cp = VTOC(vp);
825 kauth_cred_t cred __unused = NULL;
827 /* We don't need to send inactive to venus - DCS */
828 MARK_ENTRY(CODA_INACTIVE_STATS);
830 if (IS_CTL_VP(vp)) {
831 MARK_INT_SAT(CODA_INACTIVE_STATS);
832 return 0;
835 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
836 coda_f2s(&cp->c_fid), vp->v_mount));)
838 /* If an array has been allocated to hold the symlink, deallocate it */
839 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
840 if (cp->c_symlink == NULL)
841 panic("coda_inactive: null symlink pointer in cnode");
843 CODA_FREE(cp->c_symlink, cp->c_symlen);
844 cp->c_flags &= ~C_SYMLINK;
845 cp->c_symlen = 0;
848 /* Remove it from the table so it can't be found. */
849 coda_unsave(cp);
850 if (vp->v_mount->mnt_data == NULL) {
851 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
852 panic("badness in coda_inactive");
855 if (IS_UNMOUNTING(cp)) {
856 /* XXX Do we need to VOP_CLOSE container vnodes? */
857 if (vp->v_usecount > 0)
858 printf("coda_inactive: IS_UNMOUNTING %p usecount %d\n",
859 vp, vp->v_usecount);
860 if (cp->c_ovp != NULL)
861 printf("coda_inactive: %p ovp != NULL\n", vp);
862 VOP_UNLOCK(vp, 0);
863 } else {
864 /* Sanity checks that perhaps should be panic. */
865 if (vp->v_usecount) {
866 printf("coda_inactive: %p usecount %d\n", vp, vp->v_usecount);
868 if (cp->c_ovp != NULL) {
869 printf("coda_inactive: %p ovp != NULL\n", vp);
871 VOP_UNLOCK(vp, 0);
872 *ap->a_recycle = true;
875 MARK_INT_SAT(CODA_INACTIVE_STATS);
876 return(0);
880 * Coda does not use the normal namecache, but a private version.
881 * Consider how to use the standard facility instead.
884 coda_lookup(void *v)
886 /* true args */
887 struct vop_lookup_args *ap = v;
888 /* (locked) vnode of dir in which to do lookup */
889 struct vnode *dvp = ap->a_dvp;
890 struct cnode *dcp = VTOC(dvp);
891 /* output variable for result */
892 struct vnode **vpp = ap->a_vpp;
893 /* name to lookup */
894 struct componentname *cnp = ap->a_cnp;
895 kauth_cred_t cred = cnp->cn_cred;
896 struct lwp *l = curlwp;
897 /* locals */
898 struct cnode *cp;
899 const char *nm = cnp->cn_nameptr;
900 int len = cnp->cn_namelen;
901 int flags = cnp->cn_flags;
902 int isdot;
903 CodaFid VFid;
904 int vtype;
905 int error = 0;
907 MARK_ENTRY(CODA_LOOKUP_STATS);
909 CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %s\n",
910 nm, coda_f2s(&dcp->c_fid))););
913 * XXX componentname flags in MODMASK are not handled at all
917 * The overall strategy is to switch on the lookup type and get a
918 * result vnode that is vref'd but not locked. Then, the code at
919 * exit: switches on ., .., and regular lookups and does the right
920 * locking.
923 /* Check for lookup of control object. */
924 if (IS_CTL_NAME(dvp, nm, len)) {
925 *vpp = coda_ctlvp;
926 vref(*vpp);
927 MARK_INT_SAT(CODA_LOOKUP_STATS);
928 goto exit;
931 /* Avoid trying to hand venus an unreasonably long name. */
932 if (len+1 > CODA_MAXNAMLEN) {
933 MARK_INT_FAIL(CODA_LOOKUP_STATS);
934 CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, %s (%s)\n",
935 coda_f2s(&dcp->c_fid), nm)););
936 *vpp = (struct vnode *)0;
937 error = EINVAL;
938 goto exit;
942 * XXX Check for DOT lookups, and short circuit all the caches,
943 * just doing an extra vref. (venus guarantees that lookup of
944 * . returns self.)
946 isdot = (len == 1 && nm[0] == '.');
949 * Try to resolve the lookup in the minicache. If that fails, ask
950 * venus to do the lookup. XXX The interaction between vnode
951 * locking and any locking that coda does is not clear.
953 cp = coda_nc_lookup(dcp, nm, len, cred);
954 if (cp) {
955 *vpp = CTOV(cp);
956 vref(*vpp);
957 CODADEBUG(CODA_LOOKUP,
958 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
959 } else {
960 /* The name wasn't cached, so ask Venus. */
961 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, &vtype);
963 if (error) {
964 MARK_INT_FAIL(CODA_LOOKUP_STATS);
965 CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %s (%s)%d\n",
966 coda_f2s(&dcp->c_fid), nm, error));)
967 *vpp = (struct vnode *)0;
968 } else {
969 MARK_INT_SAT(CODA_LOOKUP_STATS);
970 CODADEBUG(CODA_LOOKUP,
971 myprintf(("lookup: %s type %o result %d\n",
972 coda_f2s(&VFid), vtype, error)); )
974 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
975 *vpp = CTOV(cp);
976 /* vpp is now vrefed. */
979 * Unless this vnode is marked CODA_NOCACHE, enter it into
980 * the coda name cache to avoid a future venus round-trip.
981 * XXX Interaction with componentname NOCACHE is unclear.
983 if (!(vtype & CODA_NOCACHE))
984 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
988 exit:
990 * If we are creating, and this was the last name to be looked up,
991 * and the error was ENOENT, then make the leaf NULL and return
992 * success.
993 * XXX Check against new lookup rules.
995 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
996 && (cnp->cn_flags & ISLASTCN)
997 && (error == ENOENT))
999 error = EJUSTRETURN;
1000 cnp->cn_flags |= SAVENAME;
1001 *ap->a_vpp = NULL;
1005 * If we are removing, and we are at the last element, and we
1006 * found it, then we need to keep the name around so that the
1007 * removal will go ahead as planned.
1008 * XXX Check against new lookup rules.
1010 if ((cnp->cn_nameiop == DELETE)
1011 && (cnp->cn_flags & ISLASTCN)
1012 && !error)
1014 cnp->cn_flags |= SAVENAME;
1018 * If the lookup succeeded, we must generally lock the returned
1019 * vnode. This could be a ., .., or normal lookup. See
1020 * vnodeops(9) for the details.
1023 * XXX LK_RETRY is likely incorrect. Handle vn_lock failure
1024 * somehow, and remove LK_RETRY.
1026 if (!error || (error == EJUSTRETURN)) {
1027 /* Lookup has a value and it isn't "."? */
1028 if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
1029 if (flags & ISDOTDOT)
1030 /* ..: unlock parent */
1031 VOP_UNLOCK(dvp, 0);
1032 /* all but .: lock child */
1033 vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
1034 if (flags & ISDOTDOT)
1035 /* ..: relock parent */
1036 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1038 /* else .: leave dvp locked */
1039 } else {
1040 /* The lookup failed, so return NULL. Leave dvp locked. */
1041 *ap->a_vpp = NULL;
1043 return(error);
1046 /*ARGSUSED*/
1048 coda_create(void *v)
1050 /* true args */
1051 struct vop_create_args *ap = v;
1052 struct vnode *dvp = ap->a_dvp;
1053 struct cnode *dcp = VTOC(dvp);
1054 struct vattr *va = ap->a_vap;
1055 int exclusive = 1;
1056 int mode = ap->a_vap->va_mode;
1057 struct vnode **vpp = ap->a_vpp;
1058 struct componentname *cnp = ap->a_cnp;
1059 kauth_cred_t cred = cnp->cn_cred;
1060 struct lwp *l = curlwp;
1061 /* locals */
1062 int error;
1063 struct cnode *cp;
1064 const char *nm = cnp->cn_nameptr;
1065 int len = cnp->cn_namelen;
1066 CodaFid VFid;
1067 struct vattr attr;
1069 MARK_ENTRY(CODA_CREATE_STATS);
1071 /* All creates are exclusive XXX */
1072 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1074 /* Check for create of control object. */
1075 if (IS_CTL_NAME(dvp, nm, len)) {
1076 *vpp = (struct vnode *)0;
1077 MARK_INT_FAIL(CODA_CREATE_STATS);
1078 return(EACCES);
1081 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1083 if (!error) {
1086 * XXX Violation of venus/kernel invariants is a difficult case,
1087 * but venus should not be able to cause a panic.
1089 /* If this is an exclusive create, panic if the file already exists. */
1090 /* Venus should have detected the file and reported EEXIST. */
1092 if ((exclusive == 1) &&
1093 (coda_find(&VFid) != NULL))
1094 panic("cnode existed for newly created file!");
1096 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1097 *vpp = CTOV(cp);
1099 /* XXX vnodeops doesn't say this argument can be changed. */
1100 /* Update va to reflect the new attributes. */
1101 (*va) = attr;
1103 /* Update the attribute cache and mark it as valid */
1104 if (coda_attr_cache) {
1105 VTOC(*vpp)->c_vattr = attr;
1106 VTOC(*vpp)->c_flags |= C_VATTR;
1109 /* Invalidate parent's attr cache (modification time has changed). */
1110 VTOC(dvp)->c_flags &= ~C_VATTR;
1112 /* enter the new vnode in the Name Cache */
1113 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1115 CODADEBUG(CODA_CREATE,
1116 myprintf(("create: %s, result %d\n",
1117 coda_f2s(&VFid), error)); )
1118 } else {
1119 *vpp = (struct vnode *)0;
1120 CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));)
1124 * vnodeops(9) says that we must unlock the parent and lock the child.
1125 * XXX Should we lock the child first?
1127 vput(dvp);
1128 if (!error) {
1129 if ((cnp->cn_flags & LOCKLEAF) == 0) {
1130 /* This should not happen; flags are for lookup only. */
1131 printf("coda_create: LOCKLEAF not set!\n");
1134 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1135 /* XXX Perhaps avoid this panic. */
1136 panic("coda_create: couldn't lock child");
1140 /* Per vnodeops(9), free name except on success and SAVESTART. */
1141 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1142 PNBUF_PUT(cnp->cn_pnbuf);
1144 return(error);
1148 coda_remove(void *v)
1150 /* true args */
1151 struct vop_remove_args *ap = v;
1152 struct vnode *dvp = ap->a_dvp;
1153 struct cnode *cp = VTOC(dvp);
1154 struct vnode *vp = ap->a_vp;
1155 struct componentname *cnp = ap->a_cnp;
1156 kauth_cred_t cred = cnp->cn_cred;
1157 struct lwp *l = curlwp;
1158 /* locals */
1159 int error;
1160 const char *nm = cnp->cn_nameptr;
1161 int len = cnp->cn_namelen;
1162 struct cnode *tp;
1164 MARK_ENTRY(CODA_REMOVE_STATS);
1166 CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %s\n",
1167 nm, coda_f2s(&cp->c_fid))););
1169 /* Remove the file's entry from the CODA Name Cache */
1170 /* We're being conservative here, it might be that this person
1171 * doesn't really have sufficient access to delete the file
1172 * but we feel zapping the entry won't really hurt anyone -- dcs
1174 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1175 * exist, and one is removed, the link count on the other will be
1176 * off by 1. We could either invalidate the attrs if cached, or
1177 * fix them. I'll try to fix them. DCS 11/8/94
1179 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1180 if (tp) {
1181 if (VALID_VATTR(tp)) { /* If attrs are cached */
1182 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1183 tp->c_vattr.va_nlink--;
1187 coda_nc_zapfile(VTOC(dvp), nm, len);
1188 /* No need to flush it if it doesn't exist! */
1190 /* Invalidate the parent's attr cache, the modification time has changed */
1191 VTOC(dvp)->c_flags &= ~C_VATTR;
1193 /* Check for remove of control object. */
1194 if (IS_CTL_NAME(dvp, nm, len)) {
1195 MARK_INT_FAIL(CODA_REMOVE_STATS);
1196 return(ENOENT);
1199 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1201 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1204 * Unlock parent and child (avoiding double if ".").
1206 if (dvp == vp) {
1207 vrele(vp);
1208 } else {
1209 vput(vp);
1211 vput(dvp);
1213 return(error);
1217 * dvp is the directory where the link is to go, and is locked.
1218 * vp is the object to be linked to, and is unlocked.
1219 * At exit, we must unlock dvp, and vput dvp.
1222 coda_link(void *v)
1224 /* true args */
1225 struct vop_link_args *ap = v;
1226 struct vnode *vp = ap->a_vp;
1227 struct cnode *cp = VTOC(vp);
1228 struct vnode *dvp = ap->a_dvp;
1229 struct cnode *dcp = VTOC(dvp);
1230 struct componentname *cnp = ap->a_cnp;
1231 kauth_cred_t cred = cnp->cn_cred;
1232 struct lwp *l = curlwp;
1233 /* locals */
1234 int error;
1235 const char *nm = cnp->cn_nameptr;
1236 int len = cnp->cn_namelen;
1238 MARK_ENTRY(CODA_LINK_STATS);
1240 if (codadebug & CODADBGMSK(CODA_LINK)) {
1242 myprintf(("nb_link: vp fid: %s\n",
1243 coda_f2s(&cp->c_fid)));
1244 myprintf(("nb_link: dvp fid: %s)\n",
1245 coda_f2s(&dcp->c_fid)));
1248 if (codadebug & CODADBGMSK(CODA_LINK)) {
1249 myprintf(("link: vp fid: %s\n",
1250 coda_f2s(&cp->c_fid)));
1251 myprintf(("link: dvp fid: %s\n",
1252 coda_f2s(&dcp->c_fid)));
1256 /* Check for link to/from control object. */
1257 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1258 MARK_INT_FAIL(CODA_LINK_STATS);
1259 return(EACCES);
1262 /* If linking . to a name, error out earlier. */
1263 if (vp == dvp) {
1264 printf("coda_link vp==dvp\n");
1265 error = EISDIR;
1266 goto exit;
1269 /* XXX Why does venus_link need the vnode to be locked?*/
1270 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1271 printf("coda_link: couldn't lock vnode %p\n", vp);
1272 error = EFAULT; /* XXX better value */
1273 goto exit;
1275 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1276 VOP_UNLOCK(vp, 0);
1278 /* Invalidate parent's attr cache (the modification time has changed). */
1279 VTOC(dvp)->c_flags &= ~C_VATTR;
1280 /* Invalidate child's attr cache (XXX why). */
1281 VTOC(vp)->c_flags &= ~C_VATTR;
1283 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1285 exit:
1286 vput(dvp);
1287 return(error);
1291 coda_rename(void *v)
1293 /* true args */
1294 struct vop_rename_args *ap = v;
1295 struct vnode *odvp = ap->a_fdvp;
1296 struct cnode *odcp = VTOC(odvp);
1297 struct componentname *fcnp = ap->a_fcnp;
1298 struct vnode *ndvp = ap->a_tdvp;
1299 struct cnode *ndcp = VTOC(ndvp);
1300 struct componentname *tcnp = ap->a_tcnp;
1301 kauth_cred_t cred = fcnp->cn_cred;
1302 struct lwp *l = curlwp;
1303 /* true args */
1304 int error;
1305 const char *fnm = fcnp->cn_nameptr;
1306 int flen = fcnp->cn_namelen;
1307 const char *tnm = tcnp->cn_nameptr;
1308 int tlen = tcnp->cn_namelen;
1310 MARK_ENTRY(CODA_RENAME_STATS);
1312 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1313 This could be Bad. XXX */
1314 #ifdef OLD_DIAGNOSTIC
1315 if ((fcnp->cn_cred != tcnp->cn_cred)
1316 || (fcnp->cn_lwp != tcnp->cn_lwp))
1318 panic("coda_rename: component names don't agree");
1320 #endif
1322 /* Check for rename involving control object. */
1323 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1324 MARK_INT_FAIL(CODA_RENAME_STATS);
1325 return(EACCES);
1328 /* Problem with moving directories -- need to flush entry for .. */
1329 if (odvp != ndvp) {
1330 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1331 if (ovcp) {
1332 struct vnode *ovp = CTOV(ovcp);
1333 if ((ovp) &&
1334 (ovp->v_type == VDIR)) /* If it's a directory */
1335 coda_nc_zapfile(VTOC(ovp),"..", 2);
1339 /* Remove the entries for both source and target files */
1340 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1341 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1343 /* Invalidate the parent's attr cache, the modification time has changed */
1344 VTOC(odvp)->c_flags &= ~C_VATTR;
1345 VTOC(ndvp)->c_flags &= ~C_VATTR;
1347 if (flen+1 > CODA_MAXNAMLEN) {
1348 MARK_INT_FAIL(CODA_RENAME_STATS);
1349 error = EINVAL;
1350 goto exit;
1353 if (tlen+1 > CODA_MAXNAMLEN) {
1354 MARK_INT_FAIL(CODA_RENAME_STATS);
1355 error = EINVAL;
1356 goto exit;
1359 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1361 exit:
1362 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
 1363 /* XXX - do we need to call cache_purge on the moved vnode? */
1364 cache_purge(ap->a_fvp);
1366 /* It seems to be incumbent on us to drop locks on all four vnodes */
1367 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1369 vrele(ap->a_fvp);
1370 vrele(odvp);
1372 if (ap->a_tvp) {
1373 if (ap->a_tvp == ndvp) {
1374 vrele(ap->a_tvp);
1375 } else {
1376 vput(ap->a_tvp);
1380 vput(ndvp);
1381 return(error);
1385 coda_mkdir(void *v)
1387 /* true args */
1388 struct vop_mkdir_args *ap = v;
1389 struct vnode *dvp = ap->a_dvp;
1390 struct cnode *dcp = VTOC(dvp);
1391 struct componentname *cnp = ap->a_cnp;
1392 struct vattr *va = ap->a_vap;
1393 struct vnode **vpp = ap->a_vpp;
1394 kauth_cred_t cred = cnp->cn_cred;
1395 struct lwp *l = curlwp;
1396 /* locals */
1397 int error;
1398 const char *nm = cnp->cn_nameptr;
1399 int len = cnp->cn_namelen;
1400 struct cnode *cp;
1401 CodaFid VFid;
1402 struct vattr ova;
1404 MARK_ENTRY(CODA_MKDIR_STATS);
 1406 /* Check for mkdir of control object. */
1407 if (IS_CTL_NAME(dvp, nm, len)) {
1408 *vpp = (struct vnode *)0;
1409 MARK_INT_FAIL(CODA_MKDIR_STATS);
1410 return(EACCES);
1413 if (len+1 > CODA_MAXNAMLEN) {
1414 *vpp = (struct vnode *)0;
1415 MARK_INT_FAIL(CODA_MKDIR_STATS);
1416 return(EACCES);
1419 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1421 if (!error) {
1422 if (coda_find(&VFid) != NULL)
1423 panic("cnode existed for newly created directory!");
1426 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1427 *vpp = CTOV(cp);
1429 /* enter the new vnode in the Name Cache */
1430 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1432 /* as a side effect, enter "." and ".." for the directory */
1433 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1434 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1436 if (coda_attr_cache) {
1437 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1438 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1441 /* Invalidate the parent's attr cache, the modification time has changed */
1442 VTOC(dvp)->c_flags &= ~C_VATTR;
1444 CODADEBUG( CODA_MKDIR, myprintf(("mkdir: %s result %d\n",
1445 coda_f2s(&VFid), error)); )
1446 } else {
1447 *vpp = (struct vnode *)0;
1448 CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n",error));)
1452 * Currently, all mkdirs explicitly vput their dvp's.
1453 * It also appears that we *must* lock the vpp, since
1454 * lockleaf isn't set, but someone down the road is going
1455 * to try to unlock the new directory.
1457 vput(dvp);
1458 if (!error) {
1459 if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
1460 panic("coda_mkdir: couldn't lock child");
1464 /* Per vnodeops(9), free name except on success and SAVESTART. */
1465 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1466 PNBUF_PUT(cnp->cn_pnbuf);
1468 return(error);
1472 coda_rmdir(void *v)
1474 /* true args */
1475 struct vop_rmdir_args *ap = v;
1476 struct vnode *dvp = ap->a_dvp;
1477 struct cnode *dcp = VTOC(dvp);
1478 struct vnode *vp = ap->a_vp;
1479 struct componentname *cnp = ap->a_cnp;
1480 kauth_cred_t cred = cnp->cn_cred;
1481 struct lwp *l = curlwp;
1482 /* true args */
1483 int error;
1484 const char *nm = cnp->cn_nameptr;
1485 int len = cnp->cn_namelen;
1486 struct cnode *cp;
1488 MARK_ENTRY(CODA_RMDIR_STATS);
1490 /* Check for rmdir of control object. */
1491 if (IS_CTL_NAME(dvp, nm, len)) {
1492 MARK_INT_FAIL(CODA_RMDIR_STATS);
1493 return(ENOENT);
1496 /* Can't remove . in self. */
1497 if (dvp == vp) {
1498 printf("coda_rmdir: dvp == vp\n");
1499 error = EINVAL;
1500 goto exit;
1504 * The caller may not have adequate permissions, and the venus
1505 * operation may fail, but it doesn't hurt from a correctness
1506 * viewpoint to invalidate cache entries.
1507 * XXX Why isn't this done after the venus_rmdir call?
1509 /* Look up child in name cache (by name, from parent). */
1510 cp = coda_nc_lookup(dcp, nm, len, cred);
1511 /* If found, remove all children of the child (., ..). */
1512 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1514 /* Remove child's own entry. */
1515 coda_nc_zapfile(dcp, nm, len);
1517 /* Invalidate parent's attr cache (the modification time has changed). */
1518 dcp->c_flags &= ~C_VATTR;
1520 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1522 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1524 exit:
1525 /* vput both vnodes */
1526 vput(dvp);
1527 if (dvp == vp) {
1528 vrele(vp);
1529 } else {
1530 vput(vp);
1533 return(error);
1537 coda_symlink(void *v)
1539 /* true args */
1540 struct vop_symlink_args *ap = v;
1541 struct vnode *dvp = ap->a_dvp;
1542 struct cnode *dcp = VTOC(dvp);
1543 /* a_vpp is used in place below */
1544 struct componentname *cnp = ap->a_cnp;
1545 struct vattr *tva = ap->a_vap;
1546 char *path = ap->a_target;
1547 kauth_cred_t cred = cnp->cn_cred;
1548 struct lwp *l = curlwp;
1549 /* locals */
1550 int error;
1551 u_long saved_cn_flags;
1552 const char *nm = cnp->cn_nameptr;
1553 int len = cnp->cn_namelen;
1554 int plen = strlen(path);
1557 * Here's the strategy for the moment: perform the symlink, then
1558 * do a lookup to grab the resulting vnode. I know this requires
 1559 * two communications with Venus for a new symbolic link, but
1560 * that's the way the ball bounces. I don't yet want to change
1561 * the way the Mach symlink works. When Mach support is
1562 * deprecated, we should change symlink so that the common case
1563 * returns the resultant vnode in a vpp argument.
1566 MARK_ENTRY(CODA_SYMLINK_STATS);
1568 /* Check for symlink of control object. */
1569 if (IS_CTL_NAME(dvp, nm, len)) {
1570 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1571 error = EACCES;
1572 goto exit;
1575 if (plen+1 > CODA_MAXPATHLEN) {
1576 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1577 error = EINVAL;
1578 goto exit;
1581 if (len+1 > CODA_MAXNAMLEN) {
1582 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1583 error = EINVAL;
1584 goto exit;
1587 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1589 /* Invalidate the parent's attr cache (modification time has changed). */
1590 dcp->c_flags &= ~C_VATTR;
1592 if (!error) {
1594 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1595 * these are defined only for VOP_LOOKUP. We desire to reuse
1596 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
1597 * stray flags passed to us. Such stray flags can occur because
1598 * sys_symlink makes a namei call and then reuses the
1599 * componentname structure.
1602 * XXX Arguably we should create our own componentname structure
1603 * and not reuse the one that was passed in.
1605 saved_cn_flags = cnp->cn_flags;
1606 cnp->cn_flags &= ~(MODMASK | OPMASK);
1607 cnp->cn_flags |= LOOKUP;
1608 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1609 cnp->cn_flags = saved_cn_flags;
1610 /* Either an error occurs, or ap->a_vpp is locked. */
1613 exit:
1614 /* unlock and deference parent */
1615 vput(dvp);
1617 /* Per vnodeops(9), free name except on success and SAVESTART. */
1618 if (error || (cnp->cn_flags & SAVESTART) == 0) {
1619 PNBUF_PUT(cnp->cn_pnbuf);
1622 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1623 return(error);
1627 * Read directory entries.
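 * The request is redirected to the container file: if no container
 * vnode is attached the directory is opened internally, VOP_READDIR is
 * called on the container vnode, and the internal open is then closed.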
1630 coda_readdir(void *v)
1632 /* true args */
1633 struct vop_readdir_args *ap = v;
1634 struct vnode *vp = ap->a_vp;
1635 struct cnode *cp = VTOC(vp);
1636 struct uio *uiop = ap->a_uio;
1637 kauth_cred_t cred = ap->a_cred;
1638 int *eofflag = ap->a_eofflag;
1639 off_t **cookies = ap->a_cookies;
1640 int *ncookies = ap->a_ncookies;
1641 /* upcall decl */
1642 /* locals */
1643 int error = 0;
1645 MARK_ENTRY(CODA_READDIR_STATS);
1647 CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %lu, %lld)\n", uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, (long long) uiop->uio_offset)); )
1649 /* Check for readdir of control object. */
1650 if (IS_CTL_VP(vp)) {
1651 MARK_INT_FAIL(CODA_READDIR_STATS);
1652 return(ENOENT);
1656 /* Redirect the request to UFS. */
1658 /* If directory is not already open do an "internal open" on it. */
1659 int opened_internally = 0;
1660 if (cp->c_ovp == NULL) {
1661 opened_internally = 1;
1662 MARK_INT_GEN(CODA_OPEN_STATS);
1663 error = VOP_OPEN(vp, FREAD, cred);
1664 #ifdef CODA_VERBOSE
1665 printf("coda_readdir: Internally Opening %p\n", vp);
1666 #endif
1667 if (error) return(error);
1668 } else
1669 vp = cp->c_ovp;
1671 /* Have UFS handle the call. */
1672 CODADEBUG(CODA_READDIR, myprintf((
1673 "indirect readdir: fid = %s, refcnt = %d\n",
1674 coda_f2s(&cp->c_fid), vp->v_usecount)); )
1675 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1676 if (error)
1677 MARK_INT_FAIL(CODA_READDIR_STATS);
1678 else
1679 MARK_INT_SAT(CODA_READDIR_STATS);
1681 /* Do an "internal close" if necessary. */
1682 if (opened_internally) {
1683 MARK_INT_GEN(CODA_CLOSE_STATS);
1684 (void)VOP_CLOSE(vp, FREAD, cred);
1688 return(error);
1692 * Convert from file system blocks to device blocks
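 * Not really: all Coda I/O goes through the container file, so
 * coda_bmap (and coda_strategy below) are never expected to be called
 * and simply fail with EINVAL.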
1695 coda_bmap(void *v)
1697 /* XXX on the global proc */
1698 /* true args */
1699 struct vop_bmap_args *ap = v;
1700 struct vnode *vp __unused = ap->a_vp; /* file's vnode */
1701 daddr_t bn __unused = ap->a_bn; /* fs block number */
1702 struct vnode **vpp = ap->a_vpp; /* RETURN vp of device */
1703 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1704 struct lwp *l __unused = curlwp;
1705 /* upcall decl */
1706 /* locals */
1708 *vpp = (struct vnode *)0;
1709 myprintf(("coda_bmap called!\n"));
1710 return(EINVAL);
1714 * I don't think the following two things are used anywhere, so I've
1715 * commented them out
1717 * struct buf *async_bufhead;
1718 * int async_daemon_count;
1721 coda_strategy(void *v)
1723 /* true args */
1724 struct vop_strategy_args *ap = v;
1725 struct buf *bp __unused = ap->a_bp;
1726 struct lwp *l __unused = curlwp;
1727 /* upcall decl */
1728 /* locals */
1730 myprintf(("coda_strategy called! "));
1731 return(EINVAL);
1735 coda_reclaim(void *v)
1737 /* true args */
1738 struct vop_reclaim_args *ap = v;
1739 struct vnode *vp = ap->a_vp;
1740 struct cnode *cp = VTOC(vp);
1741 /* upcall decl */
1742 /* locals */
1745 * Forced unmount/flush will let vnodes with non zero use be destroyed!
1747 ENTRY;
1749 if (IS_UNMOUNTING(cp)) {
1750 #ifdef DEBUG
1751 if (VTOC(vp)->c_ovp) {
1752 if (IS_UNMOUNTING(cp))
1753 printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n", vp, cp);
1755 #endif
1756 } else {
1757 #ifdef OLD_DIAGNOSTIC
1758 if (vp->v_usecount != 0)
 1759 printf("coda_reclaim: pushing active %p\n", vp);
1760 if (VTOC(vp)->c_ovp) {
1761 panic("coda_reclaim: c_ovp not void");
1763 #endif
1765 cache_purge(vp);
1766 coda_free(VTOC(vp));
1767 SET_VTOC(vp) = NULL;
1768 return (0);
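/*
 * Vnode locking: Coda keeps no private lock state; coda_lock,
 * coda_unlock and coda_islocked below simply operate on the generic
 * vnode lock (vp->v_lock) through vlockmgr()/vlockstatus(), dropping
 * the interlock first when LK_INTERLOCK is passed.
 */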
1772 coda_lock(void *v)
1774 /* true args */
1775 struct vop_lock_args *ap = v;
1776 struct vnode *vp = ap->a_vp;
1777 struct cnode *cp = VTOC(vp);
1778 int flags = ap->a_flags;
1779 /* upcall decl */
1780 /* locals */
1782 ENTRY;
1784 if (coda_lockdebug) {
1785 myprintf(("Attempting lock on %s\n",
1786 coda_f2s(&cp->c_fid)));
1789 if ((flags & LK_INTERLOCK) != 0) {
1790 mutex_exit(&vp->v_interlock);
1791 flags &= ~LK_INTERLOCK;
1794 return (vlockmgr(&vp->v_lock, flags));
1798 coda_unlock(void *v)
1800 /* true args */
1801 struct vop_unlock_args *ap = v;
1802 struct vnode *vp = ap->a_vp;
1803 struct cnode *cp = VTOC(vp);
1804 /* upcall decl */
1805 /* locals */
1807 ENTRY;
1808 if (coda_lockdebug) {
1809 myprintf(("Attempting unlock on %s\n",
1810 coda_f2s(&cp->c_fid)));
1813 return (vlockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE));
1817 coda_islocked(void *v)
1819 /* true args */
1820 struct vop_islocked_args *ap = v;
1821 ENTRY;
1823 return (vlockstatus(&ap->a_vp->v_lock));
1827 * Given a device and inode, obtain a locked vnode. One reference is
1828 * obtained and passed back to the caller.
1831 coda_grab_vnode(dev_t dev, ino_t ino, struct vnode **vpp)
1833 int error;
1834 struct mount *mp;
1836 /* Obtain mount point structure from device. */
1837 if (!(mp = devtomp(dev))) {
1838 myprintf(("coda_grab_vnode: devtomp(0x%llx) returns NULL\n",
1839 (unsigned long long)dev));
1840 return(ENXIO);
1844 * Obtain vnode from mount point and inode.
1845 * XXX VFS_VGET does not clearly define locked/referenced state of
1846 * returned vnode.
1848 error = VFS_VGET(mp, ino, vpp);
1849 if (error) {
1850 myprintf(("coda_grab_vnode: iget/vget(0x%llx, %llu) returns %p, err %d\n",
1851 (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1852 return(ENOENT);
1854 return(0);
1857 void
1858 print_vattr(struct vattr *attr)
1860 const char *typestr;
1862 switch (attr->va_type) {
1863 case VNON:
1864 typestr = "VNON";
1865 break;
1866 case VREG:
1867 typestr = "VREG";
1868 break;
1869 case VDIR:
1870 typestr = "VDIR";
1871 break;
1872 case VBLK:
1873 typestr = "VBLK";
1874 break;
1875 case VCHR:
1876 typestr = "VCHR";
1877 break;
1878 case VLNK:
1879 typestr = "VLNK";
1880 break;
1881 case VSOCK:
1882 typestr = "VSCK";
1883 break;
1884 case VFIFO:
1885 typestr = "VFFO";
1886 break;
1887 case VBAD:
1888 typestr = "VBAD";
1889 break;
1890 default:
1891 typestr = "????";
1892 break;
1896 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1897 typestr, (int)attr->va_mode, (int)attr->va_uid,
1898 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1900 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1901 (int)attr->va_fileid, (int)attr->va_nlink,
1902 (int)attr->va_size,
1903 (int)attr->va_blocksize,(int)attr->va_bytes));
1904 myprintf((" gen %ld flags %ld vaflags %d\n",
1905 attr->va_gen, attr->va_flags, attr->va_vaflags));
1906 myprintf((" atime sec %d nsec %d\n",
1907 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1908 myprintf((" mtime sec %d nsec %d\n",
1909 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1910 myprintf((" ctime sec %d nsec %d\n",
1911 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1914 /* How to print a ucred */
1915 void
1916 print_cred(kauth_cred_t cred)
1919 uint16_t ngroups;
1920 int i;
1922 myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
1923 kauth_cred_geteuid(cred)));
1925 ngroups = kauth_cred_ngroups(cred);
1926 for (i=0; i < ngroups; i++)
1927 myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
1928 myprintf(("\n"));
1933 * Return a vnode for the given fid.
1934 * If no cnode exists for this fid create one and put it
1935 * in a table hashed by coda_f2i(). If the cnode for
1936 * this fid is already in the table return it (ref count is
1937 * incremented by coda_find. The cnode will be flushed from the
1938 * table when coda_inactive calls coda_unsave.
1940 struct cnode *
1941 make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
1943 struct cnode *cp;
1944 int err;
1946 if ((cp = coda_find(fid)) == NULL) {
1947 struct vnode *vp;
1949 cp = coda_alloc();
1950 cp->c_fid = *fid;
1952 err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, &vp);
1953 if (err) {
1954 panic("coda: getnewvnode returned error %d", err);
1956 vp->v_data = cp;
1957 vp->v_type = type;
1958 cp->c_vnode = vp;
1959 uvm_vnp_setsize(vp, 0);
1960 coda_save(cp);
1962 } else {
1963 vref(CTOV(cp));
1966 return cp;
1970 * coda_getpages may be called on a vnode which has not been opened,
1971 * e.g. to fault in pages to execute a program. In that case, we must
1972 * open the file to get the container. The vnode may or may not be
1973 * locked, and we must leave it in the same state.
1974 * XXX The protocol requires v_uobj.vmobjlock to be
1975 * held by caller, but this isn't documented in vnodeops(9) or vnode_if.src.
1978 coda_getpages(void *v)
1980 struct vop_getpages_args /* {
1981 struct vnode *a_vp;
1982 voff_t a_offset;
1983 struct vm_page **a_m;
1984 int *a_count;
1985 int a_centeridx;
1986 vm_prot_t a_access_type;
1987 int a_advice;
1988 int a_flags;
1989 } */ *ap = v;
1990 struct vnode *vp = ap->a_vp;
1991 struct cnode *cp = VTOC(vp);
1992 struct lwp *l = curlwp;
1993 kauth_cred_t cred = l->l_cred;
1994 int error, cerror;
1995 int waslocked; /* 1 if vnode lock was held on entry */
1996 int didopen = 0; /* 1 if we opened container file */
1999 * Handle a case that uvm_fault doesn't quite use yet.
 2000 * See layer_vnops.c for inspiration.
2002 if (ap->a_flags & PGO_LOCKED) {
2003 return EBUSY;
2006 /* Check for control object. */
2007 if (IS_CTL_VP(vp)) {
2008 printf("coda_getpages: control object %p\n", vp);
2009 mutex_exit(&vp->v_uobj.vmobjlock);
2010 return(EINVAL);
2014 * XXX It's really not ok to be releasing the lock we get,
2015 * because we could be overlapping with another call to
2016 * getpages and drop a lock they are relying on. We need to
2017 * figure out whether getpages ever is called holding the
2018 * lock, and if we should serialize getpages calls by some
2019 * mechanism.
2021 waslocked = VOP_ISLOCKED(vp);
2023 /* Drop the vmobject lock. */
2024 mutex_exit(&vp->v_uobj.vmobjlock);
2026 /* Get container file if not already present. */
2027 if (cp->c_ovp == NULL) {
2029 * VOP_OPEN requires a locked vnode. We must avoid
2030 * locking the vnode if it is already locked, and
2031 * leave it in the same state on exit.
2033 if (waslocked == 0) {
2034 cerror = vn_lock(vp, LK_EXCLUSIVE);
2035 if (cerror) {
2036 printf("coda_getpages: can't lock vnode %p\n",
2037 vp);
2038 return cerror;
2040 #if 0
2041 printf("coda_getpages: locked vnode %p\n", vp);
2042 #endif
2046 * Open file (causes upcall to venus).
2047 * XXX Perhaps we should not fully open the file, but
2048 * simply obtain a container file.
2050 /* XXX Is it ok to do this while holding the simplelock? */
2051 cerror = VOP_OPEN(vp, FREAD, cred);
2053 if (cerror) {
2054 printf("coda_getpages: cannot open vnode %p => %d\n",
2055 vp, cerror);
2056 if (waslocked == 0)
2057 VOP_UNLOCK(vp, 0);
2058 return cerror;
2061 #if 0
2062 printf("coda_getpages: opened vnode %p\n", vp);
2063 #endif
2064 didopen = 1;
2066 KASSERT(cp->c_ovp != NULL);
2068 /* Munge the arg structure to refer to the container vnode. */
2069 ap->a_vp = cp->c_ovp;
2071 /* Get the lock on the container vnode, and call getpages on it. */
2072 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2073 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2075 /* If we opened the vnode, we must close it. */
2076 if (didopen) {
2078 * VOP_CLOSE requires a locked vnode, but we are still
2079 * holding the lock (or riding a caller's lock).
2081 cerror = VOP_CLOSE(vp, FREAD, cred);
2082 if (cerror != 0)
2083 /* XXX How should we handle this? */
2084 printf("coda_getpages: closed vnode %p -> %d\n",
2085 vp, cerror);
2087 /* If we obtained a lock, drop it. */
2088 if (waslocked == 0)
2089 VOP_UNLOCK(vp, 0);
2092 return error;
2096 * The protocol requires v_uobj.vmobjlock to be held by the caller, as
2097 * documented in vnodeops(9). XXX vnode_if.src doesn't say this.
2100 coda_putpages(void *v)
2102 struct vop_putpages_args /* {
2103 struct vnode *a_vp;
2104 voff_t a_offlo;
2105 voff_t a_offhi;
2106 int a_flags;
2107 } */ *ap = v;
2108 struct vnode *vp = ap->a_vp;
2109 struct cnode *cp = VTOC(vp);
2110 int error;
2112 /* Drop the vmobject lock. */
2113 mutex_exit(&vp->v_uobj.vmobjlock);
2115 /* Check for control object. */
2116 if (IS_CTL_VP(vp)) {
2117 printf("coda_putpages: control object %p\n", vp);
2118 return(EINVAL);
2122 * If container object is not present, then there are no pages
2123 * to put; just return without error. This happens all the
2124 * time, apparently during discard of a closed vnode (which
2125 * trivially can't have dirty pages).
2127 if (cp->c_ovp == NULL)
2128 return 0;
2130 /* Munge the arg structure to refer to the container vnode. */
2131 ap->a_vp = cp->c_ovp;
2133 /* Get the lock on the container vnode, and call putpages on it. */
2134 mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
2135 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2137 return error;