/*	$NetBSD: union_vnops.c,v 1.33 2008/05/06 18:43:44 ad Exp $	*/

/*
 * Copyright (c) 1992, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vnops.c	8.33 (Berkeley) 7/31/95
 */

/*
 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vnops.c	8.33 (Berkeley) 7/31/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: union_vnops.c,v 1.33 2008/05/06 18:43:44 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/kauth.h>

#include <fs/union/union.h>
#include <miscfs/genfs/genfs.h>

int union_lookup(void *);
int union_create(void *);
int union_whiteout(void *);
int union_mknod(void *);
int union_open(void *);
int union_close(void *);
int union_access(void *);
int union_getattr(void *);
int union_setattr(void *);
int union_read(void *);
int union_write(void *);
int union_ioctl(void *);
int union_poll(void *);
int union_revoke(void *);
int union_mmap(void *);
int union_fsync(void *);
int union_seek(void *);
int union_remove(void *);
int union_link(void *);
int union_rename(void *);
int union_mkdir(void *);
int union_rmdir(void *);
int union_symlink(void *);
int union_readdir(void *);
int union_readlink(void *);
int union_abortop(void *);
int union_inactive(void *);
int union_reclaim(void *);
int union_lock(void *);
int union_unlock(void *);
int union_bmap(void *);
int union_print(void *);
int union_islocked(void *);
int union_pathconf(void *);
int union_advlock(void *);
int union_strategy(void *);
int union_getpages(void *);
int union_putpages(void *);
int union_kqfilter(void *);

static void union_fixup(struct union_node *);
static int union_lookup1(struct vnode *, struct vnode **,
	struct vnode **, struct componentname *);
/*
 * Global vfs data structures
 */
int (**union_vnodeop_p)(void *);
const struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, union_lookup },		/* lookup */
	{ &vop_create_desc, union_create },		/* create */
	{ &vop_whiteout_desc, union_whiteout },		/* whiteout */
	{ &vop_mknod_desc, union_mknod },		/* mknod */
	{ &vop_open_desc, union_open },			/* open */
	{ &vop_close_desc, union_close },		/* close */
	{ &vop_access_desc, union_access },		/* access */
	{ &vop_getattr_desc, union_getattr },		/* getattr */
	{ &vop_setattr_desc, union_setattr },		/* setattr */
	{ &vop_read_desc, union_read },			/* read */
	{ &vop_write_desc, union_write },		/* write */
	{ &vop_ioctl_desc, union_ioctl },		/* ioctl */
	{ &vop_poll_desc, union_poll },			/* select */
	{ &vop_revoke_desc, union_revoke },		/* revoke */
	{ &vop_mmap_desc, union_mmap },			/* mmap */
	{ &vop_fsync_desc, union_fsync },		/* fsync */
	{ &vop_seek_desc, union_seek },			/* seek */
	{ &vop_remove_desc, union_remove },		/* remove */
	{ &vop_link_desc, union_link },			/* link */
	{ &vop_rename_desc, union_rename },		/* rename */
	{ &vop_mkdir_desc, union_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, union_rmdir },		/* rmdir */
	{ &vop_symlink_desc, union_symlink },		/* symlink */
	{ &vop_readdir_desc, union_readdir },		/* readdir */
	{ &vop_readlink_desc, union_readlink },		/* readlink */
	{ &vop_abortop_desc, union_abortop },		/* abortop */
	{ &vop_inactive_desc, union_inactive },		/* inactive */
	{ &vop_reclaim_desc, union_reclaim },		/* reclaim */
	{ &vop_lock_desc, union_lock },			/* lock */
	{ &vop_unlock_desc, union_unlock },		/* unlock */
	{ &vop_bmap_desc, union_bmap },			/* bmap */
	{ &vop_strategy_desc, union_strategy },		/* strategy */
	{ &vop_print_desc, union_print },		/* print */
	{ &vop_islocked_desc, union_islocked },		/* islocked */
	{ &vop_pathconf_desc, union_pathconf },		/* pathconf */
	{ &vop_advlock_desc, union_advlock },		/* advlock */
	{ &vop_getpages_desc, union_getpages },		/* getpages */
	{ &vop_putpages_desc, union_putpages },		/* putpages */
	{ &vop_kqfilter_desc, union_kqfilter },		/* kqfilter */
#ifdef notdef
	{ &vop_bwrite_desc, union_bwrite },		/* bwrite */
#endif
	{ NULL, NULL }
};
const struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };
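
/*
 * Illustrative sketch, not part of the original file: by the usual NetBSD
 * convention, the opv_desc above is what the mount code lists in the
 * filesystem's vfsops (union_vfsops.c) so that vfs_attach() can build the
 * union_vnodeop_p operations vector from the entries table, roughly:
 *
 *	const struct vnodeopv_desc * const union_vnodeopv_descs[] = {
 *		&union_vnodeop_opv_desc,
 *		NULL,
 *	};
 */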
#define FIXUP(un) { \
	if (((un)->un_flags & UN_ULOCK) == 0) { \
		union_fixup(un); \
	} \
}

static void
union_fixup(struct union_node *un)
{

	vn_lock(un->un_uppervp, LK_EXCLUSIVE | LK_RETRY);
	un->un_flags |= UN_ULOCK;
}
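
/*
 * Usage note (added for clarity): FIXUP() is invoked by operations such as
 * union_read(), union_fsync() and union_bmap() below when the upper vnode
 * is the one being operated on; if the upper vnode is not yet locked
 * (UN_ULOCK clear), union_fixup() takes the upper vnode's lock and records
 * that fact in the union node's flags, e.g.:
 *
 *	FIXUP(VTOUNION(ap->a_vp));
 */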
static int
union_lookup1(struct vnode *udvp, struct vnode **dvpp, struct vnode **vpp,
	struct componentname *cnp)
{

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_vflag & VV_ROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			*dvpp = dvp = dvp->v_mount->mnt_vnodecovered;
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
	}

	error = VOP_LOOKUP(dvp, &tdvp, cnp);

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 */
	while (dvp != udvp && (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)) {
		if (vfs_busy(mp, NULL))
			continue;
		error = VFS_ROOT(mp, &tdvp);
		vfs_unbusy(mp, false, NULL);
int
union_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	kauth_cred_t saved_cred = NULL;

	if (cnp->cn_namelen == 3 &&
			cnp->cn_nameptr[2] == '.' &&
			cnp->cn_nameptr[1] == '.' &&
			cnp->cn_nameptr[0] == '.') {
		dvp = *ap->a_vpp = LOWERVP(ap->a_dvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}

	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		/*
		 * If we're doing `..' in the underlying filesystem,
		 * we must drop our lock on the union node before
		 * going up the tree in the lower file system--if we block
		 * on the lowervp lock, and that's held by someone else
		 * coming down the tree and who's waiting for our lock,
		 */
		if (cnp->cn_flags & ISDOTDOT) {
			/* retain lock on underlying VP */
			dun->un_flags |= UN_KLOCK;
		}

		uerror = union_lookup1(um->um_uppervp, &upperdvp,

		if (cnp->cn_flags & ISDOTDOT) {
			if (dun->un_uppervp == upperdvp) {
				/*
				 * we got the underlying bugger back locked...
				 * now take back the union node lock.  Since we
				 * hold the uppervp lock, we can diddle union
				 * locking flags at will. :)
				 */
				dun->un_flags |= UN_ULOCK;
			}

			/*
			 * if upperdvp got swapped out, it means we did
			 * some mount point magic, and we do not have
			 * dun->un_uppervp locked currently--so we get it
			 * locked here (don't set the UN_ULOCK flag).
			 */
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}

		if (cnp->cn_consume != 0) {
			*ap->a_vpp = uppervp;
		}

		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
			} else if (lowerdvp != NULLVP) {
				lerror = VOP_GETATTR(upperdvp, &va,

				if (lerror == 0 && (va.va_flags & OPAQUE))

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	if (lowerdvp != NULLVP && !iswhiteout) {
		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY);

		/*
		 * Only do a LOOKUP on the bottom node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * we shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 */
		lerror = union_lookup1(um->um_lowervp, &lowerdvp,

		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (lowervp != lowerdvp)
			VOP_UNLOCK(lowerdvp, 0);

		if (cnp->cn_consume != 0) {
			if (uppervp != NULLVP) {
				if (uppervp == upperdvp)

			*ap->a_vpp = lowervp;

	if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
		lowervp = LOWERVP(dun->un_pvp);
		if (lowervp != NULLVP) {
			vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * EJUSTRETURN is used by underlying filesystems to indicate that
	 * a directory modification op was started successfully.
	 * This will only happen in the upper layer, since
	 * the lower layer only does LOOKUPs.
	 * If this union is mounted read-only, bounce it now.
	 */
	if ((uerror == EJUSTRETURN) && (cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    ((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)))
		return (EROFS);

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a.  the bottom vnode is not a directory, in which
	 *	  case just return a new union vnode referencing
	 *	  an empty top layer and the existing bottom layer.
	 *    b.  the bottom vnode is a directory, in which case
	 *	  create a new directory in the top-level and
	 *	  continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	if ((uerror != 0) && (lerror != 0)) {

	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			/*
			 * We may be racing another process to make the
			 * upper-level shadow directory.  Be careful with
			 * locks/etc!
			 */
			dun->un_flags &= ~UN_ULOCK;
			VOP_UNLOCK(upperdvp, 0);
			uerror = union_mkshadow(um, upperdvp, cnp,
			vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY);
			dun->un_flags |= UN_ULOCK;

		if (lowervp != NULLVP) {

	if (lowervp != NULLVP)
		VOP_UNLOCK(lowervp, 0);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
	    uppervp, lowervp, 1);

	if (uppervp != NULLVP)
	if (lowervp != NULLVP)
int
union_create(void *v)
{
	struct vop_create_args /* {
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	un->un_flags |= UN_KLOCK;
	mp = ap->a_dvp->v_mount;

	error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);

	error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp,

int
union_whiteout(void *v)
{
	struct vop_whiteout_args /* {
		struct componentname *a_cnp;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;

	if (un->un_uppervp == NULLVP)
		return (EOPNOTSUPP);

	return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
}

int
union_mknod(void *v)
{
	struct vop_mknod_args /* {
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	un->un_flags |= UN_KLOCK;
	mp = ap->a_dvp->v_mount;

	error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);

	error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
int
union_open(void *v)
{
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);
	int mode = ap->a_mode;
	kauth_cred_t cred = ap->a_cred;
	struct lwp *l = curlwp;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	tvp = un->un_uppervp;

		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			error = union_copyup(un, (mode & O_TRUNC) == 0, cred, l);

			error = VOP_OPEN(un->un_uppervp, mode, cred);
		}

		/*
		 * Just open the lower vnode, but check for nodev mount flag
		 */
		if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
		    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
			return (ENXIO);

		vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(tvp, mode, cred);

	/*
	 * Just open the upper vnode, checking for nodev mount flag first
	 */
	if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	error = VOP_OPEN(tvp, mode, cred);

int
union_close(void *v)
{
	struct vop_close_args /* {
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);

#ifdef UNION_DIAGNOSTIC
	if (un->un_openl <= 0)
		panic("union: un_openl cnt");
#endif

		panic("union_close empty union vnode");

	return (VCALL(vp, VOFFSET(vop_close), ap));
}
/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
int
union_access(void *v)
{
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	struct union_mount *um = MOUNTTOUNIONMOUNT(vp->v_mount);

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (ap->a_mode & VWRITE) {
		switch (vp->v_type) {
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);

	if ((vp = un->un_uppervp) != NULLVP) {
		return (VCALL(vp, VOFFSET(vop_access), ap));

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VCALL(vp, VOFFSET(vop_access), ap);
		if (um->um_op == UNMNT_BELOW) {
			ap->a_cred = um->um_cred;
			error = VCALL(vp, VOFFSET(vop_access), ap);
/*
 * We handle getattr only to change the fsid and
 * track object sizes.
 */
int
union_getattr(void *v)
{
	struct vop_getattr_args /* {
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp = un->un_uppervp;

	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 *
	 * To make life more interesting, some filesystems don't keep
	 * track of link counts in the expected way, and return a
	 * link count of `1' for those directories; if either of the
	 * component directories returns a link count of `1', we return a 1.
	 */

		/*
		 * It's not clear whether VOP_GETATTR is to be
		 * called with the vnode locked or not.  stat() calls
		 * it with (vp) locked, and fstat calls it with
		 * (vp) unlocked.
		 * In the mean time, compensate here by checking
		 * the union_node's lock flag.
		 */
		if (un->un_flags & UN_LOCKED)

		error = VOP_GETATTR(vp, vap, ap->a_cred);
			union_newsize(ap->a_vp, vap->va_size, VNOVAL);

	} else if (vp->v_type == VDIR) {

		error = VOP_GETATTR(vp, vap, ap->a_cred);
			union_newsize(ap->a_vp, VNOVAL, vap->va_size);

	if ((vap != ap->a_vap) && (vap->va_type == VDIR)) {
		/*
		 * Link count manipulation:
		 *	- If both return "2", return 2 (no subdirs)
		 *	- If one or the other return "1", return "1" (ENOCLUE)
		 */
		if ((ap->a_vap->va_nlink == 2) &&
		    (vap->va_nlink == 2))
			;
		else if (ap->a_vap->va_nlink != 1) {
			if (vap->va_nlink == 1)
				ap->a_vap->va_nlink = 1;
			else
				ap->a_vap->va_nlink += vap->va_nlink;
		}
	}
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
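
/*
 * Worked example of the link-count fixup above (added for clarity): an
 * upper directory reporting va_nlink == 4 stacked over a lower directory
 * reporting va_nlink == 3 yields 4 + 3 = 7 -- not exact, but large enough
 * that link-counting tree walkers will not prune the directory early.
 * If both layers report 2, the result stays 2 (no subdirectories); if
 * either layer reports the degenerate count of 1, the result is pinned
 * to 1.
 */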
int
union_setattr(void *v)
{
	struct vop_setattr_args /* {
	} */ *ap = v;
	struct vattr *vap = ap->a_vap;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {

		/*
		 * Disallow write attempts if the filesystem is
		 * mounted read-only.
		 */
		if (vp->v_mount->mnt_flag & MNT_RDONLY)
			return (EROFS);

	/*
	 * Handle case of truncating lower object to zero size,
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if ((un->un_uppervp == NULLVP) &&
	    /* assert(un->un_lowervp != NULLVP) */
	    (un->un_lowervp->v_type == VREG)) {
		error = union_copyup(un, (vap->va_size != 0),

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	if (un->un_uppervp != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, vap, ap->a_cred);
		if ((error == 0) && (vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, vap->va_size, VNOVAL);
int
union_read(void *v)
{
	struct vop_read_args /* {
	} */ *ap = v;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	else
		FIXUP(VTOUNION(ap->a_vp));
	error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);

	/*
	 * perhaps the size of the underlying object has changed under
	 * our feet.  take advantage of the offset information present
	 * in the uio structure.
	 */
	{
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (vp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
		} else {
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
		}
	}

int
union_write(void *v)
{
	struct vop_read_args /* {
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);

	vp = UPPERVP(ap->a_vp);
	if (vp == NULLVP)
		panic("union: missing upper layer in write");

	error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);

	/*
	 * the size of the underlying object may be changed by the
	 * write.
	 */
	{
		off_t cur = ap->a_uio->uio_offset;

		if (cur > un->un_uppersz)
			union_newsize(ap->a_vp, cur, VNOVAL);
	}
int
union_ioctl(void *v)
{
	struct vop_ioctl_args /* {
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *ovp = OTHERVP(ap->a_vp);

	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
}

int
union_poll(void *v)
{
	struct vop_poll_args /* {
	} */ *ap = v;
	struct vnode *ovp = OTHERVP(ap->a_vp);

	return (VCALL(ovp, VOFFSET(vop_poll), ap));
}

int
union_revoke(void *v)
{
	struct vop_revoke_args /* {
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (UPPERVP(vp))
		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
	if (LOWERVP(vp))
		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
	vgone(vp);	/* XXXAD?? */

int
union_mmap(void *v)
{
	struct vop_mmap_args /* {
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *ovp = OTHERVP(ap->a_vp);

	return (VCALL(ovp, VOFFSET(vop_mmap), ap));
}
int
union_fsync(void *v)
{
	struct vop_fsync_args /* {
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *targetvp;

	/*
	 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't
	 * bother syncing the underlying vnodes, since (a) they'll be
	 * fsync'ed when reclaimed and (b) we could deadlock if
	 * they're locked; otherwise, pass it through to the
	 * underlying layer.
	 */
	if (ap->a_flags & FSYNC_RECLAIM)
		return 0;

	targetvp = OTHERVP(ap->a_vp);

	if (targetvp != NULLVP) {
		int dolock = (targetvp == LOWERVP(ap->a_vp));

		if (dolock)
			vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY);
		else
			FIXUP(VTOUNION(ap->a_vp));
		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_flags,
		    ap->a_offlo, ap->a_offhi);
		VOP_UNLOCK(targetvp, 0);

int
union_seek(void *v)
{
	struct vop_seek_args /* {
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *ovp = OTHERVP(ap->a_vp);

	return (VCALL(ovp, VOFFSET(vop_seek), ap));
}
int
union_remove(void *v)
{
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;

	if (dun->un_uppervp == NULLVP)
		panic("union remove: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		dun->un_flags |= UN_KLOCK;
		un->un_flags |= UN_KLOCK;

		if (union_dowhiteout(un, cnp->cn_cred))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_REMOVE(dvp, vp, cnp);
		union_removed_upper(un);

	error = union_mkwhiteout(
	    MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
	    dun->un_uppervp, ap->a_cnp, un->un_path);
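
/*
 * Added note: when the object being removed exists only in the lower
 * layer (un->un_uppervp == NULLVP), nothing can be unlinked there, so
 * removal is simulated by union_mkwhiteout() creating a whiteout entry
 * in the upper directory; likewise, when union_dowhiteout() reports that
 * the lower layer still holds a copy, DOWHITEOUT is passed to
 * VOP_REMOVE() so the upper filesystem leaves a whiteout behind instead
 * of a plain hole.
 */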
int
union_link(void *v)
{
	struct vop_link_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct union_node *dun;

	dun = VTOUNION(ap->a_dvp);

	if (!(ap->a_cnp->cn_flags & LOCKPARENT)) {
		printf("union_link called without LOCKPARENT set!\n");
		error = EIO;	/* need some error code for "caller is a bozo" */
	}

	if (ap->a_dvp->v_op != ap->a_vp->v_op) {
	} else {
		struct union_node *un = VTOUNION(ap->a_vp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * Needs to be copied before we can link it.
			 */
			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
			if (dun->un_uppervp == un->un_dirvp) {
				dun->un_flags &= ~UN_ULOCK;
				VOP_UNLOCK(dun->un_uppervp, 0);
			}
			error = union_copyup(un, 1, cnp->cn_cred, curlwp);
			if (dun->un_uppervp == un->un_dirvp) {
				/*
				 * During copyup, we dropped the lock on the
				 * dir and invalidated any saved namei lookup
				 * state for the directory we'll be entering
				 * the link in.  We need to re-run the lookup
				 * in that directory to reset any state needed
				 * for VOP_LINK.
				 * Call relookup on the union-layer to reset
				 * the state.
				 */
				if (dun->un_uppervp == NULLVP)
					panic("union: null upperdvp?");
				error = relookup(ap->a_dvp, &vp, ap->a_cnp);

				VOP_UNLOCK(ap->a_vp, 0);
				return EROFS;	/* ? */

					/*
					 * The name we want to create has
					 * mysteriously appeared (a race?)
					 */
					VOP_UNLOCK(ap->a_vp, 0);
			}
			VOP_UNLOCK(ap->a_vp, 0);
		}
		vp = un->un_uppervp;
	}

	dvp = dun->un_uppervp;

	dun->un_flags |= UN_KLOCK;

	return (VOP_LINK(dvp, vp, cnp));
}
int
union_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
		}

		fdvp = un->un_uppervp;
	}

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
		if (un->un_uppervp == NULLVP) {
			/* XXX: should do a copyup */
		}

		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;

		fvp = un->un_uppervp;
	}

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
		}

		tdvp = un->un_uppervp;
		un->un_flags |= UN_KLOCK;
	}

	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);

		tvp = un->un_uppervp;
		if (tvp != NULLVP) {
			un->un_flags |= UN_KLOCK;
		}
	}

	error = VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp);

	if (fdvp != ap->a_fdvp) {
	if (fvp != ap->a_fvp) {
int
union_mkdir(void *v)
{
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	if (dvp != NULLVP) {
		un->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_dvp, 0);

		error = VOP_MKDIR(dvp, &vp, cnp, ap->a_vap);

		error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp,
		    NULLVP, cnp, vp, NULLVP, 1);
int
union_rmdir(void *v)
{
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;

	if (dun->un_uppervp == NULLVP)
		panic("union rmdir: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		dun->un_flags |= UN_KLOCK;
		un->un_flags |= UN_KLOCK;

		if (union_dowhiteout(un, cnp->cn_cred))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_RMDIR(dvp, vp, ap->a_cnp);
		union_removed_upper(un);

	error = union_mkwhiteout(
	    MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
	    dun->un_uppervp, ap->a_cnp, un->un_path);
int
union_symlink(void *v)
{
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	if (dvp != NULLVP) {
		un->un_flags |= UN_KLOCK;

		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
/*
 * union_readdir works in concert with getdirentries and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
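
/*
 * Added note: both layers may contain an entry with the same name (for
 * example a "Makefile" present in the upper and in the lower directory),
 * which is why this vnode operation only ever reads the upper directory
 * (see the use of un->un_uppervp below); the walk into the lower
 * directory and the duplicate suppression happen above this layer, as
 * described in the comment.
 */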
int
union_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnodeop_desc *a_desc;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *uvp = un->un_uppervp;

	return (VCALL(uvp, VOFFSET(vop_readdir), ap));
}

int
union_readlink(void *v)
{
	struct vop_readlink_args /* {
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	else
		FIXUP(VTOUNION(ap->a_vp));
	error = VCALL(vp, VOFFSET(vop_readlink), ap);
int
union_abortop(void *v)
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *vp = OTHERVP(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_dvp);
	int islocked = un->un_flags & UN_LOCKED;
	int dolock = (vp == LOWERVP(ap->a_dvp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	else
		FIXUP(VTOUNION(ap->a_dvp));

	error = VCALL(vp, VOFFSET(vop_abortop), ap);
	if (islocked && dolock)
		VOP_UNLOCK(vp, 0);

int
union_inactive(void *v)
{
	struct vop_inactive_args /* {
		const struct vnodeop_desc *a_desc;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

	*ap->a_recycle = ((un->un_flags & UN_CACHED) == 0);

int
union_reclaim(void *v)
{
	struct vop_reclaim_args /* {
	} */ *ap = v;

	union_freevp(ap->a_vp);
int
union_lock(void *v)
{
	struct vop_lock_args /* {
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct union_node *un;

	/* XXX unionfs can't handle shared locks yet */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags = LK_EXCLUSIVE | (flags & ~LK_TYPE_MASK);
	}

	/*
	 * Need to do real lockmgr-style locking here.
	 * in the mean time, draining won't work quite right,
	 * which could lead to a few race conditions.
	 * the following test was here, but is not quite right, we
	 * still need to take the lock:
	if ((flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	 */

	flags &= ~LK_INTERLOCK;

	if (un->un_uppervp != NULLVP) {
		if (((un->un_flags & UN_ULOCK) == 0) &&
		    (vp->v_usecount != 0)) {
			/*
			 * We MUST always use the order of: take upper
			 * vp lock, manipulate union node flags, drop
			 * upper vp lock.  This code must not be an
			 * exception.
			 */
			error = vn_lock(un->un_uppervp, flags);
			un->un_flags |= UN_ULOCK;
		}
		if (un->un_flags & UN_KLOCK) {
			vprint("union: dangling klock", vp);
			panic("union: dangling upper lock (%p)", vp);
		}
	}

	/* XXX ignores LK_NOWAIT */
	if (un->un_flags & UN_LOCKED) {
		if (curproc && un->un_pid == curproc->p_pid &&
		    un->un_pid > -1 && curproc->p_pid > -1)
			panic("union: locking against myself");
		un->un_flags |= UN_WANTED;
		tsleep(&un->un_flags, PINOD, "unionlk2", 0);
	}

	un->un_pid = curproc->p_pid;

	un->un_flags |= UN_LOCKED;
/*
 * When operations want to vput() a union node yet retain a lock on
 * the upper vnode (say, to do some further operations like link(),
 * mkdir(), ...), they set UN_KLOCK on the union node, then call
 * vput() which calls VOP_UNLOCK() and comes here.  union_unlock()
 * unlocks the union node (leaving the upper vnode alone), clears the
 * KLOCK flag, and then returns to vput().  The caller then does whatever
 * is left to do with the upper vnode, and ensures that it gets unlocked.
 *
 * If UN_KLOCK isn't set, then the upper vnode is unlocked here.
 */
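
/*
 * Sketch of the caller pattern described above (added for clarity; see
 * union_create(), union_mknod(), union_mkdir() etc. for the real uses):
 *
 *	un->un_flags |= UN_KLOCK;	keep the upper vnode locked
 *	vput(ap->a_dvp);		unlocks only the union node
 *	error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
 *					dvp (the upper vnode) is still locked
 */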
int
union_unlock(void *v)
{
	struct vop_unlock_args /* {
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);

	if ((un->un_flags & UN_LOCKED) == 0)
		panic("union: unlock unlocked node");
	if (curproc && un->un_pid != curproc->p_pid &&
	    curproc->p_pid > -1 && un->un_pid > -1)
		panic("union: unlocking other process's union node");

	un->un_flags &= ~UN_LOCKED;

	if ((un->un_flags & (UN_ULOCK|UN_KLOCK)) == UN_ULOCK)
		VOP_UNLOCK(un->un_uppervp, 0);

	un->un_flags &= ~(UN_ULOCK|UN_KLOCK);

	if (un->un_flags & UN_WANTED) {
		un->un_flags &= ~UN_WANTED;
		wakeup(&un->un_flags);
	}
int
union_bmap(void *v)
{
	struct vop_bmap_args /* {
		struct vnode **a_vpp;
	} */ *ap = v;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	else
		FIXUP(VTOUNION(ap->a_vp));
	error = VCALL(vp, VOFFSET(vop_bmap), ap);

int
union_print(void *v)
{
	struct vop_print_args /* {
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
	    vp, UPPERVP(vp), LOWERVP(vp));
	if (UPPERVP(vp) != NULLVP)
		vprint("union: upper", UPPERVP(vp));
	if (LOWERVP(vp) != NULLVP)
		vprint("union: lower", LOWERVP(vp));
	if (VTOUNION(vp)->un_dircache) {
		for (vpp = VTOUNION(vp)->un_dircache; *vpp != NULLVP; vpp++)
			vprint("dircache:", *vpp);
	}
int
union_islocked(void *v)
{
	struct vop_islocked_args /* {
	} */ *ap = v;

	return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0);
}

int
union_pathconf(void *v)
{
	struct vop_pathconf_args /* {
	} */ *ap = v;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	else
		FIXUP(VTOUNION(ap->a_vp));
	error = VCALL(vp, VOFFSET(vop_pathconf), ap);

int
union_advlock(void *v)
{
	struct vop_advlock_args /* {
	} */ *ap = v;
	struct vnode *ovp = OTHERVP(ap->a_vp);

	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
}
/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
union_strategy(void *v)
{
	struct vop_strategy_args /* {
	} */ *ap = v;
	struct vnode *ovp = OTHERVP(ap->a_vp);
	struct buf *bp = ap->a_bp;

	if (ovp == NULLVP)
		panic("union_strategy: nil vp");
	if (((bp->b_flags & B_READ) == 0) &&
	    (ovp == LOWERVP(bp->b_vp)))
		panic("union_strategy: writing to lowervp");

	return (VOP_STRATEGY(ovp, bp));
}
int
union_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vm_page **a_m;
		vm_prot_t a_access_type;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/*
	 * just pass the request on to the underlying layer.
	 */

	if (ap->a_flags & PGO_LOCKED) {
	}

	ap->a_vp = OTHERVP(vp);
	mutex_exit(&vp->v_interlock);
	mutex_enter(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);

int
union_putpages(void *v)
{
	struct vop_putpages_args /* {
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/*
	 * just pass the request on to the underlying layer.
	 */

	ap->a_vp = OTHERVP(vp);
	mutex_exit(&vp->v_interlock);
	if (ap->a_flags & PGO_RECLAIM) {
	}

	mutex_enter(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);

int
union_kqfilter(void *v)
{
	struct vop_kqfilter_args /* {
	} */ *ap = v;

	/*
	 * We watch either the upper layer file (if it already exists),
	 * or the lower layer one.  If there is lower layer file only
	 * at this moment, we will keep watching that lower layer file
	 * even if upper layer file would be created later on.
	 */
	if (UPPERVP(ap->a_vp))
		error = VOP_KQFILTER(UPPERVP(ap->a_vp), ap->a_kn);
	else if (LOWERVP(ap->a_vp))
		error = VOP_KQFILTER(LOWERVP(ap->a_vp), ap->a_kn);