/*	$NetBSD: p2k.c,v 1.32 2009/12/23 01:11:39 pooka Exp $	*/

/*
 * Copyright (c) 2007, 2008, 2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * puffs 2k, i.e. puffs 2 kernel.  Converts the puffs protocol to
 * the kernel vfs protocol and vice versa.
 *
 * A word about reference counting: puffs in the kernel is the king of
 * reference counting.  We must maintain a vnode alive and kicking
 * until the kernel tells us to reclaim it.  Therefore we make sure
 * we never accidentally lose a vnode.  Before calling operations which
 * decrease the refcount we always bump the refcount up to compensate.
 * Come inactive, if the file system thinks that the vnode should be
 * put out of its misery, it will set the recycle flag.  We use this
 * to tell the kernel to reclaim the vnode.  Only in reclaim do we
 * really nuke the last reference.
 */
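
/*
 * A minimal sketch of that protocol, using the calls which appear
 * later in this file (illustrative only, error handling omitted):
 *
 *	rump_pub_vp_incref(vp);			// compensate before a
 *	RUMP_VOP_REMOVE(dvp, vp, cn);		// refcount-dropping op
 *	...
 *	RUMP_VOP_INACTIVE(vp, &recycle);	// fs may ask for recycling
 *	if (recycle)
 *		puffs_setback(pcc, PUFFS_SETBACK_NOREF_N1);
 *	...
 *	rump_pub_vp_rele(vp);			// reclaim: last ref goes away
 */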
#include <sys/cdefs.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/hash.h>
#include <sys/queue.h>

#include <assert.h>
#include <errno.h>
#include <puffs.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rump/rump.h>
#include <rump/p2k.h>
#include <rump/ukfs.h>

PUFFSOP_PROTOS(p2k)
LIST_HEAD(p2k_vp_hash, p2k_node);
#define NHASHBUCK (1<<16)

struct p2k_mount {
	struct vnode *p2m_rvp;
	struct puffs_usermount *p2m_pu;
	struct ukfs *p2m_ukfs;
	struct p2k_vp_hash p2m_vphash[NHASHBUCK];
	int p2m_imtmpfsman;
};

struct p2k_node {
	struct vnode *p2n_vp;
	struct componentname *p2n_cn;

	/*
	 * Ok, then, uhm, we need .. *drumroll*.. two componentname
	 * storages for rename.  This is because the source dir is
	 * unlocked after the first lookup, and someone else might
	 * race in here.  However, we know it's not another rename
	 * because of the kernel rename lock.  And we need two since
	 * srcdir and targdir might be the same.  It's a wonderful world.
	 */
	struct componentname *p2n_cn_ren_src, *p2n_cn_ren_targ;

	LIST_ENTRY(p2k_node) p2n_entries;
};

#define OPC2VP(opc) (((struct p2k_node *)opc)->p2n_vp)
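
/*
 * Note: every puffs cookie handed to the node operations below is
 * really a struct p2k_node pointer, so the backing rump vnode is
 * always reachable as, e.g.:
 *
 *	struct vnode *vp = OPC2VP(opc);
 *	struct p2k_node *p2n = opc;
 */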
static uid_t wizarduid;

static kauth_cred_t
cred_create(const struct puffs_cred *pcr)
{
	gid_t groups[NGROUPS];
	uid_t uid;
	gid_t gid;
	short ngroups = __arraycount(groups);

	if (puffs_cred_getuid(pcr, &uid) == -1)
		uid = wizarduid;	/* assumed fallback; wizarduid is 0 unless set */
	if (puffs_cred_getgid(pcr, &gid) == -1)
		gid = 0;		/* assumed fallback */
	puffs_cred_getgroups(pcr, groups, &ngroups);

	/* LINTED: ngroups is ok */
	return rump_pub_cred_create(uid, gid, ngroups, groups);
}

static void
cred_destroy(kauth_cred_t cred)
{

	rump_pub_cred_put(cred);
}
static struct componentname *
makecn(const struct puffs_cn *pcn, int myflags)
{
	kauth_cred_t cred;

	cred = cred_create(pcn->pcn_cred);
	/* LINTED: prehistoric types in first two args */
	return rump_pub_makecn(pcn->pcn_nameiop, pcn->pcn_flags | myflags,
	    pcn->pcn_name, pcn->pcn_namelen, cred, rump_pub_lwp_curlwp());
}

static void
freecn(struct componentname *cnp, int flags)
{

	rump_pub_freecn(cnp, flags | RUMPCN_FREECRED);
}
static void
makelwp(struct puffs_usermount *pu)
{
	pid_t pid;
	lwpid_t lid;

	puffs_cc_getcaller(puffs_cc_getcc(pu), &pid, &lid);
	rump_pub_lwp_alloc_and_switch(pid, lid);
}

static void
clearlwp(struct puffs_usermount *pu)
{

	rump_pub_lwp_release(rump_pub_lwp_curlwp());
}
static __inline struct p2k_vp_hash *
gethash(struct p2k_mount *p2m, struct vnode *vp)
{
	uint32_t hash;

	hash = hash32_buf(&vp, sizeof(vp), HASH32_BUF_INIT);
	return &p2m->p2m_vphash[hash % NHASHBUCK];
}
/*
 * Find node based on hash of vnode pointer.  If vnode is found,
 * releases one reference to vnode based on the fact that we just
 * performed a lookup for it.
 *
 * If the optional p2n_storage parameter is passed, it is used instead
 * of allocating more memory.  This allows for easier error recovery.
 */
static struct p2k_node *
getp2n(struct p2k_mount *p2m, struct vnode *vp, bool initial,
	struct p2k_node *p2n_storage)
{
	struct p2k_vp_hash *hl;
	struct p2k_node *p2n = NULL;

	/* p2n_storage => initial */
	assert(!p2n_storage || initial);

	hl = gethash(p2m, vp);
	if (!initial) {
		LIST_FOREACH(p2n, hl, p2n_entries)
			if (p2n->p2n_vp == vp)
				break;
	}

	if (p2n) {
		rump_pub_vp_rele(vp);
	} else {
		if (p2n_storage)
			p2n = p2n_storage;
		else
			p2n = malloc(sizeof(*p2n));
		if (p2n == NULL) {
			rump_pub_vp_rele(vp);
			return NULL;
		}
		memset(p2n, 0, sizeof(*p2n));
		LIST_INSERT_HEAD(hl, p2n, p2n_entries);
		p2n->p2n_vp = vp;
	}

	return p2n;
}
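
/*
 * Usage sketch: after a successful lookup-style VOP the vnode is mapped
 * back to its p2k_node with getp2n(p2m, vp, false, NULL).  do_makenode()
 * instead preallocates the node and calls getp2n(p2m, vp, true, p2n), so
 * the error path after the file system VOP stays trivial.
 */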
static void
freep2n(struct p2k_node *p2n)
{

	assert(p2n->p2n_vp == NULL);
	assert(p2n->p2n_cn == NULL);
	LIST_REMOVE(p2n, p2n_entries);
	free(p2n);
}
static void
p2k_errcatcher(struct puffs_usermount *pu, uint8_t type, int error,
	const char *str, puffs_cookie_t cook)
{

	fprintf(stderr, "type %d, error %d, cookie %p (%s)\n",
	    type, error, cook, str);

	/*
	 * Trap all EINVAL responses to lookup.  It most likely means
	 * that we supplied VNON/VBAD as the type.  The real kernel
	 * doesn't panic from this either, but just handles it.
	 */
	if (type != PUFFS_VN_LOOKUP && error == EINVAL)
		abort();
}
/* just to avoid annoying loop when singlestepping */
static struct p2k_mount *
allocp2m(void)
{
	struct p2k_mount *p2m;
	int i;

	p2m = malloc(sizeof(*p2m));
	if (p2m == NULL)
		return NULL;
	memset(p2m, 0, sizeof(*p2m));

	for (i = 0; i < NHASHBUCK; i++)
		LIST_INIT(&p2m->p2m_vphash[i]);

	return p2m;
}
struct p2k_mount *
p2k_init(uint32_t puffs_flags)
{
	struct puffs_ops *pops;
	struct p2k_mount *p2m;
	char *envbuf;
	bool dodaemon = true;
	int sverrno;

	PUFFSOP_INIT(pops);

	PUFFSOP_SET(pops, p2k, fs, statvfs);
	PUFFSOP_SET(pops, p2k, fs, unmount);
	PUFFSOP_SET(pops, p2k, fs, sync);
	PUFFSOP_SET(pops, p2k, fs, fhtonode);
	PUFFSOP_SET(pops, p2k, fs, nodetofh);

	PUFFSOP_SET(pops, p2k, node, lookup);
	PUFFSOP_SET(pops, p2k, node, create);
	PUFFSOP_SET(pops, p2k, node, mknod);
	PUFFSOP_SET(pops, p2k, node, open);
	PUFFSOP_SET(pops, p2k, node, close);
	PUFFSOP_SET(pops, p2k, node, access);
	PUFFSOP_SET(pops, p2k, node, getattr);
	PUFFSOP_SET(pops, p2k, node, setattr);
#if 0
	PUFFSOP_SET(pops, p2k, node, poll);
#endif
	PUFFSOP_SET(pops, p2k, node, mmap);
	PUFFSOP_SET(pops, p2k, node, fsync);
	PUFFSOP_SET(pops, p2k, node, seek);
	PUFFSOP_SET(pops, p2k, node, remove);
	PUFFSOP_SET(pops, p2k, node, link);
	PUFFSOP_SET(pops, p2k, node, rename);
	PUFFSOP_SET(pops, p2k, node, mkdir);
	PUFFSOP_SET(pops, p2k, node, rmdir);
	PUFFSOP_SET(pops, p2k, node, symlink);
	PUFFSOP_SET(pops, p2k, node, readdir);
	PUFFSOP_SET(pops, p2k, node, readlink);
	PUFFSOP_SET(pops, p2k, node, read);
	PUFFSOP_SET(pops, p2k, node, write);

	PUFFSOP_SET(pops, p2k, node, inactive);
	PUFFSOP_SET(pops, p2k, node, reclaim);
	PUFFSOP_SET(pops, p2k, node, abortop);

	if (getenv("P2K_DEBUG") != NULL) {
		puffs_flags |= PUFFS_FLAG_OPDUMP;
		dodaemon = false;
	}
	if (getenv("P2K_NODETACH") != NULL) {
		dodaemon = false;
	}
	if (getenv("P2K_NOCACHE_PAGE") != NULL) {
		puffs_flags |= PUFFS_KFLAG_NOCACHE_PAGE;
	}
	if (getenv("P2K_NOCACHE_NAME") != NULL) {
		puffs_flags |= PUFFS_KFLAG_NOCACHE_NAME;
	}
	if (getenv("P2K_NOCACHE") != NULL) {
		puffs_flags |= PUFFS_KFLAG_NOCACHE;
	}
	if ((envbuf = getenv("P2K_WIZARDUID")) != NULL) {
		/* default to 0 in error cases */
		wizarduid = atoi(envbuf);
		printf("P2K WIZARD MODE: using uid %d\n", wizarduid);
	}

	p2m = allocp2m();
	if (p2m == NULL)
		return NULL;
	p2m->p2m_pu = puffs_init(pops, PUFFS_DEFER, PUFFS_DEFER,
	    PUFFS_DEFER, puffs_flags);
	if (p2m->p2m_pu == NULL) {
		sverrno = errno;
		free(p2m);
		errno = sverrno;
		return NULL;
	}

	if (dodaemon) {
		if (puffs_daemon(p2m->p2m_pu, 1, 1) == -1) {
			sverrno = errno;
			p2k_cancel(p2m, sverrno);
			errno = sverrno;
			return NULL;
		}
	}

	return p2m;
}
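
/*
 * The environment knobs above are handy when debugging a file server
 * built on top of p2k, e.g. (illustrative invocation):
 *
 *	$ env P2K_DEBUG=1 P2K_NOCACHE=1 rump_ffs /dev/rwd0e /mnt
 */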
void
p2k_cancel(struct p2k_mount *p2m, int error)
{

	puffs_cancel(p2m->p2m_pu, error);
	free(p2m);
}
static int
setupfs(struct p2k_mount *p2m, const char *vfsname, const char *devpath,
	struct ukfs_part *part, const char *mountpath, int mntflags,
	void *arg, size_t alen)
{
	char partpath[UKFS_DEVICE_MAXPATHLEN];
	char partbuf[UKFS_DEVICE_MAXSTR];
	char typebuf[PUFFS_TYPELEN];
	struct puffs_usermount *pu = p2m->p2m_pu;
	struct p2k_node *p2n_root;
	struct ukfs *ukfs = NULL;
	extern int puffs_fakecc;
	int rv = -1, sverrno;

	strcpy(typebuf, "p2k|");
	if (strcmp(vfsname, "puffs") == 0) { /* XXX */
		struct puffs_kargs *args = arg;
		strlcat(typebuf, args->pa_typename, sizeof(typebuf));
	} else {
		strlcat(typebuf, vfsname, sizeof(typebuf));
	}

	strlcpy(partpath, devpath, sizeof(partpath));
	if (ukfs_part_tostring(part, partbuf, sizeof(partbuf))) {
		strlcat(partpath, partbuf, sizeof(partpath));
	}
	puffs_setmntinfo(pu, partpath, typebuf);

	if (ukfs_init() == -1)
		goto out;

	if (part != ukfs_part_na)
		ukfs = ukfs_mount_disk(vfsname, devpath, part,
		    mountpath, mntflags, arg, alen);
	else
		ukfs = ukfs_mount(vfsname, devpath, mountpath, mntflags,
		    arg, alen);
	if (ukfs == NULL)
		goto out;

	ukfs_setspecific(ukfs, p2m);
	p2m->p2m_ukfs = ukfs;

	/*
	 * Detect tmpfs.  XXX: this is a kludge.  See inactive().
	 *
	 * In reality we'd want "does file system use anon objects
	 * for storage?".  But since tmpfs hides the anon object from
	 * the public interface, we can't actually detect it sanely.
	 * Therefore, use this kludge.
	 */
	p2m->p2m_imtmpfsman = strcmp(vfsname, MOUNT_TMPFS) == 0;

	p2m->p2m_rvp = ukfs_getrvp(ukfs);
	p2n_root = getp2n(p2m, p2m->p2m_rvp, true, NULL);
	puffs_setfhsize(pu, 0, PUFFS_FHFLAG_PASSTHROUGH);
	puffs_setstacksize(pu, PUFFS_STACKSIZE_MIN);
	puffs_fakecc = 1;
	puffs_set_prepost(pu, makelwp, clearlwp);
	puffs_set_errnotify(pu, p2k_errcatcher);

	puffs_setspecific(pu, ukfs);
	rv = puffs_mount(pu, mountpath, mntflags, p2n_root);

 out:
	if (rv) {
		sverrno = errno;
		puffs_cancel(pu, sverrno);
		if (p2m->p2m_ukfs)
			ukfs_release(p2m->p2m_ukfs, UKFS_RELFLAG_FORCE);
		p2m->p2m_ukfs = NULL;
		errno = sverrno;
	}

	return rv;
}
int
p2k_mainloop(struct p2k_mount *p2m)
{
	int rv;

	rv = puffs_mainloop(p2m->p2m_pu);
	puffs_exit(p2m->p2m_pu, 1);
	if (p2m->p2m_ukfs)
		ukfs_release(p2m->p2m_ukfs, UKFS_RELFLAG_FORCE);
	free(p2m);

	return rv;
}
int
p2k_run_fs(const char *vfsname, const char *devpath, const char *mountpath,
	int mntflags, void *arg, size_t alen, uint32_t puffs_flags)
{
	struct p2k_mount *p2m;
	int rv;

	p2m = p2k_init(puffs_flags);
	if (p2m == NULL)
		return -1;
	rv = setupfs(p2m, vfsname, devpath, ukfs_part_na, mountpath,
	    mntflags, arg, alen);
	if (rv == -1)
		return rv;
	return p2k_mainloop(p2m);
}
int
p2k_run_diskfs(const char *vfsname, const char *devpath, struct ukfs_part *part,
	const char *mountpath, int mntflags, void *arg, size_t alen,
	uint32_t puffs_flags)
{
	struct p2k_mount *p2m;
	int rv;

	p2m = p2k_init(puffs_flags);
	if (p2m == NULL)
		return -1;
	rv = setupfs(p2m, vfsname, devpath, part, mountpath, mntflags,
	    arg, alen);
	if (rv == -1)
		return rv;
	return p2k_mainloop(p2m);
}
int
p2k_setup_fs(struct p2k_mount *p2m, const char *vfsname, const char *devpath,
	const char *mountpath, int mntflags, void *arg, size_t alen)
{

	return setupfs(p2m, vfsname, devpath, ukfs_part_na, mountpath,
	    mntflags, arg, alen);
}
int
p2k_setup_diskfs(struct p2k_mount *p2m, const char *vfsname,
	const char *devpath, struct ukfs_part *part, const char *mountpath,
	int mntflags, void *arg, size_t alen)
{

	return setupfs(p2m, vfsname, devpath, part, mountpath, mntflags,
	    arg, alen);
}
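
/*
 * Typical use of the entry points above by a file server frontend
 * (illustrative sketch only; the "ffs", "/dev/wd0e" and "/mnt"
 * arguments are made up):
 *
 *	struct p2k_mount *p2m;
 *
 *	p2m = p2k_init(0);
 *	if (p2m && p2k_setup_fs(p2m, "ffs", "/dev/wd0e", "/mnt",
 *	    0, NULL, 0) == 0)
 *		p2k_mainloop(p2m);
 *
 * or, in one shot:
 *
 *	p2k_run_fs("ffs", "/dev/wd0e", "/mnt", 0, NULL, 0, 0);
 */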
int
p2k_fs_statvfs(struct puffs_usermount *pu, struct statvfs *sbp)
{
	struct mount *mp = ukfs_getmp(puffs_getspecific(pu));

	return rump_pub_vfs_statvfs(mp, sbp);
}
int
p2k_fs_unmount(struct puffs_usermount *pu, int flags)
{
	struct ukfs *fs = puffs_getspecific(pu);
	struct p2k_mount *p2m = ukfs_getspecific(fs);
	int rv = 0;

	rump_pub_lwp_release(rump_pub_lwp_curlwp()); /* ukfs & curlwp tricks */

	rump_pub_vp_rele(p2m->p2m_rvp);
	if (ukfs_release(fs, 0) != 0) {
		ukfs_release(fs, UKFS_RELFLAG_FORCE);
	}
	p2m->p2m_ukfs = NULL;

	rump_pub_lwp_alloc_and_switch(0, 0);
	return rv;
}
int
p2k_fs_sync(struct puffs_usermount *pu, int waitfor,
	const struct puffs_cred *pcr)
{
	struct mount *mp = ukfs_getmp(puffs_getspecific(pu));
	kauth_cred_t cred;
	int rv;

	cred = cred_create(pcr);
	rv = rump_pub_vfs_sync(mp, waitfor, (kauth_cred_t)cred);
	cred_destroy(cred);

	return rv;
}
int
p2k_fs_fhtonode(struct puffs_usermount *pu, void *fid, size_t fidsize,
	struct puffs_newinfo *pni)
{
	struct mount *mp = ukfs_getmp(puffs_getspecific(pu));
	struct p2k_mount *p2m = ukfs_getspecific(puffs_getspecific(pu));
	struct p2k_node *p2n;
	struct vnode *vp;
	enum vtype vtype;
	voff_t vsize;
	uint64_t rdev; /* XXX: allows running this on NetBSD 5.0 */
	int rv;

	rv = rump_pub_vfs_fhtovp(mp, fid, &vp);
	if (rv)
		return rv;
	RUMP_VOP_UNLOCK(vp, 0);

	p2n = getp2n(p2m, vp, false, NULL);
	if (p2n == NULL)
		return ENOMEM;

	puffs_newinfo_setcookie(pni, p2n);
	rump_pub_getvninfo(vp, &vtype, &vsize, (void *)&rdev);
	puffs_newinfo_setvtype(pni, vtype);
	puffs_newinfo_setsize(pni, vsize);
	/* LINTED: yea, it'll lose accuracy, but that's life */
	puffs_newinfo_setrdev(pni, rdev);

	return 0;
}
int
p2k_fs_nodetofh(struct puffs_usermount *pu, puffs_cookie_t cookie, void *fid,
	size_t *fidsize)
{
	struct vnode *vp = OPC2VP(cookie);

	return rump_pub_vfs_vptofh(vp, fid, fidsize);
}
int
p2k_node_lookup(struct puffs_usermount *pu, puffs_cookie_t opc,
	struct puffs_newinfo *pni, const struct puffs_cn *pcn)
{
	struct p2k_mount *p2m = ukfs_getspecific(puffs_getspecific(pu));
	struct p2k_node *p2n_dir = opc, *p2n;
	struct componentname *cn;
	struct vnode *dvp = p2n_dir->p2n_vp, *vp;
	enum vtype vtype;
	voff_t vsize;
	uint64_t rdev; /* XXX: uint64_t because of stack overwrite in compat */
	int rv;

	cn = makecn(pcn, 0);
	RUMP_VOP_LOCK(dvp, LK_EXCLUSIVE);
	rv = RUMP_VOP_LOOKUP(dvp, &vp, cn);
	RUMP_VOP_UNLOCK(dvp, 0);
	if (rump_pub_checksavecn(cn)) {
		/*
		 * XXX the rename lookup protocol is currently horribly
		 * broken.  We get 1) DELETE with SAVESTART 2) DELETE
		 * without SAVESTART 3) RENAME.  Hold on to this like
		 * it were the absolute truth for now.  However, do
		 * not sprinkle asserts based on this due to abovementioned
		 * brokenness -- some file system drivers might not
		 * even issue ABORT properly, so just free resources
		 * on the fly and hope for the best.  PR kern/42348
		 */
		if (pcn->pcn_flags & RUMP_NAMEI_INRENAME) {
			if (pcn->pcn_nameiop == RUMP_NAMEI_DELETE) {
				/* save path from the first lookup */
				if (pcn->pcn_flags & RUMP_NAMEI_SAVESTART) {
					if (p2n_dir->p2n_cn_ren_src)
						freecn(p2n_dir->p2n_cn_ren_src,
						    RUMPCN_FORCEFREE);
					p2n_dir->p2n_cn_ren_src = cn;
				} else {
					freecn(cn, RUMPCN_FORCEFREE);
				}
			} else {
				assert(pcn->pcn_nameiop == RUMP_NAMEI_RENAME);
				if (p2n_dir->p2n_cn_ren_targ)
					freecn(p2n_dir->p2n_cn_ren_targ,
					    RUMPCN_FORCEFREE);
				p2n_dir->p2n_cn_ren_targ = cn;
			}
		} else {
			assert(p2n_dir->p2n_cn == NULL);
			p2n_dir->p2n_cn = cn;
		}
	} else {
		freecn(cn, 0);
	}

	if (rv == EJUSTRETURN) {
		rv = ENOENT;
	}
	if (rv)
		return rv;

	RUMP_VOP_UNLOCK(vp, 0);

	p2n = getp2n(p2m, vp, false, NULL);
	if (p2n == NULL) {
		if (pcn->pcn_flags & RUMP_NAMEI_INRENAME) {
			if (pcn->pcn_nameiop == RUMP_NAMEI_DELETE) {
				p2n_dir->p2n_cn_ren_src = NULL;
			} else {
				p2n_dir->p2n_cn_ren_targ = NULL;
			}
		} else {
			p2n_dir->p2n_cn = NULL;
		}

		/* XXX: what in the world should happen with SAVESTART? */
		RUMP_VOP_ABORTOP(dvp, cn);
		return ENOMEM;
	}

	puffs_newinfo_setcookie(pni, p2n);
	rump_pub_getvninfo(vp, &vtype, &vsize, (void *)&rdev);
	puffs_newinfo_setvtype(pni, vtype);
	puffs_newinfo_setsize(pni, vsize);
	/* LINTED: yea, it'll lose accuracy, but that's life */
	puffs_newinfo_setrdev(pni, rdev);

	return 0;
}
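
/*
 * To illustrate the rename lookup dance described above, the three
 * lookups a rename currently generates are handled roughly as:
 *
 *	1) DELETE with SAVESTART    -> cn stashed in p2n_cn_ren_src
 *	2) DELETE without SAVESTART -> cn freed on the fly
 *	3) RENAME                   -> cn stashed in p2n_cn_ren_targ
 *
 * p2k_node_rename() below then consumes the two stashed componentnames.
 */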
#define VERS_TIMECHANGE 599000700

static bool
needcompat(void)
{

	/*LINTED*/
	return __NetBSD_Version__ < VERS_TIMECHANGE
	    && rump_pub_getversion() >= VERS_TIMECHANGE;
}

#define DOCOMPAT(va, va_compat)						\
do {									\
	if (needcompat()) {						\
		va_compat = rump_pub_vattr_init();			\
		rump_pub_vattr50_to_vattr(va, va_compat);		\
	} else {							\
		va_compat = __UNCONST(va);				\
	}								\
} while (/*CONSTCOND*/0)

#define UNDOCOMPAT(va_compat)						\
do {									\
	if (needcompat())						\
		rump_pub_vattr_free(va_compat);				\
} while (/*CONSTCOND*/0)
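
/*
 * Usage pattern for the compat wrappers (sketch; see do_makenode and
 * setattr below):
 *
 *	struct vattr *va_x;
 *
 *	DOCOMPAT(vap, va_x);			// wrap or alias the caller's vattr
 *	rv = RUMP_VOP_SETATTR(vp, va_x, cred);
 *	UNDOCOMPAT(va_x);			// free the wrapper, if one was made
 */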
static int
do_makenode(struct puffs_usermount *pu, struct p2k_node *p2n_dir,
	struct puffs_newinfo *pni, const struct puffs_cn *pcn,
	const struct vattr *vap, char *link_target,
	int (*makefn)(struct vnode *, struct vnode **, struct componentname *,
	    struct vattr *),
	int (*symfn)(struct vnode *, struct vnode **, struct componentname *,
	    struct vattr *, char *))
{
	struct p2k_mount *p2m = ukfs_getspecific(puffs_getspecific(pu));
	struct vnode *dvp = p2n_dir->p2n_vp;
	struct p2k_node *p2n;
	struct componentname *cn;
	struct vattr *va_x;
	struct vnode *vp;
	int rv;

	p2n = malloc(sizeof(*p2n));
	if (p2n == NULL)
		return ENOMEM;
	DOCOMPAT(vap, va_x);

	if (p2n_dir->p2n_cn) {
		cn = p2n_dir->p2n_cn;
		p2n_dir->p2n_cn = NULL;
	} else {
		cn = makecn(pcn, RUMP_NAMEI_HASBUF);
	}

	RUMP_VOP_LOCK(dvp, LK_EXCLUSIVE);
	rump_pub_vp_incref(dvp);
	if (makefn) {
		rv = makefn(dvp, &vp, cn, va_x);
	} else {
		rv = symfn(dvp, &vp, cn, va_x, link_target);
	}
	assert(RUMP_VOP_ISLOCKED(dvp) == 0);
	freecn(cn, 0);

	if (rv == 0) {
		RUMP_VOP_UNLOCK(vp, 0);
		p2n = getp2n(p2m, vp, true, p2n);
		puffs_newinfo_setcookie(pni, p2n);
	} else {
		free(p2n);
	}

	UNDOCOMPAT(va_x);

	return rv;
}
int
p2k_node_create(struct puffs_usermount *pu, puffs_cookie_t opc,
	struct puffs_newinfo *pni, const struct puffs_cn *pcn,
	const struct vattr *vap)
{

	return do_makenode(pu, opc, pni, pcn, vap, NULL, RUMP_VOP_CREATE, NULL);
}
int
p2k_node_mknod(struct puffs_usermount *pu, puffs_cookie_t opc,
	struct puffs_newinfo *pni, const struct puffs_cn *pcn,
	const struct vattr *vap)
{

	return do_makenode(pu, opc, pni, pcn, vap, NULL, RUMP_VOP_MKNOD, NULL);
}
int
p2k_node_open(struct puffs_usermount *pu, puffs_cookie_t opc, int mode,
	const struct puffs_cred *pcr)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	int rv;

	cred = cred_create(pcr);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rv = RUMP_VOP_OPEN(vp, mode, cred);
	RUMP_VOP_UNLOCK(vp, 0);
	cred_destroy(cred);

	return rv;
}
int
p2k_node_close(struct puffs_usermount *pu, puffs_cookie_t opc, int flags,
	const struct puffs_cred *pcr)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;

	cred = cred_create(pcr);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	RUMP_VOP_CLOSE(vp, flags, cred);
	RUMP_VOP_UNLOCK(vp, 0);
	cred_destroy(cred);

	return 0;
}
int
p2k_node_access(struct puffs_usermount *pu, puffs_cookie_t opc, int mode,
	const struct puffs_cred *pcr)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	int rv;

	cred = cred_create(pcr);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rv = RUMP_VOP_ACCESS(vp, mode, cred);
	RUMP_VOP_UNLOCK(vp, 0);
	cred_destroy(cred);

	return rv;
}
int
p2k_node_getattr(struct puffs_usermount *pu, puffs_cookie_t opc,
	struct vattr *vap, const struct puffs_cred *pcr)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	struct vattr *va_x;
	int rv;

	if (needcompat()) {
		va_x = rump_pub_vattr_init();
	} else {
		va_x = vap;
	}

	cred = cred_create(pcr);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rv = RUMP_VOP_GETATTR(vp, va_x, cred);
	RUMP_VOP_UNLOCK(vp, 0);
	cred_destroy(cred);

	if (needcompat()) {
		rump_pub_vattr_to_vattr50(va_x, vap);
		rump_pub_vattr_free(va_x);
	}

	return rv;
}
int
p2k_node_setattr(struct puffs_usermount *pu, puffs_cookie_t opc,
	const struct vattr *vap, const struct puffs_cred *pcr)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	struct vattr *va_x;
	int rv;

	DOCOMPAT(vap, va_x);

	cred = cred_create(pcr);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rv = RUMP_VOP_SETATTR(vp, va_x, cred);
	RUMP_VOP_UNLOCK(vp, 0);
	cred_destroy(cred);

	UNDOCOMPAT(va_x);

	return rv;
}
int
p2k_node_fsync(struct puffs_usermount *pu, puffs_cookie_t opc,
	const struct puffs_cred *pcr, int flags, off_t offlo, off_t offhi)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	int rv;

	cred = cred_create(pcr);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rv = RUMP_VOP_FSYNC(vp, cred, flags, offlo, offhi);
	RUMP_VOP_UNLOCK(vp, 0);
	cred_destroy(cred);

	return rv;
}
int
p2k_node_mmap(struct puffs_usermount *pu, puffs_cookie_t opc, vm_prot_t flags,
	const struct puffs_cred *pcr)
{
	kauth_cred_t cred;
	int rv;

	cred = cred_create(pcr);
	rv = RUMP_VOP_MMAP(OPC2VP(opc), flags, cred);
	cred_destroy(cred);

	return rv;
}
int
p2k_node_seek(struct puffs_usermount *pu, puffs_cookie_t opc,
	off_t oldoff, off_t newoff, const struct puffs_cred *pcr)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	int rv;

	cred = cred_create(pcr);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rv = RUMP_VOP_SEEK(vp, oldoff, newoff, cred);
	RUMP_VOP_UNLOCK(vp, 0);
	cred_destroy(cred);

	return rv;
}
int
p2k_node_abortop(struct puffs_usermount *pu, puffs_cookie_t opc,
	const struct puffs_cn *pcn)
{
	struct p2k_node *p2n_dir = opc;
	struct componentname *cnp;

	if ((cnp = p2n_dir->p2n_cn) != NULL) {
		freecn(cnp, RUMPCN_FORCEFREE);
		p2n_dir->p2n_cn = NULL;
	}
	if ((cnp = p2n_dir->p2n_cn_ren_src) != NULL) {
		freecn(cnp, RUMPCN_FORCEFREE);
		p2n_dir->p2n_cn_ren_src = NULL;
	}
	if ((cnp = p2n_dir->p2n_cn_ren_targ) != NULL) {
		freecn(cnp, RUMPCN_FORCEFREE);
		p2n_dir->p2n_cn_ren_targ = NULL;
	}

	return 0;
}
static int
do_nukenode(struct p2k_node *p2n_dir, struct p2k_node *p2n,
	const struct puffs_cn *pcn,
	int (*nukefn)(struct vnode *, struct vnode *, struct componentname *))
{
	struct vnode *dvp = p2n_dir->p2n_vp, *vp = p2n->p2n_vp;
	struct componentname *cn;
	int rv;

	if (p2n_dir->p2n_cn) {
		cn = p2n_dir->p2n_cn;
		p2n_dir->p2n_cn = NULL;
	} else {
		cn = makecn(pcn, RUMP_NAMEI_HASBUF);
	}

	RUMP_VOP_LOCK(dvp, LK_EXCLUSIVE);
	rump_pub_vp_incref(dvp);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rump_pub_vp_incref(vp);
	rv = nukefn(dvp, vp, cn);
	assert(RUMP_VOP_ISLOCKED(dvp) == 0);
	assert(RUMP_VOP_ISLOCKED(vp) == 0);
	freecn(cn, 0);

	return rv;
}
int
p2k_node_remove(struct puffs_usermount *pu, puffs_cookie_t opc,
	puffs_cookie_t targ, const struct puffs_cn *pcn)
{

	return do_nukenode(opc, targ, pcn, RUMP_VOP_REMOVE);
}
int
p2k_node_link(struct puffs_usermount *pu, puffs_cookie_t opc,
	puffs_cookie_t targ, const struct puffs_cn *pcn)
{
	struct vnode *dvp = OPC2VP(opc);
	struct p2k_node *p2n_dir = opc;
	struct componentname *cn;
	int rv;

	if (p2n_dir->p2n_cn) {
		cn = p2n_dir->p2n_cn;
		p2n_dir->p2n_cn = NULL;
	} else {
		cn = makecn(pcn, RUMP_NAMEI_HASBUF);
	}

	RUMP_VOP_LOCK(dvp, LK_EXCLUSIVE);
	rump_pub_vp_incref(dvp);
	rv = RUMP_VOP_LINK(dvp, OPC2VP(targ), cn);
	freecn(cn, 0);

	return rv;
}
int
p2k_node_rename(struct puffs_usermount *pu,
	puffs_cookie_t src_dir, puffs_cookie_t src,
	const struct puffs_cn *pcn_src,
	puffs_cookie_t targ_dir, puffs_cookie_t targ,
	const struct puffs_cn *pcn_targ)
{
	struct p2k_node *p2n_srcdir = src_dir, *p2n_targdir = targ_dir;
	struct vnode *dvp, *vp, *tdvp, *tvp = NULL;
	struct componentname *cn_src, *cn_targ;
	int rv;

	if (p2n_srcdir->p2n_cn_ren_src) {
		cn_src = p2n_srcdir->p2n_cn_ren_src;
		p2n_srcdir->p2n_cn_ren_src = NULL;
	} else {
		cn_src = makecn(pcn_src, RUMP_NAMEI_HASBUF);
	}
	if (p2n_targdir->p2n_cn_ren_targ) {
		cn_targ = p2n_targdir->p2n_cn_ren_targ;
		p2n_targdir->p2n_cn_ren_targ = NULL;
	} else {
		cn_targ = makecn(pcn_targ, RUMP_NAMEI_HASBUF);
	}

	dvp = OPC2VP(src_dir);
	vp = OPC2VP(src);
	tdvp = OPC2VP(targ_dir);
	if (targ)
		tvp = OPC2VP(targ);

	rump_pub_vp_incref(dvp);
	rump_pub_vp_incref(vp);
	RUMP_VOP_LOCK(tdvp, LK_EXCLUSIVE);
	rump_pub_vp_incref(tdvp);
	if (tvp) {
		RUMP_VOP_LOCK(tvp, LK_EXCLUSIVE);
		rump_pub_vp_incref(tvp);
	}
	rv = RUMP_VOP_RENAME(dvp, vp, cn_src, tdvp, tvp, cn_targ);
	assert(RUMP_VOP_ISLOCKED(tdvp) == 0);
	if (tvp) {
		assert(RUMP_VOP_ISLOCKED(tvp) == 0);
	}
	freecn(cn_src, RUMPCN_FORCEFREE);
	freecn(cn_targ, RUMPCN_FORCEFREE);

	return rv;
}
int
p2k_node_mkdir(struct puffs_usermount *pu, puffs_cookie_t opc,
	struct puffs_newinfo *pni, const struct puffs_cn *pcn,
	const struct vattr *vap)
{

	return do_makenode(pu, opc, pni, pcn, vap, NULL, RUMP_VOP_MKDIR, NULL);
}
int
p2k_node_rmdir(struct puffs_usermount *pu, puffs_cookie_t opc,
	puffs_cookie_t targ, const struct puffs_cn *pcn)
{

	return do_nukenode(opc, targ, pcn, RUMP_VOP_RMDIR);
}
int
p2k_node_symlink(struct puffs_usermount *pu, puffs_cookie_t opc,
	struct puffs_newinfo *pni, const struct puffs_cn *pcn,
	const struct vattr *vap, const char *link_target)
{

	return do_makenode(pu, opc, pni, pcn, vap,
	    __UNCONST(link_target), NULL, RUMP_VOP_SYMLINK);
}
int
p2k_node_readdir(struct puffs_usermount *pu, puffs_cookie_t opc,
	struct dirent *dent, off_t *readoff, size_t *reslen,
	const struct puffs_cred *pcr, int *eofflag,
	off_t *cookies, size_t *ncookies)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	struct uio *uio;
	off_t *vop_cookies;
	int vop_ncookies;
	int rv;

	cred = cred_create(pcr);
	uio = rump_pub_uio_setup(dent, *reslen, *readoff, RUMPUIO_READ);
	RUMP_VOP_LOCK(vp, LK_SHARED);
	if (cookies) {
		rv = RUMP_VOP_READDIR(vp, uio, cred, eofflag,
		    &vop_cookies, &vop_ncookies);
		memcpy(cookies, vop_cookies, vop_ncookies * sizeof(*cookies));
		*ncookies = vop_ncookies;
		free(vop_cookies);
	} else {
		rv = RUMP_VOP_READDIR(vp, uio, cred, eofflag, NULL, NULL);
	}
	RUMP_VOP_UNLOCK(vp, 0);
	if (rv == 0) {
		*reslen = rump_pub_uio_getresid(uio);
		*readoff = rump_pub_uio_getoff(uio);
	}
	rump_pub_uio_free(uio);
	cred_destroy(cred);

	return rv;
}
int
p2k_node_readlink(struct puffs_usermount *pu, puffs_cookie_t opc,
	const struct puffs_cred *pcr, char *linkname, size_t *linklen)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	struct uio *uio;
	int rv;

	cred = cred_create(pcr);
	uio = rump_pub_uio_setup(linkname, *linklen, 0, RUMPUIO_READ);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rv = RUMP_VOP_READLINK(vp, uio, cred);
	RUMP_VOP_UNLOCK(vp, 0);
	*linklen -= rump_pub_uio_free(uio);
	cred_destroy(cred);

	return rv;
}
int
p2k_node_read(struct puffs_usermount *pu, puffs_cookie_t opc,
	uint8_t *buf, off_t offset, size_t *resid,
	const struct puffs_cred *pcr, int ioflag)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	struct uio *uio;
	int rv;

	cred = cred_create(pcr);
	uio = rump_pub_uio_setup(buf, *resid, offset, RUMPUIO_READ);
	RUMP_VOP_LOCK(vp, LK_SHARED);
	rv = RUMP_VOP_READ(vp, uio, ioflag, cred);
	RUMP_VOP_UNLOCK(vp, 0);
	*resid = rump_pub_uio_free(uio);
	cred_destroy(cred);

	return rv;
}
int
p2k_node_write(struct puffs_usermount *pu, puffs_cookie_t opc,
	uint8_t *buf, off_t offset, size_t *resid,
	const struct puffs_cred *pcr, int ioflag)
{
	struct vnode *vp = OPC2VP(opc);
	kauth_cred_t cred;
	struct uio *uio;
	int rv;

	cred = cred_create(pcr);
	uio = rump_pub_uio_setup(buf, *resid, offset, RUMPUIO_WRITE);
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rv = RUMP_VOP_WRITE(vp, uio, ioflag, cred);
	RUMP_VOP_UNLOCK(vp, 0);
	*resid = rump_pub_uio_free(uio);
	cred_destroy(cred);

	return rv;
}
/* the kernel releases its last reference here */
int
p2k_node_inactive(struct puffs_usermount *pu, puffs_cookie_t opc)
{
	struct ukfs *fs = puffs_getspecific(pu);
	struct p2k_mount *p2m = ukfs_getspecific(fs);
	struct p2k_node *p2n = opc;
	struct vnode *vp = OPC2VP(opc);
	bool recycle = false;
	int rv;

	/*
	 * Flush all cached vnode pages from the rump kernel -- they
	 * are kept in puffs for all things that matter.  However,
	 * don't do this for tmpfs (vnodes are backed by an aobj), since that
	 * would cause us to clear the backing storage leaving us without
	 * a way to regain the data from "stable storage".
	 */
	if (!p2m->p2m_imtmpfsman) {
		rump_pub_vp_interlock(vp);
		RUMP_VOP_PUTPAGES(vp, 0, 0,
		    PGO_ALLPAGES|PGO_CLEANIT|PGO_FREE);
	}

	/*
	 * Ok, this is where we get nasty.  We pretend the vnode is
	 * inactive and already tell the file system that.  However,
	 * we are allowed to pretend it also grows a reference immediately
	 * after per vget(), so this does not do harm.  Cheap trick, but ...
	 *
	 * If the file system thinks the inode is done for, we release
	 * our reference and clear all knowledge of the vnode.  If,
	 * however, the inode is still active, we retain our reference
	 * until reclaim, since puffs might be flushing out some data.
	 */
	RUMP_VOP_LOCK(vp, LK_EXCLUSIVE);
	rv = RUMP_VOP_INACTIVE(vp, &recycle);
	if (recycle) {
		puffs_setback(puffs_cc_getcc(pu), PUFFS_SETBACK_NOREF_N1);
		rump_pub_vp_rele(p2n->p2n_vp);
		p2n->p2n_vp = NULL;
	}

	return rv;
}
int
p2k_node_reclaim(struct puffs_usermount *pu, puffs_cookie_t opc)
{
	struct p2k_node *p2n = opc;

	if (p2n->p2n_vp) {
		rump_pub_vp_rele(p2n->p2n_vp);
		p2n->p2n_vp = NULL;
	}
	freep2n(p2n);

	return 0;
}