/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/fs_subr.h>
#include <sys/contract.h>
#include <sys/contract_impl.h>
#include <sys/ctfs_impl.h>
#include <sys/policy.h>
47 * CTFS routines for the /system/contract/<type>/bundle vnode.
48 * CTFS routines for the /system/contract/<type>/pbundle vnode.
49 * CTFS routines for the /system/contract/<type>/<ctid>/events vnode.
55 * Called by the fop_open entry points to perform some common checks
56 * and set up the endpoint listener, if not already done.
59 ctfs_endpoint_open(ctfs_endpoint_t
*endpt
, ct_equeue_t
*q
, int flag
)
61 if ((flag
& ~FNONBLOCK
) != (FREAD
| FOFFMAX
))
64 mutex_enter(&endpt
->ctfs_endpt_lock
);
65 if ((endpt
->ctfs_endpt_flags
& CTFS_ENDPT_SETUP
) == 0) {
66 endpt
->ctfs_endpt_flags
|= CTFS_ENDPT_SETUP
;
68 endpt
->ctfs_endpt_flags
|= CTFS_ENDPT_NBLOCK
;
69 cte_add_listener(q
, &endpt
->ctfs_endpt_listener
);
71 mutex_exit(&endpt
->ctfs_endpt_lock
);
77 * ctfs_endpoint inactive
79 * Called by the fop_inactive entry points to perform common listener
83 ctfs_endpoint_inactive(ctfs_endpoint_t
*endpt
)
85 mutex_enter(&endpt
->ctfs_endpt_lock
);
86 if (endpt
->ctfs_endpt_flags
& CTFS_ENDPT_SETUP
) {
87 endpt
->ctfs_endpt_flags
= 0;
88 cte_remove_listener(&endpt
->ctfs_endpt_listener
);
90 mutex_exit(&endpt
->ctfs_endpt_lock
);
96 * Implements the common fop_ioctl handling for the event endpoints.
97 * rprivchk, if true, indicates that event receive requests should
98 * check the provided credentials. This distinction exists because
99 * contract endpoints perform their privilege checks at open-time, and
100 * process bundle queue listeners by definition may view all events
101 * their queues contain.
104 ctfs_endpoint_ioctl(ctfs_endpoint_t
*endpt
, int cmd
, intptr_t arg
, cred_t
*cr
,
105 zone_t
*zone
, int rprivchk
)
107 uint64_t id
, zuniqid
;
109 zuniqid
= zone
->zone_uniqid
;
113 cte_reset_listener(&endpt
->ctfs_endpt_listener
);
117 * We pass in NULL for the cred when reading from
118 * process bundle queues and contract queues because
119 * the privilege check was performed at open time.
121 return (cte_get_event(&endpt
->ctfs_endpt_listener
,
122 endpt
->ctfs_endpt_flags
& CTFS_ENDPT_NBLOCK
,
123 (void *)arg
, rprivchk
? cr
: NULL
, zuniqid
, 0));
125 return (cte_get_event(&endpt
->ctfs_endpt_listener
,
126 endpt
->ctfs_endpt_flags
& CTFS_ENDPT_NBLOCK
,
127 (void *)arg
, rprivchk
? cr
: NULL
, zuniqid
, 1));
129 if (copyin((void *)arg
, &id
, sizeof (uint64_t)))
131 return (cte_next_event(&endpt
->ctfs_endpt_listener
, id
));
133 return (cte_set_reliable(&endpt
->ctfs_endpt_listener
, cr
));
144 * Called by the fop_poll entry points.
147 ctfs_endpoint_poll(ctfs_endpoint_t
*endpt
, short events
, int anyyet
,
148 short *reventsp
, pollhead_t
**php
)
150 if ((events
& POLLIN
) && endpt
->ctfs_endpt_listener
.ctl_position
) {
155 *php
= &endpt
->ctfs_endpt_listener
.ctl_pollhead
;
164 * Creates and returns a new evnode.
167 ctfs_create_evnode(vnode_t
*pvp
)
170 ctfs_evnode_t
*evnode
;
171 ctfs_cdirnode_t
*cdirnode
= pvp
->v_data
;
173 vp
= gfs_file_create(sizeof (ctfs_evnode_t
), pvp
, &ctfs_ops_event
);
177 * We transitively have a hold on the contract through our
180 evnode
->ctfs_ev_contract
= cdirnode
->ctfs_cn_contract
;
186 * ctfs_ev_access - fop_access entry point
188 * You only get to access event files for contracts you or your
189 * effective user id owns, unless you have a privilege.
198 caller_context_t
*cct
)
200 ctfs_evnode_t
*evnode
= vp
->v_data
;
201 contract_t
*ct
= evnode
->ctfs_ev_contract
;
204 if (mode
& (VWRITE
| VEXEC
))
207 if (error
= secpolicy_contract_observer(cr
, ct
))
214 * ctfs_ev_open - fop_open entry point
216 * Performs the same privilege checks as ctfs_ev_access, and then calls
217 * ctfs_endpoint_open to perform the common endpoint initialization.
221 ctfs_ev_open(vnode_t
**vpp
, int flag
, cred_t
*cr
, caller_context_t
*cct
)
223 ctfs_evnode_t
*evnode
= (*vpp
)->v_data
;
224 contract_t
*ct
= evnode
->ctfs_ev_contract
;
227 if (error
= secpolicy_contract_observer(cr
, ct
))
231 * See comment in ctfs_bu_open.
233 return (ctfs_endpoint_open(&evnode
->ctfs_ev_listener
,
234 &evnode
->ctfs_ev_contract
->ct_events
, flag
));
238 * ctfs_ev_inactive - fop_inactive entry point
242 ctfs_ev_inactive(vnode_t
*vp
, cred_t
*cr
, caller_context_t
*ct
)
244 ctfs_evnode_t
*evnode
;
245 vnode_t
*pvp
= gfs_file_parent(vp
);
248 * We must destroy the endpoint before releasing the parent; otherwise
249 * we will try to destroy a contract with active listeners. To prevent
250 * this, we grab an extra hold on the parent.
253 if ((evnode
= gfs_file_inactive(vp
)) != NULL
) {
254 ctfs_endpoint_inactive(&evnode
->ctfs_ev_listener
);
255 kmem_free(evnode
, sizeof (ctfs_evnode_t
));
261 * ctfs_ev_getattr - fop_getattr entry point
270 caller_context_t
*ct
)
272 ctfs_evnode_t
*evnode
= vp
->v_data
;
278 vap
->va_ctime
= evnode
->ctfs_ev_contract
->ct_ctime
;
279 mutex_enter(&evnode
->ctfs_ev_contract
->ct_events
.ctq_lock
);
280 vap
->va_atime
= vap
->va_mtime
=
281 evnode
->ctfs_ev_contract
->ct_events
.ctq_atime
;
282 mutex_exit(&evnode
->ctfs_ev_contract
->ct_events
.ctq_lock
);
283 ctfs_common_getattr(vp
, vap
);
289 * ctfs_ev_ioctl - fop_ioctl entry point
300 caller_context_t
*ct
)
302 ctfs_evnode_t
*evnode
= vp
->v_data
;
304 return (ctfs_endpoint_ioctl(&evnode
->ctfs_ev_listener
, cmd
, arg
, cr
,
309 * ctfs_ev_poll - fop_poll entry point
319 caller_context_t
*ct
)
321 ctfs_evnode_t
*evnode
= vp
->v_data
;
323 return (ctfs_endpoint_poll(&evnode
->ctfs_ev_listener
, events
, anyyet
,
327 const struct vnodeops ctfs_ops_event
= {
328 .vnop_name
= "ctfs events file",
329 .vop_open
= ctfs_ev_open
,
330 .vop_close
= ctfs_close
,
331 .vop_ioctl
= ctfs_ev_ioctl
,
332 .vop_getattr
= ctfs_ev_getattr
,
333 .vop_access
= ctfs_ev_access
,
334 .vop_readdir
= fs_notdir
,
335 .vop_lookup
= fs_notdir
,
336 .vop_inactive
= ctfs_ev_inactive
,
337 .vop_poll
= ctfs_ev_poll
,
341 * ctfs_create_pbundle
343 * Creates and returns a bunode for a /system/contract/<type>/pbundle
347 ctfs_create_pbundle(vnode_t
*pvp
)
350 ctfs_bunode_t
*bundle
;
352 vp
= gfs_file_create(sizeof (ctfs_bunode_t
), pvp
, &ctfs_ops_bundle
);
354 bundle
->ctfs_bu_queue
=
355 contract_type_pbundle(ct_types
[gfs_file_index(pvp
)], curproc
);
363 * Creates and returns a bunode for a /system/contract/<type>/bundle
367 ctfs_create_bundle(vnode_t
*pvp
)
370 ctfs_bunode_t
*bundle
;
372 vp
= gfs_file_create(sizeof (ctfs_bunode_t
), pvp
, &ctfs_ops_bundle
);
374 bundle
->ctfs_bu_queue
=
375 contract_type_bundle(ct_types
[gfs_file_index(pvp
)]);
381 * ctfs_bu_open - fop_open entry point
385 ctfs_bu_open(vnode_t
**vpp
, int flag
, cred_t
*cr
, caller_context_t
*ct
)
387 ctfs_bunode_t
*bunode
= (*vpp
)->v_data
;
390 * This assumes we are only ever called immediately after a
391 * fop_lookup. We could clone ourselves here, but doing so
392 * would make /proc/pid/fd accesses less useful.
394 return (ctfs_endpoint_open(&bunode
->ctfs_bu_listener
,
395 bunode
->ctfs_bu_queue
, flag
));
399 * ctfs_bu_inactive - fop_inactive entry point
403 ctfs_bu_inactive(vnode_t
*vp
, cred_t
*cr
, caller_context_t
*ct
)
405 ctfs_bunode_t
*bunode
;
406 vnode_t
*pvp
= gfs_file_parent(vp
);
409 * See comments in ctfs_ev_inactive() above.
412 if ((bunode
= gfs_file_inactive(vp
)) != NULL
) {
413 ctfs_endpoint_inactive(&bunode
->ctfs_bu_listener
);
414 kmem_free(bunode
, sizeof (ctfs_bunode_t
));
420 * ctfs_bu_getattr - fop_getattr entry point
429 caller_context_t
*ct
)
431 ctfs_bunode_t
*bunode
= vp
->v_data
;
435 vap
->va_nodeid
= gfs_file_index(vp
);
438 vap
->va_ctime
.tv_sec
= vp
->v_vfsp
->vfs_mtime
;
439 vap
->va_ctime
.tv_nsec
= 0;
440 mutex_enter(&bunode
->ctfs_bu_queue
->ctq_lock
);
441 vap
->va_mtime
= vap
->va_atime
= bunode
->ctfs_bu_queue
->ctq_atime
;
442 mutex_exit(&bunode
->ctfs_bu_queue
->ctq_lock
);
443 ctfs_common_getattr(vp
, vap
);
449 * ctfs_bu_ioctl - fop_ioctl entry point
460 caller_context_t
*ct
)
462 ctfs_bunode_t
*bunode
= vp
->v_data
;
464 return (ctfs_endpoint_ioctl(&bunode
->ctfs_bu_listener
, cmd
, arg
, cr
,
465 VTOZONE(vp
), bunode
->ctfs_bu_queue
->ctq_listno
== CTEL_BUNDLE
));
469 * ctfs_bu_poll - fop_poll entry point
479 caller_context_t
*ct
)
481 ctfs_bunode_t
*bunode
= vp
->v_data
;
483 return (ctfs_endpoint_poll(&bunode
->ctfs_bu_listener
, events
, anyyet
,
487 const struct vnodeops ctfs_ops_bundle
= {
488 .vnop_name
= "ctfs bundle file",
489 .vop_open
= ctfs_bu_open
,
490 .vop_close
= ctfs_close
,
491 .vop_ioctl
= ctfs_bu_ioctl
,
492 .vop_getattr
= ctfs_bu_getattr
,
493 .vop_access
= ctfs_access_readonly
,
494 .vop_readdir
= fs_notdir
,
495 .vop_lookup
= fs_notdir
,
496 .vop_inactive
= ctfs_bu_inactive
,
497 .vop_poll
= ctfs_bu_poll
,