1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 * Copyright (c) 2016 by Delphix. All rights reserved.
29 * rc_node.c - In-memory SCF object management
31 * This layer manages the in-memory cache (the Repository Cache) of SCF
32 * data. Read requests are usually satisfied from here, but may require
33 * load calls to the "object" layer. Modify requests always write-through
34 * to the object layer.
36 * SCF data comprises scopes, services, instances, snapshots, snaplevels,
37 * property groups, properties, and property values. All but the last are
38 * known here as "entities" and are represented by rc_node_t data
39 * structures. (Property values are kept in the rn_values member of the
40 * respective property, not as separate objects.) All entities besides
41 * the "localhost" scope have some entity as a parent, and therefore form
42 * a tree.
44 * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
45 * the "localhost" scope. The tree is filled in from the database on-demand
46 * by rc_node_fill_children().
48 * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
49 * lookup.
51 * Multiple threads may service client requests, so access to each
52 * rc_node_t is synchronized by its rn_lock member. Some fields are
53 * protected by bits in the rn_flags field instead, to support operations
54 * which need to drop rn_lock, for example to respect locking order. Such
55 * flags should be manipulated with the rc_node_{hold,rele}_flag()
56 * functions.
58 * We track references to nodes to tell when they can be free()d. rn_refs
59 * should be incremented with rc_node_hold() on the creation of client
60 * references (rc_node_ptr_t's and rc_iter_t's). rn_erefs ("ephemeral
61 * references") should be incremented when a pointer is read into a local
62 * variable of a thread, with rc_node_hold_ephemeral_locked(). This
63 * hasn't been fully implemented, however, so rc_node_rele() tolerates
64 * rn_erefs being 0. Some code which predates rn_erefs counts ephemeral
65 * references in rn_refs. Other references are tracked by the
66 * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
67 * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
69 * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
70 * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
71 * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
72 * etc.). Once you have locked an rc_node_t you must check its rn_flags for
73 * RC_NODE_DEAD before you can use it. This is usually done with the
74 * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
75 * functions & RC_NODE_*() macros), which fail if the object has died.
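*
* As an illustrative sketch (hypothetical caller, not a verbatim excerpt),
* the canonical dereference pattern using the helpers defined below looks
* roughly like this:
*
*	(void) pthread_mutex_lock(&np->rn_lock);
*	if (np->rn_flags & RC_NODE_DEAD) {
*		(void) pthread_mutex_unlock(&np->rn_lock);
*		return (REP_PROTOCOL_FAIL_DELETED);
*	}
*	rc_node_hold_locked(np);	(take a client reference)
*	(void) pthread_mutex_unlock(&np->rn_lock);
*	... use np ...
*	rc_node_rele(np);		(drop the reference)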
77 * When a transactional node (property group or snapshot) is updated,
78 * a new node takes the place of the old node in the global hash and the
79 * old node is hung off of the rn_former list of the new node. At the
80 * same time, all of its children have their rn_parent_ref pointer set,
81 * and any holds they have are reflected in the old node's rn_other_refs
82 * count. This is automatically kept up to date until the final reference
83 * to the subgraph is dropped, at which point the node is unrefed and
84 * destroyed, along with all of its children.
86 * Because name service lookups may take a long time and, more importantly
87 * may trigger additional accesses to the repository, perm_granted() must be
88 * called without holding any locks.
90 * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
91 * call via rc_node_setup_iter() to populate the rn_children uu_list of the
92 * rc_node_t * in question and a call to uu_list_walk_start() on that list. For
93 * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
94 * appropriate child.
96 * An ITER_START for an ENTITY_VALUE makes sure the node has its values
97 * filled, and sets up the iterator. An ITER_READ_VALUE just copies out
98 * the proper values and updates the offset information.
100 * To allow aliases, snapshots are implemented with a level of indirection.
101 * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
102 * snapshot.c which contains the authoritative snaplevel information. The
103 * snapid is "assigned" by rc_attach_snapshot().
105 * We provide the client layer with rc_node_ptr_t's to reference objects.
106 * Objects referred to by them are automatically held & released by
107 * rc_node_assign() & rc_node_clear(). The RC_NODE_PTR_*() macros are used at
108 * client.c entry points to read the pointers. They fetch the pointer to the
109 * object, return (from the function) if it is dead, and lock, hold, or hold
110 * a flag of the object.
114 * Permission checking is authorization-based: some operations may only
115 * proceed if the user has been assigned at least one of a set of
116 * authorization strings. The set of enabling authorizations depends on the
117 * operation and the target object. The set of authorizations assigned to
118 * a user is determined by an algorithm defined in libsecdb.
120 * The fastest way to decide whether the two sets intersect is by entering the
121 * strings into a hash table and detecting collisions, which takes linear time
122 * in the total size of the sets. The exception is the authorization
123 * patterns that may be assigned to users; without advanced pattern-matching
124 * algorithms, each such pattern costs O(n) in the number of enabling
125 * authorizations.
127 * We can achieve some practical speed-ups by noting that if we enter all of
128 * the authorizations from one of the sets into the hash table we can merely
129 * check the elements of the second set for existence without adding them.
130 * This reduces memory requirements and hash table clutter. The enabling set
131 * is well suited for this because it is internal to configd (for now, at
132 * least). Combine this with short-circuiting and we can even minimize the
133 * number of queries to the security databases (user_attr & prof_attr).
135 * To force this usage onto clients we provide functions for adding
136 * authorizations to the enabling set of a permission context structure
137 * (perm_add_*()) and one to decide whether the user associated with the
138 * current door call client possesses any of them (perm_granted()).
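*
* A minimal, hedged usage sketch (hypothetical caller, error handling
* elided) built from the helpers defined later in this file:
*
*	permcheck_t *pcp = pc_create();
*
*	if (pcp == NULL)
*		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
*	(void) perm_add_enabling(pcp, AUTH_MODIFY);
*	(void) perm_add_enabling(pcp, AUTH_MANAGE);
*	if (perm_granted(pcp) == PERM_GRANTED) {
*		... proceed; pcp->pc_auth_string names the matching auth ...
*	} else {
*		... deny; pcp->pc_auth_string names the required auth ...
*	}
*	pc_free(pcp);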
140 * At some point, a generic version of this should move to libsecdb.
142 * While entering the enabling strings into the hash table, we keep track
143 * of which is the most specific for use in generating auditing events.
144 * See the "Collecting the Authorization String" section of the "SMF Audit
145 * Events" block comment below.
149 * Composition is the combination of sets of properties. The sets are ordered
150 * and properties in higher sets obscure properties of the same name in lower
151 * sets. Here we present a composed view of an instance's properties as the
152 * union of its properties and its service's properties. Similarly the
153 * properties of snaplevels are combined to form a composed view of the
154 * properties of a snapshot (which should match the composed view of the
155 * properties of the instance when the snapshot was taken).
157 * In terms of the client interface, the client may request that a property
158 * group iterator for an instance or snapshot be composed. Property groups
159 * traversed by such an iterator may not have the target entity as a parent.
160 * Similarly, the properties traversed by a property iterator for those
161 * property groups may not have the property groups iterated as parents.
163 * Implementation requires that iterators for instances and snapshots be
164 * composition-savvy, and that we have a "composed property group" entity
165 * which represents the composition of a number of property groups. Iteration
166 * over "composed property groups" yields properties which may have different
167 * parents, but for all other operations a composed property group behaves
168 * like the top-most property group it represents.
170 * The implementation is based on the rn_cchain[] array of rc_node_t pointers
171 * in rc_node_t. For instances, the pointers point to the instance and its
172 * parent service. For snapshots they point to the child snaplevels, and for
173 * composed property groups they point to property groups. A composed
174 * iterator carries an index into rn_cchain[]. Thus most of the magic ends up
175 * in the rc_iter_*() code.
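*
* As a hedged illustration (not actual code from this file), a composed
* property group iterator for an instance conceptually walks rn_cchain[]
* from the most specific level to the least specific:
*
*	for (i = 0; i < COMPOSITION_DEPTH; i++) {
*		cur = np->rn_cchain[i];	(the instance, then its service)
*		if (cur == NULL)
*			continue;
*		... yield property groups of cur whose names were not
*		... already seen at an earlier (more specific) index ...
*	}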
178 * SMF Audit Events:
179 * ================
181 * To maintain security, SMF generates audit events whenever
182 * privileged operations are attempted. See the System Administration
183 * Guide:Security Services answerbook for a discussion of the Solaris
184 * audit system.
186 * The SMF audit event codes are defined in adt_event.h by symbols
187 * starting with ADT_smf_ and are described in audit_event.txt. The
188 * audit record structures are defined in the SMF section of adt.xml.
189 * adt.xml is used to automatically generate adt_event.h which
190 * contains the definitions that we code to in this file. For the
191 * most part the audit events map closely to actions that you would
192 * perform with svcadm or svccfg, but there are some special cases
193 * which we'll discuss later.
195 * The software associated with SMF audit events falls into three
196 * categories:
197 * - collecting information to be written to the audit
198 * records
199 * - using the adt_* functions in
200 * usr/src/lib/libbsm/common/adt.c to generate the audit
201 * records.
202 * - handling special cases
204 * Collecting Information:
205 * ----------------------
207 * Almost all of the audit events require the FMRI of the affected
208 * object and the authorization string that was used. The one
209 * exception is ADT_smf_annotation which we'll talk about later.
211 * Collecting the FMRI:
213 * The rc_node structure has a member called rn_fmri which points to
214 * its FMRI. This is initialized by a call to rc_node_build_fmri()
215 * when the node's parent is established. The reason for doing it
216 * at this time is that a node's FMRI is basically the concatenation
217 * of the parent's FMRI and the node's name with the appropriate
218 * decoration. rc_node_build_fmri() does this concatenation and
219 * decorating. It is called from rc_node_link_child() and
220 * rc_node_relink_child() where a node is linked to its parent.
222 * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
223 * when it is needed. It returns rn_fmri if it is set. If the node
224 * is at the top level, however, rn_fmri won't be set because it was
225 * never linked to a parent. In this case,
226 * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
227 * its node type and its name, rn_name.
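*
* For example (hypothetical names), when a property group "start" is
* linked under an instance whose FMRI is svc:/system/foo:default,
* rc_node_build_fmri() produces:
*
*	parent rn_fmri:	svc:/system/foo:default
*	child rn_fmri:	svc:/system/foo:default/:properties/start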
229 * Collecting the Authorization String:
231 * Naturally, the authorization string is captured during the
232 * authorization checking process. Acceptable authorization strings
233 * are added to a permcheck_t hash table as noted in the section on
234 * permission checking above. Once all entries have been added to the
235 * hash table, perm_granted() is called. If the client is authorized,
236 * perm_granted() returns with pc_auth_string of the permcheck_t
237 * structure pointing to the authorization string.
239 * This works fine if the client is authorized, but what happens if
240 * the client is not authorized? We need to report the required
241 * authorization string. This is the authorization that would have
242 * been used if permission had been granted. perm_granted() will
243 * find no match, so it needs to decide which string in the hash
244 * table to use as the required authorization string. It needs to do
245 * this, because configd is still going to generate an event. A
246 * design decision was made to use the most specific authorization
247 * in the hash table. The pc_auth_type enum designates the
248 * specificity of an authorization string. For example, an
249 * authorization string that is declared in an instance PG is more
250 * specific than one that is declared in a service PG.
252 * The pc_add() function keeps track of the most specific
253 * authorization in the hash table. It does this using the
254 * pc_specific and pc_specific_type members of the permcheck
255 * structure. pc_add() updates these members whenever a more
256 * specific authorization string is added to the hash table. Thus, if
257 * an authorization match is not found, perm_granted() will return
258 * with pc_auth_string in the permcheck_t pointing to the string that
259 * is referenced by pc_specific.
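*
* For instance (a hypothetical sequence of calls), if both of the
* following strings are added:
*
*	pc_add(pcp, "solaris.smf.manage.foo", PC_AUTH_SVC);
*	pc_add(pcp, "solaris.smf.manage.foo.bar", PC_AUTH_INST);
*
* then pc_specific ends up pointing at the second string, since
* PC_AUTH_INST is more specific than PC_AUTH_SVC, and that is the string
* perm_granted() reports when no match is found.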
261 * Generating the Audit Events:
262 * ===========================
264 * As the functions in this file process requests for clients of
265 * configd, they gather the information that is required for an audit
266 * event. Eventually, the request processing gets to the point where
267 * the authorization is rejected or to the point where the requested
268 * action was attempted. At these two points smf_audit_event() is
269 * called.
271 * smf_audit_event() takes 4 parameters:
272 * - the event ID which is one of the ADT_smf_* symbols from
273 * adt_event.h.
274 * - status to pass to adt_put_event()
275 * - return value to pass to adt_put_event()
276 * - the event data (see audit_event_data structure)
278 * All interactions with the auditing software require an audit
279 * session. We use one audit session per configd client. We keep
280 * track of the audit session in the repcache_client structure.
281 * smf_audit_event() calls get_audit_session() to get the session
282 * pointer.
284 * smf_audit_event() then calls adt_alloc_event() to allocate an
285 * adt_event_data union which is defined in adt_event.h, copies the
286 * data into the appropriate members of the union and calls
287 * adt_put_event() to generate the event.
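*
* A hedged sketch of a typical call (the event ID and fields shown are
* illustrative, not taken from a specific code path):
*
*	audit_event_data_t edata;
*
*	(void) memset(&edata, 0, sizeof (edata));
*	edata.ed_fmri = fmri;		(affected object)
*	edata.ed_auth = auth_string;	(authorization used)
*	smf_audit_event(ADT_smf_enable, ADT_SUCCESS, ADT_SUCCESS, &edata);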
289 * Special Cases:
290 * =============
292 * There are three major types of special cases:
294 * - gathering event information for each action in a
295 * transaction
296 * - Higher level events represented by special property
297 * group/property name combinations. Many of these are
298 * restarter actions.
299 * - ADT_smf_annotation event
301 * Processing Transaction Actions:
302 * ------------------------------
304 * A transaction can contain multiple actions to modify, create or
305 * delete one or more properties. We need to capture information so
306 * that we can generate an event for each property action. The
307 * transaction information is stored in a tx_commit_data_t, and
308 * object.c provides accessor functions to retrieve data from this
309 * structure. rc_tx_commit() obtains a tx_commit_data_t by calling
310 * tx_commit_data_new() and passes this to object_tx_commit() to
311 * commit the transaction. Then we call generate_property_events() to
312 * generate an audit event for each property action.
314 * Special Properties:
315 * ------------------
317 * There are combinations of property group/property name that are special.
318 * They are special because they have specific meaning to startd. startd
319 * interprets them in a service-independent fashion.
320 * restarter_actions/refresh and general/enabled are two examples of these.
321 * A special event is generated for these properties in addition to the
322 * regular property event described in the previous section. The special
323 * properties are declared as an array of audit_special_prop_item
324 * structures at special_props_list in rc_node.c.
326 * In the previous section, we mentioned the
327 * generate_property_events() function that generates an event for
328 * every property action. Before generating the event,
329 * generate_property_events() calls special_property_event().
330 * special_property_event() checks to see if the action involves a
331 * special property. If it does, it generates a special audit
332 * event.
334 * ADT_smf_annotation event:
335 * ------------------------
337 * This is a special event unlike any other. It allows the svccfg
338 * program to store an annotation in the event log before a series
339 * of transactions is processed. It is used with the import and
340 * apply svccfg commands. svccfg uses the rep_protocol_annotation
341 * message to pass the operation (import or apply) and the file name
342 * to configd. The set_annotation() function in client.c stores
343 * these away in the repcache_client structure. The address of
344 * this structure is saved in the thread_info structure.
346 * Before it generates any events, smf_audit_event() calls
347 * smf_annotation_event(). smf_annotation_event() calls
348 * client_annotation_needed() which is defined in client.c. If an
349 * annotation is needed client_annotation_needed() returns the
350 * operation and filename strings that were saved from the
351 * rep_protocol_annotation message. smf_annotation_event() then
352 * generates the ADT_smf_annotation event.
355 #include <assert.h>
356 #include <atomic.h>
357 #include <bsm/adt_event.h>
358 #include <errno.h>
359 #include <libuutil.h>
360 #include <libscf.h>
361 #include <libscf_priv.h>
362 #include <pthread.h>
363 #include <pwd.h>
364 #include <stdio.h>
365 #include <stdlib.h>
366 #include <strings.h>
367 #include <sys/types.h>
368 #include <syslog.h>
369 #include <unistd.h>
370 #include <secdb.h>
372 #include "configd.h"
374 #define AUTH_PREFIX "solaris.smf."
375 #define AUTH_MANAGE AUTH_PREFIX "manage"
376 #define AUTH_MODIFY AUTH_PREFIX "modify"
377 #define AUTH_MODIFY_PREFIX AUTH_MODIFY "."
378 #define AUTH_PG_ACTIONS SCF_PG_RESTARTER_ACTIONS
379 #define AUTH_PG_ACTIONS_TYPE SCF_PG_RESTARTER_ACTIONS_TYPE
380 #define AUTH_PG_GENERAL SCF_PG_GENERAL
381 #define AUTH_PG_GENERAL_TYPE SCF_PG_GENERAL_TYPE
382 #define AUTH_PG_GENERAL_OVR SCF_PG_GENERAL_OVR
383 #define AUTH_PG_GENERAL_OVR_TYPE SCF_PG_GENERAL_OVR_TYPE
384 #define AUTH_PROP_ACTION "action_authorization"
385 #define AUTH_PROP_ENABLED "enabled"
386 #define AUTH_PROP_MODIFY "modify_authorization"
387 #define AUTH_PROP_VALUE "value_authorization"
388 #define AUTH_PROP_READ "read_authorization"
390 #define MAX_VALID_CHILDREN 3
393 * The ADT_smf_* symbols may not be defined on the build machine. Because
394 * of this, we do not want to compile the _smf_audit_event() function when
395 * doing native builds.
397 #ifdef NATIVE_BUILD
398 #define smf_audit_event(i, s, r, d)
399 #else
400 #define smf_audit_event(i, s, r, d) _smf_audit_event(i, s, r, d)
401 #endif /* NATIVE_BUILD */
403 typedef struct rc_type_info {
404 uint32_t rt_type; /* matches array index */
405 uint32_t rt_num_ids;
406 uint32_t rt_name_flags;
407 uint32_t rt_valid_children[MAX_VALID_CHILDREN];
408 } rc_type_info_t;
410 #define RT_NO_NAME -1U
412 static rc_type_info_t rc_types[] = {
413 {REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
414 {REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
415 {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
416 {REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
417 {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
418 {REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
419 {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
420 {REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
421 {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
422 {REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
423 {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
424 {REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
425 {REP_PROTOCOL_ENTITY_PROPERTY}},
426 {REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
427 {REP_PROTOCOL_ENTITY_PROPERTY}},
428 {REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
429 {-1UL}
431 #define NUM_TYPES ((sizeof (rc_types) / sizeof (*rc_types)))
433 /* Element of a permcheck_t hash table. */
434 struct pc_elt {
435 struct pc_elt *pce_next;
436 char pce_auth[1];
440 * If an authorization fails, we must decide which of the elements in the
441 * permcheck hash table to use in the audit event. That is to say of all
442 * the strings in the hash table, we must choose one and use it in the audit
443 * event. It is desirable to use the most specific string in the audit
444 * event.
446 * The pc_auth_type specifies the types (sources) of authorization
447 * strings. The enum is ordered in increasing specificity.
449 typedef enum pc_auth_type {
450 PC_AUTH_NONE = 0, /* no auth string available. */
451 PC_AUTH_SMF, /* strings coded into SMF. */
452 PC_AUTH_SVC, /* strings specified in PG of a service. */
453 PC_AUTH_INST /* strings specified in PG of an instance. */
454 } pc_auth_type_t;
457 * The following enum is used to represent the results of the checks to see
458 * if the client has the appropriate permissions to perform an action.
460 typedef enum perm_status {
461 PERM_DENIED = 0, /* Permission denied. */
462 PERM_GRANTED, /* Client has authorizations. */
463 PERM_GONE, /* Door client went away. */
464 PERM_FAIL /* Generic failure. e.g. resources */
465 } perm_status_t;
467 /* An authorization set hash table. */
468 typedef struct {
469 struct pc_elt **pc_buckets;
470 uint_t pc_bnum; /* number of buckets */
471 uint_t pc_enum; /* number of elements */
472 struct pc_elt *pc_specific; /* most specific element */
473 pc_auth_type_t pc_specific_type; /* type of pc_specific */
474 char *pc_auth_string; /* authorization string */
475 /* for audit events */
476 } permcheck_t;
479 * Structure for holding audit event data. Not all events use all members
480 * of the structure.
482 typedef struct audit_event_data {
483 char *ed_auth; /* authorization string. */
484 char *ed_fmri; /* affected FMRI. */
485 char *ed_snapname; /* name of snapshot. */
486 char *ed_old_fmri; /* old fmri in attach case. */
487 char *ed_old_name; /* old snapshot in attach case. */
488 char *ed_type; /* prop. group or prop. type. */
489 char *ed_prop_value; /* property value. */
490 } audit_event_data_t;
493 * Pointer to function to do special processing to get audit event ID.
494 * Audit event IDs are defined in /usr/include/bsm/adt_event.h. Function
495 * returns 0 if ID successfully retrieved. Otherwise it returns -1.
497 typedef int (*spc_getid_fn_t)(tx_commit_data_t *, size_t, const char *,
498 au_event_t *);
499 static int general_enable_id(tx_commit_data_t *, size_t, const char *,
500 au_event_t *);
502 static uu_list_pool_t *rc_children_pool;
503 static uu_list_pool_t *rc_pg_notify_pool;
504 static uu_list_pool_t *rc_notify_pool;
505 static uu_list_pool_t *rc_notify_info_pool;
507 static rc_node_t *rc_scope;
509 static pthread_mutex_t rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
510 static pthread_cond_t rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
511 static uint_t rc_notify_in_use; /* blocks removals */
514 * Some combinations of property group/property name require a special
515 * audit event to be generated when there is a change.
516 * audit_special_prop_item_t is used to specify these special cases. The
517 * special_props_list array defines a list of these special properties.
519 typedef struct audit_special_prop_item {
520 const char *api_pg_name; /* property group name. */
521 const char *api_prop_name; /* property name. */
522 au_event_t api_event_id; /* event id or 0. */
523 spc_getid_fn_t api_event_func; /* function to get event id. */
524 } audit_special_prop_item_t;
527 * Native builds are done using the build machine's standard include
528 * files. These files may not yet have the definitions for the ADT_smf_*
529 * symbols. Thus, we do not compile this table when doing native builds.
531 #ifndef NATIVE_BUILD
533 * The following special_props_list array specifies property group/property
534 * name combinations that have specific meaning to startd. A special event
535 * is generated for these combinations in addition to the regular property
536 * event.
538 * At run time this array gets sorted. See the call to qsort(3C) in
539 * rc_node_init(). The array is sorted, so that bsearch(3C) can be used
540 * to do lookups.
542 static audit_special_prop_item_t special_props_list[] = {
543 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADED, ADT_smf_degrade,
544 NULL},
545 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADE_IMMEDIATE,
546 ADT_smf_immediate_degrade, NULL},
547 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_OFF, ADT_smf_clear, NULL},
548 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON,
549 ADT_smf_maintenance, NULL},
550 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMEDIATE,
551 ADT_smf_immediate_maintenance, NULL},
552 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMTEMP,
553 ADT_smf_immtmp_maintenance, NULL},
554 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_TEMPORARY,
555 ADT_smf_tmp_maintenance, NULL},
556 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_REFRESH, ADT_smf_refresh, NULL},
557 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTART, ADT_smf_restart, NULL},
558 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTORE, ADT_smf_clear, NULL},
559 {SCF_PG_OPTIONS, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
560 {SCF_PG_OPTIONS_OVR, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
561 {SCF_PG_GENERAL, SCF_PROPERTY_ENABLED, 0, general_enable_id},
562 {SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 0, general_enable_id}
564 #define SPECIAL_PROP_COUNT (sizeof (special_props_list) /\
565 sizeof (audit_special_prop_item_t))
566 #endif /* NATIVE_BUILD */
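/*
 * Illustrative sketch (the comparator name is hypothetical): once
 * rc_node_init() has sorted special_props_list, a lookup for a property
 * group/property pair can use bsearch(3C) with a comparator that orders
 * by pg name and then by property name:
 *
 *	audit_special_prop_item_t key = { pg_name, prop_name, 0, NULL };
 *	audit_special_prop_item_t *item;
 *
 *	item = bsearch(&key, special_props_list, SPECIAL_PROP_COUNT,
 *	    sizeof (audit_special_prop_item_t), prop_cmp);
 */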
569 * We support an arbitrary number of clients interested in events for certain
570 * types of changes. Each client is represented by an rc_notify_info_t, and
571 * all clients are chained onto the rc_notify_info_list.
573 * The rc_notify_list is the global notification list. Each entry is of
574 * type rc_notify_t, which is embedded in one of three other structures:
576 * rc_node_t property group update notification
577 * rc_notify_delete_t object deletion notification
578 * rc_notify_info_t notification clients
580 * The type of object is determined by which pointer in the rc_notify_t is
581 * non-NULL.
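*
* That is, roughly (a sketch mirroring the checks that
* rc_notify_info_interested() performs below):
*
*	if (np->rcn_delete != NULL)
*		... object deletion notification ...
*	else if (np->rcn_node != NULL)
*		... property group update notification ...
*	else
*		... notification client (np->rcn_info) ...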
583 * New notifications and clients are added to the end of the list.
584 * Notifications that no one is interested in are never added to the list.
586 * Clients use their position in the list to track which notifications they
587 * have not yet reported. As they process notifications, they move forward
588 * in the list past them. There is always a client at the beginning of the
589 * list -- as it moves past notifications, it removes them from the list and
590 * cleans them up.
592 * The rc_pg_notify_lock protects all notification state. The rc_pg_notify_cv
593 * is used for global signalling, and each client has a cv which it waits for
594 * events of interest on.
596 * rc_notify_in_use is used to protect rc_notify_list from deletions when
597 * the rc_pg_notify_lock is dropped. Specifically, rc_notify_info_wait()
598 * must drop the lock to call rc_node_assign(), and then it reacquires the
599 * lock. Deletions from rc_notify_list during this period are not
600 * allowed. Insertions do not matter, because they are always done at the
601 * end of the list.
603 static uu_list_t *rc_notify_info_list;
604 static uu_list_t *rc_notify_list;
606 #define HASH_SIZE 512
607 #define HASH_MASK (HASH_SIZE - 1)
609 #pragma align 64(cache_hash)
610 static cache_bucket_t cache_hash[HASH_SIZE];
612 #define CACHE_BUCKET(h) (&cache_hash[(h) & HASH_MASK])
615 static void rc_node_no_client_refs(rc_node_t *np);
618 static uint32_t
619 rc_node_hash(rc_node_lookup_t *lp)
621 uint32_t type = lp->rl_type;
622 uint32_t backend = lp->rl_backend;
623 uint32_t mainid = lp->rl_main_id;
624 uint32_t *ids = lp->rl_ids;
626 rc_type_info_t *tp = &rc_types[type];
627 uint32_t num_ids;
628 uint32_t left;
629 uint32_t hash;
631 assert(backend == BACKEND_TYPE_NORMAL ||
632 backend == BACKEND_TYPE_NONPERSIST);
634 assert(type > 0 && type < NUM_TYPES);
635 num_ids = tp->rt_num_ids;
637 left = MAX_IDS - num_ids;
638 assert(num_ids <= MAX_IDS);
640 hash = type * 7 + mainid * 5 + backend;
642 while (num_ids-- > 0)
643 hash = hash * 11 + *ids++ * 7;
646 * the rest should be zeroed
648 while (left-- > 0)
649 assert(*ids++ == 0);
651 return (hash);
654 static int
655 rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
657 rc_node_lookup_t *r = &np->rn_id;
658 rc_type_info_t *tp;
659 uint32_t type;
660 uint32_t num_ids;
662 if (r->rl_main_id != l->rl_main_id)
663 return (0);
665 type = r->rl_type;
666 if (type != l->rl_type)
667 return (0);
669 assert(type > 0 && type < NUM_TYPES);
671 tp = &rc_types[r->rl_type];
672 num_ids = tp->rt_num_ids;
674 assert(num_ids <= MAX_IDS);
675 while (num_ids-- > 0)
676 if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
677 return (0);
679 return (1);
683 * Register an ephemeral reference to np. This should be done while both
684 * the persistent reference from which the np pointer was read is locked
685 * and np itself is locked. This guarantees that another thread which
686 * thinks it has the last reference will yield without destroying the
687 * node.
689 static void
690 rc_node_hold_ephemeral_locked(rc_node_t *np)
692 assert(MUTEX_HELD(&np->rn_lock));
694 ++np->rn_erefs;
698 * the "other" references on a node are maintained in an atomically
699 * updated refcount, rn_other_refs. This can be bumped from arbitrary
700 * context, and tracks references to a possibly out-of-date node's children.
702 * To prevent the node from disappearing between the final drop of
703 * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
704 * 0->1 transitions and decremented (with the node lock held) on 1->0
705 * transitions.
707 static void
708 rc_node_hold_other(rc_node_t *np)
710 if (atomic_add_32_nv(&np->rn_other_refs, 1) == 1) {
711 atomic_add_32(&np->rn_other_refs_held, 1);
712 assert(np->rn_other_refs_held > 0);
714 assert(np->rn_other_refs > 0);
718 * No node locks may be held
720 static void
721 rc_node_rele_other(rc_node_t *np)
723 assert(np->rn_other_refs > 0);
724 if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
725 (void) pthread_mutex_lock(&np->rn_lock);
726 assert(np->rn_other_refs_held > 0);
727 if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
728 np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) {
730 * This was the last client reference. Destroy
731 * any other references and free() the node.
733 rc_node_no_client_refs(np);
734 } else {
735 (void) pthread_mutex_unlock(&np->rn_lock);
740 static void
741 rc_node_hold_locked(rc_node_t *np)
743 assert(MUTEX_HELD(&np->rn_lock));
745 if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
746 rc_node_hold_other(np->rn_parent_ref);
747 np->rn_refs++;
748 assert(np->rn_refs > 0);
751 static void
752 rc_node_hold(rc_node_t *np)
754 (void) pthread_mutex_lock(&np->rn_lock);
755 rc_node_hold_locked(np);
756 (void) pthread_mutex_unlock(&np->rn_lock);
759 static void
760 rc_node_rele_locked(rc_node_t *np)
762 int unref = 0;
763 rc_node_t *par_ref = NULL;
765 assert(MUTEX_HELD(&np->rn_lock));
766 assert(np->rn_refs > 0);
768 if (--np->rn_refs == 0) {
769 if (np->rn_flags & RC_NODE_PARENT_REF)
770 par_ref = np->rn_parent_ref;
773 * Composed property groups are only as good as their
774 * references.
776 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
777 np->rn_flags |= RC_NODE_DEAD;
779 if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
780 np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
781 unref = 1;
784 if (unref) {
786 * This was the last client reference. Destroy any other
787 * references and free() the node.
789 rc_node_no_client_refs(np);
790 } else {
792 * rn_erefs can be 0 if we acquired the reference in
793 * a path which hasn't been updated to increment rn_erefs.
794 * When all paths which end here are updated, we should
795 * assert rn_erefs > 0 and always decrement it.
797 if (np->rn_erefs > 0)
798 --np->rn_erefs;
799 (void) pthread_mutex_unlock(&np->rn_lock);
802 if (par_ref != NULL)
803 rc_node_rele_other(par_ref);
806 void
807 rc_node_rele(rc_node_t *np)
809 (void) pthread_mutex_lock(&np->rn_lock);
810 rc_node_rele_locked(np);
813 static cache_bucket_t *
814 cache_hold(uint32_t h)
816 cache_bucket_t *bp = CACHE_BUCKET(h);
817 (void) pthread_mutex_lock(&bp->cb_lock);
818 return (bp);
821 static void
822 cache_release(cache_bucket_t *bp)
824 (void) pthread_mutex_unlock(&bp->cb_lock);
827 static rc_node_t *
828 cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
830 uint32_t h = rc_node_hash(lp);
831 rc_node_t *np;
833 assert(MUTEX_HELD(&bp->cb_lock));
834 assert(bp == CACHE_BUCKET(h));
836 for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
837 if (np->rn_hash == h && rc_node_match(np, lp)) {
838 rc_node_hold(np);
839 return (np);
843 return (NULL);
846 static rc_node_t *
847 cache_lookup(rc_node_lookup_t *lp)
849 uint32_t h;
850 cache_bucket_t *bp;
851 rc_node_t *np;
853 h = rc_node_hash(lp);
854 bp = cache_hold(h);
856 np = cache_lookup_unlocked(bp, lp);
858 cache_release(bp);
860 return (np);
863 static void
864 cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
866 assert(MUTEX_HELD(&bp->cb_lock));
867 assert(np->rn_hash == rc_node_hash(&np->rn_id));
868 assert(bp == CACHE_BUCKET(np->rn_hash));
870 assert(np->rn_hash_next == NULL);
872 np->rn_hash_next = bp->cb_head;
873 bp->cb_head = np;
876 static void
877 cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
879 rc_node_t **npp;
881 assert(MUTEX_HELD(&bp->cb_lock));
882 assert(np->rn_hash == rc_node_hash(&np->rn_id));
883 assert(bp == CACHE_BUCKET(np->rn_hash));
885 for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
886 if (*npp == np)
887 break;
889 assert(*npp == np);
890 *npp = np->rn_hash_next;
891 np->rn_hash_next = NULL;
895 * verify that the 'parent' type can have a child typed 'child'
896 * Fails with
897 * _INVALID_TYPE - argument is invalid
898 * _TYPE_MISMATCH - parent type cannot have children of type child
900 static int
901 rc_check_parent_child(uint32_t parent, uint32_t child)
903 int idx;
904 uint32_t type;
906 if (parent == 0 || parent >= NUM_TYPES ||
907 child == 0 || child >= NUM_TYPES)
908 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
910 for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
911 type = rc_types[parent].rt_valid_children[idx];
912 if (type == child)
913 return (REP_PROTOCOL_SUCCESS);
916 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
920 * Fails with
921 * _INVALID_TYPE - type is invalid
922 * _BAD_REQUEST - name is an invalid name for a node of type type
925 rc_check_type_name(uint32_t type, const char *name)
927 if (type == 0 || type >= NUM_TYPES)
928 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
930 if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
931 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
933 return (REP_PROTOCOL_SUCCESS);
936 static int
937 rc_check_pgtype_name(const char *name)
939 if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
940 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
942 return (REP_PROTOCOL_SUCCESS);
946 * rc_node_free_fmri should be called whenever a node loses its parent.
947 * The reason is that the node's fmri string is built up by concatenating
948 * its name to the parent's fmri. Thus, when the node no longer has a
949 * parent, its fmri is no longer valid.
951 static void
952 rc_node_free_fmri(rc_node_t *np)
954 if (np->rn_fmri != NULL) {
955 free((void *)np->rn_fmri);
956 np->rn_fmri = NULL;
961 * Concatenate the appropriate separator and the FMRI element to the base
962 * FMRI string at fmri.
964 * Fails with
965 * _TRUNCATED Not enough room in buffer at fmri.
967 static int
968 rc_concat_fmri_element(
969 char *fmri, /* base fmri */
970 size_t bufsize, /* size of buf at fmri */
971 size_t *sz_out, /* receives result size. */
972 const char *element, /* element name to concat */
973 rep_protocol_entity_t type) /* type of element */
975 size_t actual;
976 const char *name = element;
977 int rc;
978 const char *separator;
980 if (bufsize > 0)
981 *sz_out = strlen(fmri);
982 else
983 *sz_out = 0;
985 switch (type) {
986 case REP_PROTOCOL_ENTITY_SCOPE:
987 if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) {
989 * No need to display scope information if we are
990 * in the local scope.
992 separator = SCF_FMRI_SVC_PREFIX;
993 name = NULL;
994 } else {
996 * Need to display scope information, because it is
997 * not the local scope.
999 separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX;
1001 break;
1002 case REP_PROTOCOL_ENTITY_SERVICE:
1003 separator = SCF_FMRI_SERVICE_PREFIX;
1004 break;
1005 case REP_PROTOCOL_ENTITY_INSTANCE:
1006 separator = SCF_FMRI_INSTANCE_PREFIX;
1007 break;
1008 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
1009 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
1010 separator = SCF_FMRI_PROPERTYGRP_PREFIX;
1011 break;
1012 case REP_PROTOCOL_ENTITY_PROPERTY:
1013 separator = SCF_FMRI_PROPERTY_PREFIX;
1014 break;
1015 case REP_PROTOCOL_ENTITY_VALUE:
1017 * A value does not have a separate FMRI from its property,
1018 * so there is nothing to concat.
1020 return (REP_PROTOCOL_SUCCESS);
1021 case REP_PROTOCOL_ENTITY_SNAPSHOT:
1022 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
1023 /* Snapshots do not have FMRIs, so there is nothing to do. */
1024 return (REP_PROTOCOL_SUCCESS);
1025 default:
1026 (void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n",
1027 __FILE__, __LINE__, type);
1028 abort(); /* Missing a case in switch if we get here. */
1031 /* Concatenate separator and element to the fmri buffer. */
1033 actual = strlcat(fmri, separator, bufsize);
1034 if (name != NULL) {
1035 if (actual < bufsize) {
1036 actual = strlcat(fmri, name, bufsize);
1037 } else {
1038 actual += strlen(name);
1041 if (actual < bufsize) {
1042 rc = REP_PROTOCOL_SUCCESS;
1043 } else {
1044 rc = REP_PROTOCOL_FAIL_TRUNCATED;
1046 *sz_out = actual;
1047 return (rc);
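/*
 * Usage sketch (hypothetical caller, error handling elided):
 *
 *	char fmri[REP_PROTOCOL_FMRI_LEN] = "svc:/system/foo";
 *	size_t sz;
 *
 *	(void) rc_concat_fmri_element(fmri, sizeof (fmri), &sz, "default",
 *	    REP_PROTOCOL_ENTITY_INSTANCE);
 *
 * would leave "svc:/system/foo:default" in fmri.
 */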
1051 * Get the FMRI for the node at np. The fmri will be placed in buf. On
1052 * success sz_out will be set to the size of the fmri in buf. If
1053 * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
1054 * of the buffer that would be required to avoid truncation.
1056 * Fails with
1057 * _TRUNCATED not enough room in buf for the FMRI.
1059 static int
1060 rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize,
1061 size_t *sz_out)
1063 size_t fmri_len = 0;
1064 int r;
1066 if (bufsize > 0)
1067 *buf = 0;
1068 *sz_out = 0;
1070 if (np->rn_fmri == NULL) {
1072 * A NULL rn_fmri implies that this is a top level scope.
1073 * Child nodes will always have an rn_fmri established
1074 * because both rc_node_link_child() and
1075 * rc_node_relink_child() call rc_node_build_fmri(). In
1076 * this case, we'll just return our name preceded by the
1077 * appropriate FMRI decorations.
1079 assert(np->rn_parent == NULL);
1080 r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name,
1081 np->rn_id.rl_type);
1082 if (r != REP_PROTOCOL_SUCCESS)
1083 return (r);
1084 } else {
1085 /* We have an fmri, so return it. */
1086 fmri_len = strlcpy(buf, np->rn_fmri, bufsize);
1089 *sz_out = fmri_len;
1091 if (fmri_len >= bufsize)
1092 return (REP_PROTOCOL_FAIL_TRUNCATED);
1094 return (REP_PROTOCOL_SUCCESS);
1098 * Build an FMRI string for this node and save it in rn_fmri.
1100 * The basic strategy here is to get the fmri of our parent and then
1101 * concatenate the appropriate separator followed by our name. If our name
1102 * is null, the resulting fmri will just be a copy of the parent fmri.
1103 * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
1104 * set. Also the rn_lock for this node should be held.
1106 * Fails with
1107 * _NO_RESOURCES Could not allocate memory.
1109 static int
1110 rc_node_build_fmri(rc_node_t *np)
1112 size_t actual;
1113 char fmri[REP_PROTOCOL_FMRI_LEN];
1114 int rc;
1115 size_t sz = REP_PROTOCOL_FMRI_LEN;
1117 assert(MUTEX_HELD(&np->rn_lock));
1118 assert(np->rn_flags & RC_NODE_USING_PARENT);
1120 rc_node_free_fmri(np);
1122 rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual);
1123 assert(rc == REP_PROTOCOL_SUCCESS);
1125 if (np->rn_name != NULL) {
1126 rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name,
1127 np->rn_id.rl_type);
1128 assert(rc == REP_PROTOCOL_SUCCESS);
1129 np->rn_fmri = strdup(fmri);
1130 } else {
1131 np->rn_fmri = strdup(fmri);
1133 if (np->rn_fmri == NULL) {
1134 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1135 } else {
1136 rc = REP_PROTOCOL_SUCCESS;
1139 return (rc);
1143 * Get the FMRI of the node at np placing the result in fmri. Then
1144 * concatenate the additional element to fmri. The type variable indicates
1145 * the type of element, so that the appropriate separator can be
1146 * generated. size is the number of bytes in the buffer at fmri, and
1147 * sz_out receives the size of the generated string. If the result is
1148 * truncated, sz_out will receive the size of the buffer that would be
1149 * required to avoid truncation.
1151 * Fails with
1152 * _TRUNCATED Not enough room in buffer at fmri.
1154 static int
1155 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out,
1156 const char *element, rep_protocol_entity_t type)
1158 int rc;
1160 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) !=
1161 REP_PROTOCOL_SUCCESS) {
1162 return (rc);
1164 if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) !=
1165 REP_PROTOCOL_SUCCESS) {
1166 return (rc);
1169 return (REP_PROTOCOL_SUCCESS);
1172 static int
1173 rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
1175 rc_node_t *nnp = np->rcn_node;
1176 int i;
1178 assert(MUTEX_HELD(&rc_pg_notify_lock));
1180 if (np->rcn_delete != NULL) {
1181 assert(np->rcn_info == NULL && np->rcn_node == NULL);
1182 return (1); /* everyone likes deletes */
1184 if (np->rcn_node == NULL) {
1185 assert(np->rcn_info != NULL || np->rcn_delete != NULL);
1186 return (0);
1188 assert(np->rcn_info == NULL);
1190 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
1191 if (rnip->rni_namelist[i] != NULL) {
1192 if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
1193 return (1);
1195 if (rnip->rni_typelist[i] != NULL) {
1196 if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
1197 return (1);
1200 return (0);
1203 static void
1204 rc_notify_insert_node(rc_node_t *nnp)
1206 rc_notify_t *np = &nnp->rn_notify;
1207 rc_notify_info_t *nip;
1208 int found = 0;
1210 assert(np->rcn_info == NULL);
1212 if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
1213 return;
1215 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1216 np->rcn_node = nnp;
1217 for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1218 nip = uu_list_next(rc_notify_info_list, nip)) {
1219 if (rc_notify_info_interested(nip, np)) {
1220 (void) pthread_cond_broadcast(&nip->rni_cv);
1221 found++;
1224 if (found)
1225 (void) uu_list_insert_before(rc_notify_list, NULL, np);
1226 else
1227 np->rcn_node = NULL;
1229 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1232 static void
1233 rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
1234 const char *instance, const char *pg)
1236 rc_notify_info_t *nip;
1238 uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
1239 rc_notify_pool);
1240 ndp->rnd_notify.rcn_delete = ndp;
1242 (void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
1243 "svc:/%s%s%s%s%s", service,
1244 (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
1245 (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");
1248 * add to notification list, notify watchers
1250 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1251 for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1252 nip = uu_list_next(rc_notify_info_list, nip))
1253 (void) pthread_cond_broadcast(&nip->rni_cv);
1254 (void) uu_list_insert_before(rc_notify_list, NULL, ndp);
1255 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1258 static void
1259 rc_notify_remove_node(rc_node_t *nnp)
1261 rc_notify_t *np = &nnp->rn_notify;
1263 assert(np->rcn_info == NULL);
1264 assert(!MUTEX_HELD(&nnp->rn_lock));
1266 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1267 while (np->rcn_node != NULL) {
1268 if (rc_notify_in_use) {
1269 (void) pthread_cond_wait(&rc_pg_notify_cv,
1270 &rc_pg_notify_lock);
1271 continue;
1273 (void) uu_list_remove(rc_notify_list, np);
1274 np->rcn_node = NULL;
1275 break;
1277 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1280 static void
1281 rc_notify_remove_locked(rc_notify_t *np)
1283 assert(MUTEX_HELD(&rc_pg_notify_lock));
1284 assert(rc_notify_in_use == 0);
1286 (void) uu_list_remove(rc_notify_list, np);
1287 if (np->rcn_node) {
1288 np->rcn_node = NULL;
1289 } else if (np->rcn_delete) {
1290 uu_free(np->rcn_delete);
1291 } else {
1292 assert(0); /* CAN'T HAPPEN */
1297 * Permission checking functions. See comment atop this file.
1299 #ifndef NATIVE_BUILD
1300 static permcheck_t *
1301 pc_create()
1303 permcheck_t *p;
1305 p = uu_zalloc(sizeof (*p));
1306 if (p == NULL)
1307 return (NULL);
1308 p->pc_bnum = 8; /* Normal case will only have 2 elts. */
1309 p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
1310 if (p->pc_buckets == NULL) {
1311 uu_free(p);
1312 return (NULL);
1315 p->pc_enum = 0;
1316 return (p);
1319 static void
1320 pc_free(permcheck_t *pcp)
1322 uint_t i;
1323 struct pc_elt *ep, *next;
1325 for (i = 0; i < pcp->pc_bnum; ++i) {
1326 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1327 next = ep->pce_next;
1328 free(ep);
1332 free(pcp->pc_buckets);
1333 free(pcp);
1336 static uint32_t
1337 pc_hash(const char *auth)
1339 uint32_t h = 0, g;
1340 const char *p;
1343 * Generic hash function from uts/common/os/modhash.c.
1345 for (p = auth; *p != '\0'; ++p) {
1346 h = (h << 4) + *p;
1347 g = (h & 0xf0000000);
1348 if (g != 0) {
1349 h ^= (g >> 24);
1350 h ^= g;
1354 return (h);
1357 static perm_status_t
1358 pc_exists(permcheck_t *pcp, const char *auth)
1360 uint32_t h;
1361 struct pc_elt *ep;
1363 h = pc_hash(auth);
1364 for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
1365 ep != NULL;
1366 ep = ep->pce_next) {
1367 if (strcmp(auth, ep->pce_auth) == 0) {
1368 pcp->pc_auth_string = ep->pce_auth;
1369 return (PERM_GRANTED);
1373 return (PERM_DENIED);
1376 static perm_status_t
1377 pc_match(permcheck_t *pcp, const char *pattern)
1379 uint_t i;
1380 struct pc_elt *ep;
1382 for (i = 0; i < pcp->pc_bnum; ++i) {
1383 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
1384 if (_auth_match(pattern, ep->pce_auth)) {
1385 pcp->pc_auth_string = ep->pce_auth;
1386 return (PERM_GRANTED);
1391 return (PERM_DENIED);
1394 static int
1395 pc_grow(permcheck_t *pcp)
1397 uint_t new_bnum, i, j;
1398 struct pc_elt **new_buckets;
1399 struct pc_elt *ep, *next;
1401 new_bnum = pcp->pc_bnum * 2;
1402 if (new_bnum < pcp->pc_bnum)
1403 /* Homey don't play that. */
1404 return (-1);
1406 new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
1407 if (new_buckets == NULL)
1408 return (-1);
1410 for (i = 0; i < pcp->pc_bnum; ++i) {
1411 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1412 next = ep->pce_next;
1413 j = pc_hash(ep->pce_auth) & (new_bnum - 1);
1414 ep->pce_next = new_buckets[j];
1415 new_buckets[j] = ep;
1419 uu_free(pcp->pc_buckets);
1420 pcp->pc_buckets = new_buckets;
1421 pcp->pc_bnum = new_bnum;
1423 return (0);
1426 static int
1427 pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type)
1429 struct pc_elt *ep;
1430 uint_t i;
1432 ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
1433 if (ep == NULL)
1434 return (-1);
1436 /* Grow if pc_enum / pc_bnum > 3/4. */
1437 if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
1438 /* Failure is not a stopper; we'll try again next time. */
1439 (void) pc_grow(pcp);
1441 (void) strcpy(ep->pce_auth, auth);
1443 i = pc_hash(auth) & (pcp->pc_bnum - 1);
1444 ep->pce_next = pcp->pc_buckets[i];
1445 pcp->pc_buckets[i] = ep;
1447 if (auth_type > pcp->pc_specific_type) {
1448 pcp->pc_specific_type = auth_type;
1449 pcp->pc_specific = ep;
1452 ++pcp->pc_enum;
1454 return (0);
1458 * For the type of a property group, return the authorization which may be
1459 * used to modify it.
1461 static const char *
1462 perm_auth_for_pgtype(const char *pgtype)
1464 if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
1465 return (AUTH_MODIFY_PREFIX "method");
1466 else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
1467 return (AUTH_MODIFY_PREFIX "dependency");
1468 else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
1469 return (AUTH_MODIFY_PREFIX "application");
1470 else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
1471 return (AUTH_MODIFY_PREFIX "framework");
1472 else
1473 return (NULL);
1477 * Fails with
1478 * _NO_RESOURCES - out of memory
1480 static int
1481 perm_add_enabling_type(permcheck_t *pcp, const char *auth,
1482 pc_auth_type_t auth_type)
1484 return (pc_add(pcp, auth, auth_type) == 0 ? REP_PROTOCOL_SUCCESS :
1485 REP_PROTOCOL_FAIL_NO_RESOURCES);
1489 * Fails with
1490 * _NO_RESOURCES - out of memory
1492 static int
1493 perm_add_enabling(permcheck_t *pcp, const char *auth)
1495 return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF));
1498 /* Note that perm_add_enabling_values() is defined below. */
1501 * perm_granted() returns PERM_GRANTED if the current door caller has one of
1502 * the enabling authorizations in pcp, PERM_DENIED if it doesn't, PERM_GONE if
1503 * the door client went away and PERM_FAIL if an error (usually lack of
1504 * memory) occurs. auth_cb() checks each and every authorization as
1505 * enumerated by _enum_auths. When we find a result other than PERM_DENIED,
1506 * we short-cut the enumeration and return non-zero.
1509 static int
1510 auth_cb(const char *auth, void *ctxt, void *vres)
1512 permcheck_t *pcp = ctxt;
1513 int *pret = vres;
1515 if (strchr(auth, KV_WILDCHAR) == NULL)
1516 *pret = pc_exists(pcp, auth);
1517 else
1518 *pret = pc_match(pcp, auth);
1520 if (*pret != PERM_DENIED)
1521 return (1);
1523 * If we failed, choose the most specific auth string for use in
1524 * the audit event.
1526 assert(pcp->pc_specific != NULL);
1527 pcp->pc_auth_string = pcp->pc_specific->pce_auth;
1529 return (0); /* Tell _enum_auths() to continue enumerating. */
1532 static perm_status_t
1533 perm_granted(permcheck_t *pcp)
1535 ucred_t *uc;
1537 perm_status_t ret = PERM_DENIED;
1538 uid_t uid;
1539 struct passwd pw;
1540 char pwbuf[1024]; /* XXX should be NSS_BUFLEN_PASSWD */
1542 /* Get the uid */
1543 if ((uc = get_ucred()) == NULL) {
1544 if (errno == EINVAL) {
1546 * Client is no longer waiting for our response (e.g.,
1547 * it received a signal & resumed with EINTR).
1548 * Punting with door_return() would be nice but we
1549 * need to release all of the locks & references we
1550 * hold. And we must report failure to the client
1551 * layer to keep it from ignoring retries as
1552 * already-done (idempotency & all that). None of the
1553 * error codes fit very well, so we might as well
1554 * force the return of _PERMISSION_DENIED since we
1555 * couldn't determine the user.
1557 return (PERM_GONE);
1559 assert(0);
1560 abort();
1563 uid = ucred_geteuid(uc);
1564 assert(uid != (uid_t)-1);
1566 if (getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf)) == NULL) {
1567 return (PERM_FAIL);
1571 * Enumerate all the auths defined for the user and return the
1572 * result in ret.
1574 if (_enum_auths(pw.pw_name, auth_cb, pcp, &ret) < 0)
1575 return (PERM_FAIL);
1577 return (ret);
1580 static int
1581 map_granted_status(perm_status_t status, permcheck_t *pcp,
1582 char **match_auth)
1584 int rc;
1586 *match_auth = NULL;
1587 switch (status) {
1588 case PERM_DENIED:
1589 *match_auth = strdup(pcp->pc_auth_string);
1590 if (*match_auth == NULL)
1591 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1592 else
1593 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1594 break;
1595 case PERM_GRANTED:
1596 *match_auth = strdup(pcp->pc_auth_string);
1597 if (*match_auth == NULL)
1598 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1599 else
1600 rc = REP_PROTOCOL_SUCCESS;
1601 break;
1602 case PERM_GONE:
1603 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1604 break;
1605 case PERM_FAIL:
1606 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1607 break;
1609 return (rc);
1611 #endif /* NATIVE_BUILD */
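/*
 * Hedged usage sketch (hypothetical caller): a typical caller converts the
 * perm_status_t from perm_granted() into a protocol result and captures
 * the authorization string for the audit record:
 *
 *	char *auth_string;
 *	perm_status_t granted = perm_granted(pcp);
 *	int rc = map_granted_status(granted, pcp, &auth_string);
 *
 *	pc_free(pcp);
 *	... generate the audit event using auth_string, then free() it ...
 */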
1614 * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to
1615 * serialize certain actions, and to wait for certain operations to complete
1617 * The waiting flags are:
1618 * RC_NODE_CHILDREN_CHANGING
1619 * The child list is being built or changed (due to creation
1620 * or deletion). All iterators pause.
1622 * RC_NODE_USING_PARENT
1623 * Someone is actively using the parent pointer, so we can't
1624 * be removed from the parent list.
1626 * RC_NODE_CREATING_CHILD
1627 * A child is being created -- locks out other creations, to
1628 * prevent insert-insert races.
1630 * RC_NODE_IN_TX
1631 * This object is running a transaction.
1633 * RC_NODE_DYING
1634 * This node might be dying. Always set as a set, using
1635 * RC_NODE_DYING_FLAGS (which is everything but
1636 * RC_NODE_USING_PARENT)
1638 static int
1639 rc_node_hold_flag(rc_node_t *np, uint32_t flag)
1641 assert(MUTEX_HELD(&np->rn_lock));
1642 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1644 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
1645 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1647 if (np->rn_flags & RC_NODE_DEAD)
1648 return (0);
1650 np->rn_flags |= flag;
1651 return (1);
1654 static void
1655 rc_node_rele_flag(rc_node_t *np, uint32_t flag)
1657 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1658 assert(MUTEX_HELD(&np->rn_lock));
1659 assert((np->rn_flags & flag) == flag);
1660 np->rn_flags &= ~flag;
1661 (void) pthread_cond_broadcast(&np->rn_cv);
1665 * wait until a particular flag has cleared. Fails if the object dies.
1667 static int
1668 rc_node_wait_flag(rc_node_t *np, uint32_t flag)
1670 assert(MUTEX_HELD(&np->rn_lock));
1671 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
1672 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1674 return (!(np->rn_flags & RC_NODE_DEAD));
1678 * On entry, np's lock must be held, and this thread must be holding
1679 * RC_NODE_USING_PARENT. On return, both of them are released.
1681 * If the return value is NULL, np either does not have a parent, or
1682 * the parent has been marked DEAD.
1684 * If the return value is non-NULL, it is the parent of np, and both
1685 * its lock and the requested flags are held.
1687 static rc_node_t *
1688 rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
1690 rc_node_t *pp;
1692 assert(MUTEX_HELD(&np->rn_lock));
1693 assert(np->rn_flags & RC_NODE_USING_PARENT);
1695 if ((pp = np->rn_parent) == NULL) {
1696 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1697 (void) pthread_mutex_unlock(&np->rn_lock);
1698 return (NULL);
1700 (void) pthread_mutex_unlock(&np->rn_lock);
1702 (void) pthread_mutex_lock(&pp->rn_lock);
1703 (void) pthread_mutex_lock(&np->rn_lock);
1704 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1705 (void) pthread_mutex_unlock(&np->rn_lock);
1707 if (!rc_node_hold_flag(pp, flag)) {
1708 (void) pthread_mutex_unlock(&pp->rn_lock);
1709 return (NULL);
1711 return (pp);
1714 rc_node_t *
1715 rc_node_alloc(void)
1717 rc_node_t *np = uu_zalloc(sizeof (*np));
1719 if (np == NULL)
1720 return (NULL);
1722 (void) pthread_mutex_init(&np->rn_lock, NULL);
1723 (void) pthread_cond_init(&np->rn_cv, NULL);
1725 np->rn_children = uu_list_create(rc_children_pool, np, 0);
1726 np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);
1728 uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);
1730 uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
1731 rc_notify_pool);
1733 return (np);
1736 void
1737 rc_node_destroy(rc_node_t *np)
1739 int i;
1741 if (np->rn_flags & RC_NODE_UNREFED)
1742 return; /* being handled elsewhere */
1744 assert(np->rn_refs == 0 && np->rn_other_refs == 0);
1745 assert(np->rn_former == NULL);
1747 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
1748 /* Release the holds from rc_iter_next(). */
1749 for (i = 0; i < COMPOSITION_DEPTH; ++i) {
1750 /* rn_cchain[i] may be NULL for empty snapshots. */
1751 if (np->rn_cchain[i] != NULL)
1752 rc_node_rele(np->rn_cchain[i]);
1756 if (np->rn_name != NULL)
1757 free((void *)np->rn_name);
1758 np->rn_name = NULL;
1759 if (np->rn_type != NULL)
1760 free((void *)np->rn_type);
1761 np->rn_type = NULL;
1762 if (np->rn_values != NULL)
1763 object_free_values(np->rn_values, np->rn_valtype,
1764 np->rn_values_count, np->rn_values_size);
1765 np->rn_values = NULL;
1766 rc_node_free_fmri(np);
1768 if (np->rn_snaplevel != NULL)
1769 rc_snaplevel_rele(np->rn_snaplevel);
1770 np->rn_snaplevel = NULL;
1772 uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);
1774 uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
1775 rc_notify_pool);
1777 assert(uu_list_first(np->rn_children) == NULL);
1778 uu_list_destroy(np->rn_children);
1779 uu_list_destroy(np->rn_pg_notify_list);
1781 (void) pthread_mutex_destroy(&np->rn_lock);
1782 (void) pthread_cond_destroy(&np->rn_cv);
1784 uu_free(np);
1788 * Link in a child node.
1790 * Because of the lock ordering, cp has to already be in the hash table with
1791 * its lock dropped before we get it. To prevent anyone from noticing that
1792 * it is parentless, the creation code sets the RC_NODE_USING_PARENT flag. Once
1793 * we've linked it in, we release the flag.
1795 static void
1796 rc_node_link_child(rc_node_t *np, rc_node_t *cp)
1798 assert(!MUTEX_HELD(&np->rn_lock));
1799 assert(!MUTEX_HELD(&cp->rn_lock));
1801 (void) pthread_mutex_lock(&np->rn_lock);
1802 (void) pthread_mutex_lock(&cp->rn_lock);
1803 assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
1804 (cp->rn_flags & RC_NODE_USING_PARENT));
1806 assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
1807 REP_PROTOCOL_SUCCESS);
1809 cp->rn_parent = np;
1810 cp->rn_flags |= RC_NODE_IN_PARENT;
1811 (void) uu_list_insert_before(np->rn_children, NULL, cp);
1812 (void) rc_node_build_fmri(cp);
1814 (void) pthread_mutex_unlock(&np->rn_lock);
1816 rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
1817 (void) pthread_mutex_unlock(&cp->rn_lock);
1821 * Sets the rn_parent_ref field of all the children of np to pp -- always
1822 * initially invoked as rc_node_setup_parent_ref(np, np), we then recurse.
1824 * This is used when we mark a node RC_NODE_OLD, so that when the object and
1825 * its children are no longer referenced, they will all be deleted as a unit.
1827 static void
1828 rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
1830 rc_node_t *cp;
1832 assert(MUTEX_HELD(&np->rn_lock));
1834 for (cp = uu_list_first(np->rn_children); cp != NULL;
1835 cp = uu_list_next(np->rn_children, cp)) {
1836 (void) pthread_mutex_lock(&cp->rn_lock);
1837 if (cp->rn_flags & RC_NODE_PARENT_REF) {
1838 assert(cp->rn_parent_ref == pp);
1839 } else {
1840 assert(cp->rn_parent_ref == NULL);
1842 cp->rn_flags |= RC_NODE_PARENT_REF;
1843 cp->rn_parent_ref = pp;
1844 if (cp->rn_refs != 0)
1845 rc_node_hold_other(pp);
1847 rc_node_setup_parent_ref(cp, pp); /* recurse */
1848 (void) pthread_mutex_unlock(&cp->rn_lock);
1853 * Atomically replace 'np' with 'newp', with a parent of 'pp'.
1855 * Requirements:
1856 * *no* node locks may be held.
1857 * pp must be held with RC_NODE_CHILDREN_CHANGING
1858 * newp and np must be held with RC_NODE_IN_TX
1859 * np must be marked RC_NODE_IN_PARENT, newp must not be
1860 * np must be marked RC_NODE_OLD
1862 * Afterwards:
1863 * pp's RC_NODE_CHILDREN_CHANGING is dropped
1864 * newp and np's RC_NODE_IN_TX is dropped
1865 * newp->rn_former = np;
1866 * newp is RC_NODE_IN_PARENT, np is not.
1867 * interested notify subscribers have been notified of newp's new status.
1869 static void
1870 rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
1872 cache_bucket_t *bp;
1874 * First, swap np and newp in the cache. newp's RC_NODE_IN_TX flag
1875 * keeps rc_node_update() from seeing it until we are done.
1877 bp = cache_hold(newp->rn_hash);
1878 cache_remove_unlocked(bp, np);
1879 cache_insert_unlocked(bp, newp);
1880 cache_release(bp);
1883 * replace np with newp in pp's list, and attach it to newp's rn_former
1884 * link.
1886 (void) pthread_mutex_lock(&pp->rn_lock);
1887 assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);
1889 (void) pthread_mutex_lock(&newp->rn_lock);
1890 assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
1891 assert(newp->rn_flags & RC_NODE_IN_TX);
1893 (void) pthread_mutex_lock(&np->rn_lock);
1894 assert(np->rn_flags & RC_NODE_IN_PARENT);
1895 assert(np->rn_flags & RC_NODE_OLD);
1896 assert(np->rn_flags & RC_NODE_IN_TX);
1898 newp->rn_parent = pp;
1899 newp->rn_flags |= RC_NODE_IN_PARENT;
1902 * Note that we carefully add newp before removing np -- this
1903 * keeps iterators on the list from missing us.
1905 (void) uu_list_insert_after(pp->rn_children, np, newp);
1906 (void) rc_node_build_fmri(newp);
1907 (void) uu_list_remove(pp->rn_children, np);
1910 * re-set np
1912 newp->rn_former = np;
1913 np->rn_parent = NULL;
1914 np->rn_flags &= ~RC_NODE_IN_PARENT;
1915 np->rn_flags |= RC_NODE_ON_FORMER;
1917 rc_notify_insert_node(newp);
1919 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
1920 (void) pthread_mutex_unlock(&pp->rn_lock);
1921 rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
1922 (void) pthread_mutex_unlock(&newp->rn_lock);
1923 rc_node_setup_parent_ref(np, np);
1924 rc_node_rele_flag(np, RC_NODE_IN_TX);
1925 (void) pthread_mutex_unlock(&np->rn_lock);
1929 * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
1930 * 'cp' is used (and returned) if the node does not yet exist. If it does
1931 * exist, 'cp' is freed, and the existent node is returned instead.
1933 rc_node_t *
1934 rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1935 rc_node_t *pp)
1937 rc_node_t *np;
1938 cache_bucket_t *bp;
1939 uint32_t h = rc_node_hash(nip);
1941 assert(cp->rn_refs == 0);
1943 bp = cache_hold(h);
1944 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
1945 cache_release(bp);
1948 * make sure it matches our expectations
1950 (void) pthread_mutex_lock(&np->rn_lock);
1951 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
1952 assert(np->rn_parent == pp);
1953 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
1954 assert(strcmp(np->rn_name, name) == 0);
1955 assert(np->rn_type == NULL);
1956 assert(np->rn_flags & RC_NODE_IN_PARENT);
1957 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1959 (void) pthread_mutex_unlock(&np->rn_lock);
1961 rc_node_destroy(cp);
1962 return (np);
1966 * No one is there -- setup & install the new node.
1968 np = cp;
1969 rc_node_hold(np);
1970 np->rn_id = *nip;
1971 np->rn_hash = h;
1972 np->rn_name = strdup(name);
1974 np->rn_flags |= RC_NODE_USING_PARENT;
1976 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
1977 #if COMPOSITION_DEPTH == 2
1978 np->rn_cchain[0] = np;
1979 np->rn_cchain[1] = pp;
1980 #else
1981 #error This code must be updated.
1982 #endif
1985 cache_insert_unlocked(bp, np);
1986 cache_release(bp); /* we are now visible */
1988 rc_node_link_child(pp, np);
1990 return (np);
1994 * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
1995 * 'cp' is used (and returned) if the node does not yet exist. If it does
1996 * exist, 'cp' is freed, and the existent node is returned instead.
1998 rc_node_t *
1999 rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2000 uint32_t snap_id, rc_node_t *pp)
2002 rc_node_t *np;
2003 cache_bucket_t *bp;
2004 uint32_t h = rc_node_hash(nip);
2006 assert(cp->rn_refs == 0);
2008 bp = cache_hold(h);
2009 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2010 cache_release(bp);
2013 * make sure it matches our expectations
2015 (void) pthread_mutex_lock(&np->rn_lock);
2016 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2017 assert(np->rn_parent == pp);
2018 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2019 assert(strcmp(np->rn_name, name) == 0);
2020 assert(np->rn_type == NULL);
2021 assert(np->rn_flags & RC_NODE_IN_PARENT);
2022 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2024 (void) pthread_mutex_unlock(&np->rn_lock);
2026 rc_node_destroy(cp);
2027 return (np);
2031 * No one is there -- create a new node.
2033 np = cp;
2034 rc_node_hold(np);
2035 np->rn_id = *nip;
2036 np->rn_hash = h;
2037 np->rn_name = strdup(name);
2038 np->rn_snapshot_id = snap_id;
2040 np->rn_flags |= RC_NODE_USING_PARENT;
2042 cache_insert_unlocked(bp, np);
2043 cache_release(bp); /* we are now visible */
2045 rc_node_link_child(pp, np);
2047 return (np);
2051 * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists. 'cp' is
2052 * used (and returned) if the node does not yet exist. If it does exist, 'cp'
2053 * is freed, and the existent node is returned instead.
2055 rc_node_t *
2056 rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
2057 rc_snaplevel_t *lvl, rc_node_t *pp)
2059 rc_node_t *np;
2060 cache_bucket_t *bp;
2061 uint32_t h = rc_node_hash(nip);
2063 assert(cp->rn_refs == 0);
2065 bp = cache_hold(h);
2066 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2067 cache_release(bp);
2070 * make sure it matches our expectations
2072 (void) pthread_mutex_lock(&np->rn_lock);
2073 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2074 assert(np->rn_parent == pp);
2075 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2076 assert(np->rn_name == NULL);
2077 assert(np->rn_type == NULL);
2078 assert(np->rn_flags & RC_NODE_IN_PARENT);
2079 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2081 (void) pthread_mutex_unlock(&np->rn_lock);
2083 rc_node_destroy(cp);
2084 return (np);
2088 * No one is there -- create a new node.
2090 np = cp;
2091 rc_node_hold(np); /* released in snapshot_fill_children() */
2092 np->rn_id = *nip;
2093 np->rn_hash = h;
2095 rc_snaplevel_hold(lvl);
2096 np->rn_snaplevel = lvl;
2098 np->rn_flags |= RC_NODE_USING_PARENT;
2100 cache_insert_unlocked(bp, np);
2101 cache_release(bp); /* we are now visible */
2103 /* Add this snaplevel to the snapshot's composition chain. */
2104 assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
2105 pp->rn_cchain[lvl->rsl_level_num - 1] = np;
2107 rc_node_link_child(pp, np);
2109 return (np);
2113 * Returns NULL if strdup() fails.
2115 rc_node_t *
2116 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2117 const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
2119 rc_node_t *np;
2120 cache_bucket_t *bp;
2122 uint32_t h = rc_node_hash(nip);
2123 bp = cache_hold(h);
2124 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2125 cache_release(bp);
2128 * make sure it matches our expectations (don't check
2129 * the generation number or parent, since someone could
2130 * have gotten a transaction through while we weren't
2131 * looking)
2133 (void) pthread_mutex_lock(&np->rn_lock);
2134 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2135 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2136 assert(strcmp(np->rn_name, name) == 0);
2137 assert(strcmp(np->rn_type, type) == 0);
2138 assert(np->rn_pgflags == flags);
2139 assert(np->rn_flags & RC_NODE_IN_PARENT);
2140 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2142 (void) pthread_mutex_unlock(&np->rn_lock);
2144 rc_node_destroy(cp);
2145 return (np);
2148 np = cp;
2149 rc_node_hold(np); /* released in fill_pg_callback() */
2150 np->rn_id = *nip;
2151 np->rn_hash = h;
2152 np->rn_name = strdup(name);
2153 if (np->rn_name == NULL) {
2154 rc_node_rele(np);
2155 return (NULL);
2157 np->rn_type = strdup(type);
2158 if (np->rn_type == NULL) {
2159 free((void *)np->rn_name);
2160 rc_node_rele(np);
2161 return (NULL);
2163 np->rn_pgflags = flags;
2164 np->rn_gen_id = gen_id;
2166 np->rn_flags |= RC_NODE_USING_PARENT;
2168 cache_insert_unlocked(bp, np);
2169 cache_release(bp); /* we are now visible */
2171 rc_node_link_child(pp, np);
2173 return (np);
2176 #if COMPOSITION_DEPTH == 2
2178 * Initialize a "composed property group" which represents the composition of
2179 * property groups pg1 & pg2. It is ephemeral: once created & returned for an
2180 * ITER_READ request, keeping it out of cache_hash and any child lists
2181 * prevents it from being looked up. Operations besides iteration are passed
2182 * through to pg1.
2184 * pg1 & pg2 should be held before entering this function. They will be
2185 * released in rc_node_destroy().
2187 static int
2188 rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
2190 if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
2191 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2193 cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
2194 cpg->rn_name = strdup(pg1->rn_name);
2195 if (cpg->rn_name == NULL)
2196 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2198 cpg->rn_cchain[0] = pg1;
2199 cpg->rn_cchain[1] = pg2;
2201 return (REP_PROTOCOL_SUCCESS);
2203 #else
2204 #error This code must be updated.
2205 #endif
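/*
 * Illustrative example (added; not part of the original source): with
 * COMPOSITION_DEPTH == 2 a composed property group typically stacks a
 * more-specific pg (rn_cchain[0]) over the same-named, same-typed pg of
 * its parent entity (rn_cchain[1]).  Lookups such as rc_node_get_child()
 * consult rn_cchain[0] first and fall back to rn_cchain[1], while
 * non-iteration operations are passed through to pg1 as described above.
 */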
2208 * Fails with _NO_RESOURCES.
2211 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
2212 const char *name, rep_protocol_value_type_t type,
2213 const char *vals, size_t count, size_t size)
2215 rc_node_t *np;
2216 cache_bucket_t *bp;
2218 uint32_t h = rc_node_hash(nip);
2219 bp = cache_hold(h);
2220 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2221 cache_release(bp);
2223 * make sure it matches our expectations
2225 (void) pthread_mutex_lock(&np->rn_lock);
2226 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2227 assert(np->rn_parent == pp);
2228 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2229 assert(strcmp(np->rn_name, name) == 0);
2230 assert(np->rn_valtype == type);
2231 assert(np->rn_values_count == count);
2232 assert(np->rn_values_size == size);
2233 assert(vals == NULL ||
2234 memcmp(np->rn_values, vals, size) == 0);
2235 assert(np->rn_flags & RC_NODE_IN_PARENT);
2236 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2238 rc_node_rele_locked(np);
2239 object_free_values(vals, type, count, size);
2240 return (REP_PROTOCOL_SUCCESS);
2244 * No one is there -- create a new node.
2246 np = rc_node_alloc();
2247 if (np == NULL) {
2248 cache_release(bp);
2249 object_free_values(vals, type, count, size);
2250 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2252 np->rn_id = *nip;
2253 np->rn_hash = h;
2254 np->rn_name = strdup(name);
2255 if (np->rn_name == NULL) {
2256 cache_release(bp);
2257 object_free_values(vals, type, count, size);
2258 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2261 np->rn_valtype = type;
2262 np->rn_values = vals;
2263 np->rn_values_count = count;
2264 np->rn_values_size = size;
2266 np->rn_flags |= RC_NODE_USING_PARENT;
2268 cache_insert_unlocked(bp, np);
2269 cache_release(bp); /* we are now visible */
2271 rc_node_link_child(pp, np);
2273 return (REP_PROTOCOL_SUCCESS);
2277 * This function implements a decision table to determine the event ID for
2278 * changes to the enabled (SCF_PROPERTY_ENABLED) property. The event ID is
2279 * determined by the value of the first property in the command specified
2280 * by cmd_no and the name of the property group. Here is the decision
2281 * table:
2283 * Property Group Name
2284 * Property ------------------------------------------
2285 * Value SCF_PG_GENERAL SCF_PG_GENERAL_OVR
2286 * -------- -------------- ------------------
2287 * "0" ADT_smf_disable ADT_smf_tmp_disable
2288 * "1" ADT_smf_enable ADT_smf_tmp_enable
2290 * This function is called by special_property_event through a function
2291 * pointer in the special_props_list array.
2293 * Since the ADT_smf_* symbols may not be defined in the build machine's
2294 * include files, this function is not compiled when doing native builds.
2296 #ifndef NATIVE_BUILD
2297 static int
2298 general_enable_id(tx_commit_data_t *tx_data, size_t cmd_no, const char *pg,
2299 au_event_t *event_id)
2301 const char *value;
2302 uint32_t nvalues;
2303 int enable;
2306 * First, check property value.
2308 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
2309 return (-1);
2310 if (nvalues == 0)
2311 return (-1);
2312 if (tx_cmd_value(tx_data, cmd_no, 0, &value) != REP_PROTOCOL_SUCCESS)
2313 return (-1);
2314 if (strcmp(value, "0") == 0) {
2315 enable = 0;
2316 } else if (strcmp(value, "1") == 0) {
2317 enable = 1;
2318 } else {
2319 return (-1);
2323 * Now check property group name.
2325 if (strcmp(pg, SCF_PG_GENERAL) == 0) {
2326 *event_id = enable ? ADT_smf_enable : ADT_smf_disable;
2327 return (0);
2328 } else if (strcmp(pg, SCF_PG_GENERAL_OVR) == 0) {
2329 *event_id = enable ? ADT_smf_tmp_enable : ADT_smf_tmp_disable;
2330 return (0);
2332 return (-1);
2334 #endif /* NATIVE_BUILD */
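/*
 * Worked example (added; not part of the original source): a transaction
 * command whose first value is "1" against SCF_PG_GENERAL yields
 * ADT_smf_enable, while the same value against SCF_PG_GENERAL_OVR yields
 * ADT_smf_tmp_enable, exactly as in the decision table above.
 */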
2337 * This function compares two audit_special_prop_item_t structures
2338 * represented by item1 and item2. It returns an integer greater than 0 if
2339 * item1 is greater than item2. It returns 0 if they are equal and an
2340 * integer less than 0 if item1 is less than item2. api_prop_name and
2341 * api_pg_name are the key fields for sorting.
2343 * This function is suitable for calls to bsearch(3C) and qsort(3C).
2345 static int
2346 special_prop_compare(const void *item1, const void *item2)
2348 const audit_special_prop_item_t *a = (audit_special_prop_item_t *)item1;
2349 const audit_special_prop_item_t *b = (audit_special_prop_item_t *)item2;
2350 int r;
2352 r = strcmp(a->api_prop_name, b->api_prop_name);
2353 if (r == 0) {
2355 * Primary keys are the same, so check the secondary key.
2357 r = strcmp(a->api_pg_name, b->api_pg_name);
2359 return (r);
2363 rc_node_init(void)
2365 rc_node_t *np;
2366 cache_bucket_t *bp;
2368 rc_children_pool = uu_list_pool_create("rc_children_pool",
2369 sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
2370 NULL, UU_LIST_POOL_DEBUG);
2372 rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
2373 sizeof (rc_node_pg_notify_t),
2374 offsetof(rc_node_pg_notify_t, rnpn_node),
2375 NULL, UU_LIST_POOL_DEBUG);
2377 rc_notify_pool = uu_list_pool_create("rc_notify_pool",
2378 sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
2379 NULL, UU_LIST_POOL_DEBUG);
2381 rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
2382 sizeof (rc_notify_info_t),
2383 offsetof(rc_notify_info_t, rni_list_node),
2384 NULL, UU_LIST_POOL_DEBUG);
2386 if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
2387 rc_notify_pool == NULL || rc_notify_info_pool == NULL)
2388 uu_die("out of memory");
2390 rc_notify_list = uu_list_create(rc_notify_pool,
2391 &rc_notify_list, 0);
2393 rc_notify_info_list = uu_list_create(rc_notify_info_pool,
2394 &rc_notify_info_list, 0);
2396 if (rc_notify_list == NULL || rc_notify_info_list == NULL)
2397 uu_die("out of memory");
2400 * Sort the special_props_list array so that it can be searched
2401 * with bsearch(3C).
2403 * The special_props_list array is not compiled into the native
2404 * build code, so there is no need to call qsort if NATIVE_BUILD is
2405 * defined.
2407 #ifndef NATIVE_BUILD
2408 qsort(special_props_list, SPECIAL_PROP_COUNT,
2409 sizeof (special_props_list[0]), special_prop_compare);
2410 #endif /* NATIVE_BUILD */
2412 if ((np = rc_node_alloc()) == NULL)
2413 uu_die("out of memory");
2415 rc_node_hold(np);
2416 np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
2417 np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
2418 np->rn_hash = rc_node_hash(&np->rn_id);
2419 np->rn_name = "localhost";
2421 bp = cache_hold(np->rn_hash);
2422 cache_insert_unlocked(bp, np);
2423 cache_release(bp);
2425 rc_scope = np;
2426 return (1);
2430 * Fails with
2431 * _INVALID_TYPE - type is invalid
2432 * _TYPE_MISMATCH - np doesn't carry children of type type
2433 * _DELETED - np has been deleted
2434 * _NO_RESOURCES
2436 static int
2437 rc_node_fill_children(rc_node_t *np, uint32_t type)
2439 int rc;
2441 assert(MUTEX_HELD(&np->rn_lock));
2443 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
2444 REP_PROTOCOL_SUCCESS)
2445 return (rc);
2447 if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
2448 return (REP_PROTOCOL_FAIL_DELETED);
2450 if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
2451 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2452 return (REP_PROTOCOL_SUCCESS);
2455 (void) pthread_mutex_unlock(&np->rn_lock);
2456 rc = object_fill_children(np);
2457 (void) pthread_mutex_lock(&np->rn_lock);
2459 if (rc == REP_PROTOCOL_SUCCESS) {
2460 np->rn_flags |= RC_NODE_HAS_CHILDREN;
2462 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2464 return (rc);
2468 * Returns
2469 * _INVALID_TYPE - type is invalid
2470 * _TYPE_MISMATCH - np doesn't carry children of type type
2471 * _DELETED - np has been deleted
2472 * _NO_RESOURCES
2473 * _SUCCESS - if *cpp is not NULL, it is held
2475 static int
2476 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
2477 rc_node_t **cpp)
2479 int ret;
2480 rc_node_t *cp;
2482 assert(MUTEX_HELD(&np->rn_lock));
2483 assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
2485 ret = rc_node_fill_children(np, type);
2486 if (ret != REP_PROTOCOL_SUCCESS)
2487 return (ret);
2489 for (cp = uu_list_first(np->rn_children);
2490 cp != NULL;
2491 cp = uu_list_next(np->rn_children, cp)) {
2492 if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
2493 break;
2496 if (cp != NULL)
2497 rc_node_hold(cp);
2498 *cpp = cp;
2500 return (REP_PROTOCOL_SUCCESS);
2503 static int rc_node_parent(rc_node_t *, rc_node_t **);
2506 * Returns
2507 * _INVALID_TYPE - type is invalid
2508 * _DELETED - np or an ancestor has been deleted
2509 * _NOT_FOUND - no ancestor of specified type exists
2510 * _SUCCESS - *app is held
2512 static int
2513 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app)
2515 int ret;
2516 rc_node_t *parent, *np_orig;
2518 if (type >= REP_PROTOCOL_ENTITY_MAX)
2519 return (REP_PROTOCOL_FAIL_INVALID_TYPE);
2521 np_orig = np;
2523 while (np->rn_id.rl_type > type) {
2524 ret = rc_node_parent(np, &parent);
2525 if (np != np_orig)
2526 rc_node_rele(np);
2527 if (ret != REP_PROTOCOL_SUCCESS)
2528 return (ret);
2529 np = parent;
2532 if (np->rn_id.rl_type == type) {
2533 *app = parent;
2534 return (REP_PROTOCOL_SUCCESS);
2537 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2540 #ifndef NATIVE_BUILD
2542 * If the propname property exists in pg, and it is of type string, add its
2543 * values as authorizations to pcp. pg must not be locked on entry, and it is
2544 * returned unlocked. Returns
2545 * _DELETED - pg was deleted
2546 * _NO_RESOURCES
2547 * _NOT_FOUND - pg has no property named propname
2548 * _SUCCESS
2550 static int
2551 perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2553 rc_node_t *prop;
2554 int result;
2556 uint_t count;
2557 const char *cp;
2559 assert(!MUTEX_HELD(&pg->rn_lock));
2560 assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);
2562 (void) pthread_mutex_lock(&pg->rn_lock);
2563 result = rc_node_find_named_child(pg, propname,
2564 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
2565 (void) pthread_mutex_unlock(&pg->rn_lock);
2566 if (result != REP_PROTOCOL_SUCCESS) {
2567 switch (result) {
2568 case REP_PROTOCOL_FAIL_DELETED:
2569 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2570 return (result);
2572 case REP_PROTOCOL_FAIL_INVALID_TYPE:
2573 case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
2574 default:
2575 bad_error("rc_node_find_named_child", result);
2579 if (prop == NULL)
2580 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2582 /* rn_valtype is immutable, so no locking. */
2583 if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
2584 rc_node_rele(prop);
2585 return (REP_PROTOCOL_SUCCESS);
2588 (void) pthread_mutex_lock(&prop->rn_lock);
2589 for (count = prop->rn_values_count, cp = prop->rn_values;
2590 count > 0;
2591 --count) {
2592 result = perm_add_enabling_type(pcp, cp,
2593 (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST :
2594 PC_AUTH_SVC);
2595 if (result != REP_PROTOCOL_SUCCESS)
2596 break;
2598 cp = strchr(cp, '\0') + 1;
2601 rc_node_rele_locked(prop);
2603 return (result);
2607 * Assuming that ent is a service or instance node, if the pgname property
2608 * group has type pgtype, and it has a propname property with string type, add
2609 * its values as authorizations to pcp. If pgtype is NULL, it is not checked.
2610 * Returns
2611 * _SUCCESS
2612 * _DELETED - ent was deleted
2613 * _NO_RESOURCES - no resources
2614 * _NOT_FOUND - ent does not have pgname pg or propname property
2616 static int
2617 perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
2618 const char *pgtype, const char *propname)
2620 int r;
2621 rc_node_t *pg;
2623 assert(!MUTEX_HELD(&ent->rn_lock));
2625 (void) pthread_mutex_lock(&ent->rn_lock);
2626 r = rc_node_find_named_child(ent, pgname,
2627 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
2628 (void) pthread_mutex_unlock(&ent->rn_lock);
2630 switch (r) {
2631 case REP_PROTOCOL_SUCCESS:
2632 break;
2634 case REP_PROTOCOL_FAIL_DELETED:
2635 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2636 return (r);
2638 default:
2639 bad_error("rc_node_find_named_child", r);
2642 if (pg == NULL)
2643 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2645 if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
2646 r = perm_add_pg_prop_values(pcp, pg, propname);
2647 switch (r) {
2648 case REP_PROTOCOL_FAIL_DELETED:
2649 r = REP_PROTOCOL_FAIL_NOT_FOUND;
2650 break;
2652 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2653 case REP_PROTOCOL_SUCCESS:
2654 case REP_PROTOCOL_FAIL_NOT_FOUND:
2655 break;
2657 default:
2658 bad_error("perm_add_pg_prop_values", r);
2662 rc_node_rele(pg);
2664 return (r);
2668 * If pg has a property named propname, and it is string typed, add its values as
2669 * authorizations to pcp. If pg has no such property, and its parent is an
2670 * instance, walk up to the service and try doing the same with the property
2671 * of the same name from the property group of the same name. Returns
2672 * _SUCCESS
2673 * _NO_RESOURCES
2674 * _DELETED - pg (or an ancestor) was deleted
2676 static int
2677 perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2679 int r;
2680 char pgname[REP_PROTOCOL_NAME_LEN + 1];
2681 rc_node_t *svc;
2682 size_t sz;
2684 r = perm_add_pg_prop_values(pcp, pg, propname);
2686 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2687 return (r);
2689 assert(!MUTEX_HELD(&pg->rn_lock));
2691 if (pg->rn_id.rl_ids[ID_INSTANCE] == 0)
2692 return (REP_PROTOCOL_SUCCESS);
2694 sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
2695 assert(sz < sizeof (pgname));
2698 * If pg is a child of an instance or snapshot, we want to compose the
2699 * authorization property with the service's (if it exists). The
2700 * snapshot case applies only to read_authorization. In all other
2701 * cases, the pg's parent will be the instance.
2703 r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc);
2704 if (r != REP_PROTOCOL_SUCCESS) {
2705 assert(r == REP_PROTOCOL_FAIL_DELETED);
2706 return (r);
2708 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
2710 r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);
2712 rc_node_rele(svc);
2714 if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
2715 r = REP_PROTOCOL_SUCCESS;
2717 return (r);
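/*
 * Illustrative example (added; not part of the original source): if pg is
 * an instance's "start" method property group and propname is
 * "value_authorization", the instance-level values are used when that
 * property exists; only when the instance pg lacks it does the walk-up
 * above consult start/value_authorization on the parent service.
 */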
2721 * Call perm_add_enabling_values() for the "action_authorization" property of
2722 * the "general" property group of inst. Returns
2723 * _DELETED - inst (or an ancestor) was deleted
2724 * _NO_RESOURCES
2725 * _SUCCESS
2727 static int
2728 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
2730 int r;
2731 rc_node_t *svc;
2733 assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
2735 r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
2736 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2738 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2739 return (r);
2741 r = rc_node_parent(inst, &svc);
2742 if (r != REP_PROTOCOL_SUCCESS) {
2743 assert(r == REP_PROTOCOL_FAIL_DELETED);
2744 return (r);
2747 r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
2748 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2750 return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
2752 #endif /* NATIVE_BUILD */
2754 void
2755 rc_node_ptr_init(rc_node_ptr_t *out)
2757 out->rnp_node = NULL;
2758 out->rnp_auth_string = NULL;
2759 out->rnp_authorized = RC_AUTH_UNKNOWN;
2760 out->rnp_deleted = 0;
2763 void
2764 rc_node_ptr_free_mem(rc_node_ptr_t *npp)
2766 if (npp->rnp_auth_string != NULL) {
2767 free((void *)npp->rnp_auth_string);
2768 npp->rnp_auth_string = NULL;
2772 static void
2773 rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
2775 rc_node_t *cur = out->rnp_node;
2776 if (val != NULL)
2777 rc_node_hold(val);
2778 out->rnp_node = val;
2779 if (cur != NULL) {
2780 NODE_LOCK(cur);
2783 * Register the ephemeral reference created by reading
2784 * out->rnp_node into cur. Note that the persistent
2785 * reference we're destroying is locked by the client
2786 * layer.
2788 rc_node_hold_ephemeral_locked(cur);
2790 rc_node_rele_locked(cur);
2792 out->rnp_authorized = RC_AUTH_UNKNOWN;
2793 rc_node_ptr_free_mem(out);
2794 out->rnp_deleted = 0;
2797 void
2798 rc_node_clear(rc_node_ptr_t *out, int deleted)
2800 rc_node_assign(out, NULL);
2801 out->rnp_deleted = deleted;
2804 void
2805 rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
2807 rc_node_assign(out, val->rnp_node);
2811 * rc_node_check()/RC_NODE_CHECK()
2812 * generic "entry" checks, run before the use of an rc_node pointer.
2814 * Fails with
2815 * _NOT_SET
2816 * _DELETED
2818 static int
2819 rc_node_check_and_lock(rc_node_t *np)
2821 int result = REP_PROTOCOL_SUCCESS;
2822 if (np == NULL)
2823 return (REP_PROTOCOL_FAIL_NOT_SET);
2825 (void) pthread_mutex_lock(&np->rn_lock);
2826 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2827 result = REP_PROTOCOL_FAIL_DELETED;
2828 (void) pthread_mutex_unlock(&np->rn_lock);
2831 return (result);
2835 * Fails with
2836 * _NOT_SET - ptr is reset
2837 * _DELETED - node has been deleted
2839 static rc_node_t *
2840 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
2842 rc_node_t *np = npp->rnp_node;
2843 if (np == NULL) {
2844 if (npp->rnp_deleted)
2845 *res = REP_PROTOCOL_FAIL_DELETED;
2846 else
2847 *res = REP_PROTOCOL_FAIL_NOT_SET;
2848 return (NULL);
2851 (void) pthread_mutex_lock(&np->rn_lock);
2852 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2853 (void) pthread_mutex_unlock(&np->rn_lock);
2854 rc_node_clear(npp, 1);
2855 *res = REP_PROTOCOL_FAIL_DELETED;
2856 return (NULL);
2858 return (np);
2861 #define RC_NODE_CHECK_AND_LOCK(n) { \
2862 int rc__res; \
2863 if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
2864 return (rc__res); \
2867 #define RC_NODE_CHECK(n) { \
2868 RC_NODE_CHECK_AND_LOCK(n); \
2869 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2872 #define RC_NODE_CHECK_AND_HOLD(n) { \
2873 RC_NODE_CHECK_AND_LOCK(n); \
2874 rc_node_hold_locked(n); \
2875 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2878 #define RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) { \
2879 int rc__res; \
2880 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL) \
2881 return (rc__res); \
2884 #define RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, mem) { \
2885 int rc__res; \
2886 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == \
2887 NULL) { \
2888 if ((mem) != NULL) \
2889 free((mem)); \
2890 return (rc__res); \
2894 #define RC_NODE_PTR_GET_CHECK(np, npp) { \
2895 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2896 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2899 #define RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) { \
2900 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2901 rc_node_hold_locked(np); \
2902 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2905 #define HOLD_FLAG_OR_RETURN(np, flag) { \
2906 assert(MUTEX_HELD(&(np)->rn_lock)); \
2907 assert(!((np)->rn_flags & RC_NODE_DEAD)); \
2908 if (!rc_node_hold_flag((np), flag)) { \
2909 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2910 return (REP_PROTOCOL_FAIL_DELETED); \
2914 #define HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) { \
2915 assert(MUTEX_HELD(&(np)->rn_lock)); \
2916 if (!rc_node_hold_flag((np), flag)) { \
2917 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2918 assert((np) == (npp)->rnp_node); \
2919 rc_node_clear(npp, 1); \
2920 if ((mem) != NULL) \
2921 free((mem)); \
2922 return (REP_PROTOCOL_FAIL_DELETED); \
2927 rc_local_scope(uint32_t type, rc_node_ptr_t *out)
2929 if (type != REP_PROTOCOL_ENTITY_SCOPE) {
2930 rc_node_clear(out, 0);
2931 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2935 * the main scope never gets destroyed
2937 rc_node_assign(out, rc_scope);
2939 return (REP_PROTOCOL_SUCCESS);
2943 * Fails with
2944 * _NOT_SET - npp is not set
2945 * _DELETED - the node npp pointed at has been deleted
2946 * _TYPE_MISMATCH - type is not _SCOPE
2947 * _NOT_FOUND - scope has no parent
2949 static int
2950 rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
2952 rc_node_t *np;
2954 rc_node_clear(out, 0);
2956 RC_NODE_PTR_GET_CHECK(np, npp);
2958 if (type != REP_PROTOCOL_ENTITY_SCOPE)
2959 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2961 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2964 static int rc_node_pg_check_read_protect(rc_node_t *);
2967 * Fails with
2968 * _NOT_SET
2969 * _DELETED
2970 * _NOT_APPLICABLE
2971 * _NOT_FOUND
2972 * _BAD_REQUEST
2973 * _TRUNCATED
2974 * _NO_RESOURCES
2977 rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
2978 size_t *sz_out)
2980 size_t actual;
2981 rc_node_t *np;
2983 assert(sz == *sz_out);
2985 RC_NODE_PTR_GET_CHECK(np, npp);
2987 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
2988 np = np->rn_cchain[0];
2989 RC_NODE_CHECK(np);
2992 switch (answertype) {
2993 case RP_ENTITY_NAME_NAME:
2994 if (np->rn_name == NULL)
2995 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2996 actual = strlcpy(buf, np->rn_name, sz);
2997 break;
2998 case RP_ENTITY_NAME_PGTYPE:
2999 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3000 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3001 actual = strlcpy(buf, np->rn_type, sz);
3002 break;
3003 case RP_ENTITY_NAME_PGFLAGS:
3004 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3005 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3006 actual = snprintf(buf, sz, "%d", np->rn_pgflags);
3007 break;
3008 case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
3009 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3010 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3011 actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
3012 break;
3013 case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
3014 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3015 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3016 actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
3017 break;
3018 case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
3019 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3020 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3021 if (np->rn_snaplevel->rsl_instance == NULL)
3022 return (REP_PROTOCOL_FAIL_NOT_FOUND);
3023 actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
3024 break;
3025 case RP_ENTITY_NAME_PGREADPROT:
3027 int ret;
3029 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3030 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3031 ret = rc_node_pg_check_read_protect(np);
3032 assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3033 switch (ret) {
3034 case REP_PROTOCOL_FAIL_PERMISSION_DENIED:
3035 actual = snprintf(buf, sz, "1");
3036 break;
3037 case REP_PROTOCOL_SUCCESS:
3038 actual = snprintf(buf, sz, "0");
3039 break;
3040 default:
3041 return (ret);
3043 break;
3045 default:
3046 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3048 if (actual >= sz)
3049 return (REP_PROTOCOL_FAIL_TRUNCATED);
3051 *sz_out = actual;
3052 return (REP_PROTOCOL_SUCCESS);
3056 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
3058 rc_node_t *np;
3060 RC_NODE_PTR_GET_CHECK(np, npp);
3062 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
3063 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3065 *out = np->rn_valtype;
3067 return (REP_PROTOCOL_SUCCESS);
3071 * Get np's parent. If np is deleted, returns _DELETED. Otherwise puts a hold
3072 * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
3074 static int
3075 rc_node_parent(rc_node_t *np, rc_node_t **out)
3077 rc_node_t *pnp;
3078 rc_node_t *np_orig;
3080 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3081 RC_NODE_CHECK_AND_LOCK(np);
3082 } else {
3083 np = np->rn_cchain[0];
3084 RC_NODE_CHECK_AND_LOCK(np);
3087 np_orig = np;
3088 rc_node_hold_locked(np); /* simplifies the remainder */
3090 for (;;) {
3091 if (!rc_node_wait_flag(np,
3092 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
3093 rc_node_rele_locked(np);
3094 return (REP_PROTOCOL_FAIL_DELETED);
3097 if (!(np->rn_flags & RC_NODE_OLD))
3098 break;
3100 rc_node_rele_locked(np);
3101 np = cache_lookup(&np_orig->rn_id);
3102 assert(np != np_orig);
3104 if (np == NULL)
3105 goto deleted;
3106 (void) pthread_mutex_lock(&np->rn_lock);
3109 /* guaranteed to succeed without dropping the lock */
3110 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
3111 (void) pthread_mutex_unlock(&np->rn_lock);
3112 *out = NULL;
3113 rc_node_rele(np);
3114 return (REP_PROTOCOL_FAIL_DELETED);
3117 assert(np->rn_parent != NULL);
3118 pnp = np->rn_parent;
3119 (void) pthread_mutex_unlock(&np->rn_lock);
3121 (void) pthread_mutex_lock(&pnp->rn_lock);
3122 (void) pthread_mutex_lock(&np->rn_lock);
3123 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
3124 (void) pthread_mutex_unlock(&np->rn_lock);
3126 rc_node_hold_locked(pnp);
3128 (void) pthread_mutex_unlock(&pnp->rn_lock);
3130 rc_node_rele(np);
3131 *out = pnp;
3132 return (REP_PROTOCOL_SUCCESS);
3134 deleted:
3135 rc_node_rele(np);
3136 return (REP_PROTOCOL_FAIL_DELETED);
3140 * Fails with
3141 * _NOT_SET
3142 * _DELETED
3144 static int
3145 rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
3147 rc_node_t *np;
3149 RC_NODE_PTR_GET_CHECK(np, npp);
3151 return (rc_node_parent(np, out));
3155 * Fails with
3156 * _NOT_SET - npp is not set
3157 * _DELETED - the node npp pointed at has been deleted
3158 * _TYPE_MISMATCH - npp's node's parent is not of type type
3160 * If npp points to a scope, can also fail with
3161 * _NOT_FOUND - scope has no parent
3164 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
3166 rc_node_t *pnp;
3167 int rc;
3169 if (npp->rnp_node != NULL &&
3170 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
3171 return (rc_scope_parent_scope(npp, type, out));
3173 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
3174 rc_node_clear(out, 0);
3175 return (rc);
3178 if (type != pnp->rn_id.rl_type) {
3179 rc_node_rele(pnp);
3180 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3183 rc_node_assign(out, pnp);
3184 rc_node_rele(pnp);
3186 return (REP_PROTOCOL_SUCCESS);
3190 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
3192 rc_node_t *pnp;
3193 int rc;
3195 if (npp->rnp_node != NULL &&
3196 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
3197 *type_out = REP_PROTOCOL_ENTITY_SCOPE;
3198 return (REP_PROTOCOL_SUCCESS);
3201 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
3202 return (rc);
3204 *type_out = pnp->rn_id.rl_type;
3206 rc_node_rele(pnp);
3208 return (REP_PROTOCOL_SUCCESS);
3212 * Fails with
3213 * _INVALID_TYPE - type is invalid
3214 * _TYPE_MISMATCH - np doesn't carry children of type type
3215 * _DELETED - np has been deleted
3216 * _NOT_FOUND - no child with that name/type combo found
3217 * _NO_RESOURCES
3218 * _BACKEND_ACCESS
3221 rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
3222 rc_node_ptr_t *outp)
3224 rc_node_t *np, *cp;
3225 rc_node_t *child = NULL;
3226 int ret, idx;
3228 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
3229 if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
3230 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3231 ret = rc_node_find_named_child(np, name, type, &child);
3232 } else {
3233 (void) pthread_mutex_unlock(&np->rn_lock);
3234 ret = REP_PROTOCOL_SUCCESS;
3235 for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
3236 cp = np->rn_cchain[idx];
3237 if (cp == NULL)
3238 break;
3239 RC_NODE_CHECK_AND_LOCK(cp);
3240 ret = rc_node_find_named_child(cp, name, type,
3241 &child);
3242 (void) pthread_mutex_unlock(&cp->rn_lock);
3244 * loop only if we succeeded, but no child of
3245 * the correct name was found.
3247 if (ret != REP_PROTOCOL_SUCCESS ||
3248 child != NULL)
3249 break;
3251 (void) pthread_mutex_lock(&np->rn_lock);
3254 (void) pthread_mutex_unlock(&np->rn_lock);
3256 if (ret == REP_PROTOCOL_SUCCESS) {
3257 rc_node_assign(outp, child);
3258 if (child != NULL)
3259 rc_node_rele(child);
3260 else
3261 ret = REP_PROTOCOL_FAIL_NOT_FOUND;
3262 } else {
3263 rc_node_assign(outp, NULL);
3265 return (ret);
3269 rc_node_update(rc_node_ptr_t *npp)
3271 cache_bucket_t *bp;
3272 rc_node_t *np = npp->rnp_node;
3273 rc_node_t *nnp;
3274 rc_node_t *cpg = NULL;
3276 if (np != NULL &&
3277 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3279 * If we're updating a composed property group, actually
3280 * update the top-level property group & return the
3281 * appropriate value. But leave *nnp pointing at us.
3283 cpg = np;
3284 np = np->rn_cchain[0];
3287 RC_NODE_CHECK(np);
3289 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
3290 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
3291 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3293 for (;;) {
3294 bp = cache_hold(np->rn_hash);
3295 nnp = cache_lookup_unlocked(bp, &np->rn_id);
3296 if (nnp == NULL) {
3297 cache_release(bp);
3298 rc_node_clear(npp, 1);
3299 return (REP_PROTOCOL_FAIL_DELETED);
3302 * grab the lock before dropping the cache bucket, so
3303 * that no one else can sneak in
3305 (void) pthread_mutex_lock(&nnp->rn_lock);
3306 cache_release(bp);
3308 if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
3309 !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
3310 break;
3312 rc_node_rele_locked(nnp);
3316 * If it is dead, we want to update it so that it will continue to
3317 * report being dead.
3319 if (nnp->rn_flags & RC_NODE_DEAD) {
3320 (void) pthread_mutex_unlock(&nnp->rn_lock);
3321 if (nnp != np && cpg == NULL)
3322 rc_node_assign(npp, nnp); /* updated */
3323 rc_node_rele(nnp);
3324 return (REP_PROTOCOL_FAIL_DELETED);
3327 assert(!(nnp->rn_flags & RC_NODE_OLD));
3328 (void) pthread_mutex_unlock(&nnp->rn_lock);
3330 if (nnp != np && cpg == NULL)
3331 rc_node_assign(npp, nnp); /* updated */
3333 rc_node_rele(nnp);
3335 return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
3339 * does a generic modification check, for creation, deletion, and snapshot
3340 * management only. Property group transactions have different checks.
3342 * The string returned to *match_auth must be freed.
3344 static perm_status_t
3345 rc_node_modify_permission_check(char **match_auth)
3347 permcheck_t *pcp;
3348 perm_status_t granted = PERM_GRANTED;
3349 int rc;
3351 *match_auth = NULL;
3352 #ifdef NATIVE_BUILD
3353 if (!client_is_privileged()) {
3354 granted = PERM_DENIED;
3356 return (granted);
3357 #else
3358 if (is_main_repository == 0)
3359 return (PERM_GRANTED);
3360 pcp = pc_create();
3361 if (pcp != NULL) {
3362 rc = perm_add_enabling(pcp, AUTH_MODIFY);
3364 if (rc == REP_PROTOCOL_SUCCESS) {
3365 granted = perm_granted(pcp);
3367 if ((granted == PERM_GRANTED) ||
3368 (granted == PERM_DENIED)) {
3370 * Copy off the authorization
3371 * string before freeing pcp.
3373 *match_auth =
3374 strdup(pcp->pc_auth_string);
3375 if (*match_auth == NULL)
3376 granted = PERM_FAIL;
3378 } else {
3379 granted = PERM_FAIL;
3382 pc_free(pcp);
3383 } else {
3384 granted = PERM_FAIL;
3387 return (granted);
3388 #endif /* NATIVE_BUILD */
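/*
 * Illustrative sketch (added; not part of the original source): the
 * typical calling convention for rc_node_modify_permission_check().  The
 * caller keeps the returned authorization string for its audit record and
 * must free it.  example_modify_check() is a hypothetical name; see
 * rc_node_create_child() below for the real pattern.
 */
#if 0
static int
example_modify_check(void)
{
	char *auth = NULL;
	perm_status_t st;

	st = rc_node_modify_permission_check(&auth);
	if (st == PERM_FAIL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	if (st == PERM_GONE)
		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);

	/*
	 * PERM_GRANTED or PERM_DENIED: auth (when non-NULL) names the
	 * authorization that was checked; emit the audit event, then
	 * perform or refuse the modification.
	 */
	free(auth);
	return (st == PERM_GRANTED ? REP_PROTOCOL_SUCCESS :
	    REP_PROTOCOL_FAIL_PERMISSION_DENIED);
}
#endif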
3392 * Native builds are done to create svc.configd-native. This program runs
3393 * only on the Solaris build machines to create the seed repository, and it
3394 * is compiled against the build machine's header files. The ADT_smf_*
3395 * symbols may not be defined in these header files. For this reason
3396 * smf_annotation_event(), _smf_audit_event() and special_property_event()
3397 * are not compiled for native builds.
3399 #ifndef NATIVE_BUILD
3402 * This function generates an annotation audit event if one has been setup.
3403 * Annotation events should only be generated immediately before the audit
3404 * record from the first attempt to modify the repository from a client
3405 * which has requested an annotation.
3407 static void
3408 smf_annotation_event(int status, int return_val)
3410 adt_session_data_t *session;
3411 adt_event_data_t *event = NULL;
3412 char file[MAXPATHLEN];
3413 char operation[REP_PROTOCOL_NAME_LEN];
3415 /* Don't audit if we're using an alternate repository. */
3416 if (is_main_repository == 0)
3417 return;
3419 if (client_annotation_needed(operation, sizeof (operation), file,
3420 sizeof (file)) == 0) {
3421 return;
3423 if (file[0] == 0) {
3424 (void) strlcpy(file, "NO FILE", sizeof (file));
3426 if (operation[0] == 0) {
3427 (void) strlcpy(operation, "NO OPERATION",
3428 sizeof (operation));
3430 if ((session = get_audit_session()) == NULL)
3431 return;
3432 if ((event = adt_alloc_event(session, ADT_smf_annotation)) == NULL) {
3433 uu_warn("smf_annotation_event cannot allocate event "
3434 "data. %s\n", strerror(errno));
3435 return;
3437 event->adt_smf_annotation.operation = operation;
3438 event->adt_smf_annotation.file = file;
3439 if (adt_put_event(event, status, return_val) == 0) {
3440 client_annotation_finished();
3441 } else {
3442 uu_warn("smf_annotation_event failed to put event. "
3443 "%s\n", strerror(errno));
3445 adt_free_event(event);
3449 * _smf_audit_event interacts with the security auditing system to generate
3450 * an audit event structure. It establishes an audit session and allocates
3451 * an audit event. The event is filled in from the audit data, and
3452 * adt_put_event is called to generate the event.
3454 static void
3455 _smf_audit_event(au_event_t event_id, int status, int return_val,
3456 audit_event_data_t *data)
3458 char *auth_used;
3459 char *fmri;
3460 char *prop_value;
3461 adt_session_data_t *session;
3462 adt_event_data_t *event = NULL;
3464 /* Don't audit if we're using an alternate repository */
3465 if (is_main_repository == 0)
3466 return;
3468 smf_annotation_event(status, return_val);
3469 if ((session = get_audit_session()) == NULL)
3470 return;
3471 if ((event = adt_alloc_event(session, event_id)) == NULL) {
3472 uu_warn("_smf_audit_event cannot allocate event "
3473 "data. %s\n", strerror(errno));
3474 return;
3478 * Handle possibility of NULL authorization strings, FMRIs and
3479 * property values.
3481 if (data->ed_auth == NULL) {
3482 auth_used = "PRIVILEGED";
3483 } else {
3484 auth_used = data->ed_auth;
3486 if (data->ed_fmri == NULL) {
3487 syslog(LOG_WARNING, "_smf_audit_event called with "
3488 "empty FMRI string");
3489 fmri = "UNKNOWN FMRI";
3490 } else {
3491 fmri = data->ed_fmri;
3493 if (data->ed_prop_value == NULL) {
3494 prop_value = "";
3495 } else {
3496 prop_value = data->ed_prop_value;
3499 /* Fill in the event data. */
3500 switch (event_id) {
3501 case ADT_smf_attach_snap:
3502 event->adt_smf_attach_snap.auth_used = auth_used;
3503 event->adt_smf_attach_snap.old_fmri = data->ed_old_fmri;
3504 event->adt_smf_attach_snap.old_name = data->ed_old_name;
3505 event->adt_smf_attach_snap.new_fmri = fmri;
3506 event->adt_smf_attach_snap.new_name = data->ed_snapname;
3507 break;
3508 case ADT_smf_change_prop:
3509 event->adt_smf_change_prop.auth_used = auth_used;
3510 event->adt_smf_change_prop.fmri = fmri;
3511 event->adt_smf_change_prop.type = data->ed_type;
3512 event->adt_smf_change_prop.value = prop_value;
3513 break;
3514 case ADT_smf_clear:
3515 event->adt_smf_clear.auth_used = auth_used;
3516 event->adt_smf_clear.fmri = fmri;
3517 break;
3518 case ADT_smf_create:
3519 event->adt_smf_create.fmri = fmri;
3520 event->adt_smf_create.auth_used = auth_used;
3521 break;
3522 case ADT_smf_create_npg:
3523 event->adt_smf_create_npg.auth_used = auth_used;
3524 event->adt_smf_create_npg.fmri = fmri;
3525 event->adt_smf_create_npg.type = data->ed_type;
3526 break;
3527 case ADT_smf_create_pg:
3528 event->adt_smf_create_pg.auth_used = auth_used;
3529 event->adt_smf_create_pg.fmri = fmri;
3530 event->adt_smf_create_pg.type = data->ed_type;
3531 break;
3532 case ADT_smf_create_prop:
3533 event->adt_smf_create_prop.auth_used = auth_used;
3534 event->adt_smf_create_prop.fmri = fmri;
3535 event->adt_smf_create_prop.type = data->ed_type;
3536 event->adt_smf_create_prop.value = prop_value;
3537 break;
3538 case ADT_smf_create_snap:
3539 event->adt_smf_create_snap.auth_used = auth_used;
3540 event->adt_smf_create_snap.fmri = fmri;
3541 event->adt_smf_create_snap.name = data->ed_snapname;
3542 break;
3543 case ADT_smf_degrade:
3544 event->adt_smf_degrade.auth_used = auth_used;
3545 event->adt_smf_degrade.fmri = fmri;
3546 break;
3547 case ADT_smf_delete:
3548 event->adt_smf_delete.fmri = fmri;
3549 event->adt_smf_delete.auth_used = auth_used;
3550 break;
3551 case ADT_smf_delete_npg:
3552 event->adt_smf_delete_npg.auth_used = auth_used;
3553 event->adt_smf_delete_npg.fmri = fmri;
3554 event->adt_smf_delete_npg.type = data->ed_type;
3555 break;
3556 case ADT_smf_delete_pg:
3557 event->adt_smf_delete_pg.auth_used = auth_used;
3558 event->adt_smf_delete_pg.fmri = fmri;
3559 event->adt_smf_delete_pg.type = data->ed_type;
3560 break;
3561 case ADT_smf_delete_prop:
3562 event->adt_smf_delete_prop.auth_used = auth_used;
3563 event->adt_smf_delete_prop.fmri = fmri;
3564 break;
3565 case ADT_smf_delete_snap:
3566 event->adt_smf_delete_snap.auth_used = auth_used;
3567 event->adt_smf_delete_snap.fmri = fmri;
3568 event->adt_smf_delete_snap.name = data->ed_snapname;
3569 break;
3570 case ADT_smf_disable:
3571 event->adt_smf_disable.auth_used = auth_used;
3572 event->adt_smf_disable.fmri = fmri;
3573 break;
3574 case ADT_smf_enable:
3575 event->adt_smf_enable.auth_used = auth_used;
3576 event->adt_smf_enable.fmri = fmri;
3577 break;
3578 case ADT_smf_immediate_degrade:
3579 event->adt_smf_immediate_degrade.auth_used = auth_used;
3580 event->adt_smf_immediate_degrade.fmri = fmri;
3581 break;
3582 case ADT_smf_immediate_maintenance:
3583 event->adt_smf_immediate_maintenance.auth_used = auth_used;
3584 event->adt_smf_immediate_maintenance.fmri = fmri;
3585 break;
3586 case ADT_smf_immtmp_maintenance:
3587 event->adt_smf_immtmp_maintenance.auth_used = auth_used;
3588 event->adt_smf_immtmp_maintenance.fmri = fmri;
3589 break;
3590 case ADT_smf_maintenance:
3591 event->adt_smf_maintenance.auth_used = auth_used;
3592 event->adt_smf_maintenance.fmri = fmri;
3593 break;
3594 case ADT_smf_milestone:
3595 event->adt_smf_milestone.auth_used = auth_used;
3596 event->adt_smf_milestone.fmri = fmri;
3597 break;
3598 case ADT_smf_read_prop:
3599 event->adt_smf_read_prop.auth_used = auth_used;
3600 event->adt_smf_read_prop.fmri = fmri;
3601 break;
3602 case ADT_smf_refresh:
3603 event->adt_smf_refresh.auth_used = auth_used;
3604 event->adt_smf_refresh.fmri = fmri;
3605 break;
3606 case ADT_smf_restart:
3607 event->adt_smf_restart.auth_used = auth_used;
3608 event->adt_smf_restart.fmri = fmri;
3609 break;
3610 case ADT_smf_tmp_disable:
3611 event->adt_smf_tmp_disable.auth_used = auth_used;
3612 event->adt_smf_tmp_disable.fmri = fmri;
3613 break;
3614 case ADT_smf_tmp_enable:
3615 event->adt_smf_tmp_enable.auth_used = auth_used;
3616 event->adt_smf_tmp_enable.fmri = fmri;
3617 break;
3618 case ADT_smf_tmp_maintenance:
3619 event->adt_smf_tmp_maintenance.auth_used = auth_used;
3620 event->adt_smf_tmp_maintenance.fmri = fmri;
3621 break;
3622 default:
3623 abort(); /* Need to cover all SMF event IDs */
3626 if (adt_put_event(event, status, return_val) != 0) {
3627 uu_warn("_smf_audit_event failed to put event. %s\n",
3628 strerror(errno));
3630 adt_free_event(event);
3634 * Determine if the combination of the property group at pg_name and the
3635 * property at prop_name are in the set of special startd properties. If
3636 * they are, a special audit event will be generated.
3638 static void
3639 special_property_event(audit_event_data_t *evdp, const char *prop_name,
3640 char *pg_name, int status, int return_val, tx_commit_data_t *tx_data,
3641 size_t cmd_no)
3643 au_event_t event_id;
3644 audit_special_prop_item_t search_key;
3645 audit_special_prop_item_t *found;
3647 /* Use bsearch to find the special property information. */
3648 search_key.api_prop_name = prop_name;
3649 search_key.api_pg_name = pg_name;
3650 found = (audit_special_prop_item_t *)bsearch(&search_key,
3651 special_props_list, SPECIAL_PROP_COUNT,
3652 sizeof (special_props_list[0]), special_prop_compare);
3653 if (found == NULL) {
3654 /* Not a special property. */
3655 return;
3658 /* Get the event id */
3659 if (found->api_event_func == NULL) {
3660 event_id = found->api_event_id;
3661 } else {
3662 if ((*found->api_event_func)(tx_data, cmd_no,
3663 found->api_pg_name, &event_id) < 0)
3664 return;
3667 /* Generate the event. */
3668 smf_audit_event(event_id, status, return_val, evdp);
3670 #endif /* NATIVE_BUILD */
3673 * Return a pointer to a string containing all the values of the command
3674 * specified by cmd_no with each value enclosed in quotes. It is up to the
3675 * caller to free the memory at the returned pointer.
3677 static char *
3678 generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no)
3680 const char *cp;
3681 const char *cur_value;
3682 size_t byte_count = 0;
3683 uint32_t i;
3684 uint32_t nvalues;
3685 size_t str_size = 0;
3686 char *values = NULL;
3687 char *vp;
3689 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
3690 return (NULL);
3692 * First determine the size of the buffer that we will need. We
3693 * will represent each property value surrounded by quotes with a
3694 * space separating the values. Thus, we need to find the total
3695 * size of all the value strings and add 3 for each value.
3697 * There is one catch, though. We need to escape any internal
3698 * quote marks in the values. So for each quote in the value we
3699 * need to add another byte to the buffer size.
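/*
 * Worked example (added; not in the original source): for the two values
 * ab"c and de, the sizing pass below counts 4 + 2 value bytes, 1 extra
 * byte for the embedded quote, 3 bytes per value for the quotes and the
 * separating space, and 1 for the terminating nul -- 4 + 2 + 1 + 6 + 1 =
 * 14 bytes, enough for the 12-character result "ab\"c" "de" and its
 * terminator.
 */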
3701 for (i = 0; i < nvalues; i++) {
3702 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3703 REP_PROTOCOL_SUCCESS)
3704 return (NULL);
3705 for (cp = cur_value; *cp != 0; cp++) {
3706 byte_count += (*cp == '"') ? 2 : 1;
3708 byte_count += 3; /* surrounding quotes & space */
3710 byte_count++; /* nul terminator */
3711 values = malloc(byte_count);
3712 if (values == NULL)
3713 return (NULL);
3714 *values = 0;
3716 /* Now build up the string of values. */
3717 for (i = 0; i < nvalues; i++) {
3718 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3719 REP_PROTOCOL_SUCCESS) {
3720 free(values);
3721 return (NULL);
3723 (void) strlcat(values, "\"", byte_count);
3724 for (cp = cur_value, vp = values + strlen(values);
3725 *cp != 0; cp++) {
3726 if (*cp == '"') {
3727 *vp++ = '\\';
3728 *vp++ = '"';
3729 } else {
3730 *vp++ = *cp;
3733 *vp = 0;
3734 str_size = strlcat(values, "\" ", byte_count);
3735 assert(str_size < byte_count);
3737 if (str_size > 0)
3738 values[str_size - 1] = 0; /* get rid of trailing space */
3739 return (values);
3743 * generate_property_events takes the transaction commit data at tx_data
3744 * and generates an audit event for each command.
3746 * Native builds are done to create svc.configd-native. This program runs
3747 * only on the Solaris build machines to create the seed repository. Thus,
3748 * no audit events should be generated when running svc.configd-native.
3750 static void
3751 generate_property_events(
3752 tx_commit_data_t *tx_data,
3753 char *pg_fmri, /* FMRI of property group */
3754 char *auth_string,
3755 int auth_status,
3756 int auth_ret_value)
3758 #ifndef NATIVE_BUILD
3759 enum rep_protocol_transaction_action action;
3760 audit_event_data_t audit_data;
3761 size_t count;
3762 size_t cmd_no;
3763 char *cp;
3764 au_event_t event_id;
3765 char fmri[REP_PROTOCOL_FMRI_LEN];
3766 char pg_name[REP_PROTOCOL_NAME_LEN];
3767 char *pg_end; /* End of prop. group fmri */
3768 const char *prop_name;
3769 uint32_t ptype;
3770 char prop_type[3];
3771 enum rep_protocol_responseid rc;
3772 size_t sz_out;
3774 /* Make sure we have something to do. */
3775 if (tx_data == NULL)
3776 return;
3777 if ((count = tx_cmd_count(tx_data)) == 0)
3778 return;
3780 /* Copy the property group fmri */
3781 pg_end = fmri;
3782 pg_end += strlcpy(fmri, pg_fmri, sizeof (fmri));
3785 * Get the property group name. It is the first component after
3786 * the last occurrence of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri.
3788 cp = strstr(pg_fmri, SCF_FMRI_PROPERTYGRP_PREFIX);
3789 if (cp == NULL) {
3790 pg_name[0] = 0;
3791 } else {
3792 cp += strlen(SCF_FMRI_PROPERTYGRP_PREFIX);
3793 (void) strlcpy(pg_name, cp, sizeof (pg_name));
3796 audit_data.ed_auth = auth_string;
3797 audit_data.ed_fmri = fmri;
3798 audit_data.ed_type = prop_type;
3801 * Property type is two characters (see
3802 * rep_protocol_value_type_t), so terminate the string.
3804 prop_type[2] = 0;
3806 for (cmd_no = 0; cmd_no < count; cmd_no++) {
3807 /* Construct FMRI of the property */
3808 *pg_end = 0;
3809 if (tx_cmd_prop(tx_data, cmd_no, &prop_name) !=
3810 REP_PROTOCOL_SUCCESS) {
3811 continue;
3813 rc = rc_concat_fmri_element(fmri, sizeof (fmri), &sz_out,
3814 prop_name, REP_PROTOCOL_ENTITY_PROPERTY);
3815 if (rc != REP_PROTOCOL_SUCCESS) {
3817 * If we can't get the FMRI, we'll abandon this
3818 * command
3820 continue;
3823 /* Generate special property event if necessary. */
3824 special_property_event(&audit_data, prop_name, pg_name,
3825 auth_status, auth_ret_value, tx_data, cmd_no);
3827 /* Capture rest of audit data. */
3828 if (tx_cmd_prop_type(tx_data, cmd_no, &ptype) !=
3829 REP_PROTOCOL_SUCCESS) {
3830 continue;
3832 prop_type[0] = REP_PROTOCOL_BASE_TYPE(ptype);
3833 prop_type[1] = REP_PROTOCOL_SUBTYPE(ptype);
3834 audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no);
3836 /* Determine the event type. */
3837 if (tx_cmd_action(tx_data, cmd_no, &action) !=
3838 REP_PROTOCOL_SUCCESS) {
3839 free(audit_data.ed_prop_value);
3840 continue;
3842 switch (action) {
3843 case REP_PROTOCOL_TX_ENTRY_NEW:
3844 event_id = ADT_smf_create_prop;
3845 break;
3846 case REP_PROTOCOL_TX_ENTRY_CLEAR:
3847 event_id = ADT_smf_change_prop;
3848 break;
3849 case REP_PROTOCOL_TX_ENTRY_REPLACE:
3850 event_id = ADT_smf_change_prop;
3851 break;
3852 case REP_PROTOCOL_TX_ENTRY_DELETE:
3853 event_id = ADT_smf_delete_prop;
3854 break;
3855 default:
3856 assert(0); /* Missing a case */
3857 free(audit_data.ed_prop_value);
3858 continue;
3861 /* Generate the event. */
3862 smf_audit_event(event_id, auth_status, auth_ret_value,
3863 &audit_data);
3864 free(audit_data.ed_prop_value);
3866 #endif /* NATIVE_BUILD */
3870 * Fails with
3871 * _DELETED - node has been deleted
3872 * _NOT_SET - npp is reset
3873 * _NOT_APPLICABLE - type is _PROPERTYGRP
3874 * _INVALID_TYPE - node is corrupt or type is invalid
3875 * _TYPE_MISMATCH - node cannot have children of type type
3876 * _BAD_REQUEST - name is invalid
3877 * cannot create children for this type of node
3878 * _NO_RESOURCES - out of memory, or could not allocate new id
3879 * _PERMISSION_DENIED
3880 * _BACKEND_ACCESS
3881 * _BACKEND_READONLY
3882 * _EXISTS - child already exists
3883 * _TRUNCATED - truncated FMRI for the audit record
3886 rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
3887 rc_node_ptr_t *cpp)
3889 rc_node_t *np;
3890 rc_node_t *cp = NULL;
3891 int rc;
3892 perm_status_t perm_rc;
3893 size_t sz_out;
3894 char fmri[REP_PROTOCOL_FMRI_LEN];
3895 audit_event_data_t audit_data;
3897 rc_node_clear(cpp, 0);
3900 * rc_node_modify_permission_check() must be called before the node
3901 * is locked. This is because the library functions that check
3902 * authorizations can trigger calls back into configd.
3904 perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth);
3905 switch (perm_rc) {
3906 case PERM_DENIED:
3908 * We continue in this case, so that an audit event can be
3909 * generated later in the function.
3911 break;
3912 case PERM_GRANTED:
3913 break;
3914 case PERM_GONE:
3915 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3916 case PERM_FAIL:
3917 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
3918 default:
3919 bad_error(rc_node_modify_permission_check, perm_rc);
3922 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
3924 audit_data.ed_fmri = fmri;
3927 * there is a separate interface for creating property groups
3929 if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
3930 (void) pthread_mutex_unlock(&np->rn_lock);
3931 free(audit_data.ed_auth);
3932 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3935 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3936 (void) pthread_mutex_unlock(&np->rn_lock);
3937 np = np->rn_cchain[0];
3938 if ((rc = rc_node_check_and_lock(np)) != REP_PROTOCOL_SUCCESS) {
3939 free(audit_data.ed_auth);
3940 return (rc);
3944 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
3945 REP_PROTOCOL_SUCCESS) {
3946 (void) pthread_mutex_unlock(&np->rn_lock);
3947 free(audit_data.ed_auth);
3948 return (rc);
3950 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
3951 (void) pthread_mutex_unlock(&np->rn_lock);
3952 free(audit_data.ed_auth);
3953 return (rc);
3956 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
3957 name, type)) != REP_PROTOCOL_SUCCESS) {
3958 (void) pthread_mutex_unlock(&np->rn_lock);
3959 free(audit_data.ed_auth);
3960 return (rc);
3962 if (perm_rc == PERM_DENIED) {
3963 (void) pthread_mutex_unlock(&np->rn_lock);
3964 smf_audit_event(ADT_smf_create, ADT_FAILURE,
3965 ADT_FAIL_VALUE_AUTH, &audit_data);
3966 free(audit_data.ed_auth);
3967 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3970 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
3971 audit_data.ed_auth);
3972 (void) pthread_mutex_unlock(&np->rn_lock);
3974 rc = object_create(np, type, name, &cp);
3975 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3977 if (rc == REP_PROTOCOL_SUCCESS) {
3978 rc_node_assign(cpp, cp);
3979 rc_node_rele(cp);
3982 (void) pthread_mutex_lock(&np->rn_lock);
3983 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
3984 (void) pthread_mutex_unlock(&np->rn_lock);
3986 if (rc == REP_PROTOCOL_SUCCESS) {
3987 smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
3988 &audit_data);
3991 free(audit_data.ed_auth);
3993 return (rc);
3997 rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
3998 const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
4000 rc_node_t *np;
4001 rc_node_t *cp;
4002 int rc;
4003 permcheck_t *pcp;
4004 perm_status_t granted;
4005 char fmri[REP_PROTOCOL_FMRI_LEN];
4006 audit_event_data_t audit_data;
4007 au_event_t event_id;
4008 size_t sz_out;
4010 audit_data.ed_auth = NULL;
4011 audit_data.ed_fmri = fmri;
4012 audit_data.ed_type = (char *)pgtype;
4014 rc_node_clear(cpp, 0);
4016 /* verify flags is valid */
4017 if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
4018 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4020 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
4022 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4023 rc_node_rele(np);
4024 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
4027 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
4028 REP_PROTOCOL_SUCCESS) {
4029 rc_node_rele(np);
4030 return (rc);
4032 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
4033 (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
4034 rc_node_rele(np);
4035 return (rc);
4038 #ifdef NATIVE_BUILD
4039 if (!client_is_privileged()) {
4040 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4042 #else
4043 if (flags & SCF_PG_FLAG_NONPERSISTENT) {
4044 event_id = ADT_smf_create_npg;
4045 } else {
4046 event_id = ADT_smf_create_pg;
4048 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
4049 name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) {
4050 rc_node_rele(np);
4051 return (rc);
4054 if (is_main_repository) {
4055 /* Must have .smf.modify or smf.modify.<type> authorization */
4056 pcp = pc_create();
4057 if (pcp != NULL) {
4058 rc = perm_add_enabling(pcp, AUTH_MODIFY);
4060 if (rc == REP_PROTOCOL_SUCCESS) {
4061 const char * const auth =
4062 perm_auth_for_pgtype(pgtype);
4064 if (auth != NULL)
4065 rc = perm_add_enabling(pcp, auth);
4069 * .manage or $action_authorization can be used to
4070 * create the actions pg and the general_ovr pg.
4072 if (rc == REP_PROTOCOL_SUCCESS &&
4073 (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
4074 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
4075 ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
4076 strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
4077 (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
4078 strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
4079 rc = perm_add_enabling(pcp, AUTH_MANAGE);
4081 if (rc == REP_PROTOCOL_SUCCESS)
4082 rc = perm_add_inst_action_auth(pcp, np);
4085 if (rc == REP_PROTOCOL_SUCCESS) {
4086 granted = perm_granted(pcp);
4088 rc = map_granted_status(granted, pcp,
4089 &audit_data.ed_auth);
4090 if (granted == PERM_GONE) {
4091 /* No auditing if client gone. */
4092 pc_free(pcp);
4093 rc_node_rele(np);
4094 return (rc);
4098 pc_free(pcp);
4099 } else {
4100 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4103 } else {
4104 rc = REP_PROTOCOL_SUCCESS;
4106 #endif /* NATIVE_BUILD */
4109 if (rc != REP_PROTOCOL_SUCCESS) {
4110 rc_node_rele(np);
4111 if (rc != REP_PROTOCOL_FAIL_NO_RESOURCES) {
4112 smf_audit_event(event_id, ADT_FAILURE,
4113 ADT_FAIL_VALUE_AUTH, &audit_data);
4115 if (audit_data.ed_auth != NULL)
4116 free(audit_data.ed_auth);
4117 return (rc);
4120 (void) pthread_mutex_lock(&np->rn_lock);
4121 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
4122 audit_data.ed_auth);
4123 (void) pthread_mutex_unlock(&np->rn_lock);
4125 rc = object_create_pg(np, type, name, pgtype, flags, &cp);
4127 if (rc == REP_PROTOCOL_SUCCESS) {
4128 rc_node_assign(cpp, cp);
4129 rc_node_rele(cp);
4132 (void) pthread_mutex_lock(&np->rn_lock);
4133 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
4134 (void) pthread_mutex_unlock(&np->rn_lock);
4136 if (rc == REP_PROTOCOL_SUCCESS) {
4137 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4138 &audit_data);
4140 if (audit_data.ed_auth != NULL)
4141 free(audit_data.ed_auth);
4143 return (rc);
4146 static void
4147 rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
4149 assert(MUTEX_HELD(&rc_pg_notify_lock));
4151 if (pnp->rnpn_pg != NULL) {
4152 uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
4153 (void) close(pnp->rnpn_fd);
4155 pnp->rnpn_pg = NULL;
4156 pnp->rnpn_fd = -1;
4157 } else {
4158 assert(pnp->rnpn_fd == -1);
4162 static void
4163 rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
4165 rc_node_t *svc = NULL;
4166 rc_node_t *inst = NULL;
4167 rc_node_t *pg = NULL;
4168 rc_node_t *np = np_arg;
4169 rc_node_t *nnp;
4171 while (svc == NULL) {
4172 (void) pthread_mutex_lock(&np->rn_lock);
4173 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4174 (void) pthread_mutex_unlock(&np->rn_lock);
4175 goto cleanup;
4177 nnp = np->rn_parent;
4178 rc_node_hold_locked(np); /* hold it in place */
4180 switch (np->rn_id.rl_type) {
4181 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4182 assert(pg == NULL);
4183 pg = np;
4184 break;
4185 case REP_PROTOCOL_ENTITY_INSTANCE:
4186 assert(inst == NULL);
4187 inst = np;
4188 break;
4189 case REP_PROTOCOL_ENTITY_SERVICE:
4190 assert(svc == NULL);
4191 svc = np;
4192 break;
4193 default:
4194 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4195 rc_node_rele_locked(np);
4196 goto cleanup;
4199 (void) pthread_mutex_unlock(&np->rn_lock);
4201 np = nnp;
4202 if (np == NULL)
4203 goto cleanup;
4206 rc_notify_deletion(ndp,
4207 svc->rn_name,
4208 inst != NULL ? inst->rn_name : NULL,
4209 pg != NULL ? pg->rn_name : NULL);
4211 ndp = NULL;
4213 cleanup:
4214 if (ndp != NULL)
4215 uu_free(ndp);
4217 for (;;) {
4218 if (svc != NULL) {
4219 np = svc;
4220 svc = NULL;
4221 } else if (inst != NULL) {
4222 np = inst;
4223 inst = NULL;
4224 } else if (pg != NULL) {
4225 np = pg;
4226 pg = NULL;
4227 } else
4228 break;
4230 (void) pthread_mutex_lock(&np->rn_lock);
4231 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4232 rc_node_rele_locked(np);
4237 * Hold RC_NODE_DYING_FLAGS on np's descendants. If andformer is true, do
4238 * the same down the rn_former chain.
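* Each child is locked before np's lock is dropped, so a child cannot
* disappear out from under us; np's lock is re-taken before moving on
* to the next child. The flags taken here are dropped again by
* rc_node_delete_rele() or rc_node_delete_children().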
4240 static void
4241 rc_node_delete_hold(rc_node_t *np, int andformer)
4243 rc_node_t *cp;
4245 again:
4246 assert(MUTEX_HELD(&np->rn_lock));
4247 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4249 for (cp = uu_list_first(np->rn_children); cp != NULL;
4250 cp = uu_list_next(np->rn_children, cp)) {
4251 (void) pthread_mutex_lock(&cp->rn_lock);
4252 (void) pthread_mutex_unlock(&np->rn_lock);
4253 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
4255 * already marked as dead -- can't happen, since that
4256 * would require setting RC_NODE_CHILDREN_CHANGING
4257 * in np, and we're holding that...
4259 abort();
4261 rc_node_delete_hold(cp, andformer); /* recurse, drop lock */
4263 (void) pthread_mutex_lock(&np->rn_lock);
4265 if (andformer && (cp = np->rn_former) != NULL) {
4266 (void) pthread_mutex_lock(&cp->rn_lock);
4267 (void) pthread_mutex_unlock(&np->rn_lock);
4268 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
4269 abort(); /* can't happen, see above */
4270 np = cp;
4271 goto again; /* tail-recurse down rn_former */
4273 (void) pthread_mutex_unlock(&np->rn_lock);
4277 * N.B.: this function drops np->rn_lock on the way out.
4279 static void
4280 rc_node_delete_rele(rc_node_t *np, int andformer)
4282 rc_node_t *cp;
4284 again:
4285 assert(MUTEX_HELD(&np->rn_lock));
4286 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4288 for (cp = uu_list_first(np->rn_children); cp != NULL;
4289 cp = uu_list_next(np->rn_children, cp)) {
4290 (void) pthread_mutex_lock(&cp->rn_lock);
4291 (void) pthread_mutex_unlock(&np->rn_lock);
4292 rc_node_delete_rele(cp, andformer); /* recurse, drop lock */
4293 (void) pthread_mutex_lock(&np->rn_lock);
4295 if (andformer && (cp = np->rn_former) != NULL) {
4296 (void) pthread_mutex_lock(&cp->rn_lock);
4297 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4298 (void) pthread_mutex_unlock(&np->rn_lock);
4300 np = cp;
4301 goto again; /* tail-recurse down rn_former */
4303 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4304 (void) pthread_mutex_unlock(&np->rn_lock);
4307 static void
4308 rc_node_finish_delete(rc_node_t *cp)
4310 cache_bucket_t *bp;
4311 rc_node_pg_notify_t *pnp;
4313 assert(MUTEX_HELD(&cp->rn_lock));
4315 if (!(cp->rn_flags & RC_NODE_OLD)) {
4316 assert(cp->rn_flags & RC_NODE_IN_PARENT);
4317 if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
4318 abort(); /* can't happen, see above */
4320 cp->rn_flags &= ~RC_NODE_IN_PARENT;
4321 cp->rn_parent = NULL;
4322 rc_node_free_fmri(cp);
4325 cp->rn_flags |= RC_NODE_DEAD;
4328 * If this node is not outdated, we need to remove it from
4329 * the notify list and cache hash table.
4331 if (!(cp->rn_flags & RC_NODE_OLD)) {
4332 assert(cp->rn_refs > 0); /* can't go away yet */
4333 (void) pthread_mutex_unlock(&cp->rn_lock);
4335 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4336 while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
4337 rc_pg_notify_fire(pnp);
4338 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4339 rc_notify_remove_node(cp);
4341 bp = cache_hold(cp->rn_hash);
4342 (void) pthread_mutex_lock(&cp->rn_lock);
4343 cache_remove_unlocked(bp, cp);
4344 cache_release(bp);
4349 * For each child, call rc_node_finish_delete() and recurse. If andformer
4350 * is set, also recurse down rn_former. Finally release np, which might
4351 * free it.
4353 static void
4354 rc_node_delete_children(rc_node_t *np, int andformer)
4356 rc_node_t *cp;
4358 again:
4359 assert(np->rn_refs > 0);
4360 assert(MUTEX_HELD(&np->rn_lock));
4361 assert(np->rn_flags & RC_NODE_DEAD);
4363 while ((cp = uu_list_first(np->rn_children)) != NULL) {
4364 uu_list_remove(np->rn_children, cp);
4365 (void) pthread_mutex_lock(&cp->rn_lock);
4366 (void) pthread_mutex_unlock(&np->rn_lock);
4367 rc_node_hold_locked(cp); /* hold while we recurse */
4368 rc_node_finish_delete(cp);
4369 rc_node_delete_children(cp, andformer); /* drops lock + ref */
4370 (void) pthread_mutex_lock(&np->rn_lock);
4374 * When we drop cp's lock, all the children will be gone, so we
4375 * can release DYING_FLAGS.
4377 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4378 if (andformer && (cp = np->rn_former) != NULL) {
4379 np->rn_former = NULL; /* unlink */
4380 (void) pthread_mutex_lock(&cp->rn_lock);
4383 * Register the ephemeral reference created by reading
4384 * np->rn_former into cp. Note that the persistent
4385 * reference (np->rn_former) is locked because we haven't
4386 * dropped np's lock since we dropped its RC_NODE_IN_TX
4387 * (via RC_NODE_DYING_FLAGS).
4389 rc_node_hold_ephemeral_locked(cp);
4391 (void) pthread_mutex_unlock(&np->rn_lock);
4392 cp->rn_flags &= ~RC_NODE_ON_FORMER;
4394 rc_node_hold_locked(cp); /* hold while we loop */
4396 rc_node_finish_delete(cp);
4398 rc_node_rele(np); /* drop the old reference */
4400 np = cp;
4401 goto again; /* tail-recurse down rn_former */
4403 rc_node_rele_locked(np);
4407 * The last client or child reference to np, which must be either
4408 * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed. We'll destroy any
4409 * remaining references (e.g., rn_former) and call rc_node_destroy() to
4410 * free np.
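* For an OLD node this means: look up the current in-hash version of
* the node, take its RC_NODE_IN_TX flag to freeze the rn_former chain,
* take RC_NODE_DYING_FLAGS on np and its descendants, mark np DEAD,
* delete its children, splice np out of the rn_former chain, and
* finally destroy it (unless new references appeared along the way).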
4412 static void
4413 rc_node_no_client_refs(rc_node_t *np)
4415 int unrefed;
4416 rc_node_t *current, *cur;
4418 assert(MUTEX_HELD(&np->rn_lock));
4419 assert(np->rn_refs == 0);
4420 assert(np->rn_other_refs == 0);
4421 assert(np->rn_other_refs_held == 0);
4423 if (np->rn_flags & RC_NODE_DEAD) {
4425 * The node is DEAD, so the deletion code should have
4426 * destroyed all rn_children or rn_former references.
4427 * Since the last client or child reference has been
4428 * destroyed, we're free to destroy np. Unless another
4429 * thread has an ephemeral reference, in which case we'll
4430 * pass the buck.
4432 if (np->rn_erefs > 1) {
4433 --np->rn_erefs;
4434 NODE_UNLOCK(np);
4435 return;
4438 (void) pthread_mutex_unlock(&np->rn_lock);
4439 rc_node_destroy(np);
4440 return;
4443 /* We only collect DEAD and OLD nodes, thank you. */
4444 assert(np->rn_flags & RC_NODE_OLD);
4447 * RC_NODE_UNREFED keeps multiple threads from processing OLD
4448 * nodes. But it's vulnerable to unfriendly scheduling, so full
4449 * use of rn_erefs should supersede it someday.
4451 if (np->rn_flags & RC_NODE_UNREFED) {
4452 (void) pthread_mutex_unlock(&np->rn_lock);
4453 return;
4455 np->rn_flags |= RC_NODE_UNREFED;
4458 * Now we'll remove the node from the rn_former chain and take its
4459 * DYING_FLAGS.
4463 * Since this node is OLD, it should be on an rn_former chain. To
4464 * remove it, we must find the current in-hash object and grab its
4465 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
4468 (void) pthread_mutex_unlock(&np->rn_lock);
4470 for (;;) {
4471 current = cache_lookup(&np->rn_id);
4473 if (current == NULL) {
4474 (void) pthread_mutex_lock(&np->rn_lock);
4476 if (np->rn_flags & RC_NODE_DEAD)
4477 goto died;
4480 * We are trying to unreference this node, but the
4481 * owner of the former list does not exist. It must
4482 * be the case that another thread is deleting this
4483 * entire sub-branch, but has not yet reached us.
4484 * We will in short order be deleted.
4486 np->rn_flags &= ~RC_NODE_UNREFED;
4487 (void) pthread_mutex_unlock(&np->rn_lock);
4488 return;
4491 if (current == np) {
4493 * no longer unreferenced
4495 (void) pthread_mutex_lock(&np->rn_lock);
4496 np->rn_flags &= ~RC_NODE_UNREFED;
4497 /* held in cache_lookup() */
4498 rc_node_rele_locked(np);
4499 return;
4502 (void) pthread_mutex_lock(&current->rn_lock);
4503 if (current->rn_flags & RC_NODE_OLD) {
4505 * current has been replaced since we looked it
4506 * up. Try again.
4508 /* held in cache_lookup() */
4509 rc_node_rele_locked(current);
4510 continue;
4513 if (!rc_node_hold_flag(current, RC_NODE_IN_TX)) {
4515 * current has been deleted since we looked it up. Try
4516 * again.
4518 /* held in cache_lookup() */
4519 rc_node_rele_locked(current);
4520 continue;
4524 * rc_node_hold_flag() might have dropped current's lock, so
4525 * check OLD again.
4527 if (!(current->rn_flags & RC_NODE_OLD)) {
4528 /* Not old. Stop looping. */
4529 (void) pthread_mutex_unlock(&current->rn_lock);
4530 break;
4533 rc_node_rele_flag(current, RC_NODE_IN_TX);
4534 rc_node_rele_locked(current);
4537 /* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
4538 (void) pthread_mutex_lock(&np->rn_lock);
4541 * While we didn't have the lock, a thread may have added
4542 * a reference or changed the flags.
4544 if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
4545 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4546 np->rn_other_refs_held != 0) {
4547 np->rn_flags &= ~RC_NODE_UNREFED;
4549 (void) pthread_mutex_lock(&current->rn_lock);
4550 rc_node_rele_flag(current, RC_NODE_IN_TX);
4551 /* held by cache_lookup() */
4552 rc_node_rele_locked(current);
4553 return;
4556 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4558 * Someone deleted the node while we were waiting for
4559 * DYING_FLAGS. Undo the modifications to current.
4561 (void) pthread_mutex_unlock(&np->rn_lock);
4563 rc_node_rele_flag(current, RC_NODE_IN_TX);
4564 /* held by cache_lookup() */
4565 rc_node_rele_locked(current);
4567 (void) pthread_mutex_lock(&np->rn_lock);
4568 goto died;
4571 /* Take RC_NODE_DYING_FLAGS on np's descendants. */
4572 rc_node_delete_hold(np, 0); /* drops np->rn_lock */
4574 /* Mark np DEAD. This requires the lock. */
4575 (void) pthread_mutex_lock(&np->rn_lock);
4577 /* Recheck for new references. */
4578 if (!(np->rn_flags & RC_NODE_OLD) ||
4579 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4580 np->rn_other_refs_held != 0) {
4581 np->rn_flags &= ~RC_NODE_UNREFED;
4582 rc_node_delete_rele(np, 0); /* drops np's lock */
4584 (void) pthread_mutex_lock(&current->rn_lock);
4585 rc_node_rele_flag(current, RC_NODE_IN_TX);
4586 /* held by cache_lookup() */
4587 rc_node_rele_locked(current);
4588 return;
4591 np->rn_flags |= RC_NODE_DEAD;
4594 * Delete the children. This calls rc_node_rele_locked() on np at
4595 * the end, so add a reference to keep the count from going
4596 * negative. It will recurse with RC_NODE_DEAD set, so we'll call
4597 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
4598 * shouldn't actually free() np.
4600 rc_node_hold_locked(np);
4601 rc_node_delete_children(np, 0); /* unlocks np */
4603 /* Remove np from current's rn_former chain. */
4604 (void) pthread_mutex_lock(&current->rn_lock);
4605 for (cur = current; cur != NULL && cur->rn_former != np;
4606 cur = cur->rn_former)
4608 assert(cur != NULL && cur != np);
4610 cur->rn_former = np->rn_former;
4611 np->rn_former = NULL;
4613 rc_node_rele_flag(current, RC_NODE_IN_TX);
4614 /* held by cache_lookup() */
4615 rc_node_rele_locked(current);
4617 /* Clear ON_FORMER and UNREFED, and destroy. */
4618 (void) pthread_mutex_lock(&np->rn_lock);
4619 assert(np->rn_flags & RC_NODE_ON_FORMER);
4620 np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);
4622 if (np->rn_erefs > 1) {
4623 /* Still referenced. Stay execution. */
4624 --np->rn_erefs;
4625 NODE_UNLOCK(np);
4626 return;
4629 (void) pthread_mutex_unlock(&np->rn_lock);
4630 rc_node_destroy(np);
4631 return;
4633 died:
4635 * Another thread marked np DEAD. If there still aren't any
4636 * persistent references, destroy the node.
4638 np->rn_flags &= ~RC_NODE_UNREFED;
4640 unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
4641 np->rn_other_refs_held == 0);
4643 if (np->rn_erefs > 0)
4644 --np->rn_erefs;
4646 if (unrefed && np->rn_erefs > 0) {
4647 NODE_UNLOCK(np);
4648 return;
4651 (void) pthread_mutex_unlock(&np->rn_lock);
4653 if (unrefed)
4654 rc_node_destroy(np);
4657 static au_event_t
4658 get_delete_event_id(rep_protocol_entity_t entity, uint32_t pgflags)
4660 au_event_t id = 0;
4662 #ifndef NATIVE_BUILD
4663 switch (entity) {
4664 case REP_PROTOCOL_ENTITY_SERVICE:
4665 case REP_PROTOCOL_ENTITY_INSTANCE:
4666 id = ADT_smf_delete;
4667 break;
4668 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4669 id = ADT_smf_delete_snap;
4670 break;
4671 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4672 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4673 if (pgflags & SCF_PG_FLAG_NONPERSISTENT) {
4674 id = ADT_smf_delete_npg;
4675 } else {
4676 id = ADT_smf_delete_pg;
4678 break;
4679 default:
4680 abort();
4682 #endif /* NATIVE_BUILD */
4683 return (id);
4687 * Fails with
4688 * _NOT_SET
4689 * _DELETED
4690 * _BAD_REQUEST
4691 * _PERMISSION_DENIED
4692 * _NO_RESOURCES
4693 * _TRUNCATED
4694 * and whatever object_delete() fails with.
4697 rc_node_delete(rc_node_ptr_t *npp)
4699 rc_node_t *np, *np_orig;
4700 rc_node_t *pp = NULL;
4701 int rc;
4702 rc_node_pg_notify_t *pnp;
4703 cache_bucket_t *bp;
4704 rc_notify_delete_t *ndp;
4705 permcheck_t *pcp;
4706 int granted;
4707 au_event_t event_id = 0;
4708 size_t sz_out;
4709 audit_event_data_t audit_data;
4710 int audit_failure = 0;
4712 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4714 audit_data.ed_fmri = NULL;
4715 audit_data.ed_auth = NULL;
4716 audit_data.ed_snapname = NULL;
4717 audit_data.ed_type = NULL;
4719 switch (np->rn_id.rl_type) {
4720 case REP_PROTOCOL_ENTITY_SERVICE:
4721 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE,
4722 np->rn_pgflags);
4723 break;
4724 case REP_PROTOCOL_ENTITY_INSTANCE:
4725 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE,
4726 np->rn_pgflags);
4727 break;
4728 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4729 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT,
4730 np->rn_pgflags);
4731 audit_data.ed_snapname = strdup(np->rn_name);
4732 if (audit_data.ed_snapname == NULL) {
4733 (void) pthread_mutex_unlock(&np->rn_lock);
4734 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4736 break; /* deletable */
4738 case REP_PROTOCOL_ENTITY_SCOPE:
4739 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
4740 /* Scopes and snaplevels are indelible. */
4741 (void) pthread_mutex_unlock(&np->rn_lock);
4742 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4744 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4745 (void) pthread_mutex_unlock(&np->rn_lock);
4746 np = np->rn_cchain[0];
4747 RC_NODE_CHECK_AND_LOCK(np);
4748 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP,
4749 np->rn_pgflags);
4750 break;
4752 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4753 if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) {
4754 event_id =
4755 get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP,
4756 np->rn_pgflags);
4757 audit_data.ed_type = strdup(np->rn_type);
4758 if (audit_data.ed_type == NULL) {
4759 (void) pthread_mutex_unlock(&np->rn_lock);
4760 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4762 break;
4765 /* Snapshot property groups are indelible. */
4766 (void) pthread_mutex_unlock(&np->rn_lock);
4767 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4769 case REP_PROTOCOL_ENTITY_PROPERTY:
4770 (void) pthread_mutex_unlock(&np->rn_lock);
4771 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4773 default:
4774 assert(0);
4775 abort();
4776 break;
4779 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
4780 if (audit_data.ed_fmri == NULL) {
4781 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4782 goto cleanout;
4784 np_orig = np;
4785 rc_node_hold_locked(np); /* simplifies rest of the code */
4787 again:
4789 * The following loop is to deal with the fact that snapshots and
4790 * property groups are moving targets -- changes to them result
4791 * in a new "child" node. Since we can only delete from the top node,
4792 * we have to loop until we have a non-RC_NODE_OLD version.
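* (The "top node" is the current, non-OLD version found in cache_hash;
* older versions hang off its rn_former chain and cannot be deleted
* directly.)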
4794 for (;;) {
4795 if (!rc_node_wait_flag(np,
4796 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
4797 rc_node_rele_locked(np);
4798 rc = REP_PROTOCOL_FAIL_DELETED;
4799 goto cleanout;
4802 if (np->rn_flags & RC_NODE_OLD) {
4803 rc_node_rele_locked(np);
4804 np = cache_lookup(&np_orig->rn_id);
4805 assert(np != np_orig);
4807 if (np == NULL) {
4808 rc = REP_PROTOCOL_FAIL_DELETED;
4809 goto fail;
4811 (void) pthread_mutex_lock(&np->rn_lock);
4812 continue;
4815 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4816 rc_node_rele_locked(np);
4817 rc_node_clear(npp, 1);
4818 rc = REP_PROTOCOL_FAIL_DELETED;
4822 * Mark our parent as children changing. This call drops our
4823 * lock and the RC_NODE_USING_PARENT flag, and returns with
4824 * pp's lock held.
4826 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
4827 if (pp == NULL) {
4828 /* our parent is gone, we're going next... */
4829 rc_node_rele(np);
4831 rc_node_clear(npp, 1);
4832 rc = REP_PROTOCOL_FAIL_DELETED;
4833 goto cleanout;
4836 rc_node_hold_locked(pp); /* hold for later */
4837 (void) pthread_mutex_unlock(&pp->rn_lock);
4839 (void) pthread_mutex_lock(&np->rn_lock);
4840 if (!(np->rn_flags & RC_NODE_OLD))
4841 break; /* not old -- we're done */
4843 (void) pthread_mutex_unlock(&np->rn_lock);
4844 (void) pthread_mutex_lock(&pp->rn_lock);
4845 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4846 rc_node_rele_locked(pp);
4847 (void) pthread_mutex_lock(&np->rn_lock);
4848 continue; /* loop around and try again */
4851 * Everyone out of the pool -- we grab everything but
4852 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
4853 * any changes from occurring while we are attempting to
4854 * delete the node.
4856 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4857 (void) pthread_mutex_unlock(&np->rn_lock);
4858 rc = REP_PROTOCOL_FAIL_DELETED;
4859 goto fail;
4862 assert(!(np->rn_flags & RC_NODE_OLD));
4864 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
4865 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
4866 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4867 (void) pthread_mutex_unlock(&np->rn_lock);
4868 goto fail;
4871 #ifdef NATIVE_BUILD
4872 if (!client_is_privileged()) {
4873 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4875 #else
4876 if (is_main_repository) {
4877 /* permission check */
4878 (void) pthread_mutex_unlock(&np->rn_lock);
4879 pcp = pc_create();
4880 if (pcp != NULL) {
4881 rc = perm_add_enabling(pcp, AUTH_MODIFY);
4883 /* add .smf.modify.<type> for pgs. */
4884 if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
4885 REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4886 const char * const auth =
4887 perm_auth_for_pgtype(np->rn_type);
4889 if (auth != NULL)
4890 rc = perm_add_enabling(pcp, auth);
4893 if (rc == REP_PROTOCOL_SUCCESS) {
4894 granted = perm_granted(pcp);
4896 rc = map_granted_status(granted, pcp,
4897 &audit_data.ed_auth);
4898 if (granted == PERM_GONE) {
4899 /* No need to audit if client gone. */
4900 pc_free(pcp);
4901 rc_node_rele_flag(np,
4902 RC_NODE_DYING_FLAGS);
4903 return (rc);
4905 if (granted == PERM_DENIED)
4906 audit_failure = 1;
4909 pc_free(pcp);
4910 } else {
4911 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4914 (void) pthread_mutex_lock(&np->rn_lock);
4915 } else {
4916 rc = REP_PROTOCOL_SUCCESS;
4918 #endif /* NATIVE_BUILD */
4920 if (rc != REP_PROTOCOL_SUCCESS) {
4921 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4922 (void) pthread_mutex_unlock(&np->rn_lock);
4923 goto fail;
4926 ndp = uu_zalloc(sizeof (*ndp));
4927 if (ndp == NULL) {
4928 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4929 (void) pthread_mutex_unlock(&np->rn_lock);
4930 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4931 goto fail;
4934 rc_node_delete_hold(np, 1); /* hold entire subgraph, drop lock */
4936 rc = object_delete(np);
4938 if (rc != REP_PROTOCOL_SUCCESS) {
4939 (void) pthread_mutex_lock(&np->rn_lock);
4940 rc_node_delete_rele(np, 1); /* drops lock */
4941 uu_free(ndp);
4942 goto fail;
4946 * Now, delicately unlink and delete the object.
4948 * Create the delete notification, atomically remove
4949 * from the hash table and set the NODE_DEAD flag, and
4950 * remove from the parent's children list.
4952 rc_notify_node_delete(ndp, np); /* frees or uses ndp */
4954 bp = cache_hold(np->rn_hash);
4956 (void) pthread_mutex_lock(&np->rn_lock);
4957 cache_remove_unlocked(bp, np);
4958 cache_release(bp);
4960 np->rn_flags |= RC_NODE_DEAD;
4962 if (pp != NULL) {
4964 * Remove from pp's rn_children. This requires pp's lock,
4965 * so we must drop np's lock to respect lock order.
4967 (void) pthread_mutex_unlock(&np->rn_lock);
4968 (void) pthread_mutex_lock(&pp->rn_lock);
4969 (void) pthread_mutex_lock(&np->rn_lock);
4971 uu_list_remove(pp->rn_children, np);
4973 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4975 (void) pthread_mutex_unlock(&pp->rn_lock);
4977 np->rn_flags &= ~RC_NODE_IN_PARENT;
4981 * finally, propagate death to our children (including marking
4982 * them DEAD), handle notifications, and release our hold.
4984 rc_node_hold_locked(np); /* hold for delete */
4985 rc_node_delete_children(np, 1); /* drops DYING_FLAGS, lock, ref */
4987 rc_node_clear(npp, 1);
4989 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4990 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
4991 rc_pg_notify_fire(pnp);
4992 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4993 rc_notify_remove_node(np);
4995 rc_node_rele(np);
4997 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4998 &audit_data);
4999 free(audit_data.ed_auth);
5000 free(audit_data.ed_snapname);
5001 free(audit_data.ed_type);
5002 free(audit_data.ed_fmri);
5003 return (rc);
5005 fail:
5006 rc_node_rele(np);
5007 if (rc == REP_PROTOCOL_FAIL_DELETED)
5008 rc_node_clear(npp, 1);
5009 if (pp != NULL) {
5010 (void) pthread_mutex_lock(&pp->rn_lock);
5011 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5012 rc_node_rele_locked(pp); /* drop ref and lock */
5014 if (audit_failure) {
5015 smf_audit_event(event_id, ADT_FAILURE,
5016 ADT_FAIL_VALUE_AUTH, &audit_data);
5018 cleanout:
5019 free(audit_data.ed_auth);
5020 free(audit_data.ed_snapname);
5021 free(audit_data.ed_type);
5022 free(audit_data.ed_fmri);
5023 return (rc);
5027 rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5029 rc_node_t *np;
5030 rc_node_t *cp, *pp;
5031 int res;
5033 rc_node_clear(cpp, 0);
5035 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5037 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
5038 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
5039 (void) pthread_mutex_unlock(&np->rn_lock);
5040 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
5043 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5044 if ((res = rc_node_fill_children(np,
5045 REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
5046 (void) pthread_mutex_unlock(&np->rn_lock);
5047 return (res);
5050 for (cp = uu_list_first(np->rn_children);
5051 cp != NULL;
5052 cp = uu_list_next(np->rn_children, cp)) {
5053 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5054 continue;
5055 rc_node_hold(cp);
5056 break;
5059 (void) pthread_mutex_unlock(&np->rn_lock);
5060 } else {
5061 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5062 (void) pthread_mutex_unlock(&np->rn_lock);
5063 rc_node_clear(npp, 1);
5064 return (REP_PROTOCOL_FAIL_DELETED);
5068 * Mark our parent as children changing. This call drops our
5069 * lock and the RC_NODE_USING_PARENT flag, and returns with
5070 * pp's lock held.
5072 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
5073 if (pp == NULL) {
5074 /* our parent is gone, we're going next... */
5076 rc_node_clear(npp, 1);
5077 return (REP_PROTOCOL_FAIL_DELETED);
5081 * find the next snaplevel
5083 cp = np;
5084 while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
5085 cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5088 /* it must match the snaplevel list */
5089 assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
5090 (cp != NULL && np->rn_snaplevel->rsl_next ==
5091 cp->rn_snaplevel));
5093 if (cp != NULL)
5094 rc_node_hold(cp);
5096 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5098 (void) pthread_mutex_unlock(&pp->rn_lock);
5101 rc_node_assign(cpp, cp);
5102 if (cp != NULL) {
5103 rc_node_rele(cp);
5105 return (REP_PROTOCOL_SUCCESS);
5107 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5111 * This call takes a snapshot (np) and either:
5112 * an existing snapid (to be associated with np), or
5113 * a non-NULL parentp (from which a new snapshot is taken, and associated
5114 * with np)
5116 * To do the association, np is duplicated, the duplicate is made to
5117 * represent the new snapid, and np is replaced with the new rc_node_t on
5118 * np's parent's child list. np is placed on the new node's rn_former list,
5119 * and replaces np in cache_hash (so rc_node_update() will find the new one).
5121 * old_fmri and old_name point to the original snapshot's FMRI and name.
5122 * These values are used when generating audit events.
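* rc_snapshot_take_attach() calls this with a non-NULL parentp to take
* a new snapshot, while rc_snapshot_attach() calls it with an existing
* snapid to re-attach an old snapshot's contents.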
5124 * Fails with
5125 * _BAD_REQUEST
5126 * _BACKEND_READONLY
5127 * _DELETED
5128 * _NO_RESOURCES
5129 * _TRUNCATED
5130 * _TYPE_MISMATCH
5132 static int
5133 rc_attach_snapshot(
5134 rc_node_t *np,
5135 uint32_t snapid,
5136 rc_node_t *parentp,
5137 char *old_fmri,
5138 char *old_name)
5140 rc_node_t *np_orig;
5141 rc_node_t *nnp, *prev;
5142 rc_node_t *pp;
5143 int rc;
5144 size_t sz_out;
5145 perm_status_t granted;
5146 au_event_t event_id;
5147 audit_event_data_t audit_data;
5149 if (parentp == NULL) {
5150 assert(old_fmri != NULL);
5151 } else {
5152 assert(snapid == 0);
5154 assert(MUTEX_HELD(&np->rn_lock));
5156 /* Gather the audit data. */
5158 * ADT_smf_* symbols may not be defined in the /usr/include header
5159 * files on the build machine. Thus, the following if-else will
5160 * not be compiled when doing native builds.
5162 #ifndef NATIVE_BUILD
5163 if (parentp == NULL) {
5164 event_id = ADT_smf_attach_snap;
5165 } else {
5166 event_id = ADT_smf_create_snap;
5168 #endif /* NATIVE_BUILD */
5169 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5170 audit_data.ed_snapname = malloc(REP_PROTOCOL_NAME_LEN);
5171 if ((audit_data.ed_fmri == NULL) || (audit_data.ed_snapname == NULL)) {
5172 (void) pthread_mutex_unlock(&np->rn_lock);
5173 free(audit_data.ed_fmri);
5174 free(audit_data.ed_snapname);
5175 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5177 audit_data.ed_auth = NULL;
5178 if (strlcpy(audit_data.ed_snapname, np->rn_name,
5179 REP_PROTOCOL_NAME_LEN) >= REP_PROTOCOL_NAME_LEN) {
5180 abort();
5182 audit_data.ed_old_fmri = old_fmri;
5183 audit_data.ed_old_name = old_name ? old_name : "NO NAME";
5185 if (parentp == NULL) {
5187 * In the attach case, get the instance FMRIs of the
5188 * snapshots.
5190 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5191 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
5192 (void) pthread_mutex_unlock(&np->rn_lock);
5193 free(audit_data.ed_fmri);
5194 free(audit_data.ed_snapname);
5195 return (rc);
5197 } else {
5199 * Capture the FMRI of the parent if we're actually going
5200 * to take the snapshot.
5202 if ((rc = rc_node_get_fmri_or_fragment(parentp,
5203 audit_data.ed_fmri, REP_PROTOCOL_FMRI_LEN, &sz_out)) !=
5204 REP_PROTOCOL_SUCCESS) {
5205 (void) pthread_mutex_unlock(&np->rn_lock);
5206 free(audit_data.ed_fmri);
5207 free(audit_data.ed_snapname);
5208 return (rc);
5212 np_orig = np;
5213 rc_node_hold_locked(np); /* simplifies the remainder */
5215 (void) pthread_mutex_unlock(&np->rn_lock);
5216 granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5217 switch (granted) {
5218 case PERM_DENIED:
5219 smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH,
5220 &audit_data);
5221 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5222 rc_node_rele(np);
5223 goto cleanout;
5224 case PERM_GRANTED:
5225 break;
5226 case PERM_GONE:
5227 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5228 rc_node_rele(np);
5229 goto cleanout;
5230 case PERM_FAIL:
5231 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5232 rc_node_rele(np);
5233 goto cleanout;
5234 default:
5235 bad_error(rc_node_modify_permission_check, granted);
5237 (void) pthread_mutex_lock(&np->rn_lock);
5240 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
5241 * list from changing.
5243 for (;;) {
5244 if (!(np->rn_flags & RC_NODE_OLD)) {
5245 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5246 goto again;
5248 pp = rc_node_hold_parent_flag(np,
5249 RC_NODE_CHILDREN_CHANGING);
5251 (void) pthread_mutex_lock(&np->rn_lock);
5252 if (pp == NULL) {
5253 goto again;
5255 if (np->rn_flags & RC_NODE_OLD) {
5256 rc_node_rele_flag(pp,
5257 RC_NODE_CHILDREN_CHANGING);
5258 (void) pthread_mutex_unlock(&pp->rn_lock);
5259 goto again;
5261 (void) pthread_mutex_unlock(&pp->rn_lock);
5263 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
5265 * Can't happen, since we're holding our
5266 * parent's CHILDREN_CHANGING flag...
5268 abort();
5270 break; /* everything's ready */
5272 again:
5273 rc_node_rele_locked(np);
5274 np = cache_lookup(&np_orig->rn_id);
5276 if (np == NULL) {
5277 rc = REP_PROTOCOL_FAIL_DELETED;
5278 goto cleanout;
5281 (void) pthread_mutex_lock(&np->rn_lock);
5284 if (parentp != NULL) {
5285 if (pp != parentp) {
5286 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
5287 goto fail;
5289 nnp = NULL;
5290 } else {
5292 * look for a former node with the snapid we need.
5294 if (np->rn_snapshot_id == snapid) {
5295 rc_node_rele_flag(np, RC_NODE_IN_TX);
5296 rc_node_rele_locked(np);
5298 (void) pthread_mutex_lock(&pp->rn_lock);
5299 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5300 (void) pthread_mutex_unlock(&pp->rn_lock);
5301 rc = REP_PROTOCOL_SUCCESS; /* nothing to do */
5302 goto cleanout;
5305 prev = np;
5306 while ((nnp = prev->rn_former) != NULL) {
5307 if (nnp->rn_snapshot_id == snapid) {
5308 rc_node_hold(nnp);
5309 break; /* existing node with that id */
5311 prev = nnp;
5315 if (nnp == NULL) {
5316 prev = NULL;
5317 nnp = rc_node_alloc();
5318 if (nnp == NULL) {
5319 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5320 goto fail;
5323 nnp->rn_id = np->rn_id; /* structure assignment */
5324 nnp->rn_hash = np->rn_hash;
5325 nnp->rn_name = strdup(np->rn_name);
5326 nnp->rn_snapshot_id = snapid;
5327 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
5329 if (nnp->rn_name == NULL) {
5330 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5331 goto fail;
5335 (void) pthread_mutex_unlock(&np->rn_lock);
5337 rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));
5339 if (parentp != NULL)
5340 nnp->rn_snapshot_id = snapid; /* fill in new snapid */
5341 else
5342 assert(nnp->rn_snapshot_id == snapid);
5344 (void) pthread_mutex_lock(&np->rn_lock);
5345 if (rc != REP_PROTOCOL_SUCCESS)
5346 goto fail;
5349 * fix up the former chain
5351 if (prev != NULL) {
5352 prev->rn_former = nnp->rn_former;
5353 (void) pthread_mutex_lock(&nnp->rn_lock);
5354 nnp->rn_flags &= ~RC_NODE_ON_FORMER;
5355 nnp->rn_former = NULL;
5356 (void) pthread_mutex_unlock(&nnp->rn_lock);
5358 np->rn_flags |= RC_NODE_OLD;
5359 (void) pthread_mutex_unlock(&np->rn_lock);
5362 * replace np with nnp
5364 rc_node_relink_child(pp, np, nnp);
5366 rc_node_rele(np);
5367 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, &audit_data);
5368 rc = REP_PROTOCOL_SUCCESS;
5370 cleanout:
5371 free(audit_data.ed_auth);
5372 free(audit_data.ed_fmri);
5373 free(audit_data.ed_snapname);
5374 return (rc);
5376 fail:
5377 rc_node_rele_flag(np, RC_NODE_IN_TX);
5378 rc_node_rele_locked(np);
5379 (void) pthread_mutex_lock(&pp->rn_lock);
5380 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5381 (void) pthread_mutex_unlock(&pp->rn_lock);
5383 if (nnp != NULL) {
5384 if (prev == NULL)
5385 rc_node_destroy(nnp);
5386 else
5387 rc_node_rele(nnp);
5390 free(audit_data.ed_auth);
5391 free(audit_data.ed_fmri);
5392 free(audit_data.ed_snapname);
5393 return (rc);
5397 rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
5398 const char *instname, const char *name, rc_node_ptr_t *outpp)
5400 perm_status_t granted;
5401 rc_node_t *np;
5402 rc_node_t *outp = NULL;
5403 int rc, perm_rc;
5404 char fmri[REP_PROTOCOL_FMRI_LEN];
5405 audit_event_data_t audit_data;
5406 size_t sz_out;
5408 rc_node_clear(outpp, 0);
5411 * rc_node_modify_permission_check() must be called before the node
5412 * is locked. This is because the library functions that check
5413 * authorizations can trigger calls back into configd.
5415 granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5416 switch (granted) {
5417 case PERM_DENIED:
5419 * We continue in this case, so that we can generate an
5420 * audit event later in this function.
5422 perm_rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5423 break;
5424 case PERM_GRANTED:
5425 perm_rc = REP_PROTOCOL_SUCCESS;
5426 break;
5427 case PERM_GONE:
5428 /* No need to produce audit event if client is gone. */
5429 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5430 case PERM_FAIL:
5431 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5432 default:
5433 bad_error("rc_node_modify_permission_check", granted);
5434 break;
5437 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
5438 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5439 (void) pthread_mutex_unlock(&np->rn_lock);
5440 free(audit_data.ed_auth);
5441 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5444 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
5445 if (rc != REP_PROTOCOL_SUCCESS) {
5446 (void) pthread_mutex_unlock(&np->rn_lock);
5447 free(audit_data.ed_auth);
5448 return (rc);
5451 if (svcname != NULL && (rc =
5452 rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
5453 REP_PROTOCOL_SUCCESS) {
5454 (void) pthread_mutex_unlock(&np->rn_lock);
5455 free(audit_data.ed_auth);
5456 return (rc);
5459 if (instname != NULL && (rc =
5460 rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
5461 REP_PROTOCOL_SUCCESS) {
5462 (void) pthread_mutex_unlock(&np->rn_lock);
5463 free(audit_data.ed_auth);
5464 return (rc);
5467 audit_data.ed_fmri = fmri;
5468 audit_data.ed_snapname = (char *)name;
5470 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri),
5471 &sz_out)) != REP_PROTOCOL_SUCCESS) {
5472 (void) pthread_mutex_unlock(&np->rn_lock);
5473 free(audit_data.ed_auth);
5474 return (rc);
5476 if (perm_rc != REP_PROTOCOL_SUCCESS) {
5477 (void) pthread_mutex_unlock(&np->rn_lock);
5478 smf_audit_event(ADT_smf_create_snap, ADT_FAILURE,
5479 ADT_FAIL_VALUE_AUTH, &audit_data);
5480 free(audit_data.ed_auth);
5481 return (perm_rc);
5484 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
5485 audit_data.ed_auth);
5486 (void) pthread_mutex_unlock(&np->rn_lock);
5488 rc = object_snapshot_take_new(np, svcname, instname, name, &outp);
5490 if (rc == REP_PROTOCOL_SUCCESS) {
5491 rc_node_assign(outpp, outp);
5492 rc_node_rele(outp);
5495 (void) pthread_mutex_lock(&np->rn_lock);
5496 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
5497 (void) pthread_mutex_unlock(&np->rn_lock);
5499 if (rc == REP_PROTOCOL_SUCCESS) {
5500 smf_audit_event(ADT_smf_create_snap, ADT_SUCCESS, ADT_SUCCESS,
5501 &audit_data);
5503 if (audit_data.ed_auth != NULL)
5504 free(audit_data.ed_auth);
5505 return (rc);
5509 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
5511 rc_node_t *np, *outp;
5513 RC_NODE_PTR_GET_CHECK(np, npp);
5514 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5515 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5518 RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
5519 if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5520 (void) pthread_mutex_unlock(&outp->rn_lock);
5521 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5524 return (rc_attach_snapshot(outp, 0, np, NULL,
5525 NULL)); /* drops outp's lock */
5529 rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5531 rc_node_t *np;
5532 rc_node_t *cp;
5533 uint32_t snapid;
5534 char old_name[REP_PROTOCOL_NAME_LEN];
5535 int rc;
5536 size_t sz_out;
5537 char old_fmri[REP_PROTOCOL_FMRI_LEN];
5539 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5540 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5541 (void) pthread_mutex_unlock(&np->rn_lock);
5542 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5544 snapid = np->rn_snapshot_id;
5545 rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri),
5546 &sz_out);
5547 (void) pthread_mutex_unlock(&np->rn_lock);
5548 if (rc != REP_PROTOCOL_SUCCESS)
5549 return (rc);
5550 if (np->rn_name != NULL) {
5551 if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >=
5552 sizeof (old_name)) {
5553 return (REP_PROTOCOL_FAIL_TRUNCATED);
5557 RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
5558 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5559 (void) pthread_mutex_unlock(&cp->rn_lock);
5560 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5563 rc = rc_attach_snapshot(cp, snapid, NULL,
5564 old_fmri, old_name); /* drops cp's lock */
5565 return (rc);
5569 * If the pgname property group under ent has type pgtype, and it has a
5570 * propname property with type ptype, return _SUCCESS. If pgtype is NULL,
5571 * it is not checked. If ent is not a service node, we will return _SUCCESS if
5572 * a property meeting the requirements exists in either ent or its parent
5573 * service.
5575 * Returns
5576 * _SUCCESS - see above
5577 * _DELETED - ent or one of its ancestors was deleted
5578 * _NO_RESOURCES - no resources
5579 * _NOT_FOUND - no matching property was found
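* rc_node_pg_check_read_protect() below uses this to see whether a
* property group declares a read authorization property
* (AUTH_PROP_READ) for itself, in which case reading the group
* requires authorization.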
5581 static int
5582 rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype,
5583 const char *propname, rep_protocol_value_type_t ptype)
5585 int ret;
5586 rc_node_t *pg = NULL, *spg = NULL, *svc, *prop;
5588 assert(!MUTEX_HELD(&ent->rn_lock));
5590 (void) pthread_mutex_lock(&ent->rn_lock);
5591 ret = rc_node_find_named_child(ent, pgname,
5592 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
5593 (void) pthread_mutex_unlock(&ent->rn_lock);
5595 switch (ret) {
5596 case REP_PROTOCOL_SUCCESS:
5597 break;
5599 case REP_PROTOCOL_FAIL_DELETED:
5600 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5601 return (ret);
5603 default:
5604 bad_error("rc_node_find_named_child", ret);
5607 if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) {
5608 ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE,
5609 &svc);
5610 if (ret != REP_PROTOCOL_SUCCESS) {
5611 assert(ret == REP_PROTOCOL_FAIL_DELETED);
5612 if (pg != NULL)
5613 rc_node_rele(pg);
5614 return (ret);
5616 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
5618 (void) pthread_mutex_lock(&svc->rn_lock);
5619 ret = rc_node_find_named_child(svc, pgname,
5620 REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg);
5621 (void) pthread_mutex_unlock(&svc->rn_lock);
5623 rc_node_rele(svc);
5625 switch (ret) {
5626 case REP_PROTOCOL_SUCCESS:
5627 break;
5629 case REP_PROTOCOL_FAIL_DELETED:
5630 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5631 if (pg != NULL)
5632 rc_node_rele(pg);
5633 return (ret);
5635 default:
5636 bad_error("rc_node_find_named_child", ret);
5640 if (pg != NULL &&
5641 pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) {
5642 rc_node_rele(pg);
5643 pg = NULL;
5646 if (spg != NULL &&
5647 pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) {
5648 rc_node_rele(spg);
5649 spg = NULL;
5652 if (pg == NULL) {
5653 if (spg == NULL)
5654 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5655 pg = spg;
5656 spg = NULL;
5660 * At this point, pg is non-NULL, and is a property group node of the
5661 * correct type. spg, if non-NULL, is also a property group node of
5662 * the correct type. Check for the property in pg first, then spg
5663 * (if applicable).
5665 (void) pthread_mutex_lock(&pg->rn_lock);
5666 ret = rc_node_find_named_child(pg, propname,
5667 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5668 (void) pthread_mutex_unlock(&pg->rn_lock);
5669 rc_node_rele(pg);
5670 switch (ret) {
5671 case REP_PROTOCOL_SUCCESS:
5672 if (prop != NULL) {
5673 if (prop->rn_valtype == ptype) {
5674 rc_node_rele(prop);
5675 if (spg != NULL)
5676 rc_node_rele(spg);
5677 return (REP_PROTOCOL_SUCCESS);
5679 rc_node_rele(prop);
5681 break;
5683 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5684 if (spg != NULL)
5685 rc_node_rele(spg);
5686 return (ret);
5688 case REP_PROTOCOL_FAIL_DELETED:
5689 break;
5691 default:
5692 bad_error("rc_node_find_named_child", ret);
5695 if (spg == NULL)
5696 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5698 pg = spg;
5700 (void) pthread_mutex_lock(&pg->rn_lock);
5701 ret = rc_node_find_named_child(pg, propname,
5702 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5703 (void) pthread_mutex_unlock(&pg->rn_lock);
5704 rc_node_rele(pg);
5705 switch (ret) {
5706 case REP_PROTOCOL_SUCCESS:
5707 if (prop != NULL) {
5708 if (prop->rn_valtype == ptype) {
5709 rc_node_rele(prop);
5710 return (REP_PROTOCOL_SUCCESS);
5712 rc_node_rele(prop);
5714 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5716 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5717 return (ret);
5719 case REP_PROTOCOL_FAIL_DELETED:
5720 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5722 default:
5723 bad_error("rc_node_find_named_child", ret);
5726 return (REP_PROTOCOL_SUCCESS);
5730 * Given a property group node, returns _SUCCESS if the property group may
5731 * be read without any special authorization.
5733 * Fails with:
5734 * _DELETED - np or an ancestor node was deleted
5735 * _TYPE_MISMATCH - np does not refer to a property group
5736 * _NO_RESOURCES - no resources
5737 * _PERMISSION_DENIED - authorization is required
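* Framework, dependency, and method property groups are always
* readable. Other property groups are freely readable only if no
* AUTH_PROP_READ property is declared for them (see
* rc_svc_prop_exists() above).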
5739 static int
5740 rc_node_pg_check_read_protect(rc_node_t *np)
5742 int ret;
5743 rc_node_t *ent;
5745 assert(!MUTEX_HELD(&np->rn_lock));
5747 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
5748 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5750 if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 ||
5751 strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 ||
5752 strcmp(np->rn_type, SCF_GROUP_METHOD) == 0)
5753 return (REP_PROTOCOL_SUCCESS);
5755 ret = rc_node_parent(np, &ent);
5757 if (ret != REP_PROTOCOL_SUCCESS)
5758 return (ret);
5760 ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
5761 AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING);
5763 rc_node_rele(ent);
5765 switch (ret) {
5766 case REP_PROTOCOL_FAIL_NOT_FOUND:
5767 return (REP_PROTOCOL_SUCCESS);
5768 case REP_PROTOCOL_SUCCESS:
5769 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5770 case REP_PROTOCOL_FAIL_DELETED:
5771 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5772 return (ret);
5773 default:
5774 bad_error("rc_svc_prop_exists", ret);
5777 return (REP_PROTOCOL_SUCCESS);
5781 * Fails with
5782 * _DELETED - np's node or parent has been deleted
5783 * _TYPE_MISMATCH - np's node is not a property
5784 * _NO_RESOURCES - out of memory
5785 * _PERMISSION_DENIED - no authorization to read this property's value(s)
5786 * _BAD_REQUEST - np's parent is not a property group
5788 static int
5789 rc_node_property_may_read(rc_node_t *np)
5791 int ret;
5792 perm_status_t granted = PERM_DENIED;
5793 rc_node_t *pgp;
5794 permcheck_t *pcp;
5795 audit_event_data_t audit_data;
5796 size_t sz_out;
5798 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
5799 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5801 if (client_is_privileged())
5802 return (REP_PROTOCOL_SUCCESS);
5804 #ifdef NATIVE_BUILD
5805 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5806 #else
5807 ret = rc_node_parent(np, &pgp);
5809 if (ret != REP_PROTOCOL_SUCCESS)
5810 return (ret);
5812 if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5813 rc_node_rele(pgp);
5814 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5817 ret = rc_node_pg_check_read_protect(pgp);
5819 if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) {
5820 rc_node_rele(pgp);
5821 return (ret);
5824 pcp = pc_create();
5826 if (pcp == NULL) {
5827 rc_node_rele(pgp);
5828 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5831 ret = perm_add_enabling(pcp, AUTH_MODIFY);
5833 if (ret == REP_PROTOCOL_SUCCESS) {
5834 const char * const auth =
5835 perm_auth_for_pgtype(pgp->rn_type);
5837 if (auth != NULL)
5838 ret = perm_add_enabling(pcp, auth);
5842 * If you are permitted to modify the value, you may also
5843 * read it. This means that both the MODIFY and VALUE
5844 * authorizations are acceptable. We don't allow requests
5845 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE,
5846 * however, to avoid leaking possibly valuable information
5847 * since such a user can't change the property anyway.
5849 if (ret == REP_PROTOCOL_SUCCESS)
5850 ret = perm_add_enabling_values(pcp, pgp,
5851 AUTH_PROP_MODIFY);
5853 if (ret == REP_PROTOCOL_SUCCESS &&
5854 strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0)
5855 ret = perm_add_enabling_values(pcp, pgp,
5856 AUTH_PROP_VALUE);
5858 if (ret == REP_PROTOCOL_SUCCESS)
5859 ret = perm_add_enabling_values(pcp, pgp,
5860 AUTH_PROP_READ);
5862 rc_node_rele(pgp);
5864 if (ret == REP_PROTOCOL_SUCCESS) {
5865 granted = perm_granted(pcp);
5866 if (granted == PERM_FAIL)
5867 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5868 if (granted == PERM_GONE)
5869 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5872 if (ret == REP_PROTOCOL_SUCCESS) {
5873 /* Generate a read_prop audit event. */
5874 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5875 if (audit_data.ed_fmri == NULL)
5876 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5878 if (ret == REP_PROTOCOL_SUCCESS) {
5879 ret = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5880 REP_PROTOCOL_FMRI_LEN, &sz_out);
5882 if (ret == REP_PROTOCOL_SUCCESS) {
5883 int status;
5884 int ret_value;
5886 if (granted == PERM_DENIED) {
5887 status = ADT_FAILURE;
5888 ret_value = ADT_FAIL_VALUE_AUTH;
5889 } else {
5890 status = ADT_SUCCESS;
5891 ret_value = ADT_SUCCESS;
5893 audit_data.ed_auth = pcp->pc_auth_string;
5894 smf_audit_event(ADT_smf_read_prop,
5895 status, ret_value, &audit_data);
5897 free(audit_data.ed_fmri);
5899 pc_free(pcp);
5901 if ((ret == REP_PROTOCOL_SUCCESS) && (granted == PERM_DENIED))
5902 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5904 return (ret);
5905 #endif /* NATIVE_BUILD */
5909 * Iteration
5911 static int
5912 rc_iter_filter_name(rc_node_t *np, void *s)
5914 const char *name = s;
5916 return (strcmp(np->rn_name, name) == 0);
5919 static int
5920 rc_iter_filter_type(rc_node_t *np, void *s)
5922 const char *type = s;
5924 return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
5927 /*ARGSUSED*/
5928 static int
5929 rc_iter_null_filter(rc_node_t *np, void *s)
5931 return (1);
5934 /*
5935 * Allocate & initialize an rc_node_iter_t structure. Essentially, ensure
5936 * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
5937 * If successful, leaves a hold on np & increments np->rn_other_refs
5938 *
5939 * If composed is true, then set up for iteration across the top level of np's
5940 * composition chain. If successful, leaves a hold on np and increments
5941 * rn_other_refs for the top level of np's composition chain.
5942 *
5943 * Fails with
5944 * _NO_RESOURCES
5945 * _INVALID_TYPE
5946 * _TYPE_MISMATCH - np cannot carry type children
5947 * _DELETED
5948 */
5949 static int
5950 rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
5951 rc_iter_filter_func *filter, void *arg, boolean_t composed)
5952 {
5953 rc_node_iter_t *nip;
5954 int res;
5956 assert(*resp == NULL);
5958 nip = uu_zalloc(sizeof (*nip));
5959 if (nip == NULL)
5960 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5962 /* np is held by the client's rc_node_ptr_t */
5963 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
5964 composed = 1;
5966 if (!composed) {
5967 (void) pthread_mutex_lock(&np->rn_lock);
5969 if ((res = rc_node_fill_children(np, type)) !=
5970 REP_PROTOCOL_SUCCESS) {
5971 (void) pthread_mutex_unlock(&np->rn_lock);
5972 uu_free(nip);
5973 return (res);
5974 }
5976 nip->rni_clevel = -1;
5978 nip->rni_iter = uu_list_walk_start(np->rn_children,
5979 UU_WALK_ROBUST);
5980 if (nip->rni_iter != NULL) {
5981 nip->rni_iter_node = np;
5982 rc_node_hold_other(np);
5983 } else {
5984 (void) pthread_mutex_unlock(&np->rn_lock);
5985 uu_free(nip);
5986 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5987 }
5988 (void) pthread_mutex_unlock(&np->rn_lock);
5989 } else {
5990 rc_node_t *ent;
5992 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5993 /* rn_cchain isn't valid until children are loaded. */
5994 (void) pthread_mutex_lock(&np->rn_lock);
5995 res = rc_node_fill_children(np,
5996 REP_PROTOCOL_ENTITY_SNAPLEVEL);
5997 (void) pthread_mutex_unlock(&np->rn_lock);
5998 if (res != REP_PROTOCOL_SUCCESS) {
5999 uu_free(nip);
6000 return (res);
6001 }
6003 /* Check for an empty snapshot. */
6004 if (np->rn_cchain[0] == NULL)
6005 goto empty;
6006 }
6008 /* Start at the top of the composition chain. */
6009 for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
6010 if (nip->rni_clevel >= COMPOSITION_DEPTH) {
6011 /* Empty composition chain. */
6012 empty:
6013 nip->rni_clevel = -1;
6014 nip->rni_iter = NULL;
6015 /* It's ok, iter_next() will return _DONE. */
6016 goto out;
6017 }
6019 ent = np->rn_cchain[nip->rni_clevel];
6020 assert(ent != NULL);
6022 if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
6023 break;
6025 /* Someone deleted it, so try the next one. */
6026 }
6028 res = rc_node_fill_children(ent, type);
6030 if (res == REP_PROTOCOL_SUCCESS) {
6031 nip->rni_iter = uu_list_walk_start(ent->rn_children,
6032 UU_WALK_ROBUST);
6034 if (nip->rni_iter == NULL)
6035 res = REP_PROTOCOL_FAIL_NO_RESOURCES;
6036 else {
6037 nip->rni_iter_node = ent;
6038 rc_node_hold_other(ent);
6039 }
6040 }
6042 if (res != REP_PROTOCOL_SUCCESS) {
6043 (void) pthread_mutex_unlock(&ent->rn_lock);
6044 uu_free(nip);
6045 return (res);
6046 }
6048 (void) pthread_mutex_unlock(&ent->rn_lock);
6049 }
6051 out:
6052 rc_node_hold(np); /* released by rc_iter_end() */
6053 nip->rni_parent = np;
6054 nip->rni_type = type;
6055 nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
6056 nip->rni_filter_arg = arg;
6057 *resp = nip;
6058 return (REP_PROTOCOL_SUCCESS);
6059 }
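/*
 * rc_iter_end() undoes rc_iter_create(): it ends the uu_list walk, drops the
 * lock on the node being walked (the parent or the current composition
 * level), and releases the holds on rni_parent and rni_iter_node. The caller
 * must hold the appropriate rn_lock, as the assert below checks.
 */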
6061 static void
6062 rc_iter_end(rc_node_iter_t *iter)
6063 {
6064 rc_node_t *np = iter->rni_parent;
6066 if (iter->rni_clevel >= 0)
6067 np = np->rn_cchain[iter->rni_clevel];
6069 assert(MUTEX_HELD(&np->rn_lock));
6070 if (iter->rni_iter != NULL)
6071 uu_list_walk_end(iter->rni_iter);
6072 iter->rni_iter = NULL;
6074 (void) pthread_mutex_unlock(&np->rn_lock);
6075 rc_node_rele(iter->rni_parent);
6076 if (iter->rni_iter_node != NULL)
6077 rc_node_rele_other(iter->rni_iter_node);
6078 }
6080 /*
6081 * Fails with
6082 * _NOT_SET - npp is reset
6083 * _DELETED - npp's node has been deleted
6084 * _NOT_APPLICABLE - npp's node is not a property
6085 * _NO_RESOURCES - out of memory
6086 */
6087 static int
6088 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
6089 {
6090 rc_node_t *np;
6092 rc_node_iter_t *nip;
6094 assert(*iterp == NULL);
6096 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6098 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6099 (void) pthread_mutex_unlock(&np->rn_lock);
6100 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6101 }
6103 nip = uu_zalloc(sizeof (*nip));
6104 if (nip == NULL) {
6105 (void) pthread_mutex_unlock(&np->rn_lock);
6106 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6107 }
6109 nip->rni_parent = np;
6110 nip->rni_iter = NULL;
6111 nip->rni_clevel = -1;
6112 nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
6113 nip->rni_offset = 0;
6114 nip->rni_last_offset = 0;
6116 rc_node_hold_locked(np);
6118 *iterp = nip;
6119 (void) pthread_mutex_unlock(&np->rn_lock);
6121 return (REP_PROTOCOL_SUCCESS);
6122 }
6124 /*
6125 * Returns:
6126 * _NO_RESOURCES - out of memory
6127 * _NOT_SET - npp is reset
6128 * _DELETED - npp's node has been deleted
6129 * _TYPE_MISMATCH - npp's node is not a property
6130 * _NOT_FOUND - property has no values
6131 * _TRUNCATED - property has >1 values (first is written into out)
6132 * _SUCCESS - property has 1 value (which is written into out)
6133 * _PERMISSION_DENIED - no authorization to read property value(s)
6134 *
6135 * We shorten *sz_out to not include anything after the final '\0'.
6136 */
6137 int
6138 rc_node_get_property_value(rc_node_ptr_t *npp,
6139 struct rep_protocol_value_response *out, size_t *sz_out)
6140 {
6141 rc_node_t *np;
6142 size_t w;
6143 int ret;
6145 assert(*sz_out == sizeof (*out));
6147 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6148 ret = rc_node_property_may_read(np);
6149 rc_node_rele(np);
6151 if (ret != REP_PROTOCOL_SUCCESS)
6152 return (ret);
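/*
 * The permission check above ran on a held but unlocked node; below, the
 * pointer is re-checked and the node locked before the cached value list is
 * read.
 */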
6154 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6156 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6157 (void) pthread_mutex_unlock(&np->rn_lock);
6158 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6159 }
6161 if (np->rn_values_size == 0) {
6162 (void) pthread_mutex_unlock(&np->rn_lock);
6163 return (REP_PROTOCOL_FAIL_NOT_FOUND);
6164 }
6165 out->rpr_type = np->rn_valtype;
6166 w = strlcpy(out->rpr_value, &np->rn_values[0],
6167 sizeof (out->rpr_value));
6169 if (w >= sizeof (out->rpr_value))
6170 backend_panic("value too large");
6172 *sz_out = offsetof(struct rep_protocol_value_response,
6173 rpr_value[w + 1]);
6175 ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
6176 REP_PROTOCOL_SUCCESS;
6177 (void) pthread_mutex_unlock(&np->rn_lock);
6178 return (ret);
6179 }
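/*
 * Copy the next value of the property being iterated into *out. When repeat
 * is set, the value from the previous call is returned again (the offsets
 * only advance when repeat is 0). Returns _DONE once the value list is
 * exhausted, _BAD_REQUEST if the iterator does not walk values, and
 * _PERMISSION_DENIED if the property may not be read.
 */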
6181 int
6182 rc_iter_next_value(rc_node_iter_t *iter,
6183 struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
6184 {
6185 rc_node_t *np = iter->rni_parent;
6186 const char *vals;
6187 size_t len;
6189 size_t start;
6190 size_t w;
6191 int ret;
6193 rep_protocol_responseid_t result;
6195 assert(*sz_out == sizeof (*out));
6197 (void) memset(out, '\0', *sz_out);
6199 if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
6200 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6202 RC_NODE_CHECK(np);
6203 ret = rc_node_property_may_read(np);
6205 if (ret != REP_PROTOCOL_SUCCESS)
6206 return (ret);
6208 RC_NODE_CHECK_AND_LOCK(np);
6210 vals = np->rn_values;
6211 len = np->rn_values_size;
6213 out->rpr_type = np->rn_valtype;
6215 start = (repeat)? iter->rni_last_offset : iter->rni_offset;
6217 if (len == 0 || start >= len) {
6218 result = REP_PROTOCOL_DONE;
6219 *sz_out -= sizeof (out->rpr_value);
6220 } else {
6221 w = strlcpy(out->rpr_value, &vals[start],
6222 sizeof (out->rpr_value));
6224 if (w >= sizeof (out->rpr_value))
6225 backend_panic("value too large");
6227 *sz_out = offsetof(struct rep_protocol_value_response,
6228 rpr_value[w + 1]);
6230 /*
6231 * update the offsets if we're not repeating
6232 */
6233 if (!repeat) {
6234 iter->rni_last_offset = iter->rni_offset;
6235 iter->rni_offset += (w + 1);
6236 }
6238 result = REP_PROTOCOL_SUCCESS;
6239 }
6241 (void) pthread_mutex_unlock(&np->rn_lock);
6242 return (result);
6243 }
6245 /*
6246 * Entry point for ITER_START from client.c. Validate the arguments & call
6247 * rc_iter_create().
6249 * Fails with
6250 * _NOT_SET
6251 * _DELETED
6252 * _TYPE_MISMATCH - np cannot carry type children
6253 * _BAD_REQUEST - flags is invalid
6254 * pattern is invalid
6255 * _NO_RESOURCES
6256 * _INVALID_TYPE
6257 * _TYPE_MISMATCH - *npp cannot have children of type
6258 * _BACKEND_ACCESS
6259 */
6260 int
6261 rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
6262 uint32_t type, uint32_t flags, const char *pattern)
6263 {
6264 rc_node_t *np;
6265 rc_iter_filter_func *f = NULL;
6266 int rc;
6268 RC_NODE_PTR_GET_CHECK(np, npp);
6270 if (pattern != NULL && pattern[0] == '\0')
6271 pattern = NULL;
6273 if (type == REP_PROTOCOL_ENTITY_VALUE) {
6274 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
6275 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6276 if (flags != RP_ITER_START_ALL || pattern != NULL)
6277 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6279 rc = rc_node_setup_value_iter(npp, iterp);
6280 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6281 return (rc);
6282 }
6284 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
6285 REP_PROTOCOL_SUCCESS)
6286 return (rc);
6288 if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
6289 (pattern == NULL))
6290 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6292 /* Composition only works for instances & snapshots. */
6293 if ((flags & RP_ITER_START_COMPOSED) &&
6294 (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
6295 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
6296 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6298 if (pattern != NULL) {
6299 if ((rc = rc_check_type_name(type, pattern)) !=
6300 REP_PROTOCOL_SUCCESS)
6301 return (rc);
6302 pattern = strdup(pattern);
6303 if (pattern == NULL)
6304 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6305 }
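/*
 * Pick the filter callback matching the request flags; the strdup'd pattern
 * is handed to rc_iter_create() as the filter argument and is freed by
 * rc_iter_destroy() (or below, if iterator creation fails).
 */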
6307 switch (flags & RP_ITER_START_FILT_MASK) {
6308 case RP_ITER_START_ALL:
6309 f = NULL;
6310 break;
6311 case RP_ITER_START_EXACT:
6312 f = rc_iter_filter_name;
6313 break;
6314 case RP_ITER_START_PGTYPE:
6315 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6316 free((void *)pattern);
6317 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6318 }
6319 f = rc_iter_filter_type;
6320 break;
6321 default:
6322 free((void *)pattern);
6323 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6324 }
6326 rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
6327 flags & RP_ITER_START_COMPOSED);
6328 if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
6329 free((void *)pattern);
6331 return (rc);
6332 }
6334 /*
6335 * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
6336 * the filter.
6337 * For composed iterators, then check to see if there's an overlapping entity
6338 * (see embedded comments). If we reach the end of the list, start over at
6339 * the next level.
6341 * Returns
6342 * _BAD_REQUEST - iter walks values
6343 * _TYPE_MISMATCH - iter does not walk type entities
6344 * _DELETED - parent was deleted
6345 * _NO_RESOURCES
6346 * _INVALID_TYPE - type is invalid
6347 * _DONE
6348 * _SUCCESS
6350 * For composed property group iterators, can also return
6351 * _TYPE_MISMATCH - parent cannot have type children
6352 */
6353 int
6354 rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
6355 {
6356 rc_node_t *np = iter->rni_parent;
6357 rc_node_t *res;
6358 int rc;
6360 if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
6361 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6363 if (iter->rni_iter == NULL) {
6364 rc_node_clear(out, 0);
6365 return (REP_PROTOCOL_DONE);
6368 if (iter->rni_type != type) {
6369 rc_node_clear(out, 0);
6370 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6373 (void) pthread_mutex_lock(&np->rn_lock); /* held by _iter_create() */
6375 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6376 (void) pthread_mutex_unlock(&np->rn_lock);
6377 rc_node_clear(out, 1);
6378 return (REP_PROTOCOL_FAIL_DELETED);
6381 if (iter->rni_clevel >= 0) {
6382 /* Composed iterator. Iterate over appropriate level. */
6383 (void) pthread_mutex_unlock(&np->rn_lock);
6384 np = np->rn_cchain[iter->rni_clevel];
6385 /*
6386 * If iter->rni_parent is an instance or a snapshot, np must
6387 * be valid since iter holds iter->rni_parent & possible
6388 * levels (service, instance, snaplevel) cannot be destroyed
6389 * while rni_parent is held. If iter->rni_parent is
6390 * a composed property group then rc_node_setup_cpg() put
6391 * a hold on np.
6392 */
6394 (void) pthread_mutex_lock(&np->rn_lock);
6396 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6397 (void) pthread_mutex_unlock(&np->rn_lock);
6398 rc_node_clear(out, 1);
6399 return (REP_PROTOCOL_FAIL_DELETED);
6400 }
6401 }
6403 assert(np->rn_flags & RC_NODE_HAS_CHILDREN);
6405 for (;;) {
6406 res = uu_list_walk_next(iter->rni_iter);
6407 if (res == NULL) {
6408 rc_node_t *parent = iter->rni_parent;
6410 #if COMPOSITION_DEPTH == 2
6411 if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
6412 /* release walker and lock */
6413 rc_iter_end(iter);
6414 break;
6417 /* Stop walking current level. */
6418 uu_list_walk_end(iter->rni_iter);
6419 iter->rni_iter = NULL;
6420 (void) pthread_mutex_unlock(&np->rn_lock);
6421 rc_node_rele_other(iter->rni_iter_node);
6422 iter->rni_iter_node = NULL;
6424 /* Start walking next level. */
6425 ++iter->rni_clevel;
6426 np = parent->rn_cchain[iter->rni_clevel];
6427 assert(np != NULL);
6428 #else
6429 #error This code must be updated.
6430 #endif
6432 (void) pthread_mutex_lock(&np->rn_lock);
6434 rc = rc_node_fill_children(np, iter->rni_type);
6436 if (rc == REP_PROTOCOL_SUCCESS) {
6437 iter->rni_iter =
6438 uu_list_walk_start(np->rn_children,
6439 UU_WALK_ROBUST);
6441 if (iter->rni_iter == NULL)
6442 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
6443 else {
6444 iter->rni_iter_node = np;
6445 rc_node_hold_other(np);
6446 }
6447 }
6449 if (rc != REP_PROTOCOL_SUCCESS) {
6450 (void) pthread_mutex_unlock(&np->rn_lock);
6451 rc_node_clear(out, 0);
6452 return (rc);
6453 }
6455 continue;
6456 }
6458 if (res->rn_id.rl_type != type ||
6459 !iter->rni_filter(res, iter->rni_filter_arg))
6460 continue;
6462 /*
6463 * If we're composed and not at the top level, check to see if
6464 * there's an entity at a higher level with the same name. If
6465 * so, skip this one.
6466 */
6467 if (iter->rni_clevel > 0) {
6468 rc_node_t *ent = iter->rni_parent->rn_cchain[0];
6469 rc_node_t *pg;
6471 #if COMPOSITION_DEPTH == 2
6472 assert(iter->rni_clevel == 1);
6474 (void) pthread_mutex_unlock(&np->rn_lock);
6475 (void) pthread_mutex_lock(&ent->rn_lock);
6476 rc = rc_node_find_named_child(ent, res->rn_name, type,
6477 &pg);
6478 if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
6479 rc_node_rele(pg);
6480 (void) pthread_mutex_unlock(&ent->rn_lock);
6481 if (rc != REP_PROTOCOL_SUCCESS) {
6482 rc_node_clear(out, 0);
6483 return (rc);
6485 (void) pthread_mutex_lock(&np->rn_lock);
6487 /* Make sure np isn't being deleted all of a sudden. */
6488 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6489 (void) pthread_mutex_unlock(&np->rn_lock);
6490 rc_node_clear(out, 1);
6491 return (REP_PROTOCOL_FAIL_DELETED);
6494 if (pg != NULL)
6495 /* Keep going. */
6496 continue;
6497 #else
6498 #error This code must be updated.
6499 #endif
6500 }
6502 /*
6503 * If we're composed, iterating over property groups, and not
6504 * at the bottom level, check to see if there's a pg at lower
6505 * level with the same name. If so, return a cpg.
6506 */
6507 if (iter->rni_clevel >= 0 &&
6508 type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
6509 iter->rni_clevel < COMPOSITION_DEPTH - 1) {
6510 #if COMPOSITION_DEPTH == 2
6511 rc_node_t *pg;
6512 rc_node_t *ent = iter->rni_parent->rn_cchain[1];
6514 rc_node_hold(res); /* While we drop np->rn_lock */
6516 (void) pthread_mutex_unlock(&np->rn_lock);
6517 (void) pthread_mutex_lock(&ent->rn_lock);
6518 rc = rc_node_find_named_child(ent, res->rn_name, type,
6519 &pg);
6520 /* holds pg if not NULL */
6521 (void) pthread_mutex_unlock(&ent->rn_lock);
6522 if (rc != REP_PROTOCOL_SUCCESS) {
6523 rc_node_rele(res);
6524 rc_node_clear(out, 0);
6525 return (rc);
6528 (void) pthread_mutex_lock(&np->rn_lock);
6529 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6530 (void) pthread_mutex_unlock(&np->rn_lock);
6531 rc_node_rele(res);
6532 if (pg != NULL)
6533 rc_node_rele(pg);
6534 rc_node_clear(out, 1);
6535 return (REP_PROTOCOL_FAIL_DELETED);
6538 if (pg == NULL) {
6539 (void) pthread_mutex_unlock(&np->rn_lock);
6540 rc_node_rele(res);
6541 (void) pthread_mutex_lock(&np->rn_lock);
6542 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6543 (void) pthread_mutex_unlock(&np->
6544 rn_lock);
6545 rc_node_clear(out, 1);
6546 return (REP_PROTOCOL_FAIL_DELETED);
6548 } else {
6549 rc_node_t *cpg;
6551 /* Keep res held for rc_node_setup_cpg(). */
6553 cpg = rc_node_alloc();
6554 if (cpg == NULL) {
6555 (void) pthread_mutex_unlock(
6556 &np->rn_lock);
6557 rc_node_rele(res);
6558 rc_node_rele(pg);
6559 rc_node_clear(out, 0);
6560 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6563 switch (rc_node_setup_cpg(cpg, res, pg)) {
6564 case REP_PROTOCOL_SUCCESS:
6565 res = cpg;
6566 break;
6568 case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
6569 /* Nevermind. */
6570 (void) pthread_mutex_unlock(&np->
6571 rn_lock);
6572 rc_node_destroy(cpg);
6573 rc_node_rele(pg);
6574 rc_node_rele(res);
6575 (void) pthread_mutex_lock(&np->
6576 rn_lock);
6577 if (!rc_node_wait_flag(np,
6578 RC_NODE_DYING)) {
6579 (void) pthread_mutex_unlock(&
6580 np->rn_lock);
6581 rc_node_clear(out, 1);
6582 return
6583 (REP_PROTOCOL_FAIL_DELETED);
6585 break;
6587 case REP_PROTOCOL_FAIL_NO_RESOURCES:
6588 rc_node_destroy(cpg);
6589 (void) pthread_mutex_unlock(
6590 &np->rn_lock);
6591 rc_node_rele(res);
6592 rc_node_rele(pg);
6593 rc_node_clear(out, 0);
6594 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6596 default:
6597 assert(0);
6598 abort();
6599 }
6600 }
6601 #else
6602 #error This code must be updated.
6603 #endif
6604 }
6606 rc_node_hold(res);
6607 (void) pthread_mutex_unlock(&np->rn_lock);
6608 break;
6609 }
6610 rc_node_assign(out, res);
6612 if (res == NULL)
6613 return (REP_PROTOCOL_DONE);
6614 rc_node_rele(res);
6615 return (REP_PROTOCOL_SUCCESS);
6616 }
6618 void
6619 rc_iter_destroy(rc_node_iter_t **nipp)
6620 {
6621 rc_node_iter_t *nip = *nipp;
6622 rc_node_t *np;
6624 if (nip == NULL)
6625 return; /* already freed */
6627 np = nip->rni_parent;
6629 if (nip->rni_filter_arg != NULL)
6630 free(nip->rni_filter_arg);
6631 nip->rni_filter_arg = NULL;
6633 if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
6634 nip->rni_iter != NULL) {
6635 if (nip->rni_clevel < 0)
6636 (void) pthread_mutex_lock(&np->rn_lock);
6637 else
6638 (void) pthread_mutex_lock(
6639 &np->rn_cchain[nip->rni_clevel]->rn_lock);
6640 rc_iter_end(nip); /* release walker and lock */
6642 nip->rni_parent = NULL;
6644 uu_free(nip);
6645 *nipp = NULL;
6646 }
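/*
 * Set up a transaction on the property group referenced by npp and attach it
 * to txp. Property groups in snapshots are never writable. When permission
 * checking applies, the modify authorizations (AUTH_MODIFY, the pg-type
 * specific authorization, and the pg's value/modify authorization
 * properties) are collected and evaluated here; the result is recorded in
 * txp->rnp_authorized so that rc_tx_commit() can generate audit events
 * later.
 */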
6648 int
6649 rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
6650 {
6651 rc_node_t *np;
6652 permcheck_t *pcp;
6653 int ret;
6654 perm_status_t granted;
6655 rc_auth_state_t authorized = RC_AUTH_UNKNOWN;
6656 char *auth_string = NULL;
6658 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6660 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
6661 rc_node_rele(np);
6662 np = np->rn_cchain[0];
6663 RC_NODE_CHECK_AND_HOLD(np);
6666 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6667 rc_node_rele(np);
6668 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6671 if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
6672 rc_node_rele(np);
6673 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6676 #ifdef NATIVE_BUILD
6677 if (client_is_privileged())
6678 goto skip_checks;
6679 rc_node_rele(np);
6680 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6681 #else
6682 if (is_main_repository == 0)
6683 goto skip_checks;
6685 /* permission check */
6686 pcp = pc_create();
6687 if (pcp == NULL) {
6688 rc_node_rele(np);
6689 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6692 if (np->rn_id.rl_ids[ID_INSTANCE] != 0 && /* instance pg */
6693 ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
6694 strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
6695 (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
6696 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
6697 rc_node_t *instn;
6699 /* solaris.smf.modify can be used */
6700 ret = perm_add_enabling(pcp, AUTH_MODIFY);
6701 if (ret != REP_PROTOCOL_SUCCESS) {
6702 pc_free(pcp);
6703 rc_node_rele(np);
6704 return (ret);
6707 /* solaris.smf.manage can be used. */
6708 ret = perm_add_enabling(pcp, AUTH_MANAGE);
6710 if (ret != REP_PROTOCOL_SUCCESS) {
6711 pc_free(pcp);
6712 rc_node_rele(np);
6713 return (ret);
6716 /* general/action_authorization values can be used. */
6717 ret = rc_node_parent(np, &instn);
6718 if (ret != REP_PROTOCOL_SUCCESS) {
6719 assert(ret == REP_PROTOCOL_FAIL_DELETED);
6720 rc_node_rele(np);
6721 pc_free(pcp);
6722 return (REP_PROTOCOL_FAIL_DELETED);
6725 assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
6727 ret = perm_add_inst_action_auth(pcp, instn);
6728 rc_node_rele(instn);
6729 switch (ret) {
6730 case REP_PROTOCOL_SUCCESS:
6731 break;
6733 case REP_PROTOCOL_FAIL_DELETED:
6734 case REP_PROTOCOL_FAIL_NO_RESOURCES:
6735 rc_node_rele(np);
6736 pc_free(pcp);
6737 return (ret);
6739 default:
6740 bad_error("perm_add_inst_action_auth", ret);
6743 if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
6744 authorized = RC_AUTH_PASSED; /* No check on commit. */
6745 } else {
6746 ret = perm_add_enabling(pcp, AUTH_MODIFY);
6748 if (ret == REP_PROTOCOL_SUCCESS) {
6749 /* propertygroup-type-specific authorization */
6750 /* no locking because rn_type won't change anyway */
6751 const char * const auth =
6752 perm_auth_for_pgtype(np->rn_type);
6754 if (auth != NULL)
6755 ret = perm_add_enabling(pcp, auth);
6758 if (ret == REP_PROTOCOL_SUCCESS)
6759 /* propertygroup/transaction-type-specific auths */
6760 ret =
6761 perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);
6763 if (ret == REP_PROTOCOL_SUCCESS)
6764 ret =
6765 perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);
6767 /* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
6768 if (ret == REP_PROTOCOL_SUCCESS &&
6769 strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
6770 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
6771 ret = perm_add_enabling(pcp, AUTH_MANAGE);
6773 if (ret != REP_PROTOCOL_SUCCESS) {
6774 pc_free(pcp);
6775 rc_node_rele(np);
6776 return (ret);
6777 }
6778 }
6780 granted = perm_granted(pcp);
6781 ret = map_granted_status(granted, pcp, &auth_string);
6782 pc_free(pcp);
6784 if ((granted == PERM_GONE) || (granted == PERM_FAIL) ||
6785 (ret == REP_PROTOCOL_FAIL_NO_RESOURCES)) {
6786 free(auth_string);
6787 rc_node_rele(np);
6788 return (ret);
6789 }
6791 if (granted == PERM_DENIED) {
6792 /*
6793 * If we get here, the authorization failed.
6794 * Unfortunately, we don't have enough information at this
6795 * point to generate the security audit events. We'll only
6796 * get that information when the client tries to commit the
6797 * event. Thus, we'll remember the failed authorization,
6798 * so that we can generate the audit events later.
6799 */
6800 authorized = RC_AUTH_FAILED;
6801 }
6802 #endif /* NATIVE_BUILD */
6804 skip_checks:
6805 rc_node_assign(txp, np);
6806 txp->rnp_authorized = authorized;
6807 if (authorized != RC_AUTH_UNKNOWN) {
6808 /* Save the authorization string. */
6809 if (txp->rnp_auth_string != NULL)
6810 free((void *)txp->rnp_auth_string);
6811 txp->rnp_auth_string = auth_string;
6812 auth_string = NULL; /* Don't free until done with txp. */
6815 rc_node_rele(np);
6816 if (auth_string != NULL)
6817 free(auth_string);
6818 return (REP_PROTOCOL_SUCCESS);
6819 }
6821 /*
6822 * Return 1 if the given transaction commands only modify the values of
6823 * properties other than "modify_authorization". Return -1 if any of the
6824 * commands are invalid, and 0 otherwise.
6825 */
6826 static int
6827 tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
6828 {
6829 const struct rep_protocol_transaction_cmd *cmds;
6830 uintptr_t loc;
6831 uint32_t sz;
6832 rc_node_t *prop;
6833 boolean_t ok;
6835 assert(!MUTEX_HELD(&pg->rn_lock));
6837 loc = (uintptr_t)cmds_arg;
6839 while (cmds_sz > 0) {
6840 cmds = (struct rep_protocol_transaction_cmd *)loc;
6842 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6843 return (-1);
6845 sz = cmds->rptc_size;
6846 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6847 return (-1);
6849 sz = TX_SIZE(sz);
6850 if (sz > cmds_sz)
6851 return (-1);
6853 switch (cmds[0].rptc_action) {
6854 case REP_PROTOCOL_TX_ENTRY_CLEAR:
6855 break;
6857 case REP_PROTOCOL_TX_ENTRY_REPLACE:
6858 /* Check type */
6859 (void) pthread_mutex_lock(&pg->rn_lock);
6860 ok = B_FALSE;
6861 if (rc_node_find_named_child(pg,
6862 (const char *)cmds[0].rptc_data,
6863 REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
6864 REP_PROTOCOL_SUCCESS) {
6865 if (prop != NULL) {
6866 ok = prop->rn_valtype ==
6867 cmds[0].rptc_type;
6868 /*
6869 * rc_node_find_named_child()
6870 * places a hold on prop which we
6871 * do not need to hang on to.
6872 */
6873 rc_node_rele(prop);
6874 }
6875 }
6876 (void) pthread_mutex_unlock(&pg->rn_lock);
6877 if (ok)
6878 break;
6879 return (0);
6881 default:
6882 return (0);
6885 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
6886 == 0)
6887 return (0);
6889 loc += sz;
6890 cmds_sz -= sz;
6893 return (1);
6894 }
6896 /*
6897 * Return 1 if any of the given transaction commands affect
6898 * "action_authorization". Return -1 if any of the commands are invalid and
6899 * 0 in all other cases.
6900 */
6901 static int
6902 tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
6903 {
6904 const struct rep_protocol_transaction_cmd *cmds;
6905 uintptr_t loc;
6906 uint32_t sz;
6908 loc = (uintptr_t)cmds_arg;
6910 while (cmds_sz > 0) {
6911 cmds = (struct rep_protocol_transaction_cmd *)loc;
6913 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6914 return (-1);
6916 sz = cmds->rptc_size;
6917 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6918 return (-1);
6920 sz = TX_SIZE(sz);
6921 if (sz > cmds_sz)
6922 return (-1);
6924 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
6925 == 0)
6926 return (1);
6928 loc += sz;
6929 cmds_sz -= sz;
6932 return (0);
6933 }
6935 /*
6936 * Returns 1 if the transaction commands only modify properties named
6937 * 'enabled'.
6938 */
6939 static int
6940 tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
6941 {
6942 const struct rep_protocol_transaction_cmd *cmd;
6943 uintptr_t loc;
6944 uint32_t sz;
6946 loc = (uintptr_t)cmds_arg;
6948 while (cmds_sz > 0) {
6949 cmd = (struct rep_protocol_transaction_cmd *)loc;
6951 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6952 return (-1);
6954 sz = cmd->rptc_size;
6955 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6956 return (-1);
6958 sz = TX_SIZE(sz);
6959 if (sz > cmds_sz)
6960 return (-1);
6962 if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
6963 != 0)
6964 return (0);
6966 loc += sz;
6967 cmds_sz -= sz;
6970 return (1);
6971 }
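/*
 * Commit the transaction commands in cmds against the property group
 * attached to txp. If the authorization decision was deferred at setup time
 * the permission checks run here (what is required depends on which
 * properties the commands touch), audit events are generated for the
 * property changes, the commands are written through via object_tx_commit(),
 * and the old property group node is replaced by a new node carrying the new
 * generation id.
 */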
6973 int
6974 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
6975 {
6976 rc_node_t *np = txp->rnp_node;
6977 rc_node_t *pp;
6978 rc_node_t *nnp;
6979 rc_node_pg_notify_t *pnp;
6980 int rc;
6981 permcheck_t *pcp;
6982 perm_status_t granted;
6983 int normal;
6984 char *pg_fmri = NULL;
6985 char *auth_string = NULL;
6986 int auth_status = ADT_SUCCESS;
6987 int auth_ret_value = ADT_SUCCESS;
6988 size_t sz_out;
6989 int tx_flag = 1;
6990 tx_commit_data_t *tx_data = NULL;
6992 RC_NODE_CHECK(np);
6994 if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) &&
6995 (txp->rnp_auth_string != NULL)) {
6996 auth_string = strdup(txp->rnp_auth_string);
6997 if (auth_string == NULL)
6998 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7001 if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) &&
7002 is_main_repository) {
7003 #ifdef NATIVE_BUILD
7004 if (!client_is_privileged()) {
7005 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
7007 #else
7008 /* permission check: depends on contents of transaction */
7009 pcp = pc_create();
7010 if (pcp == NULL)
7011 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7013 /* If normal is cleared, we won't do the normal checks. */
7014 normal = 1;
7015 rc = REP_PROTOCOL_SUCCESS;
7017 if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
7018 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
7019 /* Touching general[framework]/action_authorization? */
7020 rc = tx_modifies_action(cmds, cmds_sz);
7021 if (rc == -1) {
7022 pc_free(pcp);
7023 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7026 if (rc) {
7027 /*
7028 * Yes: only AUTH_MODIFY and AUTH_MANAGE
7029 * can be used.
7030 */
7031 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7033 if (rc == REP_PROTOCOL_SUCCESS)
7034 rc = perm_add_enabling(pcp,
7035 AUTH_MANAGE);
7037 normal = 0;
7038 } else {
7039 rc = REP_PROTOCOL_SUCCESS;
7041 } else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
7042 strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
7043 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
7044 rc_node_t *instn;
7046 rc = tx_only_enabled(cmds, cmds_sz);
7047 if (rc == -1) {
7048 pc_free(pcp);
7049 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7052 if (rc) {
7053 rc = rc_node_parent(np, &instn);
7054 if (rc != REP_PROTOCOL_SUCCESS) {
7055 assert(rc == REP_PROTOCOL_FAIL_DELETED);
7056 pc_free(pcp);
7057 return (rc);
7060 assert(instn->rn_id.rl_type ==
7061 REP_PROTOCOL_ENTITY_INSTANCE);
7063 rc = perm_add_inst_action_auth(pcp, instn);
7064 rc_node_rele(instn);
7065 switch (rc) {
7066 case REP_PROTOCOL_SUCCESS:
7067 break;
7069 case REP_PROTOCOL_FAIL_DELETED:
7070 case REP_PROTOCOL_FAIL_NO_RESOURCES:
7071 pc_free(pcp);
7072 return (rc);
7074 default:
7075 bad_error("perm_add_inst_action_auth",
7076 rc);
7078 } else {
7079 rc = REP_PROTOCOL_SUCCESS;
7083 if (rc == REP_PROTOCOL_SUCCESS && normal) {
7084 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7086 if (rc == REP_PROTOCOL_SUCCESS) {
7087 /* Add pgtype-specific authorization. */
7088 const char * const auth =
7089 perm_auth_for_pgtype(np->rn_type);
7091 if (auth != NULL)
7092 rc = perm_add_enabling(pcp, auth);
7095 /* Add pg-specific modify_authorization auths. */
7096 if (rc == REP_PROTOCOL_SUCCESS)
7097 rc = perm_add_enabling_values(pcp, np,
7098 AUTH_PROP_MODIFY);
7100 /* If value_authorization values are ok, add them. */
7101 if (rc == REP_PROTOCOL_SUCCESS) {
7102 rc = tx_allow_value(cmds, cmds_sz, np);
7103 if (rc == -1)
7104 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
7105 else if (rc)
7106 rc = perm_add_enabling_values(pcp, np,
7107 AUTH_PROP_VALUE);
7111 if (rc == REP_PROTOCOL_SUCCESS) {
7112 granted = perm_granted(pcp);
7113 rc = map_granted_status(granted, pcp, &auth_string);
7114 if ((granted == PERM_DENIED) && auth_string) {
7115 /*
7116 * _PERMISSION_DENIED should not cause us
7117 * to exit at this point, because we still
7118 * want to generate an audit event.
7119 */
7120 rc = REP_PROTOCOL_SUCCESS;
7121 }
7122 }
7124 pc_free(pcp);
7126 if (rc != REP_PROTOCOL_SUCCESS)
7127 goto cleanout;
7129 if (granted == PERM_DENIED) {
7130 auth_status = ADT_FAILURE;
7131 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7132 tx_flag = 0;
7134 #endif /* NATIVE_BUILD */
7135 } else if (txp->rnp_authorized == RC_AUTH_FAILED) {
7136 auth_status = ADT_FAILURE;
7137 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7138 tx_flag = 0;
7141 pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
7142 if (pg_fmri == NULL) {
7143 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7144 goto cleanout;
7146 if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri,
7147 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
7148 goto cleanout;
7149 }
7151 /*
7152 * Parse the transaction commands into a useful form.
7153 */
7154 if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) !=
7155 REP_PROTOCOL_SUCCESS) {
7156 goto cleanout;
7159 if (tx_flag == 0) {
7160 /* Authorization failed. Generate audit events. */
7161 generate_property_events(tx_data, pg_fmri, auth_string,
7162 auth_status, auth_ret_value);
7163 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
7164 goto cleanout;
7167 nnp = rc_node_alloc();
7168 if (nnp == NULL) {
7169 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7170 goto cleanout;
7173 nnp->rn_id = np->rn_id; /* structure assignment */
7174 nnp->rn_hash = np->rn_hash;
7175 nnp->rn_name = strdup(np->rn_name);
7176 nnp->rn_type = strdup(np->rn_type);
7177 nnp->rn_pgflags = np->rn_pgflags;
7179 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
7181 if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
7182 rc_node_destroy(nnp);
7183 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7184 goto cleanout;
7187 (void) pthread_mutex_lock(&np->rn_lock);
7189 /*
7190 * We must have all of the old properties in the cache, or the
7191 * database deletions could cause inconsistencies.
7192 */
7193 if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
7194 REP_PROTOCOL_SUCCESS) {
7195 (void) pthread_mutex_unlock(&np->rn_lock);
7196 rc_node_destroy(nnp);
7197 goto cleanout;
7200 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
7201 (void) pthread_mutex_unlock(&np->rn_lock);
7202 rc_node_destroy(nnp);
7203 rc = REP_PROTOCOL_FAIL_DELETED;
7204 goto cleanout;
7207 if (np->rn_flags & RC_NODE_OLD) {
7208 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
7209 (void) pthread_mutex_unlock(&np->rn_lock);
7210 rc_node_destroy(nnp);
7211 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7212 goto cleanout;
7215 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
7216 if (pp == NULL) {
7217 /* our parent is gone, we're going next... */
7218 rc_node_destroy(nnp);
7219 (void) pthread_mutex_lock(&np->rn_lock);
7220 if (np->rn_flags & RC_NODE_OLD) {
7221 (void) pthread_mutex_unlock(&np->rn_lock);
7222 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7223 goto cleanout;
7225 (void) pthread_mutex_unlock(&np->rn_lock);
7226 rc = REP_PROTOCOL_FAIL_DELETED;
7227 goto cleanout;
7229 (void) pthread_mutex_unlock(&pp->rn_lock);
7231 /*
7232 * prepare for the transaction
7233 */
7234 (void) pthread_mutex_lock(&np->rn_lock);
7235 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
7236 (void) pthread_mutex_unlock(&np->rn_lock);
7237 (void) pthread_mutex_lock(&pp->rn_lock);
7238 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7239 (void) pthread_mutex_unlock(&pp->rn_lock);
7240 rc_node_destroy(nnp);
7241 rc = REP_PROTOCOL_FAIL_DELETED;
7242 goto cleanout;
7244 nnp->rn_gen_id = np->rn_gen_id;
7245 (void) pthread_mutex_unlock(&np->rn_lock);
7247 /* Sets nnp->rn_gen_id on success. */
7248 rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id);
7250 (void) pthread_mutex_lock(&np->rn_lock);
7251 if (rc != REP_PROTOCOL_SUCCESS) {
7252 rc_node_rele_flag(np, RC_NODE_IN_TX);
7253 (void) pthread_mutex_unlock(&np->rn_lock);
7254 (void) pthread_mutex_lock(&pp->rn_lock);
7255 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7256 (void) pthread_mutex_unlock(&pp->rn_lock);
7257 rc_node_destroy(nnp);
7258 rc_node_clear(txp, 0);
7259 if (rc == REP_PROTOCOL_DONE)
7260 rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
7261 goto cleanout;
7262 }
7264 /*
7265 * Notify waiters
7266 */
7267 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7268 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
7269 rc_pg_notify_fire(pnp);
7270 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7272 np->rn_flags |= RC_NODE_OLD;
7273 (void) pthread_mutex_unlock(&np->rn_lock);
7275 rc_notify_remove_node(np);
7277 /*
7278 * replace np with nnp
7279 */
7280 rc_node_relink_child(pp, np, nnp);
7282 /*
7283 * all done -- clear the transaction.
7284 */
7285 rc_node_clear(txp, 0);
7286 generate_property_events(tx_data, pg_fmri, auth_string,
7287 auth_status, auth_ret_value);
7289 rc = REP_PROTOCOL_SUCCESS;
7291 cleanout:
7292 free(auth_string);
7293 free(pg_fmri);
7294 tx_commit_data_free(tx_data);
7295 return (rc);
7296 }
7298 void
7299 rc_pg_notify_init(rc_node_pg_notify_t *pnp)
7300 {
7301 uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7302 pnp->rnpn_pg = NULL;
7303 pnp->rnpn_fd = -1;
7304 }
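/*
 * Arm pnp so that fd is notified when the property group referenced by npp
 * next changes: rc_pg_notify_fire() is called on pnp first to clear any
 * notification already armed, then pnp is placed on the pg's
 * rn_pg_notify_list (fired from rc_tx_commit() when the group is rewritten).
 */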
7306 int
7307 rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
7308 {
7309 rc_node_t *np;
7311 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
7313 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
7314 (void) pthread_mutex_unlock(&np->rn_lock);
7315 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7316 }
7318 /*
7319 * wait for any transaction in progress to complete
7320 */
7321 if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
7322 (void) pthread_mutex_unlock(&np->rn_lock);
7323 return (REP_PROTOCOL_FAIL_DELETED);
7326 if (np->rn_flags & RC_NODE_OLD) {
7327 (void) pthread_mutex_unlock(&np->rn_lock);
7328 return (REP_PROTOCOL_FAIL_NOT_LATEST);
7331 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7332 rc_pg_notify_fire(pnp);
7333 pnp->rnpn_pg = np;
7334 pnp->rnpn_fd = fd;
7335 (void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
7336 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7338 (void) pthread_mutex_unlock(&np->rn_lock);
7339 return (REP_PROTOCOL_SUCCESS);
7340 }
7342 void
7343 rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
7344 {
7345 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7346 rc_pg_notify_fire(pnp);
7347 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7349 uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7350 }
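/*
 * Initialize a notification client structure: set up its uu_list nodes and
 * its marker entry (rni_notify, which points back at the client), and clear
 * its name and type watch lists.
 */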
7352 void
7353 rc_notify_info_init(rc_notify_info_t *rnip)
7354 {
7355 int i;
7357 uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7358 uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7359 rc_notify_pool);
7361 rnip->rni_notify.rcn_node = NULL;
7362 rnip->rni_notify.rcn_info = rnip;
7364 bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
7365 bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));
7367 (void) pthread_cond_init(&rnip->rni_cv, NULL);
7369 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7370 rnip->rni_namelist[i] = NULL;
7371 rnip->rni_typelist[i] = NULL;
7372 }
7373 }
7375 static void
7376 rc_notify_info_insert_locked(rc_notify_info_t *rnip)
7377 {
7378 assert(MUTEX_HELD(&rc_pg_notify_lock));
7380 assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));
7382 rnip->rni_flags |= RC_NOTIFY_ACTIVE;
7383 (void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
7384 (void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
7385 }
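/*
 * Deactivate a notification client: mark it draining, wake any waiters, take
 * it off the active-client list, clean up notifications queued ahead of its
 * marker when it is first in line, and clear the DRAIN/ACTIVE flags once all
 * of its waiters have left.
 */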
7387 static void
7388 rc_notify_info_remove_locked(rc_notify_info_t *rnip)
7389 {
7390 rc_notify_t *me = &rnip->rni_notify;
7391 rc_notify_t *np;
7393 assert(MUTEX_HELD(&rc_pg_notify_lock));
7395 assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);
7397 assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));
7398 rnip->rni_flags |= RC_NOTIFY_DRAIN;
7399 (void) pthread_cond_broadcast(&rnip->rni_cv);
7401 (void) uu_list_remove(rc_notify_info_list, rnip);
7403 /*
7404 * clean up any notifications at the beginning of the list
7405 */
7406 if (uu_list_first(rc_notify_list) == me) {
7407 /*
7408 * We can't call rc_notify_remove_locked() unless
7409 * rc_notify_in_use is 0.
7410 */
7411 while (rc_notify_in_use) {
7412 (void) pthread_cond_wait(&rc_pg_notify_cv,
7413 &rc_pg_notify_lock);
7415 while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
7416 np->rcn_info == NULL)
7417 rc_notify_remove_locked(np);
7419 (void) uu_list_remove(rc_notify_list, me);
7421 while (rnip->rni_waiters) {
7422 (void) pthread_cond_broadcast(&rc_pg_notify_cv);
7423 (void) pthread_cond_broadcast(&rnip->rni_cv);
7424 (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7427 rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
7428 }
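/*
 * Add name to one of rnip's watch lists (arr is rni_namelist or
 * rni_typelist) if it is a valid property group name or type and is not
 * already present, activating the client if necessary. Fails with
 * _NO_RESOURCES if the list is full or memory is exhausted.
 */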
7430 static int
7431 rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
7432 const char *name)
7433 {
7434 int i;
7435 int rc;
7436 char *f;
7438 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
7439 if (rc != REP_PROTOCOL_SUCCESS)
7440 return (rc);
7442 f = strdup(name);
7443 if (f == NULL)
7444 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7446 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7448 while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
7449 (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7451 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7452 if (arr[i] == NULL)
7453 break;
7455 /*
7456 * Don't add name if it's already being tracked.
7457 */
7458 if (strcmp(arr[i], f) == 0) {
7459 free(f);
7460 goto out;
7461 }
7462 }
7464 if (i == RC_NOTIFY_MAX_NAMES) {
7465 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7466 free(f);
7467 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7470 arr[i] = f;
7472 out:
7473 if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
7474 rc_notify_info_insert_locked(rnip);
7476 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7477 return (REP_PROTOCOL_SUCCESS);
7478 }
7480 int
7481 rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
7482 {
7483 return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
7484 }
7486 int
7487 rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
7488 {
7489 return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
7490 }
7492 /*
7493 * Wait for and report an event of interest to rnip, a notification client
7494 */
7495 int
7496 rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
7497 char *outp, size_t sz)
7498 {
7499 rc_notify_t *np;
7500 rc_notify_t *me = &rnip->rni_notify;
7501 rc_node_t *nnp;
7502 rc_notify_delete_t *ndp;
7504 int am_first_info;
7506 if (sz > 0)
7507 outp[0] = 0;
7509 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7511 while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
7512 RC_NOTIFY_ACTIVE) {
7513 /*
7514 * If I'm first on the notify list, it is my job to
7515 * clean up any notifications I pass by. I can't do that
7516 * if someone is blocking the list from removals, so I
7517 * have to wait until they have all drained.
7518 */
7519 am_first_info = (uu_list_first(rc_notify_list) == me);
7520 if (am_first_info && rc_notify_in_use) {
7521 rnip->rni_waiters++;
7522 (void) pthread_cond_wait(&rc_pg_notify_cv,
7523 &rc_pg_notify_lock);
7524 rnip->rni_waiters--;
7525 continue;
7526 }
7528 /*
7529 * Search the list for a node of interest.
7530 */
7531 np = uu_list_next(rc_notify_list, me);
7532 while (np != NULL && !rc_notify_info_interested(rnip, np)) {
7533 rc_notify_t *next = uu_list_next(rc_notify_list, np);
7535 if (am_first_info) {
7536 if (np->rcn_info) {
7537 /*
7538 * Passing another client -- stop
7539 * cleaning up notifications
7540 */
7541 am_first_info = 0;
7542 } else {
7543 rc_notify_remove_locked(np);
7544 }
7545 }
7546 np = next;
7547 }
7549 /*
7550 * Nothing of interest -- wait for notification
7551 */
7552 if (np == NULL) {
7553 rnip->rni_waiters++;
7554 (void) pthread_cond_wait(&rnip->rni_cv,
7555 &rc_pg_notify_lock);
7556 rnip->rni_waiters--;
7557 continue;
7558 }
7560 /*
7561 * found something to report -- move myself after the
7562 * notification and process it.
7563 */
7564 (void) uu_list_remove(rc_notify_list, me);
7565 (void) uu_list_insert_after(rc_notify_list, np, me);
7567 if ((ndp = np->rcn_delete) != NULL) {
7568 (void) strlcpy(outp, ndp->rnd_fmri, sz);
7569 if (am_first_info)
7570 rc_notify_remove_locked(np);
7571 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7572 rc_node_clear(out, 0);
7573 return (REP_PROTOCOL_SUCCESS);
7576 nnp = np->rcn_node;
7577 assert(nnp != NULL);
7579 /*
7580 * We can't bump nnp's reference count without grabbing its
7581 * lock, and rc_pg_notify_lock is a leaf lock. So we
7582 * temporarily block all removals to keep nnp from
7583 * disappearing.
7584 */
7585 rc_notify_in_use++;
7586 assert(rc_notify_in_use > 0);
7587 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7589 rc_node_assign(out, nnp);
7591 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7592 assert(rc_notify_in_use > 0);
7593 rc_notify_in_use--;
7595 if (am_first_info) {
7596 /*
7597 * While we had the lock dropped, another thread
7598 * may have also incremented rc_notify_in_use. We
7599 * need to make sure that we're back to 0 before
7600 * removing the node.
7601 */
7602 while (rc_notify_in_use) {
7603 (void) pthread_cond_wait(&rc_pg_notify_cv,
7604 &rc_pg_notify_lock);
7606 rc_notify_remove_locked(np);
7608 if (rc_notify_in_use == 0)
7609 (void) pthread_cond_broadcast(&rc_pg_notify_cv);
7610 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7612 return (REP_PROTOCOL_SUCCESS);
7613 }
7614 /*
7615 * If we're the last one out, let people know it's clear.
7616 */
7617 if (rnip->rni_waiters == 0)
7618 (void) pthread_cond_broadcast(&rnip->rni_cv);
7619 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7620 return (REP_PROTOCOL_DONE);
7621 }
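/*
 * Drop a client's watches: deactivate it, then free every name and type
 * entry while RC_NOTIFY_EMPTYING keeps rc_notify_info_add_watch() from
 * adding entries concurrently.
 */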
7623 static void
7624 rc_notify_info_reset(rc_notify_info_t *rnip)
7625 {
7626 int i;
7628 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7629 if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
7630 rc_notify_info_remove_locked(rnip);
7631 assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
7632 rnip->rni_flags |= RC_NOTIFY_EMPTYING;
7633 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7635 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7636 if (rnip->rni_namelist[i] != NULL) {
7637 free((void *)rnip->rni_namelist[i]);
7638 rnip->rni_namelist[i] = NULL;
7640 if (rnip->rni_typelist[i] != NULL) {
7641 free((void *)rnip->rni_typelist[i]);
7642 rnip->rni_typelist[i] = NULL;
7643 }
7644 }
7646 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7647 rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
7648 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7649 }
7651 void
7652 rc_notify_info_fini(rc_notify_info_t *rnip)
7653 {
7654 rc_notify_info_reset(rnip);
7656 uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7657 uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7658 rc_notify_pool);