4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
21 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
22 * Use is subject to license terms.
25 #pragma ident "%Z%%M% %I% %E% SMI"
28 #include "rcm_module.h"
31 * Short-circuits unloading of modules with no registrations, so that
32 * they are present during the next db_sync cycle.
34 #define MOD_REFCNT_INIT 2
/*
 * Daemon-global state. module_head is protected by mod_lock;
 * rsrc_root is the root of the resource tree built by rsrc_node_find().
 */
36 int need_cleanup
; /* flag indicating if clean up is needed */
38 static mutex_t mod_lock
; /* protects module list */
39 static module_t
*module_head
; /* linked list of modules */
40 static rsrc_node_t
*rsrc_root
; /* root of all resources */
45 static void rcmd_db_print();
46 static void rcm_handle_free(rcm_handle_t
*);
47 static rcm_handle_t
*rcm_handle_alloc(module_t
*);
48 static void rsrc_clients_free(client_t
*);
49 static struct rcm_mod_ops
*modops_from_v1(void *);
50 static int call_getinfo(struct rcm_mod_ops
*, rcm_handle_t
*, char *, id_t
,
51 uint_t
, char **, char **, nvlist_t
*, rcm_info_t
**);
52 static int node_action(rsrc_node_t
*, void *);
54 extern void start_polling_thread();
57 * translate /dev name to a /devices path
59 * N.B. This routine can be enhanced to understand network names
60 * and friendly names in the future.
63 resolve_name(char *alias
)
66 const char *dev
= "/dev/";
68 if (strlen(alias
) == 0)
71 if (strncmp(alias
, dev
, strlen(dev
)) == 0) {
73 * Treat /dev/... as a symbolic link
75 tmp
= s_malloc(PATH_MAX
);
76 if (realpath(alias
, tmp
) != NULL
) {
81 /* Fail to resolve /dev/ name, use the name as is */
84 return (s_strdup(alias
));
88 * Figure out resource type based on "resolved" name
90 * N.B. This routine does not figure out file system mount points.
91 * This is determined at runtime when filesys module register
92 * with RCM_FILESYS flag.
95 rsrc_get_type(const char *resolved_name
)
97 if (resolved_name
[0] != '/')
98 return (RSRC_TYPE_ABSTRACT
);
100 if (strncmp("/devices/", resolved_name
, 9) == 0)
101 return (RSRC_TYPE_DEVICE
);
103 return (RSRC_TYPE_NORMAL
);
/*
 * module_load, module_unload, module_info, module_attach, module_detach,
 * cli_module_hold, cli_module_rele
 */

#ifdef	ENABLE_MODULE_DETACH
/*
 * call unregister() entry point to allow module to unregister for
 * resources without getting confused.
 */
static void
module_detach(module_t *module)
{
	struct rcm_mod_ops *ops = module->modops;

	rcm_log_message(RCM_TRACE2, "module_detach(name=%s)\n", module->name);

	ops->rcmop_unregister(module->rcmhandle);
}
#endif	/* ENABLE_MODULE_DETACH */
129 * call register() entry point to allow module to register for resources
132 module_attach(module_t
*module
)
134 struct rcm_mod_ops
*ops
= module
->modops
;
136 rcm_log_message(RCM_TRACE2
, "module_attach(name=%s)\n", module
->name
);
138 if (ops
->rcmop_register(module
->rcmhandle
) != RCM_SUCCESS
) {
139 rcm_log_message(RCM_WARNING
,
140 gettext("module %s register() failed\n"), module
->name
);
145 module_init(module_t
*module
)
147 if (module
->dlhandle
)
149 return (module
->init());
152 return (script_init(module
));
156 * call rmc_mod_info() entry of module
159 module_info(module_t
*module
)
161 if (module
->dlhandle
)
163 return (module
->info());
166 return (script_info(module
));
170 module_fini(module_t
*module
)
172 if (module
->dlhandle
)
174 return (module
->fini());
177 return (script_fini(module
));
181 * call rmc_mod_fini() entry of module, dlclose module, and free memory
184 module_unload(module_t
*module
)
186 int version
= module
->modops
->version
;
188 rcm_log_message(RCM_DEBUG
, "module_unload(name=%s)\n", module
->name
);
190 (void) module_fini(module
);
192 rcm_handle_free(module
->rcmhandle
);
198 * Free memory associated with converted ops vector
200 free(module
->modops
);
203 case RCM_MOD_OPS_VERSION
:
208 if (module
->dlhandle
)
209 rcm_module_close(module
->dlhandle
);
215 * Locate the module, execute rcm_mod_init() and check ops vector version
218 module_load(char *modname
)
222 rcm_log_message(RCM_DEBUG
, "module_load(name=%s)\n", modname
);
227 module
= s_calloc(1, sizeof (*module
));
228 module
->name
= s_strdup(modname
);
229 module
->modops
= NULL
;
230 rcm_init_queue(&module
->client_q
);
232 if (rcm_is_script(modname
) == 0) {
234 module
->dlhandle
= rcm_module_open(modname
);
236 if (module
->dlhandle
== NULL
) {
237 rcm_log_message(RCM_NOTICE
,
238 gettext("cannot open module %s\n"), modname
);
243 * dlsym rcm_mod_init/fini/info() entry points
245 module
->init
= (struct rcm_mod_ops
*(*)())dlsym(
246 module
->dlhandle
, "rcm_mod_init");
247 module
->fini
= (int (*)())dlsym(
248 module
->dlhandle
, "rcm_mod_fini");
249 module
->info
= (const char *(*)())dlsym(module
->dlhandle
,
251 if (module
->init
== NULL
|| module
->fini
== NULL
||
252 module
->info
== NULL
) {
253 rcm_log_message(RCM_ERROR
,
254 gettext("missing entries in module %s\n"), modname
);
260 module
->dlhandle
= NULL
;
261 module
->init
= (struct rcm_mod_ops
*(*)()) NULL
;
262 module
->fini
= (int (*)()) NULL
;
263 module
->info
= (const char *(*)()) NULL
;
266 if ((module
->modops
= module_init(module
)) == NULL
) {
267 if (module
->dlhandle
)
268 rcm_log_message(RCM_ERROR
,
269 gettext("cannot init module %s\n"), modname
);
274 * Check ops vector version
276 switch (module
->modops
->version
) {
278 module
->modops
= modops_from_v1((void *)module
->modops
);
281 case RCM_MOD_OPS_VERSION
:
285 rcm_log_message(RCM_ERROR
,
286 gettext("module %s rejected: version %d not supported\n"),
287 modname
, module
->modops
->version
);
288 (void) module_fini(module
);
293 * Make sure all fields are set
295 if ((module
->modops
->rcmop_register
== NULL
) ||
296 (module
->modops
->rcmop_unregister
== NULL
) ||
297 (module
->modops
->rcmop_get_info
== NULL
) ||
298 (module
->modops
->rcmop_request_suspend
== NULL
) ||
299 (module
->modops
->rcmop_notify_resume
== NULL
) ||
300 (module
->modops
->rcmop_request_offline
== NULL
) ||
301 (module
->modops
->rcmop_notify_online
== NULL
) ||
302 (module
->modops
->rcmop_notify_remove
== NULL
)) {
303 rcm_log_message(RCM_ERROR
,
304 gettext("module %s rejected: has NULL ops fields\n"),
306 (void) module_fini(module
);
310 module
->rcmhandle
= rcm_handle_alloc(module
);
314 if (module
->modops
&& module
->modops
->version
== RCM_MOD_OPS_V1
)
315 free(module
->modops
);
317 if (module
->dlhandle
)
318 rcm_module_close(module
->dlhandle
);
326 * add one to module hold count. load the module if not loaded
329 cli_module_hold(char *modname
)
333 rcm_log_message(RCM_TRACE3
, "cli_module_hold(%s)\n", modname
);
335 (void) mutex_lock(&mod_lock
);
336 module
= module_head
;
338 if (strcmp(module
->name
, modname
) == 0) {
341 module
= module
->next
;
346 (void) mutex_unlock(&mod_lock
);
351 * Module not found, attempt to load it
353 if ((module
= module_load(modname
)) == NULL
) {
354 (void) mutex_unlock(&mod_lock
);
359 * Hold module and link module into module list
361 module
->ref_count
= MOD_REFCNT_INIT
;
362 module
->next
= module_head
;
363 module_head
= module
;
365 (void) mutex_unlock(&mod_lock
);
371 * decrement module hold count. Unload it if no reference
374 cli_module_rele(module_t
*module
)
376 module_t
*curr
= module_head
, *prev
= NULL
;
378 rcm_log_message(RCM_TRACE3
, "cli_module_rele(name=%s)\n", module
->name
);
380 (void) mutex_lock(&mod_lock
);
381 if (--(module
->ref_count
) != 0) {
382 (void) mutex_unlock(&mod_lock
);
386 rcm_log_message(RCM_TRACE2
, "unloading module %s\n", module
->name
);
389 * Unlink the module from list
391 while (curr
&& (curr
!= module
)) {
396 rcm_log_message(RCM_ERROR
,
397 gettext("Unexpected error: module %s not found.\n"),
399 } else if (prev
== NULL
) {
400 module_head
= curr
->next
;
402 prev
->next
= curr
->next
;
404 (void) mutex_unlock(&mod_lock
);
406 module_unload(module
);
410 * Gather usage info be passed back to requester. Discard info if user does
411 * not care (list == NULL).
414 add_busy_rsrc_to_list(char *alias
, pid_t pid
, int state
, int seq_num
,
415 char *modname
, const char *infostr
, const char *errstr
,
416 nvlist_t
*client_props
, rcm_info_t
**list
)
427 info
= s_calloc(1, sizeof (*info
));
428 if (errno
= nvlist_alloc(&(info
->info
), NV_UNIQUE_NAME
, 0)) {
429 rcm_log_message(RCM_ERROR
, "failed (nvlist_alloc=%s).\n",
435 if ((errno
= nvlist_add_string(info
->info
, RCM_RSRCNAME
, alias
)) ||
436 (errno
= nvlist_add_int32(info
->info
, RCM_SEQ_NUM
, seq_num
)) ||
437 (errno
= nvlist_add_int64(info
->info
, RCM_CLIENT_ID
, pid
)) ||
438 (errno
= nvlist_add_int32(info
->info
, RCM_RSRCSTATE
, state
))) {
439 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
445 * Daemon calls to add_busy_rsrc_to_list may pass in
446 * error/info. Add these through librcm interfaces.
449 rcm_log_message(RCM_TRACE3
, "adding error string: %s\n",
451 if (errno
= nvlist_add_string(info
->info
, RCM_CLIENT_ERROR
,
453 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
460 if (errno
= nvlist_add_string(info
->info
, RCM_CLIENT_INFO
,
462 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
469 if (errno
= nvlist_add_string(info
->info
, RCM_CLIENT_MODNAME
,
471 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
478 if (errno
= nvlist_pack(client_props
, &buf
, &buflen
,
479 NV_ENCODE_NATIVE
, 0)) {
480 rcm_log_message(RCM_ERROR
, "failed (nvlist_pack=%s).\n",
484 if (errno
= nvlist_add_byte_array(info
->info
,
485 RCM_CLIENT_PROPERTIES
, (uchar_t
*)buf
, buflen
)) {
486 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
494 /* link info at end of list */
506 * Resource client realted operations:
507 * rsrc_client_alloc, rsrc_client_find, rsrc_client_add,
508 * rsrc_client_remove, rsrc_client_action, rsrc_client_action_list
511 /* Allocate rsrc_client_t structure. Load module if necessary. */
514 rsrc_client_alloc(char *alias
, char *modname
, pid_t pid
, uint_t flag
)
519 assert((alias
!= NULL
) && (modname
!= NULL
));
521 rcm_log_message(RCM_TRACE4
, "rsrc_client_alloc(%s, %s, %ld)\n",
522 alias
, modname
, pid
);
524 if ((mod
= cli_module_hold(modname
)) == NULL
) {
528 client
= s_calloc(1, sizeof (client_t
));
529 client
->module
= mod
;
531 client
->alias
= s_strdup(alias
);
532 client
->prv_flags
= 0;
533 client
->state
= RCM_STATE_ONLINE
;
536 /* This queue is protected by rcm_req_lock */
537 rcm_enqueue_tail(&mod
->client_q
, &client
->queue
);
542 /* Find client in list matching modname and pid */
544 rsrc_client_find(char *modname
, pid_t pid
, client_t
**list
)
546 client_t
*client
= *list
;
548 rcm_log_message(RCM_TRACE4
, "rsrc_client_find(%s, %ld, %p)\n",
549 modname
, pid
, (void *)list
);
552 if ((client
->pid
== pid
) &&
553 strcmp(modname
, client
->module
->name
) == 0) {
556 client
= client
->next
;
561 /* Add a client to client list */
563 rsrc_client_add(client_t
*client
, client_t
**list
)
565 rcm_log_message(RCM_TRACE4
, "rsrc_client_add: %s, %s, %ld\n",
566 client
->alias
, client
->module
->name
, client
->pid
);
568 client
->next
= *list
;
572 /* Remove client from list and destroy it */
574 rsrc_client_remove(client_t
*client
, client_t
**list
)
576 client_t
*tmp
, *prev
= NULL
;
578 rcm_log_message(RCM_TRACE4
, "rsrc_client_remove: %s, %s, %ld\n",
579 client
->alias
, client
->module
->name
, client
->pid
);
589 prev
->next
= tmp
->next
;
594 rsrc_clients_free(tmp
);
599 /* Free a list of clients. Called from cleanup thread only */
601 rsrc_clients_free(client_t
*list
)
603 client_t
*client
= list
;
608 * Note that the rcm daemon is single threaded while
609 * executing this routine. So there is no need to acquire
610 * rcm_req_lock here while dequeuing.
612 rcm_dequeue(&client
->queue
);
614 if (client
->module
) {
615 cli_module_rele(client
->module
);
627 * Invoke a callback into a single client
628 * This is the core of rcm_mod_ops interface
631 rsrc_client_action(client_t
*client
, int cmd
, void *arg
)
633 int rval
= RCM_SUCCESS
;
634 char *dummy_error
= NULL
;
638 nvlist_t
*client_props
= NULL
;
639 rcm_info_t
*depend_info
= NULL
;
640 struct rcm_mod_ops
*ops
= client
->module
->modops
;
641 tree_walk_arg_t
*targ
= (tree_walk_arg_t
*)arg
;
643 rcm_log_message(RCM_TRACE4
,
644 "rsrc_client_action: %s, %s, cmd=%d, flag=0x%x\n", client
->alias
,
645 client
->module
->name
, cmd
, targ
->flag
);
648 * Create a per-operation handle, increment seq_num by 1 so we will
649 * know if a module uses this handle to callback into rcm_daemon.
651 hdl
= rcm_handle_alloc(client
->module
);
652 hdl
->seq_num
= targ
->seq_num
+ 1;
655 * Filter out operations for which the client didn't register.
663 if ((client
->flag
& RCM_REGISTER_DR
) == 0) {
664 rcm_handle_free(hdl
);
665 return (RCM_SUCCESS
);
668 case CMD_REQUEST_CHANGE
:
669 case CMD_NOTIFY_CHANGE
:
670 if ((client
->flag
& RCM_REGISTER_CAPACITY
) == 0) {
671 rcm_handle_free(hdl
);
672 return (RCM_SUCCESS
);
676 if ((client
->flag
& RCM_REGISTER_EVENT
) == 0) {
677 rcm_handle_free(hdl
);
678 return (RCM_SUCCESS
);
684 * Create nvlist_t for any client-specific properties.
686 if (errno
= nvlist_alloc(&client_props
, NV_UNIQUE_NAME
, 0)) {
687 rcm_log_message(RCM_ERROR
,
688 "client action failed (nvlist_alloc=%s)\n",
694 * Process the operation via a callback to the client module.
698 rval
= call_getinfo(ops
, hdl
, client
->alias
, client
->pid
,
699 targ
->flag
, &info
, &error
, client_props
, &depend_info
);
703 if (((targ
->flag
& RCM_QUERY_CANCEL
) == 0) &&
704 (client
->state
== RCM_STATE_SUSPEND
)) {
708 if ((targ
->flag
& RCM_QUERY
) == 0) {
709 rcm_log_message(RCM_DEBUG
, "suspending %s\n",
711 } else if ((targ
->flag
& RCM_QUERY_CANCEL
) == 0) {
712 rcm_log_message(RCM_DEBUG
, "suspend query %s\n",
715 rcm_log_message(RCM_DEBUG
,
716 "suspend query %s cancelled\n", client
->alias
);
720 * Update the client's state before the operation.
721 * If this is a cancelled query, then updating the state is
722 * the only thing that needs to be done, so break afterwards.
724 if ((targ
->flag
& RCM_QUERY
) == 0) {
725 client
->state
= RCM_STATE_SUSPENDING
;
726 } else if ((targ
->flag
& RCM_QUERY_CANCEL
) == 0) {
727 client
->state
= RCM_STATE_SUSPEND_QUERYING
;
729 client
->state
= RCM_STATE_ONLINE
;
733 rval
= ops
->rcmop_request_suspend(hdl
, client
->alias
,
734 client
->pid
, targ
->interval
, targ
->flag
, &error
,
737 /* Update the client's state after the operation. */
738 if ((targ
->flag
& RCM_QUERY
) == 0) {
739 if (rval
== RCM_SUCCESS
) {
740 client
->state
= RCM_STATE_SUSPEND
;
742 client
->state
= RCM_STATE_SUSPEND_FAIL
;
745 if (rval
== RCM_SUCCESS
) {
746 client
->state
= RCM_STATE_SUSPEND_QUERY
;
748 client
->state
= RCM_STATE_SUSPEND_QUERY_FAIL
;
754 if (client
->state
== RCM_STATE_ONLINE
) {
757 client
->state
= RCM_STATE_RESUMING
;
758 rval
= ops
->rcmop_notify_resume(hdl
, client
->alias
, client
->pid
,
759 targ
->flag
, &error
, &depend_info
);
761 /* online state is unconditional */
762 client
->state
= RCM_STATE_ONLINE
;
766 if (((targ
->flag
& RCM_QUERY_CANCEL
) == 0) &&
767 (client
->state
== RCM_STATE_OFFLINE
)) {
771 if ((targ
->flag
& RCM_QUERY
) == 0) {
772 rcm_log_message(RCM_DEBUG
, "offlining %s\n",
774 } else if ((targ
->flag
& RCM_QUERY_CANCEL
) == 0) {
775 rcm_log_message(RCM_DEBUG
, "offline query %s\n",
778 rcm_log_message(RCM_DEBUG
,
779 "offline query %s cancelled\n", client
->alias
);
783 * Update the client's state before the operation.
784 * If this is a cancelled query, then updating the state is
785 * the only thing that needs to be done, so break afterwards.
787 if ((targ
->flag
& RCM_QUERY
) == 0) {
788 client
->state
= RCM_STATE_OFFLINING
;
789 } else if ((targ
->flag
& RCM_QUERY_CANCEL
) == 0) {
790 client
->state
= RCM_STATE_OFFLINE_QUERYING
;
792 client
->state
= RCM_STATE_ONLINE
;
796 rval
= ops
->rcmop_request_offline(hdl
, client
->alias
,
797 client
->pid
, targ
->flag
, &error
, &depend_info
);
800 * If this is a retire operation and we managed to call
801 * into at least one client, set retcode to RCM_SUCCESS to
802 * indicate that retire has been subject to constraints
803 * This retcode will be further modified by actual return
806 if ((targ
->flag
& RCM_RETIRE_REQUEST
) &&
807 (targ
->retcode
== RCM_NO_CONSTRAINT
)) {
808 rcm_log_message(RCM_DEBUG
,
809 "at least 1 client, constraint applied: %s\n",
811 targ
->retcode
= RCM_SUCCESS
;
814 /* Update the client's state after the operation. */
815 if ((targ
->flag
& RCM_QUERY
) == 0) {
816 if (rval
== RCM_SUCCESS
) {
817 client
->state
= RCM_STATE_OFFLINE
;
819 client
->state
= RCM_STATE_OFFLINE_FAIL
;
822 if (rval
== RCM_SUCCESS
) {
823 client
->state
= RCM_STATE_OFFLINE_QUERY
;
825 client
->state
= RCM_STATE_OFFLINE_QUERY_FAIL
;
831 if (client
->state
== RCM_STATE_ONLINE
) {
835 rcm_log_message(RCM_DEBUG
, "onlining %s\n", client
->alias
);
837 client
->state
= RCM_STATE_ONLINING
;
838 rval
= ops
->rcmop_notify_online(hdl
, client
->alias
, client
->pid
,
839 targ
->flag
, &error
, &depend_info
);
840 client
->state
= RCM_STATE_ONLINE
;
844 rcm_log_message(RCM_DEBUG
, "removing %s\n", client
->alias
);
845 client
->state
= RCM_STATE_REMOVING
;
846 rval
= ops
->rcmop_notify_remove(hdl
, client
->alias
, client
->pid
,
847 targ
->flag
, &error
, &depend_info
);
848 client
->state
= RCM_STATE_REMOVE
;
851 case CMD_REQUEST_CHANGE
:
852 rcm_log_message(RCM_DEBUG
, "requesting state change of %s\n",
854 if (ops
->rcmop_request_capacity_change
)
855 rval
= ops
->rcmop_request_capacity_change(hdl
,
856 client
->alias
, client
->pid
, targ
->flag
, targ
->nvl
,
857 &error
, &depend_info
);
860 case CMD_NOTIFY_CHANGE
:
861 rcm_log_message(RCM_DEBUG
, "requesting state change of %s\n",
863 if (ops
->rcmop_notify_capacity_change
)
864 rval
= ops
->rcmop_notify_capacity_change(hdl
,
865 client
->alias
, client
->pid
, targ
->flag
, targ
->nvl
,
866 &error
, &depend_info
);
870 rcm_log_message(RCM_DEBUG
, "delivering event to %s\n",
872 if (ops
->rcmop_notify_event
)
873 rval
= ops
->rcmop_notify_event(hdl
, client
->alias
,
874 client
->pid
, targ
->flag
, &error
, targ
->nvl
,
879 rcm_log_message(RCM_ERROR
, gettext("unknown command %d\n"),
885 /* reset error code to the most significant error */
886 if (rval
!= RCM_SUCCESS
)
887 targ
->retcode
= rval
;
890 * XXX - The code below may produce duplicate rcm_info_t's on error?
892 if ((cmd
!= CMD_GETINFO
) &&
893 ((rval
!= RCM_SUCCESS
) ||
895 (targ
->flag
& RCM_SCOPE
))) {
896 (void) call_getinfo(ops
, hdl
, client
->alias
, client
->pid
,
897 targ
->flag
& (~(RCM_INCLUDE_DEPENDENT
|RCM_INCLUDE_SUBTREE
)),
898 &info
, &dummy_error
, client_props
, &depend_info
);
900 (void) free(dummy_error
);
901 } else if (cmd
!= CMD_GETINFO
) {
902 nvlist_free(client_props
);
907 add_busy_rsrc_to_list(client
->alias
, client
->pid
, client
->state
,
908 targ
->seq_num
, client
->module
->name
, info
, error
,
909 client_props
, targ
->info
);
910 nvlist_free(client_props
);
920 (void) rcm_append_info(targ
->info
, depend_info
);
922 rcm_free_info(depend_info
);
926 rcm_handle_free(hdl
);
931 * invoke a callback into a list of clients, return 0 if all success
934 rsrc_client_action_list(client_t
*list
, int cmd
, void *arg
)
936 int error
, rval
= RCM_SUCCESS
;
937 tree_walk_arg_t
*targ
= (tree_walk_arg_t
*)arg
;
940 client_t
*client
= list
;
944 * Make offline idempotent in the retire
947 if ((targ
->flag
& RCM_RETIRE_REQUEST
) &&
948 client
->state
== RCM_STATE_REMOVE
) {
949 client
->state
= RCM_STATE_ONLINE
;
950 rcm_log_message(RCM_DEBUG
, "RETIRE: idempotent client "
951 "state: REMOVE -> ONLINE: %s\n", client
->alias
);
954 if (client
->state
== RCM_STATE_REMOVE
)
957 error
= rsrc_client_action(client
, cmd
, arg
);
958 if (error
!= RCM_SUCCESS
) {
967 * Node realted operations:
969 * rn_alloc, rn_free, rn_find_child,
970 * rn_get_child, rn_get_sibling,
971 * rsrc_node_find, rsrc_node_add_user, rsrc_node_remove_user,
974 /* Allocate node based on a logical or physical name */
976 rn_alloc(char *name
, int type
)
980 rcm_log_message(RCM_TRACE4
, "rn_alloc(%s, %d)\n", name
, type
);
982 node
= s_calloc(1, sizeof (*node
));
983 node
->name
= s_strdup(name
);
990 * Free node along with its siblings and children
993 rn_free(rsrc_node_t
*node
)
1000 rn_free(node
->child
);
1003 if (node
->sibling
) {
1004 rn_free(node
->sibling
);
1007 rsrc_clients_free(node
->users
);
1015 static rsrc_node_t
*
1016 rn_get_sibling(rsrc_node_t
*node
)
1018 return (node
->sibling
);
1024 static rsrc_node_t
*
1025 rn_get_child(rsrc_node_t
*node
)
1027 return (node
->child
);
1031 * Find child named childname. Create it if flag is RSRC_NODE_CRTEATE
1033 static rsrc_node_t
*
1034 rn_find_child(rsrc_node_t
*parent
, char *childname
, int flag
, int type
)
1036 rsrc_node_t
*child
= parent
->child
;
1037 rsrc_node_t
*new, *prev
= NULL
;
1039 rcm_log_message(RCM_TRACE4
,
1040 "rn_find_child(parent=%s, child=%s, 0x%x, %d)\n",
1041 parent
->name
, childname
, flag
, type
);
1044 * Children are ordered based on strcmp.
1046 while (child
&& (strcmp(child
->name
, childname
) < 0)) {
1048 child
= child
->sibling
;
1051 if (child
&& (strcmp(child
->name
, childname
) == 0)) {
1055 if (flag
!= RSRC_NODE_CREATE
)
1058 new = rn_alloc(childname
, type
);
1059 new->parent
= parent
;
1060 new->sibling
= child
;
1063 * Set this linkage last so we don't break ongoing operations.
1065 * N.B. Assume setting a pointer is an atomic operation.
1068 parent
->child
= new;
1070 prev
->sibling
= new;
1077 * Pathname related help functions
1080 pn_preprocess(char *pathname
, int type
)
1084 if (type
!= RSRC_TYPE_DEVICE
)
1088 * For devices, convert ':' to '/' (treat minor nodes and children)
1090 tmp
= strchr(pathname
, ':');
/*
 * Return the next '/'-separated component of pathname, NUL-terminating it
 * in place. *lasts is set to the remainder of the string (or NULL when the
 * final component is returned) for use in subsequent calls. Returns NULL
 * when pathname is NULL or contains no further components.
 */
static char *
pn_getnextcomp(char *pathname, char **lasts)
{
	char *slash;

	if (pathname == NULL)
		return (NULL);

	/* skip leading slashes */
	while (*pathname == '/')
		++pathname;

	if (*pathname == '\0')
		return (NULL);

	slash = strchr(pathname, '/');
	if (slash != NULL) {
		*slash = '\0';
		*lasts = slash + 1;
	} else {
		*lasts = NULL;
	}

	return (pathname);
}
1124 * Find a node in tree based on device, which is the physical pathname
1125 * of the form /sbus@.../esp@.../sd@...
1128 rsrc_node_find(char *rsrcname
, int flag
, rsrc_node_t
**nodep
)
1130 char *pathname
, *nodename
, *lasts
;
1134 rcm_log_message(RCM_TRACE4
, "rn_node_find(%s, 0x%x)\n", rsrcname
, flag
);
1137 * For RSRC_TYPE_ABSTRACT, look under /ABSTRACT. For other types,
1138 * look under /SYSTEM.
1140 pathname
= resolve_name(rsrcname
);
1141 if (pathname
== NULL
)
1144 type
= rsrc_get_type(pathname
);
1146 case RSRC_TYPE_DEVICE
:
1147 case RSRC_TYPE_NORMAL
:
1148 node
= rn_find_child(rsrc_root
, "SYSTEM", RSRC_NODE_CREATE
,
1152 case RSRC_TYPE_ABSTRACT
:
1153 node
= rn_find_child(rsrc_root
, "ABSTRACT", RSRC_NODE_CREATE
,
1158 /* just to make sure */
1164 * Find position of device within tree. Upon exiting the loop, device
1165 * should be placed between prev and curr.
1167 pn_preprocess(pathname
, type
);
1169 while ((nodename
= pn_getnextcomp(lasts
, &lasts
)) != NULL
) {
1170 rsrc_node_t
*parent
= node
;
1171 node
= rn_find_child(parent
, nodename
, flag
, type
);
1173 assert((flag
& RSRC_NODE_CREATE
) == 0);
1176 return (RCM_SUCCESS
);
1181 return (RCM_SUCCESS
);
1185 * add a usage client to a node
1189 rsrc_node_add_user(rsrc_node_t
*node
, char *alias
, char *modname
, pid_t pid
,
1194 rcm_log_message(RCM_TRACE3
,
1195 "rsrc_node_add_user(%s, %s, %s, %ld, 0x%x)\n",
1196 node
->name
, alias
, modname
, pid
, flag
);
1198 user
= rsrc_client_find(modname
, pid
, &node
->users
);
1201 * If a client_t already exists, add the registration and return
1202 * success if it's a valid registration request.
1204 * Return EALREADY if the resource is already registered.
1205 * This means either the client_t already has the requested
1206 * registration flagged, or that a DR registration was attempted
1207 * on a resource already in use in the DR operations state model.
1211 if (user
->flag
& (flag
& RCM_REGISTER_MASK
)) {
1215 if ((flag
& RCM_REGISTER_DR
) &&
1216 (user
->state
!= RCM_STATE_REMOVE
)) {
1220 user
->flag
|= (flag
& RCM_REGISTER_MASK
);
1221 if ((flag
& RCM_REGISTER_DR
) ||
1222 (user
->state
== RCM_STATE_REMOVE
)) {
1223 user
->state
= RCM_STATE_ONLINE
;
1226 return (RCM_SUCCESS
);
1230 * Otherwise create a new client_t and create a new registration.
1232 if ((user
= rsrc_client_alloc(alias
, modname
, pid
, flag
)) != NULL
) {
1233 rsrc_client_add(user
, &node
->users
);
1235 if (flag
& RCM_FILESYS
)
1236 node
->type
= RSRC_TYPE_FILESYS
;
1238 return (RCM_SUCCESS
);
1242 * remove a usage client of a node
1245 rsrc_node_remove_user(rsrc_node_t
*node
, char *modname
, pid_t pid
, uint_t flag
)
1249 rcm_log_message(RCM_TRACE3
,
1250 "rsrc_node_remove_user(%s, %s, %ld, 0x%x)\n", node
->name
, modname
,
1253 user
= rsrc_client_find(modname
, pid
, &node
->users
);
1254 if ((user
== NULL
) || (user
->state
== RCM_STATE_REMOVE
)) {
1255 rcm_log_message(RCM_NOTICE
, gettext(
1256 "client not registered: module=%s, pid=%d, dev=%s\n"),
1257 modname
, pid
, node
->name
);
1261 /* Strip off the registration being removed (DR, event, capacity) */
1262 user
->flag
= user
->flag
& (~(flag
& RCM_REGISTER_MASK
));
1265 * Mark the client as removed if all registrations have been removed
1267 if ((user
->flag
& RCM_REGISTER_MASK
) == 0)
1268 user
->state
= RCM_STATE_REMOVE
;
1270 return (RCM_SUCCESS
);
1274 * Tree walking function - rsrc_walk
1277 #define MAX_TREE_DEPTH 32
1279 #define RN_WALK_CONTINUE 0
1280 #define RN_WALK_PRUNESIB 1
1281 #define RN_WALK_PRUNECHILD 2
1282 #define RN_WALK_TERMINATE 3
1284 #define EMPTY_STACK(sp) ((sp)->depth == 0)
1285 #define TOP_NODE(sp) ((sp)->node[(sp)->depth - 1])
1286 #define PRUNE_SIB(sp) ((sp)->prunesib[(sp)->depth - 1])
1287 #define PRUNE_CHILD(sp) ((sp)->prunechild[(sp)->depth - 1])
1288 #define POP_STACK(sp) ((sp)->depth)--
1289 #define PUSH_STACK(sp, rn) \
1290 (sp)->node[(sp)->depth] = (rn); \
1291 (sp)->prunesib[(sp)->depth] = 0; \
1292 (sp)->prunechild[(sp)->depth] = 0; \
1296 rsrc_node_t
*node
[MAX_TREE_DEPTH
];
1297 char prunesib
[MAX_TREE_DEPTH
];
1298 char prunechild
[MAX_TREE_DEPTH
];
1302 /* walking one node and update node stack */
1305 walk_one_node(struct rn_stack
*sp
, void *arg
,
1306 int (*node_callback
)(rsrc_node_t
*, void *))
1309 rsrc_node_t
*child
, *sibling
;
1310 rsrc_node_t
*node
= TOP_NODE(sp
);
1312 rcm_log_message(RCM_TRACE4
, "walk_one_node(%s)\n", node
->name
);
1314 switch (node_callback(node
, arg
)) {
1315 case RN_WALK_TERMINATE
:
1317 while (!EMPTY_STACK(sp
)) {
1318 node
= TOP_NODE(sp
);
1323 case RN_WALK_PRUNESIB
:
1327 case RN_WALK_PRUNECHILD
:
1328 PRUNE_CHILD(sp
) = 1;
1331 case RN_WALK_CONTINUE
:
1337 * Push child on the stack
1339 if (!PRUNE_CHILD(sp
) && (child
= rn_get_child(node
)) != NULL
) {
1340 PUSH_STACK(sp
, child
);
1345 * Pop the stack till a node's sibling can be pushed
1347 prunesib
= PRUNE_SIB(sp
);
1349 while (!EMPTY_STACK(sp
) &&
1350 (prunesib
|| (sibling
= rn_get_sibling(node
)) == NULL
)) {
1351 node
= TOP_NODE(sp
);
1352 prunesib
= PRUNE_SIB(sp
);
1356 if (EMPTY_STACK(sp
)) {
1361 * push sibling onto the stack
1363 PUSH_STACK(sp
, sibling
);
1367 * walk tree rooted at root in child-first order
1370 rsrc_walk(rsrc_node_t
*root
, void *arg
,
1371 int (*node_callback
)(rsrc_node_t
*, void *))
1373 struct rn_stack stack
;
1375 rcm_log_message(RCM_TRACE3
, "rsrc_walk(%s)\n", root
->name
);
1378 * Push root on stack and walk in child-first order
1381 PUSH_STACK(&stack
, root
);
1382 PRUNE_SIB(&stack
) = 1;
1384 while (!EMPTY_STACK(&stack
)) {
1385 walk_one_node(&stack
, arg
, node_callback
);
1390 * Callback for a command action on a node
1393 node_action(rsrc_node_t
*node
, void *arg
)
1395 tree_walk_arg_t
*targ
= (tree_walk_arg_t
*)arg
;
1396 uint_t flag
= targ
->flag
;
1398 rcm_log_message(RCM_TRACE4
, "node_action(%s)\n", node
->name
);
1401 * If flag indicates operation on a filesystem, we don't callback on
1402 * the filesystem root to avoid infinite recursion on filesystem module.
1404 * N.B. Such request should only come from filesystem RCM module.
1406 if (flag
& RCM_FILESYS
) {
1407 assert(node
->type
== RSRC_TYPE_FILESYS
);
1408 targ
->flag
&= ~RCM_FILESYS
;
1409 return (RN_WALK_CONTINUE
);
1413 * Execute state change callback
1415 (void) rsrc_client_action_list(node
->users
, targ
->cmd
, arg
);
1418 * Upon hitting a filesys root, prune children.
1419 * The filesys module should have taken care of
1422 if (node
->type
== RSRC_TYPE_FILESYS
)
1423 return (RN_WALK_PRUNECHILD
);
1425 return (RN_WALK_CONTINUE
);
1429 * Execute a command on a subtree under root.
1432 rsrc_tree_action(rsrc_node_t
*root
, int cmd
, tree_walk_arg_t
*arg
)
1434 rcm_log_message(RCM_TRACE2
, "tree_action(%s, %d)\n", root
->name
, cmd
);
1439 * If RCM_RETIRE_REQUEST is set, just walk one node and preset
1440 * retcode to NO_CONSTRAINT
1442 if (arg
->flag
& RCM_RETIRE_REQUEST
) {
1443 rcm_log_message(RCM_TRACE1
, "tree_action: RETIRE_REQ: walking "
1444 "only root node: %s\n", root
->name
);
1445 arg
->retcode
= RCM_NO_CONSTRAINT
;
1446 (void) node_action(root
, arg
);
1448 arg
->retcode
= RCM_SUCCESS
;
1449 rsrc_walk(root
, (void *)arg
, node_action
);
1452 return (arg
->retcode
);
1456 * Get info on current regsitrations
1459 rsrc_usage_info(char **rsrcnames
, uint_t flag
, int seq_num
, rcm_info_t
**info
)
1462 rcm_info_t
*result
= NULL
;
1463 tree_walk_arg_t arg
;
1470 arg
.seq_num
= seq_num
;
1472 for (i
= 0; rsrcnames
[i
] != NULL
; i
++) {
1474 rcm_log_message(RCM_TRACE2
, "rsrc_usage_info(%s, 0x%x, %d)\n",
1475 rsrcnames
[i
], flag
, seq_num
);
1477 if (flag
& RCM_INCLUDE_DEPENDENT
) {
1478 initial_req
= ((seq_num
& SEQ_NUM_MASK
) == 0);
1481 * if redundant request, skip the operation
1483 if (info_req_add(rsrcnames
[i
], flag
, seq_num
) != 0) {
1488 rv
= rsrc_node_find(rsrcnames
[i
], 0, &node
);
1489 if ((rv
!= RCM_SUCCESS
) || (node
== NULL
)) {
1490 if ((flag
& RCM_INCLUDE_DEPENDENT
) && initial_req
)
1491 info_req_remove(seq_num
);
1496 * Based on RCM_INCLUDE_SUBTREE flag, query either the subtree
1499 if (flag
& RCM_INCLUDE_SUBTREE
) {
1500 (void) rsrc_tree_action(node
, CMD_GETINFO
, &arg
);
1502 arg
.cmd
= CMD_GETINFO
;
1503 (void) node_action(node
, (void *)&arg
);
1506 if ((flag
& RCM_INCLUDE_DEPENDENT
) && initial_req
)
1507 info_req_remove(seq_num
);
1511 (void) rcm_append_info(info
, result
);
1516 * Get the list of currently loaded module
1522 rcm_info_t
*info
= NULL
;
1524 (void) mutex_lock(&mod_lock
);
1527 char *modinfo
= s_strdup(module_info(mod
));
1528 add_busy_rsrc_to_list("dummy", 0, 0, 0, mod
->name
,
1529 modinfo
, NULL
, NULL
, &info
);
1532 (void) mutex_unlock(&mod_lock
);
1538 * Initialize resource map - load all modules
1545 struct dirent
*entp
;
1550 rcm_log_message(RCM_DEBUG
, "rcmd_db_init(): initialize database\n");
1552 if (script_main_init() == -1)
1555 rsrc_root
= rn_alloc("/", RSRC_TYPE_NORMAL
);
1557 for (i
= 0; (dir_name
= rcm_dir(i
, &rcm_script
)) != NULL
; i
++) {
1559 if ((mod_dir
= opendir(dir_name
)) == NULL
) {
1560 continue; /* try next directory */
1563 rcm_log_message(RCM_TRACE2
, "search directory %s\n", dir_name
);
1565 while ((entp
= readdir(mod_dir
)) != NULL
) {
1568 if (strcmp(entp
->d_name
, ".") == 0 ||
1569 strcmp(entp
->d_name
, "..") == 0)
1572 if (rcm_script
== 0) {
1574 if (((tmp
= strstr(entp
->d_name
,
1575 RCM_MODULE_SUFFIX
)) == NULL
) ||
1576 (tmp
[strlen(RCM_MODULE_SUFFIX
)] != '\0')) {
1581 module
= cli_module_hold(entp
->d_name
);
1582 if (module
== NULL
) {
1583 if (rcm_script
== 0)
1584 rcm_log_message(RCM_ERROR
,
1585 gettext("%s: failed to load\n"),
1590 if (module
->ref_count
== MOD_REFCNT_INIT
) {
1592 * ask module to register for resource 1st time
1594 module_attach(module
);
1596 cli_module_rele(module
);
1598 (void) closedir(mod_dir
);
1605 * sync resource map - ask all modules to register again
1610 static time_t sync_time
= (time_t)-1;
1611 const time_t interval
= 5; /* resync at most every 5 sec */
1614 time_t curr
= time(NULL
);
1616 if ((sync_time
!= (time_t)-1) && (curr
- sync_time
< interval
))
1620 (void) mutex_lock(&mod_lock
);
1624 * Hold module by incrementing ref count and release
1625 * mod_lock to avoid deadlock, since rcmop_register()
1626 * may callback into the daemon and request mod_lock.
1629 (void) mutex_unlock(&mod_lock
);
1631 mod
->modops
->rcmop_register(mod
->rcmhandle
);
1633 (void) mutex_lock(&mod_lock
);
1637 (void) mutex_unlock(&mod_lock
);
/*
 * Determine if a process is alive.
 *
 * pid 0 denotes the kernel and is always considered alive.  For any
 * other pid, existence is tested by stat()ing its /proc entry.
 *
 * Returns 1 if the process exists, 0 otherwise.
 *
 * FIX: pid is now explicitly cast to (long) to match the "%ld"
 * conversion; pid_t's width is model-dependent, and passing it raw to
 * a variadic function with a %ld specifier is not portable.
 */
int
proc_exist(pid_t pid)
{
	char path[64];
	const char *procfs = "/proc";
	struct stat sb;

	if (pid == (pid_t)0) {
		return (1);
	}

	(void) snprintf(path, sizeof (path), "%s/%ld", procfs, (long)pid);
	return (stat(path, &sb) == 0);
}
1659 * Cleaup client list
1661 * N.B. This routine runs in a single-threaded environment only. It is only
1662 * called by the cleanup thread, which never runs in parallel with other
1666 clean_client_list(client_t
**listp
)
1668 client_t
*client
= *listp
;
1671 * Cleanup notification clients for which pid no longer exists
1674 if ((client
->state
!= RCM_STATE_REMOVE
) &&
1675 proc_exist(client
->pid
)) {
1676 listp
= &client
->next
;
1682 * Destroy this client_t. rsrc_client_remove updates
1683 * listp to point to the next client.
1685 rsrc_client_remove(client
, listp
);
1692 clean_node(rsrc_node_t
*node
, void *arg
)
1694 rcm_log_message(RCM_TRACE4
, "clean_node(%s)\n", node
->name
);
1696 clean_client_list(&node
->users
);
1698 return (RN_WALK_CONTINUE
);
1704 rcm_log_message(RCM_TRACE4
,
1705 "clean_rsrc_tree(): delete stale dr clients\n");
1707 rsrc_walk(rsrc_root
, NULL
, clean_node
);
1713 extern barrier_t barrier
;
1714 extern void clean_dr_list();
1717 (void) mutex_lock(&rcm_req_lock
);
1718 start_polling_thread();
1719 (void) mutex_unlock(&rcm_req_lock
);
1721 (void) mutex_lock(&barrier
.lock
);
1722 while (need_cleanup
== 0)
1723 (void) cond_wait(&barrier
.cv
, &barrier
.lock
);
1724 (void) mutex_unlock(&barrier
.lock
);
1727 * Make sure all other threads are either blocked or exited.
1729 rcmd_set_state(RCMD_CLEANUP
);
1739 * clean resource tree
1743 rcmd_set_state(RCMD_NORMAL
);
1750 rcm_log_message(RCM_DEBUG
,
1751 "rcm_db_clean(): launch thread to clean database\n");
1753 if (thr_create(NULL
, NULL
, (void *(*)(void *))db_clean
,
1754 NULL
, THR_DETACHED
, NULL
) != 0) {
1755 rcm_log_message(RCM_WARNING
,
1756 gettext("failed to create cleanup thread %s\n"),
1763 print_node(rsrc_node_t
*node
, void *arg
)
1767 rcm_log_message(RCM_DEBUG
, "rscname: %s, state = 0x%x\n", node
->name
);
1768 rcm_log_message(RCM_DEBUG
, " users:\n");
1770 if ((user
= node
->users
) == NULL
) {
1771 rcm_log_message(RCM_DEBUG
, " none\n");
1772 return (RN_WALK_CONTINUE
);
1776 rcm_log_message(RCM_DEBUG
, " %s, %d, %s\n",
1777 user
->module
->name
, user
->pid
, user
->alias
);
1780 return (RN_WALK_CONTINUE
);
1788 rcm_log_message(RCM_DEBUG
, "modules:\n");
1789 (void) mutex_lock(&mod_lock
);
1792 rcm_log_message(RCM_DEBUG
, " %s\n", mod
->name
);
1795 (void) mutex_unlock(&mod_lock
);
1797 rcm_log_message(RCM_DEBUG
, "\nresource tree:\n");
1799 rsrc_walk(rsrc_root
, NULL
, print_node
);
1801 rcm_log_message(RCM_DEBUG
, "\n");
1805 * Allocate handle from calling into each RCM module
1807 static rcm_handle_t
*
1808 rcm_handle_alloc(module_t
*module
)
1812 hdl
= s_malloc(sizeof (rcm_handle_t
));
1814 hdl
->modname
= module
->name
;
1816 hdl
->lrcm_ops
= &rcm_ops
; /* for callback into daemon directly */
1817 hdl
->module
= module
;
1826 rcm_handle_free(rcm_handle_t
*handle
)
/*
 * Helper allocators that exit the daemon on memory outage.
 * Callers never see a NULL return from any of these.
 *
 * NOTE(review): the out-of-memory branch of each helper was elided in
 * this view; rcmd_exit(ENOMEM) is reconstructed from the surviving
 * "exit on memory outage" comment -- confirm against original.
 */
void *
s_malloc(size_t size)
{
	void *buf = malloc(size);

	if (buf == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (buf);
}

void *
s_calloc(int n, size_t size)
{
	void *buf = calloc(n, size);

	if (buf == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (buf);
}

void *
s_realloc(void *ptr, size_t size)
{
	void *new = realloc(ptr, size);

	if (new == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (new);
}

char *
s_strdup(const char *str)
{
	char *buf = strdup(str);

	if (buf == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (buf);
}
1879 * Convert a version 1 ops vector to current ops vector
1880 * Fields missing in version 1 are set to NULL.
1882 static struct rcm_mod_ops
*
1883 modops_from_v1(void *ops_v1
)
1885 struct rcm_mod_ops
*ops
;
1887 ops
= s_calloc(1, sizeof (struct rcm_mod_ops
));
1888 bcopy(ops_v1
, ops
, sizeof (struct rcm_mod_ops_v1
));
1892 /* call a module's getinfo routine; detects v1 ops and adjusts the call */
1894 call_getinfo(struct rcm_mod_ops
*ops
, rcm_handle_t
*hdl
, char *alias
, id_t pid
,
1895 uint_t flag
, char **info
, char **error
, nvlist_t
*client_props
,
1899 struct rcm_mod_ops_v1
*v1_ops
;
1901 if (ops
->version
== RCM_MOD_OPS_V1
) {
1902 v1_ops
= (struct rcm_mod_ops_v1
*)ops
;
1903 rval
= v1_ops
->rcmop_get_info(hdl
, alias
, pid
, flag
, info
,
1905 if (rval
!= RCM_SUCCESS
&& *info
!= NULL
)
1906 *error
= strdup(*info
);
1909 return (ops
->rcmop_get_info(hdl
, alias
, pid
, flag
, info
, error
,
1910 client_props
, infop
));
1915 rcm_init_queue(rcm_queue_t
*head
)
1917 head
->next
= head
->prev
= head
;
1921 rcm_enqueue_head(rcm_queue_t
*head
, rcm_queue_t
*element
)
1923 rcm_enqueue(head
, element
);
1927 rcm_enqueue_tail(rcm_queue_t
*head
, rcm_queue_t
*element
)
1929 rcm_enqueue(head
->prev
, element
);
1933 rcm_enqueue(rcm_queue_t
*list_element
, rcm_queue_t
*element
)
1935 element
->next
= list_element
->next
;
1936 element
->prev
= list_element
;
1937 element
->next
->prev
= element
;
1938 list_element
->next
= element
;
1942 rcm_dequeue_head(rcm_queue_t
*head
)
1944 rcm_queue_t
*element
= head
->next
;
1945 rcm_dequeue(element
);
1950 rcm_dequeue_tail(rcm_queue_t
*head
)
1952 rcm_queue_t
*element
= head
->prev
;
1953 rcm_dequeue(element
);
1958 rcm_dequeue(rcm_queue_t
*element
)
1960 element
->prev
->next
= element
->next
;
1961 element
->next
->prev
= element
->prev
;
1962 element
->next
= element
->prev
= NULL
;