4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
21 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
22 * Use is subject to license terms.
26 #include "rcm_module.h"
29 * Short-circuits unloading of modules with no registrations, so that
30 * they are present during the next db_sync cycle.
32 #define MOD_REFCNT_INIT 2
34 int need_cleanup
; /* flag indicating if clean up is needed */
36 static mutex_t mod_lock
; /* protects module list */
37 static module_t
*module_head
; /* linked list of modules */
38 static rsrc_node_t
*rsrc_root
; /* root of all resources */
43 static void rcmd_db_print();
44 static void rcm_handle_free(rcm_handle_t
*);
45 static rcm_handle_t
*rcm_handle_alloc(module_t
*);
46 static void rsrc_clients_free(client_t
*);
47 static struct rcm_mod_ops
*modops_from_v1(void *);
48 static int call_getinfo(struct rcm_mod_ops
*, rcm_handle_t
*, char *, id_t
,
49 uint_t
, char **, char **, nvlist_t
*, rcm_info_t
**);
50 static int node_action(rsrc_node_t
*, void *);
52 extern void start_polling_thread();
55 * translate /dev name to a /devices path
57 * N.B. This routine can be enhanced to understand network names
58 * and friendly names in the future.
61 resolve_name(char *alias
)
64 const char *dev
= "/dev/";
66 if (strlen(alias
) == 0)
69 if (strncmp(alias
, dev
, strlen(dev
)) == 0) {
71 * Treat /dev/... as a symbolic link
73 tmp
= s_malloc(PATH_MAX
);
74 if (realpath(alias
, tmp
) != NULL
) {
79 /* Fail to resolve /dev/ name, use the name as is */
82 return (s_strdup(alias
));
86 * Figure out resource type based on "resolved" name
88 * N.B. This routine does not figure out file system mount points.
89 * This is determined at runtime when filesys module register
90 * with RCM_FILESYS flag.
93 rsrc_get_type(const char *resolved_name
)
95 if (resolved_name
[0] != '/')
96 return (RSRC_TYPE_ABSTRACT
);
98 if (strncmp("/devices/", resolved_name
, 9) == 0)
99 return (RSRC_TYPE_DEVICE
);
101 return (RSRC_TYPE_NORMAL
);
106 * module_load, module_unload, module_info, module_attach, module_detach,
107 * cli_module_hold, cli_module_rele
#ifdef ENABLE_MODULE_DETACH
/*
 * Call the module's unregister() entry point so the module can
 * unregister its resources without getting confused.
 */
static void
module_detach(module_t *module)
{
	struct rcm_mod_ops *ops = module->modops;

	rcm_log_message(RCM_TRACE2, "module_detach(name=%s)\n", module->name);

	ops->rcmop_unregister(module->rcmhandle);
}
#endif	/* ENABLE_MODULE_DETACH */
127 * call register() entry point to allow module to register for resources
130 module_attach(module_t
*module
)
132 struct rcm_mod_ops
*ops
= module
->modops
;
134 rcm_log_message(RCM_TRACE2
, "module_attach(name=%s)\n", module
->name
);
136 if (ops
->rcmop_register(module
->rcmhandle
) != RCM_SUCCESS
) {
137 rcm_log_message(RCM_WARNING
,
138 gettext("module %s register() failed\n"), module
->name
);
143 module_init(module_t
*module
)
145 if (module
->dlhandle
)
147 return (module
->init());
150 return (script_init(module
));
154 * call rmc_mod_info() entry of module
157 module_info(module_t
*module
)
159 if (module
->dlhandle
)
161 return (module
->info());
164 return (script_info(module
));
168 module_fini(module_t
*module
)
170 if (module
->dlhandle
)
172 return (module
->fini());
175 return (script_fini(module
));
179 * call rmc_mod_fini() entry of module, dlclose module, and free memory
182 module_unload(module_t
*module
)
184 int version
= module
->modops
->version
;
186 rcm_log_message(RCM_DEBUG
, "module_unload(name=%s)\n", module
->name
);
188 (void) module_fini(module
);
190 rcm_handle_free(module
->rcmhandle
);
196 * Free memory associated with converted ops vector
198 free(module
->modops
);
201 case RCM_MOD_OPS_VERSION
:
206 if (module
->dlhandle
)
207 rcm_module_close(module
->dlhandle
);
213 * Locate the module, execute rcm_mod_init() and check ops vector version
216 module_load(char *modname
)
220 rcm_log_message(RCM_DEBUG
, "module_load(name=%s)\n", modname
);
225 module
= s_calloc(1, sizeof (*module
));
226 module
->name
= s_strdup(modname
);
227 module
->modops
= NULL
;
228 rcm_init_queue(&module
->client_q
);
230 if (rcm_is_script(modname
) == 0) {
232 module
->dlhandle
= rcm_module_open(modname
);
234 if (module
->dlhandle
== NULL
) {
235 rcm_log_message(RCM_NOTICE
,
236 gettext("cannot open module %s\n"), modname
);
241 * dlsym rcm_mod_init/fini/info() entry points
243 module
->init
= (struct rcm_mod_ops
*(*)())dlsym(
244 module
->dlhandle
, "rcm_mod_init");
245 module
->fini
= (int (*)())dlsym(
246 module
->dlhandle
, "rcm_mod_fini");
247 module
->info
= (const char *(*)())dlsym(module
->dlhandle
,
249 if (module
->init
== NULL
|| module
->fini
== NULL
||
250 module
->info
== NULL
) {
251 rcm_log_message(RCM_ERROR
,
252 gettext("missing entries in module %s\n"), modname
);
258 module
->dlhandle
= NULL
;
259 module
->init
= (struct rcm_mod_ops
*(*)()) NULL
;
260 module
->fini
= (int (*)()) NULL
;
261 module
->info
= (const char *(*)()) NULL
;
264 if ((module
->modops
= module_init(module
)) == NULL
) {
265 if (module
->dlhandle
)
266 rcm_log_message(RCM_ERROR
,
267 gettext("cannot init module %s\n"), modname
);
272 * Check ops vector version
274 switch (module
->modops
->version
) {
276 module
->modops
= modops_from_v1((void *)module
->modops
);
279 case RCM_MOD_OPS_VERSION
:
283 rcm_log_message(RCM_ERROR
,
284 gettext("module %s rejected: version %d not supported\n"),
285 modname
, module
->modops
->version
);
286 (void) module_fini(module
);
291 * Make sure all fields are set
293 if ((module
->modops
->rcmop_register
== NULL
) ||
294 (module
->modops
->rcmop_unregister
== NULL
) ||
295 (module
->modops
->rcmop_get_info
== NULL
) ||
296 (module
->modops
->rcmop_request_suspend
== NULL
) ||
297 (module
->modops
->rcmop_notify_resume
== NULL
) ||
298 (module
->modops
->rcmop_request_offline
== NULL
) ||
299 (module
->modops
->rcmop_notify_online
== NULL
) ||
300 (module
->modops
->rcmop_notify_remove
== NULL
)) {
301 rcm_log_message(RCM_ERROR
,
302 gettext("module %s rejected: has NULL ops fields\n"),
304 (void) module_fini(module
);
308 module
->rcmhandle
= rcm_handle_alloc(module
);
312 if (module
->modops
&& module
->modops
->version
== RCM_MOD_OPS_V1
)
313 free(module
->modops
);
315 if (module
->dlhandle
)
316 rcm_module_close(module
->dlhandle
);
324 * add one to module hold count. load the module if not loaded
327 cli_module_hold(char *modname
)
331 rcm_log_message(RCM_TRACE3
, "cli_module_hold(%s)\n", modname
);
333 (void) mutex_lock(&mod_lock
);
334 module
= module_head
;
336 if (strcmp(module
->name
, modname
) == 0) {
339 module
= module
->next
;
344 (void) mutex_unlock(&mod_lock
);
349 * Module not found, attempt to load it
351 if ((module
= module_load(modname
)) == NULL
) {
352 (void) mutex_unlock(&mod_lock
);
357 * Hold module and link module into module list
359 module
->ref_count
= MOD_REFCNT_INIT
;
360 module
->next
= module_head
;
361 module_head
= module
;
363 (void) mutex_unlock(&mod_lock
);
369 * decrement module hold count. Unload it if no reference
372 cli_module_rele(module_t
*module
)
374 module_t
*curr
= module_head
, *prev
= NULL
;
376 rcm_log_message(RCM_TRACE3
, "cli_module_rele(name=%s)\n", module
->name
);
378 (void) mutex_lock(&mod_lock
);
379 if (--(module
->ref_count
) != 0) {
380 (void) mutex_unlock(&mod_lock
);
384 rcm_log_message(RCM_TRACE2
, "unloading module %s\n", module
->name
);
387 * Unlink the module from list
389 while (curr
&& (curr
!= module
)) {
394 rcm_log_message(RCM_ERROR
,
395 gettext("Unexpected error: module %s not found.\n"),
397 } else if (prev
== NULL
) {
398 module_head
= curr
->next
;
400 prev
->next
= curr
->next
;
402 (void) mutex_unlock(&mod_lock
);
404 module_unload(module
);
408 * Gather usage info be passed back to requester. Discard info if user does
409 * not care (list == NULL).
412 add_busy_rsrc_to_list(char *alias
, pid_t pid
, int state
, int seq_num
,
413 char *modname
, const char *infostr
, const char *errstr
,
414 nvlist_t
*client_props
, rcm_info_t
**list
)
425 info
= s_calloc(1, sizeof (*info
));
426 if (errno
= nvlist_alloc(&(info
->info
), NV_UNIQUE_NAME
, 0)) {
427 rcm_log_message(RCM_ERROR
, "failed (nvlist_alloc=%s).\n",
433 if ((errno
= nvlist_add_string(info
->info
, RCM_RSRCNAME
, alias
)) ||
434 (errno
= nvlist_add_int32(info
->info
, RCM_SEQ_NUM
, seq_num
)) ||
435 (errno
= nvlist_add_int64(info
->info
, RCM_CLIENT_ID
, pid
)) ||
436 (errno
= nvlist_add_int32(info
->info
, RCM_RSRCSTATE
, state
))) {
437 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
443 * Daemon calls to add_busy_rsrc_to_list may pass in
444 * error/info. Add these through librcm interfaces.
447 rcm_log_message(RCM_TRACE3
, "adding error string: %s\n",
449 if (errno
= nvlist_add_string(info
->info
, RCM_CLIENT_ERROR
,
451 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
458 if (errno
= nvlist_add_string(info
->info
, RCM_CLIENT_INFO
,
460 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
467 if (errno
= nvlist_add_string(info
->info
, RCM_CLIENT_MODNAME
,
469 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
476 if (errno
= nvlist_pack(client_props
, &buf
, &buflen
,
477 NV_ENCODE_NATIVE
, 0)) {
478 rcm_log_message(RCM_ERROR
, "failed (nvlist_pack=%s).\n",
482 if (errno
= nvlist_add_byte_array(info
->info
,
483 RCM_CLIENT_PROPERTIES
, (uchar_t
*)buf
, buflen
)) {
484 rcm_log_message(RCM_ERROR
, "failed (nvlist_add=%s).\n",
492 /* link info at end of list */
504 * Resource client related operations:
505 * rsrc_client_alloc, rsrc_client_find, rsrc_client_add,
506 * rsrc_client_remove, rsrc_client_action, rsrc_client_action_list
509 /* Allocate rsrc_client_t structure. Load module if necessary. */
512 rsrc_client_alloc(char *alias
, char *modname
, pid_t pid
, uint_t flag
)
517 assert((alias
!= NULL
) && (modname
!= NULL
));
519 rcm_log_message(RCM_TRACE4
, "rsrc_client_alloc(%s, %s, %ld)\n",
520 alias
, modname
, pid
);
522 if ((mod
= cli_module_hold(modname
)) == NULL
) {
526 client
= s_calloc(1, sizeof (client_t
));
527 client
->module
= mod
;
529 client
->alias
= s_strdup(alias
);
530 client
->prv_flags
= 0;
531 client
->state
= RCM_STATE_ONLINE
;
534 /* This queue is protected by rcm_req_lock */
535 rcm_enqueue_tail(&mod
->client_q
, &client
->queue
);
540 /* Find client in list matching modname and pid */
542 rsrc_client_find(char *modname
, pid_t pid
, client_t
**list
)
544 client_t
*client
= *list
;
546 rcm_log_message(RCM_TRACE4
, "rsrc_client_find(%s, %ld, %p)\n",
547 modname
, pid
, (void *)list
);
550 if ((client
->pid
== pid
) &&
551 strcmp(modname
, client
->module
->name
) == 0) {
554 client
= client
->next
;
559 /* Add a client to client list */
561 rsrc_client_add(client_t
*client
, client_t
**list
)
563 rcm_log_message(RCM_TRACE4
, "rsrc_client_add: %s, %s, %ld\n",
564 client
->alias
, client
->module
->name
, client
->pid
);
566 client
->next
= *list
;
570 /* Remove client from list and destroy it */
572 rsrc_client_remove(client_t
*client
, client_t
**list
)
574 client_t
*tmp
, *prev
= NULL
;
576 rcm_log_message(RCM_TRACE4
, "rsrc_client_remove: %s, %s, %ld\n",
577 client
->alias
, client
->module
->name
, client
->pid
);
587 prev
->next
= tmp
->next
;
592 rsrc_clients_free(tmp
);
597 /* Free a list of clients. Called from cleanup thread only */
599 rsrc_clients_free(client_t
*list
)
601 client_t
*client
= list
;
606 * Note that the rcm daemon is single threaded while
607 * executing this routine. So there is no need to acquire
608 * rcm_req_lock here while dequeuing.
610 rcm_dequeue(&client
->queue
);
612 if (client
->module
) {
613 cli_module_rele(client
->module
);
625 * Invoke a callback into a single client
626 * This is the core of rcm_mod_ops interface
629 rsrc_client_action(client_t
*client
, int cmd
, void *arg
)
631 int rval
= RCM_SUCCESS
;
632 char *dummy_error
= NULL
;
636 nvlist_t
*client_props
= NULL
;
637 rcm_info_t
*depend_info
= NULL
;
638 struct rcm_mod_ops
*ops
= client
->module
->modops
;
639 tree_walk_arg_t
*targ
= (tree_walk_arg_t
*)arg
;
641 rcm_log_message(RCM_TRACE4
,
642 "rsrc_client_action: %s, %s, cmd=%d, flag=0x%x\n", client
->alias
,
643 client
->module
->name
, cmd
, targ
->flag
);
646 * Create a per-operation handle, increment seq_num by 1 so we will
647 * know if a module uses this handle to callback into rcm_daemon.
649 hdl
= rcm_handle_alloc(client
->module
);
650 hdl
->seq_num
= targ
->seq_num
+ 1;
653 * Filter out operations for which the client didn't register.
661 if ((client
->flag
& RCM_REGISTER_DR
) == 0) {
662 rcm_handle_free(hdl
);
663 return (RCM_SUCCESS
);
666 case CMD_REQUEST_CHANGE
:
667 case CMD_NOTIFY_CHANGE
:
668 if ((client
->flag
& RCM_REGISTER_CAPACITY
) == 0) {
669 rcm_handle_free(hdl
);
670 return (RCM_SUCCESS
);
674 if ((client
->flag
& RCM_REGISTER_EVENT
) == 0) {
675 rcm_handle_free(hdl
);
676 return (RCM_SUCCESS
);
682 * Create nvlist_t for any client-specific properties.
684 if (errno
= nvlist_alloc(&client_props
, NV_UNIQUE_NAME
, 0)) {
685 rcm_log_message(RCM_ERROR
,
686 "client action failed (nvlist_alloc=%s)\n",
692 * Process the operation via a callback to the client module.
696 rval
= call_getinfo(ops
, hdl
, client
->alias
, client
->pid
,
697 targ
->flag
, &info
, &error
, client_props
, &depend_info
);
701 if (((targ
->flag
& RCM_QUERY_CANCEL
) == 0) &&
702 (client
->state
== RCM_STATE_SUSPEND
)) {
706 if ((targ
->flag
& RCM_QUERY
) == 0) {
707 rcm_log_message(RCM_DEBUG
, "suspending %s\n",
709 } else if ((targ
->flag
& RCM_QUERY_CANCEL
) == 0) {
710 rcm_log_message(RCM_DEBUG
, "suspend query %s\n",
713 rcm_log_message(RCM_DEBUG
,
714 "suspend query %s cancelled\n", client
->alias
);
718 * Update the client's state before the operation.
719 * If this is a cancelled query, then updating the state is
720 * the only thing that needs to be done, so break afterwards.
722 if ((targ
->flag
& RCM_QUERY
) == 0) {
723 client
->state
= RCM_STATE_SUSPENDING
;
724 } else if ((targ
->flag
& RCM_QUERY_CANCEL
) == 0) {
725 client
->state
= RCM_STATE_SUSPEND_QUERYING
;
727 client
->state
= RCM_STATE_ONLINE
;
731 rval
= ops
->rcmop_request_suspend(hdl
, client
->alias
,
732 client
->pid
, targ
->interval
, targ
->flag
, &error
,
735 /* Update the client's state after the operation. */
736 if ((targ
->flag
& RCM_QUERY
) == 0) {
737 if (rval
== RCM_SUCCESS
) {
738 client
->state
= RCM_STATE_SUSPEND
;
740 client
->state
= RCM_STATE_SUSPEND_FAIL
;
743 if (rval
== RCM_SUCCESS
) {
744 client
->state
= RCM_STATE_SUSPEND_QUERY
;
746 client
->state
= RCM_STATE_SUSPEND_QUERY_FAIL
;
752 if (client
->state
== RCM_STATE_ONLINE
) {
755 client
->state
= RCM_STATE_RESUMING
;
756 rval
= ops
->rcmop_notify_resume(hdl
, client
->alias
, client
->pid
,
757 targ
->flag
, &error
, &depend_info
);
759 /* online state is unconditional */
760 client
->state
= RCM_STATE_ONLINE
;
764 if (((targ
->flag
& RCM_QUERY_CANCEL
) == 0) &&
765 (client
->state
== RCM_STATE_OFFLINE
)) {
769 if ((targ
->flag
& RCM_QUERY
) == 0) {
770 rcm_log_message(RCM_DEBUG
, "offlining %s\n",
772 } else if ((targ
->flag
& RCM_QUERY_CANCEL
) == 0) {
773 rcm_log_message(RCM_DEBUG
, "offline query %s\n",
776 rcm_log_message(RCM_DEBUG
,
777 "offline query %s cancelled\n", client
->alias
);
781 * Update the client's state before the operation.
782 * If this is a cancelled query, then updating the state is
783 * the only thing that needs to be done, so break afterwards.
785 if ((targ
->flag
& RCM_QUERY
) == 0) {
786 client
->state
= RCM_STATE_OFFLINING
;
787 } else if ((targ
->flag
& RCM_QUERY_CANCEL
) == 0) {
788 client
->state
= RCM_STATE_OFFLINE_QUERYING
;
790 client
->state
= RCM_STATE_ONLINE
;
794 rval
= ops
->rcmop_request_offline(hdl
, client
->alias
,
795 client
->pid
, targ
->flag
, &error
, &depend_info
);
798 * If this is a retire operation and we managed to call
799 * into at least one client, set retcode to RCM_SUCCESS to
800 * indicate that retire has been subject to constraints
801 * This retcode will be further modified by actual return
804 if ((targ
->flag
& RCM_RETIRE_REQUEST
) &&
805 (targ
->retcode
== RCM_NO_CONSTRAINT
)) {
806 rcm_log_message(RCM_DEBUG
,
807 "at least 1 client, constraint applied: %s\n",
809 targ
->retcode
= RCM_SUCCESS
;
812 /* Update the client's state after the operation. */
813 if ((targ
->flag
& RCM_QUERY
) == 0) {
814 if (rval
== RCM_SUCCESS
) {
815 client
->state
= RCM_STATE_OFFLINE
;
817 client
->state
= RCM_STATE_OFFLINE_FAIL
;
820 if (rval
== RCM_SUCCESS
) {
821 client
->state
= RCM_STATE_OFFLINE_QUERY
;
823 client
->state
= RCM_STATE_OFFLINE_QUERY_FAIL
;
829 if (client
->state
== RCM_STATE_ONLINE
) {
833 rcm_log_message(RCM_DEBUG
, "onlining %s\n", client
->alias
);
835 client
->state
= RCM_STATE_ONLINING
;
836 rval
= ops
->rcmop_notify_online(hdl
, client
->alias
, client
->pid
,
837 targ
->flag
, &error
, &depend_info
);
838 client
->state
= RCM_STATE_ONLINE
;
842 rcm_log_message(RCM_DEBUG
, "removing %s\n", client
->alias
);
843 client
->state
= RCM_STATE_REMOVING
;
844 rval
= ops
->rcmop_notify_remove(hdl
, client
->alias
, client
->pid
,
845 targ
->flag
, &error
, &depend_info
);
846 client
->state
= RCM_STATE_REMOVE
;
849 case CMD_REQUEST_CHANGE
:
850 rcm_log_message(RCM_DEBUG
, "requesting state change of %s\n",
852 if (ops
->rcmop_request_capacity_change
)
853 rval
= ops
->rcmop_request_capacity_change(hdl
,
854 client
->alias
, client
->pid
, targ
->flag
, targ
->nvl
,
855 &error
, &depend_info
);
858 case CMD_NOTIFY_CHANGE
:
859 rcm_log_message(RCM_DEBUG
, "requesting state change of %s\n",
861 if (ops
->rcmop_notify_capacity_change
)
862 rval
= ops
->rcmop_notify_capacity_change(hdl
,
863 client
->alias
, client
->pid
, targ
->flag
, targ
->nvl
,
864 &error
, &depend_info
);
868 rcm_log_message(RCM_DEBUG
, "delivering event to %s\n",
870 if (ops
->rcmop_notify_event
)
871 rval
= ops
->rcmop_notify_event(hdl
, client
->alias
,
872 client
->pid
, targ
->flag
, &error
, targ
->nvl
,
877 rcm_log_message(RCM_ERROR
, gettext("unknown command %d\n"),
883 /* reset error code to the most significant error */
884 if (rval
!= RCM_SUCCESS
)
885 targ
->retcode
= rval
;
888 * XXX - The code below may produce duplicate rcm_info_t's on error?
890 if ((cmd
!= CMD_GETINFO
) &&
891 ((rval
!= RCM_SUCCESS
) ||
893 (targ
->flag
& RCM_SCOPE
))) {
894 (void) call_getinfo(ops
, hdl
, client
->alias
, client
->pid
,
895 targ
->flag
& (~(RCM_INCLUDE_DEPENDENT
|RCM_INCLUDE_SUBTREE
)),
896 &info
, &dummy_error
, client_props
, &depend_info
);
898 (void) free(dummy_error
);
899 } else if (cmd
!= CMD_GETINFO
) {
900 nvlist_free(client_props
);
905 add_busy_rsrc_to_list(client
->alias
, client
->pid
, client
->state
,
906 targ
->seq_num
, client
->module
->name
, info
, error
,
907 client_props
, targ
->info
);
908 nvlist_free(client_props
);
918 (void) rcm_append_info(targ
->info
, depend_info
);
920 rcm_free_info(depend_info
);
924 rcm_handle_free(hdl
);
929 * invoke a callback into a list of clients, return 0 if all success
932 rsrc_client_action_list(client_t
*list
, int cmd
, void *arg
)
934 int error
, rval
= RCM_SUCCESS
;
935 tree_walk_arg_t
*targ
= (tree_walk_arg_t
*)arg
;
938 client_t
*client
= list
;
942 * Make offline idempotent in the retire
945 if ((targ
->flag
& RCM_RETIRE_REQUEST
) &&
946 client
->state
== RCM_STATE_REMOVE
) {
947 client
->state
= RCM_STATE_ONLINE
;
948 rcm_log_message(RCM_DEBUG
, "RETIRE: idempotent client "
949 "state: REMOVE -> ONLINE: %s\n", client
->alias
);
952 if (client
->state
== RCM_STATE_REMOVE
)
955 error
= rsrc_client_action(client
, cmd
, arg
);
956 if (error
!= RCM_SUCCESS
) {
965 * Node related operations:
967 * rn_alloc, rn_free, rn_find_child,
968 * rn_get_child, rn_get_sibling,
969 * rsrc_node_find, rsrc_node_add_user, rsrc_node_remove_user,
972 /* Allocate node based on a logical or physical name */
974 rn_alloc(char *name
, int type
)
978 rcm_log_message(RCM_TRACE4
, "rn_alloc(%s, %d)\n", name
, type
);
980 node
= s_calloc(1, sizeof (*node
));
981 node
->name
= s_strdup(name
);
988 * Free node along with its siblings and children
991 rn_free(rsrc_node_t
*node
)
998 rn_free(node
->child
);
1001 if (node
->sibling
) {
1002 rn_free(node
->sibling
);
1005 rsrc_clients_free(node
->users
);
1013 static rsrc_node_t
*
1014 rn_get_sibling(rsrc_node_t
*node
)
1016 return (node
->sibling
);
1022 static rsrc_node_t
*
1023 rn_get_child(rsrc_node_t
*node
)
1025 return (node
->child
);
1029 * Find child named childname. Create it if flag is RSRC_NODE_CREATE
1031 static rsrc_node_t
*
1032 rn_find_child(rsrc_node_t
*parent
, char *childname
, int flag
, int type
)
1034 rsrc_node_t
*child
= parent
->child
;
1035 rsrc_node_t
*new, *prev
= NULL
;
1037 rcm_log_message(RCM_TRACE4
,
1038 "rn_find_child(parent=%s, child=%s, 0x%x, %d)\n",
1039 parent
->name
, childname
, flag
, type
);
1042 * Children are ordered based on strcmp.
1044 while (child
&& (strcmp(child
->name
, childname
) < 0)) {
1046 child
= child
->sibling
;
1049 if (child
&& (strcmp(child
->name
, childname
) == 0)) {
1053 if (flag
!= RSRC_NODE_CREATE
)
1056 new = rn_alloc(childname
, type
);
1057 new->parent
= parent
;
1058 new->sibling
= child
;
1061 * Set this linkage last so we don't break ongoing operations.
1063 * N.B. Assume setting a pointer is an atomic operation.
1066 parent
->child
= new;
1068 prev
->sibling
= new;
1075 * Pathname related help functions
1078 pn_preprocess(char *pathname
, int type
)
1082 if (type
!= RSRC_TYPE_DEVICE
)
1086 * For devices, convert ':' to '/' (treat minor nodes and children)
1088 tmp
= strchr(pathname
, ':');
1096 pn_getnextcomp(char *pathname
, char **lasts
)
1100 if (pathname
== NULL
)
1104 while (*pathname
== '/')
1107 if (*pathname
== '\0')
1110 slash
= strchr(pathname
, '/');
1111 if (slash
!= NULL
) {
1122 * Find a node in tree based on device, which is the physical pathname
1123 * of the form /sbus@.../esp@.../sd@...
1126 rsrc_node_find(char *rsrcname
, int flag
, rsrc_node_t
**nodep
)
1128 char *pathname
, *nodename
, *lasts
;
1132 rcm_log_message(RCM_TRACE4
, "rn_node_find(%s, 0x%x)\n", rsrcname
, flag
);
1135 * For RSRC_TYPE_ABSTRACT, look under /ABSTRACT. For other types,
1136 * look under /SYSTEM.
1138 pathname
= resolve_name(rsrcname
);
1139 if (pathname
== NULL
)
1142 type
= rsrc_get_type(pathname
);
1144 case RSRC_TYPE_DEVICE
:
1145 case RSRC_TYPE_NORMAL
:
1146 node
= rn_find_child(rsrc_root
, "SYSTEM", RSRC_NODE_CREATE
,
1150 case RSRC_TYPE_ABSTRACT
:
1151 node
= rn_find_child(rsrc_root
, "ABSTRACT", RSRC_NODE_CREATE
,
1156 /* just to make sure */
1162 * Find position of device within tree. Upon exiting the loop, device
1163 * should be placed between prev and curr.
1165 pn_preprocess(pathname
, type
);
1167 while ((nodename
= pn_getnextcomp(lasts
, &lasts
)) != NULL
) {
1168 rsrc_node_t
*parent
= node
;
1169 node
= rn_find_child(parent
, nodename
, flag
, type
);
1171 assert((flag
& RSRC_NODE_CREATE
) == 0);
1174 return (RCM_SUCCESS
);
1179 return (RCM_SUCCESS
);
1183 * add a usage client to a node
1187 rsrc_node_add_user(rsrc_node_t
*node
, char *alias
, char *modname
, pid_t pid
,
1192 rcm_log_message(RCM_TRACE3
,
1193 "rsrc_node_add_user(%s, %s, %s, %ld, 0x%x)\n",
1194 node
->name
, alias
, modname
, pid
, flag
);
1196 user
= rsrc_client_find(modname
, pid
, &node
->users
);
1199 * If a client_t already exists, add the registration and return
1200 * success if it's a valid registration request.
1202 * Return EALREADY if the resource is already registered.
1203 * This means either the client_t already has the requested
1204 * registration flagged, or that a DR registration was attempted
1205 * on a resource already in use in the DR operations state model.
1209 if (user
->flag
& (flag
& RCM_REGISTER_MASK
)) {
1213 if ((flag
& RCM_REGISTER_DR
) &&
1214 (user
->state
!= RCM_STATE_REMOVE
)) {
1218 user
->flag
|= (flag
& RCM_REGISTER_MASK
);
1219 if ((flag
& RCM_REGISTER_DR
) ||
1220 (user
->state
== RCM_STATE_REMOVE
)) {
1221 user
->state
= RCM_STATE_ONLINE
;
1224 return (RCM_SUCCESS
);
1228 * Otherwise create a new client_t and create a new registration.
1230 if ((user
= rsrc_client_alloc(alias
, modname
, pid
, flag
)) != NULL
) {
1231 rsrc_client_add(user
, &node
->users
);
1233 if (flag
& RCM_FILESYS
)
1234 node
->type
= RSRC_TYPE_FILESYS
;
1236 return (RCM_SUCCESS
);
1240 * remove a usage client of a node
1243 rsrc_node_remove_user(rsrc_node_t
*node
, char *modname
, pid_t pid
, uint_t flag
)
1247 rcm_log_message(RCM_TRACE3
,
1248 "rsrc_node_remove_user(%s, %s, %ld, 0x%x)\n", node
->name
, modname
,
1251 user
= rsrc_client_find(modname
, pid
, &node
->users
);
1252 if ((user
== NULL
) || (user
->state
== RCM_STATE_REMOVE
)) {
1253 rcm_log_message(RCM_NOTICE
, gettext(
1254 "client not registered: module=%s, pid=%d, dev=%s\n"),
1255 modname
, pid
, node
->name
);
1259 /* Strip off the registration being removed (DR, event, capacity) */
1260 user
->flag
= user
->flag
& (~(flag
& RCM_REGISTER_MASK
));
1263 * Mark the client as removed if all registrations have been removed
1265 if ((user
->flag
& RCM_REGISTER_MASK
) == 0)
1266 user
->state
= RCM_STATE_REMOVE
;
1268 return (RCM_SUCCESS
);
1272 * Tree walking function - rsrc_walk
1275 #define MAX_TREE_DEPTH 32
1277 #define RN_WALK_CONTINUE 0
1278 #define RN_WALK_PRUNESIB 1
1279 #define RN_WALK_PRUNECHILD 2
1280 #define RN_WALK_TERMINATE 3
1282 #define EMPTY_STACK(sp) ((sp)->depth == 0)
1283 #define TOP_NODE(sp) ((sp)->node[(sp)->depth - 1])
1284 #define PRUNE_SIB(sp) ((sp)->prunesib[(sp)->depth - 1])
1285 #define PRUNE_CHILD(sp) ((sp)->prunechild[(sp)->depth - 1])
1286 #define POP_STACK(sp) ((sp)->depth)--
1287 #define PUSH_STACK(sp, rn) \
1288 (sp)->node[(sp)->depth] = (rn); \
1289 (sp)->prunesib[(sp)->depth] = 0; \
1290 (sp)->prunechild[(sp)->depth] = 0; \
1294 rsrc_node_t
*node
[MAX_TREE_DEPTH
];
1295 char prunesib
[MAX_TREE_DEPTH
];
1296 char prunechild
[MAX_TREE_DEPTH
];
1300 /* walking one node and update node stack */
1303 walk_one_node(struct rn_stack
*sp
, void *arg
,
1304 int (*node_callback
)(rsrc_node_t
*, void *))
1307 rsrc_node_t
*child
, *sibling
;
1308 rsrc_node_t
*node
= TOP_NODE(sp
);
1310 rcm_log_message(RCM_TRACE4
, "walk_one_node(%s)\n", node
->name
);
1312 switch (node_callback(node
, arg
)) {
1313 case RN_WALK_TERMINATE
:
1315 while (!EMPTY_STACK(sp
)) {
1316 node
= TOP_NODE(sp
);
1321 case RN_WALK_PRUNESIB
:
1325 case RN_WALK_PRUNECHILD
:
1326 PRUNE_CHILD(sp
) = 1;
1329 case RN_WALK_CONTINUE
:
1335 * Push child on the stack
1337 if (!PRUNE_CHILD(sp
) && (child
= rn_get_child(node
)) != NULL
) {
1338 PUSH_STACK(sp
, child
);
1343 * Pop the stack till a node's sibling can be pushed
1345 prunesib
= PRUNE_SIB(sp
);
1347 while (!EMPTY_STACK(sp
) &&
1348 (prunesib
|| (sibling
= rn_get_sibling(node
)) == NULL
)) {
1349 node
= TOP_NODE(sp
);
1350 prunesib
= PRUNE_SIB(sp
);
1354 if (EMPTY_STACK(sp
)) {
1359 * push sibling onto the stack
1361 PUSH_STACK(sp
, sibling
);
1365 * walk tree rooted at root in child-first order
1368 rsrc_walk(rsrc_node_t
*root
, void *arg
,
1369 int (*node_callback
)(rsrc_node_t
*, void *))
1371 struct rn_stack stack
;
1373 rcm_log_message(RCM_TRACE3
, "rsrc_walk(%s)\n", root
->name
);
1376 * Push root on stack and walk in child-first order
1379 PUSH_STACK(&stack
, root
);
1380 PRUNE_SIB(&stack
) = 1;
1382 while (!EMPTY_STACK(&stack
)) {
1383 walk_one_node(&stack
, arg
, node_callback
);
1388 * Callback for a command action on a node
1391 node_action(rsrc_node_t
*node
, void *arg
)
1393 tree_walk_arg_t
*targ
= (tree_walk_arg_t
*)arg
;
1394 uint_t flag
= targ
->flag
;
1396 rcm_log_message(RCM_TRACE4
, "node_action(%s)\n", node
->name
);
1399 * If flag indicates operation on a filesystem, we don't callback on
1400 * the filesystem root to avoid infinite recursion on filesystem module.
1402 * N.B. Such request should only come from filesystem RCM module.
1404 if (flag
& RCM_FILESYS
) {
1405 assert(node
->type
== RSRC_TYPE_FILESYS
);
1406 targ
->flag
&= ~RCM_FILESYS
;
1407 return (RN_WALK_CONTINUE
);
1411 * Execute state change callback
1413 (void) rsrc_client_action_list(node
->users
, targ
->cmd
, arg
);
1416 * Upon hitting a filesys root, prune children.
1417 * The filesys module should have taken care of
1420 if (node
->type
== RSRC_TYPE_FILESYS
)
1421 return (RN_WALK_PRUNECHILD
);
1423 return (RN_WALK_CONTINUE
);
1427 * Execute a command on a subtree under root.
1430 rsrc_tree_action(rsrc_node_t
*root
, int cmd
, tree_walk_arg_t
*arg
)
1432 rcm_log_message(RCM_TRACE2
, "tree_action(%s, %d)\n", root
->name
, cmd
);
1437 * If RCM_RETIRE_REQUEST is set, just walk one node and preset
1438 * retcode to NO_CONSTRAINT
1440 if (arg
->flag
& RCM_RETIRE_REQUEST
) {
1441 rcm_log_message(RCM_TRACE1
, "tree_action: RETIRE_REQ: walking "
1442 "only root node: %s\n", root
->name
);
1443 arg
->retcode
= RCM_NO_CONSTRAINT
;
1444 (void) node_action(root
, arg
);
1446 arg
->retcode
= RCM_SUCCESS
;
1447 rsrc_walk(root
, (void *)arg
, node_action
);
1450 return (arg
->retcode
);
1454 * Get info on current registrations
1457 rsrc_usage_info(char **rsrcnames
, uint_t flag
, int seq_num
, rcm_info_t
**info
)
1460 rcm_info_t
*result
= NULL
;
1461 tree_walk_arg_t arg
;
1468 arg
.seq_num
= seq_num
;
1470 for (i
= 0; rsrcnames
[i
] != NULL
; i
++) {
1472 rcm_log_message(RCM_TRACE2
, "rsrc_usage_info(%s, 0x%x, %d)\n",
1473 rsrcnames
[i
], flag
, seq_num
);
1475 if (flag
& RCM_INCLUDE_DEPENDENT
) {
1476 initial_req
= ((seq_num
& SEQ_NUM_MASK
) == 0);
1479 * if redundant request, skip the operation
1481 if (info_req_add(rsrcnames
[i
], flag
, seq_num
) != 0) {
1486 rv
= rsrc_node_find(rsrcnames
[i
], 0, &node
);
1487 if ((rv
!= RCM_SUCCESS
) || (node
== NULL
)) {
1488 if ((flag
& RCM_INCLUDE_DEPENDENT
) && initial_req
)
1489 info_req_remove(seq_num
);
1494 * Based on RCM_INCLUDE_SUBTREE flag, query either the subtree
1497 if (flag
& RCM_INCLUDE_SUBTREE
) {
1498 (void) rsrc_tree_action(node
, CMD_GETINFO
, &arg
);
1500 arg
.cmd
= CMD_GETINFO
;
1501 (void) node_action(node
, (void *)&arg
);
1504 if ((flag
& RCM_INCLUDE_DEPENDENT
) && initial_req
)
1505 info_req_remove(seq_num
);
1509 (void) rcm_append_info(info
, result
);
1514 * Get the list of currently loaded module
1520 rcm_info_t
*info
= NULL
;
1522 (void) mutex_lock(&mod_lock
);
1525 char *modinfo
= s_strdup(module_info(mod
));
1526 add_busy_rsrc_to_list("dummy", 0, 0, 0, mod
->name
,
1527 modinfo
, NULL
, NULL
, &info
);
1530 (void) mutex_unlock(&mod_lock
);
1536 * Initialize resource map - load all modules
/*
 * NOTE(review): extraction-damaged fragment of rcmd_db_init() (name
 * grounded by its own debug message below).  The function header, several
 * declarations (i, dir_name, mod_dir, tmp, module, rcm_script) and many
 * closing braces are missing from this view — recover from version
 * control.  Visible logic: initialize the script subsystem, allocate the
 * resource-tree root, then scan every RCM directory and load each
 * module/script entry, attaching first-time modules so they register
 * their resources.
 */
1543 struct dirent
*entp
;
1548 rcm_log_message(RCM_DEBUG
, "rcmd_db_init(): initialize database\n");
/* body of this failure branch is missing from this view */
1550 if (script_main_init() == -1)
/* rsrc_root: root of all resources (declared at top of file) */
1553 rsrc_root
= rn_alloc("/", RSRC_TYPE_NORMAL
);
/* iterate over all configured RCM directories */
1555 for (i
= 0; (dir_name
= rcm_dir(i
, &rcm_script
)) != NULL
; i
++) {
1557 if ((mod_dir
= opendir(dir_name
)) == NULL
) {
1558 continue; /* try next directory */
1561 rcm_log_message(RCM_TRACE2
, "search directory %s\n", dir_name
);
/* examine every directory entry */
1563 while ((entp
= readdir(mod_dir
)) != NULL
) {
/* skip the "." and ".." pseudo-entries */
1566 if (strcmp(entp
->d_name
, ".") == 0 ||
1567 strcmp(entp
->d_name
, "..") == 0)
/* module (not script) directory: require the module suffix */
1570 if (rcm_script
== 0) {
1572 if (((tmp
= strstr(entp
->d_name
,
1573 RCM_MODULE_SUFFIX
)) == NULL
) ||
1574 (tmp
[strlen(RCM_MODULE_SUFFIX
)] != '\0')) {
/* load (or find) the module and take a hold on it */
1579 module
= cli_module_hold(entp
->d_name
);
1580 if (module
== NULL
) {
/* failure to load is only an error for real modules, not scripts */
1581 if (rcm_script
== 0)
1582 rcm_log_message(RCM_ERROR
,
1583 gettext("%s: failed to load\n"),
/* ref count at MOD_REFCNT_INIT means this is the first hold */
1588 if (module
->ref_count
== MOD_REFCNT_INIT
) {
1590 * ask module to register for resource 1st time
1592 module_attach(module
);
1594 cli_module_rele(module
);
1596 (void) closedir(mod_dir
);
1603 * sync resource map - ask all modules to register again
/*
 * NOTE(review): extraction-damaged fragment.  The function header, the
 * early return for the throttle check, and the module-list loop skeleton
 * are missing from this view — recover from version control.  Visible
 * logic: rate-limit resyncs to one per `interval` seconds via the static
 * sync_time stamp, then ask every module to re-register its resources.
 */
1608 static time_t sync_time
= (time_t)-1;
1609 const time_t interval
= 5; /* resync at most every 5 sec */
1612 time_t curr
= time(NULL
);
/* skip the resync entirely if one happened within the last interval */
1614 if ((sync_time
!= (time_t)-1) && (curr
- sync_time
< interval
))
1618 (void) mutex_lock(&mod_lock
);
1622 * Hold module by incrementing ref count and release
1623 * mod_lock to avoid deadlock, since rcmop_register()
1624 * may callback into the daemon and request mod_lock.
1627 (void) mutex_unlock(&mod_lock
);
/* call the module's registration entry point without holding mod_lock */
1629 mod
->modops
->rcmop_register(mod
->rcmhandle
);
1631 (void) mutex_lock(&mod_lock
);
1635 (void) mutex_unlock(&mod_lock
);
/*
 * Determine if a process is alive.
 *
 * Returns 1 when the process identified by `pid` exists, 0 otherwise.
 * Existence is probed by stat()ing the /proc/<pid> directory.
 *
 * NOTE(review): the body of the pid == 0 branch was not visible in this
 * view; treating pid 0 as "always alive" matches the conventional use of
 * 0 as a "no owning process" sentinel — confirm against the original.
 */
static int
proc_exist(pid_t pid)
{
	char path[64];
	const char *procfs = "/proc";
	struct stat sb;

	if (pid == (pid_t)0) {
		return (1);
	}

	/*
	 * Fix: pid_t is not necessarily `long`; passing it to a "%ld"
	 * conversion without a cast is undefined behavior on models where
	 * the types differ.  Cast explicitly.
	 */
	(void) snprintf(path, sizeof (path), "%s/%ld", procfs, (long)pid);
	return (stat(path, &sb) == 0);
}
1657 * Cleaup client list
1659 * N.B. This routine runs in a single-threaded environment only. It is only
1660 * called by the cleanup thread, which never runs in parallel with other
1664 clean_client_list(client_t
**listp
)
1666 client_t
*client
= *listp
;
1669 * Cleanup notification clients for which pid no longer exists
1672 if ((client
->state
!= RCM_STATE_REMOVE
) &&
1673 proc_exist(client
->pid
)) {
1674 listp
= &client
->next
;
1680 * Destroy this client_t. rsrc_client_remove updates
1681 * listp to point to the next client.
1683 rsrc_client_remove(client
, listp
);
1690 clean_node(rsrc_node_t
*node
, void *arg
)
1692 rcm_log_message(RCM_TRACE4
, "clean_node(%s)\n", node
->name
);
1694 clean_client_list(&node
->users
);
1696 return (RN_WALK_CONTINUE
);
1702 rcm_log_message(RCM_TRACE4
,
1703 "clean_rsrc_tree(): delete stale dr clients\n");
1705 rsrc_walk(rsrc_root
, NULL
, clean_node
);
/*
 * NOTE(review): extraction-damaged fragment of db_clean(), the detached
 * cleanup-thread body (name grounded by the thr_create() call below that
 * passes db_clean).  The function header, the enclosing loop, the
 * need_cleanup reset, and the tree-cleaning calls between the state
 * transitions are missing from this view — recover from version control.
 * Visible logic: start the polling thread, then block on the barrier
 * condition variable until need_cleanup is raised, quiesce the daemon
 * (RCMD_CLEANUP), clean the resource tree, and return to RCMD_NORMAL.
 */
1711 extern barrier_t barrier
;
1712 extern void clean_dr_list();
/* rcm_req_lock guards the request queue used by the polling thread */
1715 (void) mutex_lock(&rcm_req_lock
);
1716 start_polling_thread();
1717 (void) mutex_unlock(&rcm_req_lock
);
/* sleep until another thread signals that cleanup is needed */
1719 (void) mutex_lock(&barrier
.lock
);
1720 while (need_cleanup
== 0)
1721 (void) cond_wait(&barrier
.cv
, &barrier
.lock
);
1722 (void) mutex_unlock(&barrier
.lock
);
1725 * Make sure all other threads are either blocked or exited.
1727 rcmd_set_state(RCMD_CLEANUP
);
1737 * clean resource tree
1741 rcmd_set_state(RCMD_NORMAL
);
/*
 * NOTE(review): extraction-damaged fragment of rcmd_db_clean() (name
 * grounded by its own debug message).  The function header and the final
 * argument of the warning message (presumably the thr_create error text)
 * are missing from this view — recover from version control.  Visible
 * logic: launch db_clean() as a detached thread; on failure, log a
 * warning but do not abort the daemon.
 */
1748 rcm_log_message(RCM_DEBUG
,
1749 "rcm_db_clean(): launch thread to clean database\n");
/* detached thread: nobody joins it; db_clean runs for the daemon's life */
1751 if (thr_create(NULL
, 0, (void *(*)(void *))db_clean
,
1752 NULL
, THR_DETACHED
, NULL
) != 0) {
1753 rcm_log_message(RCM_WARNING
,
1754 gettext("failed to create cleanup thread %s\n"),
1761 print_node(rsrc_node_t
*node
, void *arg
)
1765 rcm_log_message(RCM_DEBUG
, "rscname: %s, state = 0x%x\n", node
->name
);
1766 rcm_log_message(RCM_DEBUG
, " users:\n");
1768 if ((user
= node
->users
) == NULL
) {
1769 rcm_log_message(RCM_DEBUG
, " none\n");
1770 return (RN_WALK_CONTINUE
);
1774 rcm_log_message(RCM_DEBUG
, " %s, %d, %s\n",
1775 user
->module
->name
, user
->pid
, user
->alias
);
1778 return (RN_WALK_CONTINUE
);
1786 rcm_log_message(RCM_DEBUG
, "modules:\n");
1787 (void) mutex_lock(&mod_lock
);
1790 rcm_log_message(RCM_DEBUG
, " %s\n", mod
->name
);
1793 (void) mutex_unlock(&mod_lock
);
1795 rcm_log_message(RCM_DEBUG
, "\nresource tree:\n");
1797 rsrc_walk(rsrc_root
, NULL
, print_node
);
1799 rcm_log_message(RCM_DEBUG
, "\n");
1803 * Allocate handle from calling into each RCM module
1805 static rcm_handle_t
*
1806 rcm_handle_alloc(module_t
*module
)
1810 hdl
= s_malloc(sizeof (rcm_handle_t
));
1812 hdl
->modname
= module
->name
;
1814 hdl
->lrcm_ops
= &rcm_ops
; /* for callback into daemon directly */
1815 hdl
->module
= module
;
1824 rcm_handle_free(rcm_handle_t
*handle
)
/*
 * malloc() wrapper that never returns NULL: on memory outage the daemon
 * exits (per this file's "exit on memory outage" helper convention).
 *
 * NOTE(review): the OOM branch was not visible in this view; exiting via
 * rcmd_exit(ENOMEM) is inferred from the header comment — confirm.
 */
void *
s_malloc(size_t size)
{
	void *buf = malloc(size);

	if (buf == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (buf);
}
/*
 * calloc() wrapper that never returns NULL; exits the daemon on memory
 * outage.  Memory is zero-filled.
 *
 * NOTE(review): OOM branch reconstructed as in s_malloc() — confirm.
 */
void *
s_calloc(int n, size_t size)
{
	void *buf = calloc(n, size);

	if (buf == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (buf);
}
/*
 * realloc() wrapper that never returns NULL; exits the daemon on memory
 * outage (so the "lost original pointer" realloc hazard cannot occur —
 * failure never returns to the caller).
 *
 * NOTE(review): OOM branch reconstructed as in s_malloc() — confirm.
 */
void *
s_realloc(void *ptr, size_t size)
{
	void *new = realloc(ptr, size);

	if (new == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (new);
}
/*
 * strdup() wrapper that never returns NULL; exits the daemon on memory
 * outage.  Caller owns (and must free) the returned copy.
 *
 * NOTE(review): OOM branch reconstructed as in s_malloc() — confirm.
 */
char *
s_strdup(const char *str)
{
	char *buf = strdup(str);

	if (buf == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (buf);
}
1877 * Convert a version 1 ops vector to current ops vector
1878 * Fields missing in version 1 are set to NULL.
1880 static struct rcm_mod_ops
*
1881 modops_from_v1(void *ops_v1
)
1883 struct rcm_mod_ops
*ops
;
1885 ops
= s_calloc(1, sizeof (struct rcm_mod_ops
));
1886 bcopy(ops_v1
, ops
, sizeof (struct rcm_mod_ops_v1
));
1890 /* call a module's getinfo routine; detects v1 ops and adjusts the call */
1892 call_getinfo(struct rcm_mod_ops
*ops
, rcm_handle_t
*hdl
, char *alias
, id_t pid
,
1893 uint_t flag
, char **info
, char **error
, nvlist_t
*client_props
,
1897 struct rcm_mod_ops_v1
*v1_ops
;
1899 if (ops
->version
== RCM_MOD_OPS_V1
) {
1900 v1_ops
= (struct rcm_mod_ops_v1
*)ops
;
1901 rval
= v1_ops
->rcmop_get_info(hdl
, alias
, pid
, flag
, info
,
1903 if (rval
!= RCM_SUCCESS
&& *info
!= NULL
)
1904 *error
= strdup(*info
);
1907 return (ops
->rcmop_get_info(hdl
, alias
, pid
, flag
, info
, error
,
1908 client_props
, infop
));
1913 rcm_init_queue(rcm_queue_t
*head
)
1915 head
->next
= head
->prev
= head
;
1919 rcm_enqueue_head(rcm_queue_t
*head
, rcm_queue_t
*element
)
1921 rcm_enqueue(head
, element
);
1925 rcm_enqueue_tail(rcm_queue_t
*head
, rcm_queue_t
*element
)
1927 rcm_enqueue(head
->prev
, element
);
1931 rcm_enqueue(rcm_queue_t
*list_element
, rcm_queue_t
*element
)
1933 element
->next
= list_element
->next
;
1934 element
->prev
= list_element
;
1935 element
->next
->prev
= element
;
1936 list_element
->next
= element
;
1940 rcm_dequeue_head(rcm_queue_t
*head
)
1942 rcm_queue_t
*element
= head
->next
;
1943 rcm_dequeue(element
);
1948 rcm_dequeue_tail(rcm_queue_t
*head
)
1950 rcm_queue_t
*element
= head
->prev
;
1951 rcm_dequeue(element
);
1956 rcm_dequeue(rcm_queue_t
*element
)
1958 element
->prev
->next
= element
->next
;
1959 element
->next
->prev
= element
->prev
;
1960 element
->next
= element
->prev
= NULL
;