dmake: do not set MAKEFLAGS=k
[unleashed/tickless.git] / usr / src / cmd / rcm_daemon / common / rcm_subr.c
blob4c150f32052cba0f5dcc29bd2991a07d1b460a68
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
21 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
22 * Use is subject to license terms.
25 #include "rcm_impl.h"
26 #include "rcm_module.h"
29 * Short-circuits unloading of modules with no registrations, so that
30 * they are present during the next db_sync cycle.
32 #define MOD_REFCNT_INIT 2
34 int need_cleanup; /* flag indicating if clean up is needed */
36 static mutex_t mod_lock; /* protects module list */
37 static module_t *module_head; /* linked list of modules */
38 static rsrc_node_t *rsrc_root; /* root of all resources */
41 * Misc help routines
43 static void rcmd_db_print();
44 static void rcm_handle_free(rcm_handle_t *);
45 static rcm_handle_t *rcm_handle_alloc(module_t *);
46 static void rsrc_clients_free(client_t *);
47 static struct rcm_mod_ops *modops_from_v1(void *);
48 static int call_getinfo(struct rcm_mod_ops *, rcm_handle_t *, char *, id_t,
49 uint_t, char **, char **, nvlist_t *, rcm_info_t **);
50 static int node_action(rsrc_node_t *, void *);
52 extern void start_polling_thread();
55 * translate /dev name to a /devices path
57 * N.B. This routine can be enhanced to understand network names
58 * and friendly names in the future.
60 char *
61 resolve_name(char *alias)
63 char *tmp;
64 const char *dev = "/dev/";
66 if (strlen(alias) == 0)
67 return (NULL);
69 if (strncmp(alias, dev, strlen(dev)) == 0) {
71 * Treat /dev/... as a symbolic link
73 tmp = s_malloc(PATH_MAX);
74 if (realpath(alias, tmp) != NULL) {
75 return (tmp);
76 } else {
77 free(tmp);
79 /* Fail to resolve /dev/ name, use the name as is */
82 return (s_strdup(alias));
86 * Figure out resource type based on "resolved" name
88 * N.B. This routine does not figure out file system mount points.
89 * This is determined at runtime when filesys module register
90 * with RCM_FILESYS flag.
92 int
93 rsrc_get_type(const char *resolved_name)
95 if (resolved_name[0] != '/')
96 return (RSRC_TYPE_ABSTRACT);
98 if (strncmp("/devices/", resolved_name, 9) == 0)
99 return (RSRC_TYPE_DEVICE);
101 return (RSRC_TYPE_NORMAL);
105 * Module operations:
106 * module_load, module_unload, module_info, module_attach, module_detach,
107 * cli_module_hold, cli_module_rele
#ifdef ENABLE_MODULE_DETACH
/*
 * Invoke the module's unregister() entry point so the module can drop
 * its resource registrations without getting confused.
 */
static void
module_detach(module_t *module)
{
	rcm_log_message(RCM_TRACE2, "module_detach(name=%s)\n", module->name);

	module->modops->rcmop_unregister(module->rcmhandle);
}
#endif	/* ENABLE_MODULE_DETACH */
127 * call register() entry point to allow module to register for resources
129 static void
130 module_attach(module_t *module)
132 struct rcm_mod_ops *ops = module->modops;
134 rcm_log_message(RCM_TRACE2, "module_attach(name=%s)\n", module->name);
136 if (ops->rcmop_register(module->rcmhandle) != RCM_SUCCESS) {
137 rcm_log_message(RCM_WARNING,
138 gettext("module %s register() failed\n"), module->name);
142 struct rcm_mod_ops *
143 module_init(module_t *module)
145 if (module->dlhandle)
146 /* rcm module */
147 return (module->init());
148 else
149 /* rcm script */
150 return (script_init(module));
154 * call rcm_mod_info() entry of module
156 static const char *
157 module_info(module_t *module)
159 if (module->dlhandle)
160 /* rcm module */
161 return (module->info());
162 else
163 /* rcm script */
164 return (script_info(module));
168 module_fini(module_t *module)
170 if (module->dlhandle)
171 /* rcm module */
172 return (module->fini());
173 else
174 /* rcm script */
175 return (script_fini(module));
179 * call rcm_mod_fini() entry of module, dlclose module, and free memory
181 static void
182 module_unload(module_t *module)
184 int version = module->modops->version;
186 rcm_log_message(RCM_DEBUG, "module_unload(name=%s)\n", module->name);
188 (void) module_fini(module);
190 rcm_handle_free(module->rcmhandle);
191 free(module->name);
193 switch (version) {
194 case RCM_MOD_OPS_V1:
196 * Free memory associated with converted ops vector
198 free(module->modops);
199 break;
201 case RCM_MOD_OPS_VERSION:
202 default:
203 break;
206 if (module->dlhandle)
207 rcm_module_close(module->dlhandle);
209 free(module);
213 * Locate the module, execute rcm_mod_init() and check ops vector version
215 static module_t *
216 module_load(char *modname)
218 module_t *module;
220 rcm_log_message(RCM_DEBUG, "module_load(name=%s)\n", modname);
223 * dlopen the module
225 module = s_calloc(1, sizeof (*module));
226 module->name = s_strdup(modname);
227 module->modops = NULL;
228 rcm_init_queue(&module->client_q);
230 if (rcm_is_script(modname) == 0) {
231 /* rcm module */
232 module->dlhandle = rcm_module_open(modname);
234 if (module->dlhandle == NULL) {
235 rcm_log_message(RCM_NOTICE,
236 gettext("cannot open module %s\n"), modname);
237 goto fail;
241 * dlsym rcm_mod_init/fini/info() entry points
243 module->init = (struct rcm_mod_ops *(*)())dlsym(
244 module->dlhandle, "rcm_mod_init");
245 module->fini = (int (*)())dlsym(
246 module->dlhandle, "rcm_mod_fini");
247 module->info = (const char *(*)())dlsym(module->dlhandle,
248 "rcm_mod_info");
249 if (module->init == NULL || module->fini == NULL ||
250 module->info == NULL) {
251 rcm_log_message(RCM_ERROR,
252 gettext("missing entries in module %s\n"), modname);
253 goto fail;
256 } else {
257 /* rcm script */
258 module->dlhandle = NULL;
259 module->init = (struct rcm_mod_ops *(*)()) NULL;
260 module->fini = (int (*)()) NULL;
261 module->info = (const char *(*)()) NULL;
264 if ((module->modops = module_init(module)) == NULL) {
265 if (module->dlhandle)
266 rcm_log_message(RCM_ERROR,
267 gettext("cannot init module %s\n"), modname);
268 goto fail;
272 * Check ops vector version
274 switch (module->modops->version) {
275 case RCM_MOD_OPS_V1:
276 module->modops = modops_from_v1((void *)module->modops);
277 break;
279 case RCM_MOD_OPS_VERSION:
280 break;
282 default:
283 rcm_log_message(RCM_ERROR,
284 gettext("module %s rejected: version %d not supported\n"),
285 modname, module->modops->version);
286 (void) module_fini(module);
287 goto fail;
291 * Make sure all fields are set
293 if ((module->modops->rcmop_register == NULL) ||
294 (module->modops->rcmop_unregister == NULL) ||
295 (module->modops->rcmop_get_info == NULL) ||
296 (module->modops->rcmop_request_suspend == NULL) ||
297 (module->modops->rcmop_notify_resume == NULL) ||
298 (module->modops->rcmop_request_offline == NULL) ||
299 (module->modops->rcmop_notify_online == NULL) ||
300 (module->modops->rcmop_notify_remove == NULL)) {
301 rcm_log_message(RCM_ERROR,
302 gettext("module %s rejected: has NULL ops fields\n"),
303 modname);
304 (void) module_fini(module);
305 goto fail;
308 module->rcmhandle = rcm_handle_alloc(module);
309 return (module);
311 fail:
312 if (module->modops && module->modops->version == RCM_MOD_OPS_V1)
313 free(module->modops);
315 if (module->dlhandle)
316 rcm_module_close(module->dlhandle);
318 free(module->name);
319 free(module);
320 return (NULL);
324 * add one to module hold count. load the module if not loaded
326 static module_t *
327 cli_module_hold(char *modname)
329 module_t *module;
331 rcm_log_message(RCM_TRACE3, "cli_module_hold(%s)\n", modname);
333 (void) mutex_lock(&mod_lock);
334 module = module_head;
335 while (module) {
336 if (strcmp(module->name, modname) == 0) {
337 break;
339 module = module->next;
342 if (module) {
343 module->ref_count++;
344 (void) mutex_unlock(&mod_lock);
345 return (module);
349 * Module not found, attempt to load it
351 if ((module = module_load(modname)) == NULL) {
352 (void) mutex_unlock(&mod_lock);
353 return (NULL);
357 * Hold module and link module into module list
359 module->ref_count = MOD_REFCNT_INIT;
360 module->next = module_head;
361 module_head = module;
363 (void) mutex_unlock(&mod_lock);
365 return (module);
369 * decrement module hold count. Unload it if no reference
371 static void
372 cli_module_rele(module_t *module)
374 module_t *curr = module_head, *prev = NULL;
376 rcm_log_message(RCM_TRACE3, "cli_module_rele(name=%s)\n", module->name);
378 (void) mutex_lock(&mod_lock);
379 if (--(module->ref_count) != 0) {
380 (void) mutex_unlock(&mod_lock);
381 return;
384 rcm_log_message(RCM_TRACE2, "unloading module %s\n", module->name);
387 * Unlink the module from list
389 while (curr && (curr != module)) {
390 prev = curr;
391 curr = curr->next;
393 if (curr == NULL) {
394 rcm_log_message(RCM_ERROR,
395 gettext("Unexpected error: module %s not found.\n"),
396 module->name);
397 } else if (prev == NULL) {
398 module_head = curr->next;
399 } else {
400 prev->next = curr->next;
402 (void) mutex_unlock(&mod_lock);
404 module_unload(module);
408 * Gather usage info be passed back to requester. Discard info if user does
409 * not care (list == NULL).
411 void
412 add_busy_rsrc_to_list(char *alias, pid_t pid, int state, int seq_num,
413 char *modname, const char *infostr, const char *errstr,
414 nvlist_t *client_props, rcm_info_t **list)
416 rcm_info_t *info;
417 rcm_info_t *tmp;
418 char *buf = NULL;
419 size_t buflen = 0;
421 if (list == NULL) {
422 return;
425 info = s_calloc(1, sizeof (*info));
426 if (errno = nvlist_alloc(&(info->info), NV_UNIQUE_NAME, 0)) {
427 rcm_log_message(RCM_ERROR, "failed (nvlist_alloc=%s).\n",
428 strerror(errno));
429 rcmd_exit(errno);
432 /*LINTED*/
433 if ((errno = nvlist_add_string(info->info, RCM_RSRCNAME, alias)) ||
434 (errno = nvlist_add_int32(info->info, RCM_SEQ_NUM, seq_num)) ||
435 (errno = nvlist_add_int64(info->info, RCM_CLIENT_ID, pid)) ||
436 (errno = nvlist_add_int32(info->info, RCM_RSRCSTATE, state))) {
437 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
438 strerror(errno));
439 rcmd_exit(errno);
443 * Daemon calls to add_busy_rsrc_to_list may pass in
444 * error/info. Add these through librcm interfaces.
446 if (errstr) {
447 rcm_log_message(RCM_TRACE3, "adding error string: %s\n",
448 errstr);
449 if (errno = nvlist_add_string(info->info, RCM_CLIENT_ERROR,
450 (char *)errstr)) {
451 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
452 strerror(errno));
453 rcmd_exit(errno);
457 if (infostr) {
458 if (errno = nvlist_add_string(info->info, RCM_CLIENT_INFO,
459 (char *)infostr)) {
460 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
461 strerror(errno));
462 rcmd_exit(errno);
466 if (modname) {
467 if (errno = nvlist_add_string(info->info, RCM_CLIENT_MODNAME,
468 modname)) {
469 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
470 strerror(errno));
471 rcmd_exit(errno);
475 if (client_props) {
476 if (errno = nvlist_pack(client_props, &buf, &buflen,
477 NV_ENCODE_NATIVE, 0)) {
478 rcm_log_message(RCM_ERROR, "failed (nvlist_pack=%s).\n",
479 strerror(errno));
480 rcmd_exit(errno);
482 if (errno = nvlist_add_byte_array(info->info,
483 RCM_CLIENT_PROPERTIES, (uchar_t *)buf, buflen)) {
484 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
485 strerror(errno));
486 rcmd_exit(errno);
488 (void) free(buf);
492 /* link info at end of list */
493 if (*list) {
494 tmp = *list;
495 while (tmp->next)
496 tmp = tmp->next;
497 tmp->next = info;
498 } else {
499 *list = info;
504 * Resource client related operations:
505 * rsrc_client_alloc, rsrc_client_find, rsrc_client_add,
506 * rsrc_client_remove, rsrc_client_action, rsrc_client_action_list
509 /* Allocate rsrc_client_t structure. Load module if necessary. */
510 /*ARGSUSED*/
511 static client_t *
512 rsrc_client_alloc(char *alias, char *modname, pid_t pid, uint_t flag)
514 client_t *client;
515 module_t *mod;
517 assert((alias != NULL) && (modname != NULL));
519 rcm_log_message(RCM_TRACE4, "rsrc_client_alloc(%s, %s, %ld)\n",
520 alias, modname, pid);
522 if ((mod = cli_module_hold(modname)) == NULL) {
523 return (NULL);
526 client = s_calloc(1, sizeof (client_t));
527 client->module = mod;
528 client->pid = pid;
529 client->alias = s_strdup(alias);
530 client->prv_flags = 0;
531 client->state = RCM_STATE_ONLINE;
532 client->flag = flag;
534 /* This queue is protected by rcm_req_lock */
535 rcm_enqueue_tail(&mod->client_q, &client->queue);
537 return (client);
540 /* Find client in list matching modname and pid */
541 client_t *
542 rsrc_client_find(char *modname, pid_t pid, client_t **list)
544 client_t *client = *list;
546 rcm_log_message(RCM_TRACE4, "rsrc_client_find(%s, %ld, %p)\n",
547 modname, pid, (void *)list);
549 while (client) {
550 if ((client->pid == pid) &&
551 strcmp(modname, client->module->name) == 0) {
552 break;
554 client = client->next;
556 return (client);
559 /* Add a client to client list */
560 static void
561 rsrc_client_add(client_t *client, client_t **list)
563 rcm_log_message(RCM_TRACE4, "rsrc_client_add: %s, %s, %ld\n",
564 client->alias, client->module->name, client->pid);
566 client->next = *list;
567 *list = client;
570 /* Remove client from list and destroy it */
571 static void
572 rsrc_client_remove(client_t *client, client_t **list)
574 client_t *tmp, *prev = NULL;
576 rcm_log_message(RCM_TRACE4, "rsrc_client_remove: %s, %s, %ld\n",
577 client->alias, client->module->name, client->pid);
579 tmp = *list;
580 while (tmp) {
581 if (client != tmp) {
582 prev = tmp;
583 tmp = tmp->next;
584 continue;
586 if (prev) {
587 prev->next = tmp->next;
588 } else {
589 *list = tmp->next;
591 tmp->next = NULL;
592 rsrc_clients_free(tmp);
593 return;
597 /* Free a list of clients. Called from cleanup thread only */
598 static void
599 rsrc_clients_free(client_t *list)
601 client_t *client = list;
603 while (client) {
606 * Note that the rcm daemon is single threaded while
607 * executing this routine. So there is no need to acquire
608 * rcm_req_lock here while dequeuing.
610 rcm_dequeue(&client->queue);
612 if (client->module) {
613 cli_module_rele(client->module);
615 list = client->next;
616 if (client->alias) {
617 free(client->alias);
619 free(client);
620 client = list;
625 * Invoke a callback into a single client
626 * This is the core of rcm_mod_ops interface
628 static int
629 rsrc_client_action(client_t *client, int cmd, void *arg)
631 int rval = RCM_SUCCESS;
632 char *dummy_error = NULL;
633 char *error = NULL;
634 char *info = NULL;
635 rcm_handle_t *hdl;
636 nvlist_t *client_props = NULL;
637 rcm_info_t *depend_info = NULL;
638 struct rcm_mod_ops *ops = client->module->modops;
639 tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;
641 rcm_log_message(RCM_TRACE4,
642 "rsrc_client_action: %s, %s, cmd=%d, flag=0x%x\n", client->alias,
643 client->module->name, cmd, targ->flag);
646 * Create a per-operation handle, increment seq_num by 1 so we will
647 * know if a module uses this handle to callback into rcm_daemon.
649 hdl = rcm_handle_alloc(client->module);
650 hdl->seq_num = targ->seq_num + 1;
653 * Filter out operations for which the client didn't register.
655 switch (cmd) {
656 case CMD_SUSPEND:
657 case CMD_RESUME:
658 case CMD_OFFLINE:
659 case CMD_ONLINE:
660 case CMD_REMOVE:
661 if ((client->flag & RCM_REGISTER_DR) == 0) {
662 rcm_handle_free(hdl);
663 return (RCM_SUCCESS);
665 break;
666 case CMD_REQUEST_CHANGE:
667 case CMD_NOTIFY_CHANGE:
668 if ((client->flag & RCM_REGISTER_CAPACITY) == 0) {
669 rcm_handle_free(hdl);
670 return (RCM_SUCCESS);
672 break;
673 case CMD_EVENT:
674 if ((client->flag & RCM_REGISTER_EVENT) == 0) {
675 rcm_handle_free(hdl);
676 return (RCM_SUCCESS);
678 break;
682 * Create nvlist_t for any client-specific properties.
684 if (errno = nvlist_alloc(&client_props, NV_UNIQUE_NAME, 0)) {
685 rcm_log_message(RCM_ERROR,
686 "client action failed (nvlist_alloc=%s)\n",
687 strerror(errno));
688 rcmd_exit(errno);
692 * Process the operation via a callback to the client module.
694 switch (cmd) {
695 case CMD_GETINFO:
696 rval = call_getinfo(ops, hdl, client->alias, client->pid,
697 targ->flag, &info, &error, client_props, &depend_info);
698 break;
700 case CMD_SUSPEND:
701 if (((targ->flag & RCM_QUERY_CANCEL) == 0) &&
702 (client->state == RCM_STATE_SUSPEND)) {
703 break;
706 if ((targ->flag & RCM_QUERY) == 0) {
707 rcm_log_message(RCM_DEBUG, "suspending %s\n",
708 client->alias);
709 } else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
710 rcm_log_message(RCM_DEBUG, "suspend query %s\n",
711 client->alias);
712 } else {
713 rcm_log_message(RCM_DEBUG,
714 "suspend query %s cancelled\n", client->alias);
718 * Update the client's state before the operation.
719 * If this is a cancelled query, then updating the state is
720 * the only thing that needs to be done, so break afterwards.
722 if ((targ->flag & RCM_QUERY) == 0) {
723 client->state = RCM_STATE_SUSPENDING;
724 } else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
725 client->state = RCM_STATE_SUSPEND_QUERYING;
726 } else {
727 client->state = RCM_STATE_ONLINE;
728 break;
731 rval = ops->rcmop_request_suspend(hdl, client->alias,
732 client->pid, targ->interval, targ->flag, &error,
733 &depend_info);
735 /* Update the client's state after the operation. */
736 if ((targ->flag & RCM_QUERY) == 0) {
737 if (rval == RCM_SUCCESS) {
738 client->state = RCM_STATE_SUSPEND;
739 } else {
740 client->state = RCM_STATE_SUSPEND_FAIL;
742 } else {
743 if (rval == RCM_SUCCESS) {
744 client->state = RCM_STATE_SUSPEND_QUERY;
745 } else {
746 client->state = RCM_STATE_SUSPEND_QUERY_FAIL;
749 break;
751 case CMD_RESUME:
752 if (client->state == RCM_STATE_ONLINE) {
753 break;
755 client->state = RCM_STATE_RESUMING;
756 rval = ops->rcmop_notify_resume(hdl, client->alias, client->pid,
757 targ->flag, &error, &depend_info);
759 /* online state is unconditional */
760 client->state = RCM_STATE_ONLINE;
761 break;
763 case CMD_OFFLINE:
764 if (((targ->flag & RCM_QUERY_CANCEL) == 0) &&
765 (client->state == RCM_STATE_OFFLINE)) {
766 break;
769 if ((targ->flag & RCM_QUERY) == 0) {
770 rcm_log_message(RCM_DEBUG, "offlining %s\n",
771 client->alias);
772 } else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
773 rcm_log_message(RCM_DEBUG, "offline query %s\n",
774 client->alias);
775 } else {
776 rcm_log_message(RCM_DEBUG,
777 "offline query %s cancelled\n", client->alias);
781 * Update the client's state before the operation.
782 * If this is a cancelled query, then updating the state is
783 * the only thing that needs to be done, so break afterwards.
785 if ((targ->flag & RCM_QUERY) == 0) {
786 client->state = RCM_STATE_OFFLINING;
787 } else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
788 client->state = RCM_STATE_OFFLINE_QUERYING;
789 } else {
790 client->state = RCM_STATE_ONLINE;
791 break;
794 rval = ops->rcmop_request_offline(hdl, client->alias,
795 client->pid, targ->flag, &error, &depend_info);
798 * If this is a retire operation and we managed to call
799 * into at least one client, set retcode to RCM_SUCCESS to
800 * indicate that retire has been subject to constraints
801 * This retcode will be further modified by actual return
802 * code.
804 if ((targ->flag & RCM_RETIRE_REQUEST) &&
805 (targ->retcode == RCM_NO_CONSTRAINT)) {
806 rcm_log_message(RCM_DEBUG,
807 "at least 1 client, constraint applied: %s\n",
808 client->alias);
809 targ->retcode = RCM_SUCCESS;
812 /* Update the client's state after the operation. */
813 if ((targ->flag & RCM_QUERY) == 0) {
814 if (rval == RCM_SUCCESS) {
815 client->state = RCM_STATE_OFFLINE;
816 } else {
817 client->state = RCM_STATE_OFFLINE_FAIL;
819 } else {
820 if (rval == RCM_SUCCESS) {
821 client->state = RCM_STATE_OFFLINE_QUERY;
822 } else {
823 client->state = RCM_STATE_OFFLINE_QUERY_FAIL;
826 break;
828 case CMD_ONLINE:
829 if (client->state == RCM_STATE_ONLINE) {
830 break;
833 rcm_log_message(RCM_DEBUG, "onlining %s\n", client->alias);
835 client->state = RCM_STATE_ONLINING;
836 rval = ops->rcmop_notify_online(hdl, client->alias, client->pid,
837 targ->flag, &error, &depend_info);
838 client->state = RCM_STATE_ONLINE;
839 break;
841 case CMD_REMOVE:
842 rcm_log_message(RCM_DEBUG, "removing %s\n", client->alias);
843 client->state = RCM_STATE_REMOVING;
844 rval = ops->rcmop_notify_remove(hdl, client->alias, client->pid,
845 targ->flag, &error, &depend_info);
846 client->state = RCM_STATE_REMOVE;
847 break;
849 case CMD_REQUEST_CHANGE:
850 rcm_log_message(RCM_DEBUG, "requesting state change of %s\n",
851 client->alias);
852 if (ops->rcmop_request_capacity_change)
853 rval = ops->rcmop_request_capacity_change(hdl,
854 client->alias, client->pid, targ->flag, targ->nvl,
855 &error, &depend_info);
856 break;
858 case CMD_NOTIFY_CHANGE:
859 rcm_log_message(RCM_DEBUG, "requesting state change of %s\n",
860 client->alias);
861 if (ops->rcmop_notify_capacity_change)
862 rval = ops->rcmop_notify_capacity_change(hdl,
863 client->alias, client->pid, targ->flag, targ->nvl,
864 &error, &depend_info);
865 break;
867 case CMD_EVENT:
868 rcm_log_message(RCM_DEBUG, "delivering event to %s\n",
869 client->alias);
870 if (ops->rcmop_notify_event)
871 rval = ops->rcmop_notify_event(hdl, client->alias,
872 client->pid, targ->flag, &error, targ->nvl,
873 &depend_info);
874 break;
876 default:
877 rcm_log_message(RCM_ERROR, gettext("unknown command %d\n"),
878 cmd);
879 rval = RCM_FAILURE;
880 break;
883 /* reset error code to the most significant error */
884 if (rval != RCM_SUCCESS)
885 targ->retcode = rval;
888 * XXX - The code below may produce duplicate rcm_info_t's on error?
890 if ((cmd != CMD_GETINFO) &&
891 ((rval != RCM_SUCCESS) ||
892 (error != NULL) ||
893 (targ->flag & RCM_SCOPE))) {
894 (void) call_getinfo(ops, hdl, client->alias, client->pid,
895 targ->flag & (~(RCM_INCLUDE_DEPENDENT|RCM_INCLUDE_SUBTREE)),
896 &info, &dummy_error, client_props, &depend_info);
897 if (dummy_error)
898 (void) free(dummy_error);
899 } else if (cmd != CMD_GETINFO) {
900 nvlist_free(client_props);
901 client_props = NULL;
904 if (client_props) {
905 add_busy_rsrc_to_list(client->alias, client->pid, client->state,
906 targ->seq_num, client->module->name, info, error,
907 client_props, targ->info);
908 nvlist_free(client_props);
911 if (info)
912 (void) free(info);
913 if (error)
914 (void) free(error);
916 if (depend_info) {
917 if (targ->info) {
918 (void) rcm_append_info(targ->info, depend_info);
919 } else {
920 rcm_free_info(depend_info);
924 rcm_handle_free(hdl);
925 return (rval);
929 * invoke a callback into a list of clients, return 0 if all success
932 rsrc_client_action_list(client_t *list, int cmd, void *arg)
934 int error, rval = RCM_SUCCESS;
935 tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;
937 while (list) {
938 client_t *client = list;
939 list = client->next;
942 * Make offline idempotent in the retire
943 * case
945 if ((targ->flag & RCM_RETIRE_REQUEST) &&
946 client->state == RCM_STATE_REMOVE) {
947 client->state = RCM_STATE_ONLINE;
948 rcm_log_message(RCM_DEBUG, "RETIRE: idempotent client "
949 "state: REMOVE -> ONLINE: %s\n", client->alias);
952 if (client->state == RCM_STATE_REMOVE)
953 continue;
955 error = rsrc_client_action(client, cmd, arg);
956 if (error != RCM_SUCCESS) {
957 rval = error;
961 return (rval);
965 * Node related operations:
967 * rn_alloc, rn_free, rn_find_child,
968 * rn_get_child, rn_get_sibling,
969 * rsrc_node_find, rsrc_node_add_user, rsrc_node_remove_user,
972 /* Allocate node based on a logical or physical name */
973 static rsrc_node_t *
974 rn_alloc(char *name, int type)
976 rsrc_node_t *node;
978 rcm_log_message(RCM_TRACE4, "rn_alloc(%s, %d)\n", name, type);
980 node = s_calloc(1, sizeof (*node));
981 node->name = s_strdup(name);
982 node->type = type;
984 return (node);
988 * Free node along with its siblings and children
990 static void
991 rn_free(rsrc_node_t *node)
993 if (node == NULL) {
994 return;
997 if (node->child) {
998 rn_free(node->child);
1001 if (node->sibling) {
1002 rn_free(node->sibling);
1005 rsrc_clients_free(node->users);
1006 free(node->name);
1007 free(node);
1011 * Find next sibling
1013 static rsrc_node_t *
1014 rn_get_sibling(rsrc_node_t *node)
1016 return (node->sibling);
1020 * Find first child
1022 static rsrc_node_t *
1023 rn_get_child(rsrc_node_t *node)
1025 return (node->child);
1029 * Find child named childname. Create it if flag is RSRC_NODE_CREATE
1031 static rsrc_node_t *
1032 rn_find_child(rsrc_node_t *parent, char *childname, int flag, int type)
1034 rsrc_node_t *child = parent->child;
1035 rsrc_node_t *new, *prev = NULL;
1037 rcm_log_message(RCM_TRACE4,
1038 "rn_find_child(parent=%s, child=%s, 0x%x, %d)\n",
1039 parent->name, childname, flag, type);
1042 * Children are ordered based on strcmp.
1044 while (child && (strcmp(child->name, childname) < 0)) {
1045 prev = child;
1046 child = child->sibling;
1049 if (child && (strcmp(child->name, childname) == 0)) {
1050 return (child);
1053 if (flag != RSRC_NODE_CREATE)
1054 return (NULL);
1056 new = rn_alloc(childname, type);
1057 new->parent = parent;
1058 new->sibling = child;
1061 * Set this linkage last so we don't break ongoing operations.
1063 * N.B. Assume setting a pointer is an atomic operation.
1065 if (prev == NULL) {
1066 parent->child = new;
1067 } else {
1068 prev->sibling = new;
1071 return (new);
1075 * Pathname related help functions
1077 static void
1078 pn_preprocess(char *pathname, int type)
1080 char *tmp;
1082 if (type != RSRC_TYPE_DEVICE)
1083 return;
1086 * For devices, convert ':' to '/' (treat minor nodes and children)
1088 tmp = strchr(pathname, ':');
1089 if (tmp == NULL)
1090 return;
1092 *tmp = '/';
/*
 * Return the next '/'-separated component of pathname, NUL-terminating
 * it in place, and set *lasts for the next call (strtok_r style).
 * Returns NULL when no component remains.
 */
static char *
pn_getnextcomp(char *pathname, char **lasts)
{
	char *slash;

	if (pathname == NULL)
		return (NULL);

	/* skip leading slashes */
	while (*pathname == '/')
		pathname++;

	if (*pathname == '\0')
		return (NULL);

	slash = strchr(pathname, '/');
	if (slash == NULL) {
		*lasts = NULL;
	} else {
		*slash = '\0';
		*lasts = slash + 1;
	}

	return (pathname);
}
1122 * Find a node in tree based on device, which is the physical pathname
1123 * of the form /sbus@.../esp@.../sd@...
1126 rsrc_node_find(char *rsrcname, int flag, rsrc_node_t **nodep)
1128 char *pathname, *nodename, *lasts;
1129 rsrc_node_t *node;
1130 int type;
1132 rcm_log_message(RCM_TRACE4, "rn_node_find(%s, 0x%x)\n", rsrcname, flag);
1135 * For RSRC_TYPE_ABSTRACT, look under /ABSTRACT. For other types,
1136 * look under /SYSTEM.
1138 pathname = resolve_name(rsrcname);
1139 if (pathname == NULL)
1140 return (EINVAL);
1142 type = rsrc_get_type(pathname);
1143 switch (type) {
1144 case RSRC_TYPE_DEVICE:
1145 case RSRC_TYPE_NORMAL:
1146 node = rn_find_child(rsrc_root, "SYSTEM", RSRC_NODE_CREATE,
1147 RSRC_TYPE_NORMAL);
1148 break;
1150 case RSRC_TYPE_ABSTRACT:
1151 node = rn_find_child(rsrc_root, "ABSTRACT", RSRC_NODE_CREATE,
1152 RSRC_TYPE_NORMAL);
1153 break;
1155 default:
1156 /* just to make sure */
1157 free(pathname);
1158 return (EINVAL);
1162 * Find position of device within tree. Upon exiting the loop, device
1163 * should be placed between prev and curr.
1165 pn_preprocess(pathname, type);
1166 lasts = pathname;
1167 while ((nodename = pn_getnextcomp(lasts, &lasts)) != NULL) {
1168 rsrc_node_t *parent = node;
1169 node = rn_find_child(parent, nodename, flag, type);
1170 if (node == NULL) {
1171 assert((flag & RSRC_NODE_CREATE) == 0);
1172 free(pathname);
1173 *nodep = NULL;
1174 return (RCM_SUCCESS);
1177 free(pathname);
1178 *nodep = node;
1179 return (RCM_SUCCESS);
1183 * add a usage client to a node
1185 /*ARGSUSED*/
1187 rsrc_node_add_user(rsrc_node_t *node, char *alias, char *modname, pid_t pid,
1188 uint_t flag)
1190 client_t *user;
1192 rcm_log_message(RCM_TRACE3,
1193 "rsrc_node_add_user(%s, %s, %s, %ld, 0x%x)\n",
1194 node->name, alias, modname, pid, flag);
1196 user = rsrc_client_find(modname, pid, &node->users);
1199 * If a client_t already exists, add the registration and return
1200 * success if it's a valid registration request.
1202 * Return EALREADY if the resource is already registered.
1203 * This means either the client_t already has the requested
1204 * registration flagged, or that a DR registration was attempted
1205 * on a resource already in use in the DR operations state model.
1207 if (user != NULL) {
1209 if (user->flag & (flag & RCM_REGISTER_MASK)) {
1210 return (EALREADY);
1213 if ((flag & RCM_REGISTER_DR) &&
1214 (user->state != RCM_STATE_REMOVE)) {
1215 return (EALREADY);
1218 user->flag |= (flag & RCM_REGISTER_MASK);
1219 if ((flag & RCM_REGISTER_DR) ||
1220 (user->state == RCM_STATE_REMOVE)) {
1221 user->state = RCM_STATE_ONLINE;
1224 return (RCM_SUCCESS);
1228 * Otherwise create a new client_t and create a new registration.
1230 if ((user = rsrc_client_alloc(alias, modname, pid, flag)) != NULL) {
1231 rsrc_client_add(user, &node->users);
1233 if (flag & RCM_FILESYS)
1234 node->type = RSRC_TYPE_FILESYS;
1236 return (RCM_SUCCESS);
1240 * remove a usage client of a node
1243 rsrc_node_remove_user(rsrc_node_t *node, char *modname, pid_t pid, uint_t flag)
1245 client_t *user;
1247 rcm_log_message(RCM_TRACE3,
1248 "rsrc_node_remove_user(%s, %s, %ld, 0x%x)\n", node->name, modname,
1249 pid, flag);
1251 user = rsrc_client_find(modname, pid, &node->users);
1252 if ((user == NULL) || (user->state == RCM_STATE_REMOVE)) {
1253 rcm_log_message(RCM_NOTICE, gettext(
1254 "client not registered: module=%s, pid=%d, dev=%s\n"),
1255 modname, pid, node->name);
1256 return (ENOENT);
1259 /* Strip off the registration being removed (DR, event, capacity) */
1260 user->flag = user->flag & (~(flag & RCM_REGISTER_MASK));
1263 * Mark the client as removed if all registrations have been removed
1265 if ((user->flag & RCM_REGISTER_MASK) == 0)
1266 user->state = RCM_STATE_REMOVE;
1268 return (RCM_SUCCESS);
1272 * Tree walking function - rsrc_walk
1275 #define MAX_TREE_DEPTH 32
1277 #define RN_WALK_CONTINUE 0
1278 #define RN_WALK_PRUNESIB 1
1279 #define RN_WALK_PRUNECHILD 2
1280 #define RN_WALK_TERMINATE 3
1282 #define EMPTY_STACK(sp) ((sp)->depth == 0)
1283 #define TOP_NODE(sp) ((sp)->node[(sp)->depth - 1])
1284 #define PRUNE_SIB(sp) ((sp)->prunesib[(sp)->depth - 1])
1285 #define PRUNE_CHILD(sp) ((sp)->prunechild[(sp)->depth - 1])
1286 #define POP_STACK(sp) ((sp)->depth)--
1287 #define PUSH_STACK(sp, rn) \
1288 (sp)->node[(sp)->depth] = (rn); \
1289 (sp)->prunesib[(sp)->depth] = 0; \
1290 (sp)->prunechild[(sp)->depth] = 0; \
1291 ((sp)->depth)++
1293 struct rn_stack {
1294 rsrc_node_t *node[MAX_TREE_DEPTH];
1295 char prunesib[MAX_TREE_DEPTH];
1296 char prunechild[MAX_TREE_DEPTH];
1297 int depth;
/*
 * Walk one node and update the node stack.
 *
 * Invokes node_callback(node, arg) on the node at the top of the stack,
 * then adjusts the stack so the next call visits the next node in
 * child-first order.  The callback's return value steers the walk:
 * TERMINATE drains the stack, PRUNESIB/PRUNECHILD skip siblings or
 * children, CONTINUE proceeds normally.
 */
/*ARGSUSED*/
static void
walk_one_node(struct rn_stack *sp, void *arg,
    int (*node_callback)(rsrc_node_t *, void *))
{
	int prunesib;
	rsrc_node_t *child, *sibling;
	rsrc_node_t *node = TOP_NODE(sp);

	rcm_log_message(RCM_TRACE4, "walk_one_node(%s)\n", node->name);

	switch (node_callback(node, arg)) {
	case RN_WALK_TERMINATE:
		/* Abort the whole walk: drain the stack and return */
		POP_STACK(sp);
		while (!EMPTY_STACK(sp)) {
			node = TOP_NODE(sp);
			POP_STACK(sp);
		}
		return;

	case RN_WALK_PRUNESIB:
		/* Do not visit any further siblings of this node */
		PRUNE_SIB(sp) = 1;
		break;

	case RN_WALK_PRUNECHILD:
		/* Do not descend into this node's children */
		PRUNE_CHILD(sp) = 1;
		break;

	case RN_WALK_CONTINUE:
	default:
		break;
	}

	/*
	 * Push child on the stack
	 */
	if (!PRUNE_CHILD(sp) && (child = rn_get_child(node)) != NULL) {
		PUSH_STACK(sp, child);
		return;
	}

	/*
	 * Pop the stack till a node's sibling can be pushed
	 */
	prunesib = PRUNE_SIB(sp);
	POP_STACK(sp);
	while (!EMPTY_STACK(sp) &&
	    (prunesib || (sibling = rn_get_sibling(node)) == NULL)) {
		node = TOP_NODE(sp);
		prunesib = PRUNE_SIB(sp);
		POP_STACK(sp);
	}

	if (EMPTY_STACK(sp)) {
		/* Walk complete: root's subtree fully visited */
		return;
	}

	/*
	 * push sibling onto the stack
	 */
	PUSH_STACK(sp, sibling);
}
1365 * walk tree rooted at root in child-first order
1367 static void
1368 rsrc_walk(rsrc_node_t *root, void *arg,
1369 int (*node_callback)(rsrc_node_t *, void *))
1371 struct rn_stack stack;
1373 rcm_log_message(RCM_TRACE3, "rsrc_walk(%s)\n", root->name);
1376 * Push root on stack and walk in child-first order
1378 stack.depth = 0;
1379 PUSH_STACK(&stack, root);
1380 PRUNE_SIB(&stack) = 1;
1382 while (!EMPTY_STACK(&stack)) {
1383 walk_one_node(&stack, arg, node_callback);
1388 * Callback for a command action on a node
1390 static int
1391 node_action(rsrc_node_t *node, void *arg)
1393 tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;
1394 uint_t flag = targ->flag;
1396 rcm_log_message(RCM_TRACE4, "node_action(%s)\n", node->name);
1399 * If flag indicates operation on a filesystem, we don't callback on
1400 * the filesystem root to avoid infinite recursion on filesystem module.
1402 * N.B. Such request should only come from filesystem RCM module.
1404 if (flag & RCM_FILESYS) {
1405 assert(node->type == RSRC_TYPE_FILESYS);
1406 targ->flag &= ~RCM_FILESYS;
1407 return (RN_WALK_CONTINUE);
1411 * Execute state change callback
1413 (void) rsrc_client_action_list(node->users, targ->cmd, arg);
1416 * Upon hitting a filesys root, prune children.
1417 * The filesys module should have taken care of
1418 * children by now.
1420 if (node->type == RSRC_TYPE_FILESYS)
1421 return (RN_WALK_PRUNECHILD);
1423 return (RN_WALK_CONTINUE);
1427 * Execute a command on a subtree under root.
1430 rsrc_tree_action(rsrc_node_t *root, int cmd, tree_walk_arg_t *arg)
1432 rcm_log_message(RCM_TRACE2, "tree_action(%s, %d)\n", root->name, cmd);
1434 arg->cmd = cmd;
1437 * If RCM_RETIRE_REQUEST is set, just walk one node and preset
1438 * retcode to NO_CONSTRAINT
1440 if (arg->flag & RCM_RETIRE_REQUEST) {
1441 rcm_log_message(RCM_TRACE1, "tree_action: RETIRE_REQ: walking "
1442 "only root node: %s\n", root->name);
1443 arg->retcode = RCM_NO_CONSTRAINT;
1444 (void) node_action(root, arg);
1445 } else {
1446 arg->retcode = RCM_SUCCESS;
1447 rsrc_walk(root, (void *)arg, node_action);
1450 return (arg->retcode);
1454 * Get info on current regsitrations
1457 rsrc_usage_info(char **rsrcnames, uint_t flag, int seq_num, rcm_info_t **info)
1459 rsrc_node_t *node;
1460 rcm_info_t *result = NULL;
1461 tree_walk_arg_t arg;
1462 int initial_req;
1463 int rv;
1464 int i;
1466 arg.flag = flag;
1467 arg.info = &result;
1468 arg.seq_num = seq_num;
1470 for (i = 0; rsrcnames[i] != NULL; i++) {
1472 rcm_log_message(RCM_TRACE2, "rsrc_usage_info(%s, 0x%x, %d)\n",
1473 rsrcnames[i], flag, seq_num);
1475 if (flag & RCM_INCLUDE_DEPENDENT) {
1476 initial_req = ((seq_num & SEQ_NUM_MASK) == 0);
1479 * if redundant request, skip the operation
1481 if (info_req_add(rsrcnames[i], flag, seq_num) != 0) {
1482 continue;
1486 rv = rsrc_node_find(rsrcnames[i], 0, &node);
1487 if ((rv != RCM_SUCCESS) || (node == NULL)) {
1488 if ((flag & RCM_INCLUDE_DEPENDENT) && initial_req)
1489 info_req_remove(seq_num);
1490 continue;
1494 * Based on RCM_INCLUDE_SUBTREE flag, query either the subtree
1495 * or just the node.
1497 if (flag & RCM_INCLUDE_SUBTREE) {
1498 (void) rsrc_tree_action(node, CMD_GETINFO, &arg);
1499 } else {
1500 arg.cmd = CMD_GETINFO;
1501 (void) node_action(node, (void *)&arg);
1504 if ((flag & RCM_INCLUDE_DEPENDENT) && initial_req)
1505 info_req_remove(seq_num);
1508 out:
1509 (void) rcm_append_info(info, result);
1510 return (rv);
1514 * Get the list of currently loaded module
1516 rcm_info_t *
1517 rsrc_mod_info()
1519 module_t *mod;
1520 rcm_info_t *info = NULL;
1522 (void) mutex_lock(&mod_lock);
1523 mod = module_head;
1524 while (mod) {
1525 char *modinfo = s_strdup(module_info(mod));
1526 add_busy_rsrc_to_list("dummy", 0, 0, 0, mod->name,
1527 modinfo, NULL, NULL, &info);
1528 mod = mod->next;
1530 (void) mutex_unlock(&mod_lock);
1532 return (info);
/*
 * Initialize resource map - load all modules.
 *
 * Scans every directory returned by rcm_dir(); rcm_script tells
 * whether a directory holds RCM scripts or RCM modules (module names
 * must end in RCM_MODULE_SUFFIX).  Each loadable module is held,
 * attached on first load so it can register its resources, then
 * released.  Exits the daemon if script support cannot initialize.
 */
void
rcmd_db_init()
{
	char *tmp;
	DIR *mod_dir;
	struct dirent *entp;
	int i;
	char *dir_name;
	int rcm_script;

	rcm_log_message(RCM_DEBUG, "rcmd_db_init(): initialize database\n");

	if (script_main_init() == -1)
		rcmd_exit(errno);

	rsrc_root = rn_alloc("/", RSRC_TYPE_NORMAL);

	for (i = 0; (dir_name = rcm_dir(i, &rcm_script)) != NULL; i++) {

		if ((mod_dir = opendir(dir_name)) == NULL) {
			continue;	/* try next directory */
		}

		rcm_log_message(RCM_TRACE2, "search directory %s\n", dir_name);

		while ((entp = readdir(mod_dir)) != NULL) {
			module_t *module;

			if (strcmp(entp->d_name, ".") == 0 ||
			    strcmp(entp->d_name, "..") == 0)
				continue;

			if (rcm_script == 0) {
				/*
				 * rcm module: name must end in exactly
				 * RCM_MODULE_SUFFIX (no trailing chars)
				 */
				if (((tmp = strstr(entp->d_name,
				    RCM_MODULE_SUFFIX)) == NULL) ||
				    (tmp[strlen(RCM_MODULE_SUFFIX)] != '\0')) {
					continue;
				}
			}

			module = cli_module_hold(entp->d_name);
			if (module == NULL) {
				/* scripts may legitimately fail to load */
				if (rcm_script == 0)
					rcm_log_message(RCM_ERROR,
					    gettext("%s: failed to load\n"),
					    entp->d_name);
				continue;
			}

			if (module->ref_count == MOD_REFCNT_INIT) {
				/*
				 * ask module to register for resource 1st time
				 */
				module_attach(module);
			}
			cli_module_rele(module);
		}
		(void) closedir(mod_dir);
	}

	rcmd_db_print();
}
/*
 * sync resource map - ask all modules to register again.
 * Rate-limited: a resync within `interval` seconds of the previous one
 * is silently skipped.
 */
void
rcmd_db_sync()
{
	static time_t sync_time = (time_t)-1;
	const time_t interval = 5;	/* resync at most every 5 sec */

	module_t *mod;
	time_t curr = time(NULL);

	if ((sync_time != (time_t)-1) && (curr - sync_time < interval))
		return;

	sync_time = curr;
	(void) mutex_lock(&mod_lock);
	mod = module_head;
	while (mod) {
		/*
		 * Hold module by incrementing ref count and release
		 * mod_lock to avoid deadlock, since rcmop_register()
		 * may callback into the daemon and request mod_lock.
		 */
		mod->ref_count++;
		(void) mutex_unlock(&mod_lock);

		mod->modops->rcmop_register(mod->rcmhandle);

		/* Reacquire the lock before touching the list again */
		(void) mutex_lock(&mod_lock);
		mod->ref_count--;
		mod = mod->next;
	}
	(void) mutex_unlock(&mod_lock);
}
/*
 * Determine if a process is alive by probing its /proc entry.
 * Returns nonzero if the process exists.  pid 0 is treated as
 * always alive.
 */
int
proc_exist(pid_t pid)
{
	char path[64];
	const char *procfs = "/proc";
	struct stat sb;

	if (pid == (pid_t)0) {
		return (1);
	}

	/*
	 * pid_t is not guaranteed to be long; passing it raw to %ld is
	 * undefined behavior on platforms where the sizes differ, so
	 * cast explicitly.
	 */
	(void) snprintf(path, sizeof (path), "%s/%ld", procfs, (long)pid);
	return (stat(path, &sb) == 0);
}
/*
 * Cleanup client list: drop entries whose owning process is gone or
 * whose state is already RCM_STATE_REMOVE.
 *
 * N.B. This routine runs in a single-threaded environment only. It is only
 * called by the cleanup thread, which never runs in parallel with other
 * threads.
 */
static void
clean_client_list(client_t **listp)
{
	client_t *client = *listp;

	/*
	 * Cleanup notification clients for which pid no longer exists
	 */
	while (client) {
		if ((client->state != RCM_STATE_REMOVE) &&
		    proc_exist(client->pid)) {
			/* still alive: advance the link pointer */
			listp = &client->next;
			client = *listp;
			continue;
		}

		/*
		 * Destroy this client_t. rsrc_client_remove updates
		 * listp to point to the next client.
		 */
		rsrc_client_remove(client, listp);
		client = *listp;
	}
}
/*
 * rsrc_walk() callback: purge stale clients registered on this node.
 */
/*ARGSUSED*/
static int
clean_node(rsrc_node_t *node, void *arg)
{
	rcm_log_message(RCM_TRACE4, "clean_node(%s)\n", node->name);

	clean_client_list(&node->users);

	return (RN_WALK_CONTINUE);
}
/*
 * Walk the whole resource tree, deleting stale DR clients on every node.
 */
static void
clean_rsrc_tree()
{
	rcm_log_message(RCM_TRACE4,
	    "clean_rsrc_tree(): delete stale dr clients\n");

	rsrc_walk(rsrc_root, NULL, clean_node);
}
/*
 * Body of the database-cleanup thread: loop forever, sleeping on the
 * barrier condition until need_cleanup is raised, then purge stale DR
 * requests and resource-tree clients while other threads are quiesced.
 */
static void
db_clean()
{
	extern barrier_t barrier;
	extern void clean_dr_list();

	for (;;) {
		(void) mutex_lock(&rcm_req_lock);
		start_polling_thread();
		(void) mutex_unlock(&rcm_req_lock);

		/* Block until a cleanup is requested */
		(void) mutex_lock(&barrier.lock);
		while (need_cleanup == 0)
			(void) cond_wait(&barrier.cv, &barrier.lock);
		(void) mutex_unlock(&barrier.lock);

		/*
		 * Make sure all other threads are either blocked or exited.
		 */
		rcmd_set_state(RCMD_CLEANUP);

		need_cleanup = 0;

		/*
		 * clean dr_req_list
		 */
		clean_dr_list();

		/*
		 * clean resource tree
		 */
		clean_rsrc_tree();

		rcmd_set_state(RCMD_NORMAL);
	}
}
1745 void
1746 rcmd_db_clean()
1748 rcm_log_message(RCM_DEBUG,
1749 "rcm_db_clean(): launch thread to clean database\n");
1751 if (thr_create(NULL, 0, (void *(*)(void *))db_clean,
1752 NULL, THR_DETACHED, NULL) != 0) {
1753 rcm_log_message(RCM_WARNING,
1754 gettext("failed to create cleanup thread %s\n"),
1755 strerror(errno));
1759 /*ARGSUSED*/
1760 static int
1761 print_node(rsrc_node_t *node, void *arg)
1763 client_t *user;
1765 rcm_log_message(RCM_DEBUG, "rscname: %s, state = 0x%x\n", node->name);
1766 rcm_log_message(RCM_DEBUG, " users:\n");
1768 if ((user = node->users) == NULL) {
1769 rcm_log_message(RCM_DEBUG, " none\n");
1770 return (RN_WALK_CONTINUE);
1773 while (user) {
1774 rcm_log_message(RCM_DEBUG, " %s, %d, %s\n",
1775 user->module->name, user->pid, user->alias);
1776 user = user->next;
1778 return (RN_WALK_CONTINUE);
1781 static void
1782 rcmd_db_print()
1784 module_t *mod;
1786 rcm_log_message(RCM_DEBUG, "modules:\n");
1787 (void) mutex_lock(&mod_lock);
1788 mod = module_head;
1789 while (mod) {
1790 rcm_log_message(RCM_DEBUG, " %s\n", mod->name);
1791 mod = mod->next;
1793 (void) mutex_unlock(&mod_lock);
1795 rcm_log_message(RCM_DEBUG, "\nresource tree:\n");
1797 rsrc_walk(rsrc_root, NULL, print_node);
1799 rcm_log_message(RCM_DEBUG, "\n");
1803 * Allocate handle from calling into each RCM module
1805 static rcm_handle_t *
1806 rcm_handle_alloc(module_t *module)
1808 rcm_handle_t *hdl;
1810 hdl = s_malloc(sizeof (rcm_handle_t));
1812 hdl->modname = module->name;
1813 hdl->pid = 0;
1814 hdl->lrcm_ops = &rcm_ops; /* for callback into daemon directly */
1815 hdl->module = module;
1817 return (hdl);
/*
 * Free an rcm_handle_t obtained from rcm_handle_alloc()
 */
static void
rcm_handle_free(rcm_handle_t *handle)
{
	free(handle);
}
/*
 * malloc() wrapper that terminates the daemon on memory outage
 * instead of returning NULL.
 */
void *
s_malloc(size_t size)
{
	void *buf;

	if ((buf = malloc(size)) == NULL)
		rcmd_exit(ENOMEM);
	return (buf);
}
/*
 * calloc() wrapper that terminates the daemon on memory outage.
 */
void *
s_calloc(int n, size_t size)
{
	void *buf;

	if ((buf = calloc(n, size)) == NULL)
		rcmd_exit(ENOMEM);
	return (buf);
}
/*
 * realloc() wrapper that terminates the daemon on memory outage.
 */
void *
s_realloc(void *ptr, size_t size)
{
	void *new;

	if ((new = realloc(ptr, size)) == NULL)
		rcmd_exit(ENOMEM);
	return (new);
}
/*
 * strdup() wrapper that terminates the daemon on memory outage.
 */
char *
s_strdup(const char *str)
{
	char *buf;

	if ((buf = strdup(str)) == NULL)
		rcmd_exit(ENOMEM);
	return (buf);
}
/*
 * Convert a version 1 ops vector to current ops vector
 * Fields missing in version 1 are set to NULL.
 */
static struct rcm_mod_ops *
modops_from_v1(void *ops_v1)
{
	struct rcm_mod_ops *ops;

	/*
	 * s_calloc() zero-fills, so every member beyond the v1-sized
	 * prefix copied below is left NULL.
	 */
	ops = s_calloc(1, sizeof (struct rcm_mod_ops));
	bcopy(ops_v1, ops, sizeof (struct rcm_mod_ops_v1));
	return (ops);
}
1890 /* call a module's getinfo routine; detects v1 ops and adjusts the call */
1891 static int
1892 call_getinfo(struct rcm_mod_ops *ops, rcm_handle_t *hdl, char *alias, id_t pid,
1893 uint_t flag, char **info, char **error, nvlist_t *client_props,
1894 rcm_info_t **infop)
1896 int rval;
1897 struct rcm_mod_ops_v1 *v1_ops;
1899 if (ops->version == RCM_MOD_OPS_V1) {
1900 v1_ops = (struct rcm_mod_ops_v1 *)ops;
1901 rval = v1_ops->rcmop_get_info(hdl, alias, pid, flag, info,
1902 infop);
1903 if (rval != RCM_SUCCESS && *info != NULL)
1904 *error = strdup(*info);
1905 return (rval);
1906 } else {
1907 return (ops->rcmop_get_info(hdl, alias, pid, flag, info, error,
1908 client_props, infop));
/*
 * Initialize a queue head as an empty circular doubly-linked list.
 */
void
rcm_init_queue(rcm_queue_t *head)
{
	head->next = head->prev = head;
}
/*
 * Insert element at the head of the queue.
 */
void
rcm_enqueue_head(rcm_queue_t *head, rcm_queue_t *element)
{
	rcm_enqueue(head, element);
}
/*
 * Insert element at the tail of the queue (after the last element,
 * i.e. after head->prev in the circular list).
 */
void
rcm_enqueue_tail(rcm_queue_t *head, rcm_queue_t *element)
{
	rcm_enqueue(head->prev, element);
}
1930 void
1931 rcm_enqueue(rcm_queue_t *list_element, rcm_queue_t *element)
1933 element->next = list_element->next;
1934 element->prev = list_element;
1935 element->next->prev = element;
1936 list_element->next = element;
1939 rcm_queue_t *
1940 rcm_dequeue_head(rcm_queue_t *head)
1942 rcm_queue_t *element = head->next;
1943 rcm_dequeue(element);
1944 return (element);
1947 rcm_queue_t *
1948 rcm_dequeue_tail(rcm_queue_t *head)
1950 rcm_queue_t *element = head->prev;
1951 rcm_dequeue(element);
1952 return (element);
1955 void
1956 rcm_dequeue(rcm_queue_t *element)
1958 element->prev->next = element->next;
1959 element->next->prev = element->prev;
1960 element->next = element->prev = NULL;