usr/src/cmd/rcm_daemon/common/ibpart_rcm.c

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * This RCM module adds support to the RCM framework for IBPART links
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <synch.h>
#include <assert.h>
#include <strings.h>
#include "rcm_module.h"
#include <libintl.h>
#include <libdllink.h>
#include <libdlib.h>
#include <libdlpi.h>

/*
 * Definitions
 */

#ifndef lint
#define _(x)    gettext(x)
#else
#define _(x)    x
#endif

/* Some generic well-knowns and defaults used in this module */
#define RCM_LINK_PREFIX         "SUNW_datalink" /* RCM datalink name prefix */
#define RCM_LINK_RESOURCE_MAX   (13 + LINKID_STR_WIDTH)

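/*
 * Sizing note: 13 == strlen(RCM_LINK_PREFIX); LINKID_STR_WIDTH is assumed
 * here to cover the "/<linkid>" suffix and terminating NUL of resource
 * names such as "SUNW_datalink/5".
 */
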
/* IBPART link flags */
typedef enum {
        IBPART_OFFLINED         = 0x1,
        IBPART_CONSUMER_OFFLINED = 0x2,
        IBPART_STALE            = 0x4
} ibpart_flag_t;

/* link representation */
typedef struct dl_ibpart {
        struct dl_ibpart        *dlib_next;     /* next IBPART on this link */
        struct dl_ibpart        *dlib_prev;     /* prev IBPART on this link */
        datalink_id_t           dlib_ibpart_id;
        ibpart_flag_t           dlib_flags;     /* IBPART link flags */
} dl_ibpart_t;

/* IBPART Cache state flags */
typedef enum {
        CACHE_NODE_STALE        = 0x1,  /* stale cached data */
        CACHE_NODE_NEW          = 0x2,  /* new cached nodes */
        CACHE_NODE_OFFLINED     = 0x4   /* nodes offlined */
} cache_node_state_t;

/* Network Cache lookup options */
#define CACHE_NO_REFRESH        0x1     /* cache refresh not needed */
#define CACHE_REFRESH           0x2     /* refresh cache */

/* Cache element */
typedef struct link_cache {
        struct link_cache       *pc_next;       /* next cached resource */
        struct link_cache       *pc_prev;       /* prev cached resource */
        char                    *pc_resource;   /* resource name */
        datalink_id_t           pc_linkid;      /* linkid */
        dl_ibpart_t             *pc_ibpart;     /* IBPART list on this link */
        cache_node_state_t      pc_state;       /* cache state flags */
} link_cache_t;

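/*
 * Cache entries form a doubly-linked list bracketed by the cache_head and
 * cache_tail sentinels below, protected by cache_lock. Each entry tracks
 * one physical IB link and the list of IBPARTs created over it.
 */
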
/*
 * Global cache for network IBPARTs
 */
static link_cache_t     cache_head;
static link_cache_t     cache_tail;
static mutex_t          cache_lock;
static int              events_registered = 0;

static dladm_handle_t   dld_handle = NULL;

/*
 * RCM module interface prototypes
 */
static int              ibpart_register(rcm_handle_t *);
static int              ibpart_unregister(rcm_handle_t *);
static int              ibpart_get_info(rcm_handle_t *, char *, id_t, uint_t,
                            char **, char **, nvlist_t *, rcm_info_t **);
static int              ibpart_suspend(rcm_handle_t *, char *, id_t,
                            timespec_t *, uint_t, char **, rcm_info_t **);
static int              ibpart_resume(rcm_handle_t *, char *, id_t, uint_t,
                            char **, rcm_info_t **);
static int              ibpart_offline(rcm_handle_t *, char *, id_t, uint_t,
                            char **, rcm_info_t **);
static int              ibpart_undo_offline(rcm_handle_t *, char *, id_t,
                            uint_t, char **, rcm_info_t **);
static int              ibpart_remove(rcm_handle_t *, char *, id_t, uint_t,
                            char **, rcm_info_t **);
static int              ibpart_notify_event(rcm_handle_t *, char *, id_t,
                            uint_t, char **, nvlist_t *, rcm_info_t **);
static int              ibpart_configure(rcm_handle_t *, datalink_id_t);

/* Module private routines */
static void             cache_free();
static int              cache_update(rcm_handle_t *);
static void             cache_remove(link_cache_t *);
static void             node_free(link_cache_t *);
static void             cache_insert(link_cache_t *);
static link_cache_t     *cache_lookup(rcm_handle_t *, char *, char);
static int              ibpart_consumer_offline(rcm_handle_t *, link_cache_t *,
                            char **, uint_t, rcm_info_t **);
static void             ibpart_consumer_online(rcm_handle_t *, link_cache_t *,
                            char **, uint_t, rcm_info_t **);
static int              ibpart_offline_ibpart(link_cache_t *, uint32_t,
                            cache_node_state_t);
static void             ibpart_online_ibpart(link_cache_t *);
static char             *ibpart_usage(link_cache_t *);
static void             ibpart_log_err(datalink_id_t, char **, char *);
static int              ibpart_consumer_notify(rcm_handle_t *, datalink_id_t,
                            char **, uint_t, rcm_info_t **);

/* Module-Private data */
static struct rcm_mod_ops ibpart_ops =
{
        RCM_MOD_OPS_VERSION,
        ibpart_register,
        ibpart_unregister,
        ibpart_get_info,
        ibpart_suspend,
        ibpart_resume,
        ibpart_offline,
        ibpart_undo_offline,
        ibpart_remove,
        NULL,
        NULL,
        ibpart_notify_event
};

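/*
 * The two NULL slots above correspond to the capacity-change entry points
 * of struct rcm_mod_ops (request/notify), which this module does not
 * implement.
 */
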
/*
 * rcm_mod_init() - Update registrations, and return the ops structure.
 */
struct rcm_mod_ops *
rcm_mod_init(void)
{
        char errmsg[DLADM_STRSIZE];
        dladm_status_t status;

        rcm_log_message(RCM_TRACE1, "IBPART: mod_init\n");

        cache_head.pc_next = &cache_tail;
        cache_head.pc_prev = NULL;
        cache_tail.pc_prev = &cache_head;
        cache_tail.pc_next = NULL;
        (void) mutex_init(&cache_lock, 0, NULL);

        if ((status = dladm_open(&dld_handle)) != DLADM_STATUS_OK) {
                rcm_log_message(RCM_WARNING,
                    "IBPART: mod_init failed: cannot open datalink "
                    "handle: %s\n", dladm_status2str(status, errmsg));
                return (NULL);
        }

        /* Return the ops vectors */
        return (&ibpart_ops);
}

/*
 * rcm_mod_info() - Return a string describing this module.
 */
const char *
rcm_mod_info(void)
{
        rcm_log_message(RCM_TRACE1, "IBPART: mod_info\n");

        return ("IBPART module");
}

/*
 * rcm_mod_fini() - Destroy the network IBPART cache.
 */
int
rcm_mod_fini(void)
{
        rcm_log_message(RCM_TRACE1, "IBPART: mod_fini\n");

        /*
         * Note that ibpart_unregister() does not seem to be called anywhere,
         * therefore we free the cache nodes here. In theory we should call
         * rcm_unregister_interest() for each node before we free it, but the
         * framework does not provide the rcm_handle to allow us to do so.
         */
        cache_free();
        (void) mutex_destroy(&cache_lock);

        dladm_close(dld_handle);
        return (RCM_SUCCESS);
}

/*
 * ibpart_register() - Make sure the cache is properly sync'ed, and its
 *                     registrations are in order.
 */
static int
ibpart_register(rcm_handle_t *hd)
{
        rcm_log_message(RCM_TRACE1, "IBPART: register\n");

        if (cache_update(hd) < 0)
                return (RCM_FAILURE);

        /*
         * Need to register interest in all new resources
         * getting attached, so we get attach event notifications
         */
        if (!events_registered) {
                if (rcm_register_event(hd, RCM_RESOURCE_LINK_NEW, 0, NULL)
                    != RCM_SUCCESS) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to register %s\n"),
                            RCM_RESOURCE_LINK_NEW);
                        return (RCM_FAILURE);
                } else {
                        rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
                            RCM_RESOURCE_LINK_NEW);
                        events_registered++;
                }
        }

        return (RCM_SUCCESS);
}

/*
 * ibpart_unregister() - Walk the cache, unregistering all the networks.
 */
static int
ibpart_unregister(rcm_handle_t *hd)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE1, "IBPART: unregister\n");

        /* Walk the cache, unregistering everything */
        (void) mutex_lock(&cache_lock);
        node = cache_head.pc_next;
        while (node != &cache_tail) {
                if (rcm_unregister_interest(hd, node->pc_resource, 0)
                    != RCM_SUCCESS) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to unregister %s\n"),
                            node->pc_resource);
                        (void) mutex_unlock(&cache_lock);
                        return (RCM_FAILURE);
                }
                cache_remove(node);
                node_free(node);
                node = cache_head.pc_next;
        }
        (void) mutex_unlock(&cache_lock);

        /*
         * Unregister interest in all new resources
         */
        if (events_registered) {
                if (rcm_unregister_event(hd, RCM_RESOURCE_LINK_NEW, 0)
                    != RCM_SUCCESS) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to unregister %s\n"),
                            RCM_RESOURCE_LINK_NEW);
                        return (RCM_FAILURE);
                } else {
                        rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
                            RCM_RESOURCE_LINK_NEW);
                        events_registered--;
                }
        }

        return (RCM_SUCCESS);
}

/*
 * ibpart_offline() - Offline IBPARTs on a specific node.
 */
static int
ibpart_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE1, "IBPART: offline(%s)\n", rsrc);

        /* Lock the cache and lookup the resource */
        (void) mutex_lock(&cache_lock);
        node = cache_lookup(hd, rsrc, CACHE_REFRESH);
        if (node == NULL) {
                /*
                 * Should not happen because the resource is registered.
                 * node is NULL here, so log with an invalid linkid rather
                 * than dereferencing it.
                 */
                ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                    "unrecognized resource");
                (void) mutex_unlock(&cache_lock);
                return (RCM_SUCCESS);
        }

        /*
         * Inform consumers (IP interfaces) of associated IBPARTs to be
         * offlined
         */
        if (ibpart_consumer_offline(hd, node, errorp, flags, info) ==
            RCM_SUCCESS) {
                rcm_log_message(RCM_DEBUG,
                    "IBPART: consumers agreed on offline\n");
        } else {
                ibpart_log_err(node->pc_linkid, errorp,
                    "consumers failed to offline");
                (void) mutex_unlock(&cache_lock);
                return (RCM_FAILURE);
        }

        /* Check if it's a query */
        if (flags & RCM_QUERY) {
                rcm_log_message(RCM_TRACE1,
                    "IBPART: offline query succeeded(%s)\n", rsrc);
                (void) mutex_unlock(&cache_lock);
                return (RCM_SUCCESS);
        }

        if (ibpart_offline_ibpart(node, IBPART_OFFLINED,
            CACHE_NODE_OFFLINED) != RCM_SUCCESS) {
                ibpart_online_ibpart(node);
                ibpart_log_err(node->pc_linkid, errorp, "offline failed");
                (void) mutex_unlock(&cache_lock);
                return (RCM_FAILURE);
        }

        rcm_log_message(RCM_TRACE1, "IBPART: Offline succeeded(%s)\n", rsrc);
        (void) mutex_unlock(&cache_lock);
        return (RCM_SUCCESS);
}

/*
 * ibpart_undo_offline() - Undo offline of a previously offlined node.
 */
/*ARGSUSED*/
static int
ibpart_undo_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE1, "IBPART: online(%s)\n", rsrc);

        (void) mutex_lock(&cache_lock);
        node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
        if (node == NULL) {
                ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                    "no such link");
                (void) mutex_unlock(&cache_lock);
                errno = ENOENT;
                return (RCM_FAILURE);
        }

        /* Check if no attempt should be made to online the link here */
        if (!(node->pc_state & CACHE_NODE_OFFLINED)) {
                ibpart_log_err(node->pc_linkid, errorp, "link not offlined");
                (void) mutex_unlock(&cache_lock);
                errno = ENOTSUP;
                return (RCM_SUCCESS);
        }

        ibpart_online_ibpart(node);

        /*
         * Inform IP interfaces on associated IBPARTs to be onlined
         */
        ibpart_consumer_online(hd, node, errorp, flags, info);

        node->pc_state &= ~CACHE_NODE_OFFLINED;
        rcm_log_message(RCM_TRACE1, "IBPART: online succeeded(%s)\n", rsrc);
        (void) mutex_unlock(&cache_lock);
        return (RCM_SUCCESS);
}

static void
ibpart_online_ibpart(link_cache_t *node)
{
        dl_ibpart_t *ibpart;
        dladm_status_t status;
        char errmsg[DLADM_STRSIZE];

        /*
         * Try to bring on all offlined IBPARTs
         */
        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                if (!(ibpart->dlib_flags & IBPART_OFFLINED))
                        continue;

                rcm_log_message(RCM_TRACE1, "IBPART: online DLID %d\n",
                    ibpart->dlib_ibpart_id);
                if ((status = dladm_part_up(dld_handle,
                    ibpart->dlib_ibpart_id, 0)) != DLADM_STATUS_OK) {
                        /*
                         * Print a warning message and continue to online
                         * other IBPARTs.
                         */
                        rcm_log_message(RCM_WARNING,
                            _("IBPART: IBPART online failed (%u): %s\n"),
                            ibpart->dlib_ibpart_id,
                            dladm_status2str(status, errmsg));
                } else {
                        ibpart->dlib_flags &= ~IBPART_OFFLINED;
                }
        }
}

static int
ibpart_offline_ibpart(link_cache_t *node, uint32_t flags,
    cache_node_state_t state)
{
        dl_ibpart_t *ibpart;
        dladm_status_t status;
        char errmsg[DLADM_STRSIZE];

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_offline_ibpart "
            "(%s %u %u)\n", node->pc_resource, flags, state);

        /*
         * Try to delete all explicitly created IBPARTs
         */
        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                rcm_log_message(RCM_TRACE1, "IBPART: offline DLID %d\n",
                    ibpart->dlib_ibpart_id);
                if ((status = dladm_part_delete(dld_handle,
                    ibpart->dlib_ibpart_id, DLADM_OPT_ACTIVE)) !=
                    DLADM_STATUS_OK) {
                        rcm_log_message(RCM_WARNING,
                            _("IBPART: IBPART offline failed (%u): %s\n"),
                            ibpart->dlib_ibpart_id,
                            dladm_status2str(status, errmsg));
                        return (RCM_FAILURE);
                } else {
                        rcm_log_message(RCM_TRACE1,
                            "IBPART: IBPART offline succeeded(%u)\n",
                            ibpart->dlib_ibpart_id);
                        ibpart->dlib_flags |= flags;
                }
        }

        node->pc_state |= state;
        return (RCM_SUCCESS);
}

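/*
 * Note the asymmetry with ibpart_online_ibpart() above: offlining deletes
 * the active partition link (dladm_part_delete() with DLADM_OPT_ACTIVE),
 * while its persistent configuration remains so that dladm_part_up() can
 * re-create it when the link is onlined again.
 */
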
/*
 * ibpart_get_info() - Gather usage information for this resource.
 */
/*ARGSUSED*/
static int
ibpart_get_info(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **usagep, char **errorp, nvlist_t *props, rcm_info_t **info)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s)\n", rsrc);

        (void) mutex_lock(&cache_lock);
        node = cache_lookup(hd, rsrc, CACHE_REFRESH);
        if (node == NULL) {
                rcm_log_message(RCM_INFO,
                    _("IBPART: get_info(%s) unrecognized resource\n"), rsrc);
                (void) mutex_unlock(&cache_lock);
                errno = ENOENT;
                return (RCM_FAILURE);
        }

        *usagep = ibpart_usage(node);
        (void) mutex_unlock(&cache_lock);
        if (*usagep == NULL) {
                /*
                 * Most likely malloc failure. The lock was already dropped
                 * above, so it must not be unlocked again here.
                 */
                rcm_log_message(RCM_ERROR,
                    _("IBPART: get_info(%s) malloc failure\n"), rsrc);
                errno = ENOMEM;
                return (RCM_FAILURE);
        }

        /* Set client/role properties */
        (void) nvlist_add_string(props, RCM_CLIENT_NAME, "IBPART");

        rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s) info = %s\n",
            rsrc, *usagep);
        return (RCM_SUCCESS);
}

/*
 * ibpart_suspend() - Nothing to do, always okay
 */
/*ARGSUSED*/
static int
ibpart_suspend(rcm_handle_t *hd, char *rsrc, id_t id, timespec_t *interval,
    uint_t flags, char **errorp, rcm_info_t **info)
{
        rcm_log_message(RCM_TRACE1, "IBPART: suspend(%s)\n", rsrc);
        return (RCM_SUCCESS);
}

/*
 * ibpart_resume() - Nothing to do, always okay
 */
/*ARGSUSED*/
static int
ibpart_resume(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
        rcm_log_message(RCM_TRACE1, "IBPART: resume(%s)\n", rsrc);
        return (RCM_SUCCESS);
}

/*
 * ibpart_consumer_remove()
 *
 *      Notify IBPART consumers to remove cache.
 */
static int
ibpart_consumer_remove(rcm_handle_t *hd, link_cache_t *node, uint_t flags,
    rcm_info_t **info)
{
        dl_ibpart_t *ibpart = NULL;
        char rsrc[RCM_LINK_RESOURCE_MAX];
        int ret = RCM_SUCCESS;

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove (%s)\n",
            node->pc_resource);

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {

                /*
                 * This will only be called when the offline operation
                 * succeeds, so the IBPART consumers must have been offlined
                 * at this point.
                 */
                assert(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED);

                (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
                    RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

                ret = rcm_notify_remove(hd, rsrc, flags, info);
                if (ret != RCM_SUCCESS) {
                        rcm_log_message(RCM_WARNING,
                            _("IBPART: notify remove failed (%s)\n"), rsrc);
                        break;
                }
        }

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove done\n");
        return (ret);
}

/*
 * ibpart_remove() - remove a resource from cache
 */
/*ARGSUSED*/
static int
ibpart_remove(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
        link_cache_t *node;
        int rv;

        rcm_log_message(RCM_TRACE1, "IBPART: remove(%s)\n", rsrc);

        (void) mutex_lock(&cache_lock);
        node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
        if (node == NULL) {
                rcm_log_message(RCM_INFO,
                    _("IBPART: remove(%s) unrecognized resource\n"), rsrc);
                (void) mutex_unlock(&cache_lock);
                errno = ENOENT;
                return (RCM_FAILURE);
        }

        /* remove the cached entry for the resource */
        cache_remove(node);
        (void) mutex_unlock(&cache_lock);

        rv = ibpart_consumer_remove(hd, node, flags, info);
        node_free(node);
        return (rv);
}

/*
 * ibpart_notify_event - Project private implementation to receive new
 *                       resource events. It intercepts all new resource
 *                       events. If the new resource is a network resource,
 *                       pass up a notify for it too. The new resource need
 *                       not be cached here, since caching is redone at
 *                       register time.
 */
/*ARGSUSED*/
static int
ibpart_notify_event(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, nvlist_t *nvl, rcm_info_t **info)
{
        nvpair_t        *nvp = NULL;
        datalink_id_t   linkid;
        uint64_t        id64;
        int             rv = RCM_SUCCESS;

        rcm_log_message(RCM_TRACE1, "IBPART: notify_event(%s)\n", rsrc);

        if (strcmp(rsrc, RCM_RESOURCE_LINK_NEW) != 0) {
                ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                    "unrecognized event");
                errno = EINVAL;
                return (RCM_FAILURE);
        }

        /* Update cache to reflect latest IBPARTs */
        if (cache_update(hd) < 0) {
                ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                    "private Cache update failed");
                return (RCM_FAILURE);
        }

        /*
         * Try best to recover all configuration.
         */
        rcm_log_message(RCM_DEBUG, "IBPART: process_nvlist\n");
        while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
                if (strcmp(nvpair_name(nvp), RCM_NV_LINKID) != 0)
                        continue;

                if (nvpair_value_uint64(nvp, &id64) != 0) {
                        ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                            "cannot get linkid");
                        rv = RCM_FAILURE;
                        continue;
                }

                linkid = (datalink_id_t)id64;
                if (ibpart_configure(hd, linkid) != 0) {
                        ibpart_log_err(linkid, errorp, "configuring failed");
                        rv = RCM_FAILURE;
                        continue;
                }

                /* Notify all IBPART consumers */
                if (ibpart_consumer_notify(hd, linkid, errorp, flags,
                    info) != 0) {
                        ibpart_log_err(linkid, errorp,
                            "consumer notify failed");
                        rv = RCM_FAILURE;
                }
        }

        rcm_log_message(RCM_TRACE1,
            "IBPART: notify_event: link configuration complete\n");
        return (rv);
}

/*
 * ibpart_usage - Determine the usage of a link.
 *                The returned buffer is owned by caller, and the caller
 *                must free it up when done.
 */
static char *
ibpart_usage(link_cache_t *node)
{
        dl_ibpart_t *ibpart;
        int nibpart;
        char *buf;
        const char *fmt;
        char *sep;
        char errmsg[DLADM_STRSIZE];
        char name[MAXLINKNAMELEN];
        dladm_status_t status;
        size_t bufsz;

        rcm_log_message(RCM_TRACE2, "IBPART: usage(%s)\n", node->pc_resource);

        assert(MUTEX_HELD(&cache_lock));
        if ((status = dladm_datalink_id2info(dld_handle, node->pc_linkid, NULL,
            NULL, NULL, name, sizeof (name))) != DLADM_STATUS_OK) {
                rcm_log_message(RCM_ERROR,
                    _("IBPART: usage(%s) get link name failure(%s)\n"),
                    node->pc_resource, dladm_status2str(status, errmsg));
                return (NULL);
        }

        if (node->pc_state & CACHE_NODE_OFFLINED)
                fmt = _("%1$s offlined");
        else
                fmt = _("%1$s IBPART: ");

        /* TRANSLATION_NOTE: separator used between IBPART linkids */
        sep = _(", ");

        nibpart = 0;
        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next)
                nibpart++;

        /* space for IBPARTs and separators, plus message */
        bufsz = nibpart * (MAXLINKNAMELEN + strlen(sep)) +
            strlen(fmt) + MAXLINKNAMELEN + 1;
        if ((buf = malloc(bufsz)) == NULL) {
                rcm_log_message(RCM_ERROR,
                    _("IBPART: usage(%s) malloc failure(%s)\n"),
                    node->pc_resource, strerror(errno));
                return (NULL);
        }
        (void) snprintf(buf, bufsz, fmt, name);

        if (node->pc_state & CACHE_NODE_OFFLINED) {
                /* Nothing else to do */
                rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
                    node->pc_resource, buf);
                return (buf);
        }

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                rcm_log_message(RCM_DEBUG, "IBPART: %u\n",
                    ibpart->dlib_ibpart_id);

                if ((status = dladm_datalink_id2info(dld_handle,
                    ibpart->dlib_ibpart_id, NULL, NULL, NULL, name,
                    sizeof (name))) != DLADM_STATUS_OK) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: usage(%s) get ibpart %u name "
                            "failure(%s)\n"), node->pc_resource,
                            ibpart->dlib_ibpart_id,
                            dladm_status2str(status, errmsg));
                        free(buf);
                        return (NULL);
                }

                (void) strlcat(buf, name, bufsz);
                if (ibpart->dlib_next != NULL)
                        (void) strlcat(buf, sep, bufsz);
        }

        rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
            node->pc_resource, buf);

        return (buf);
}

/*
 * Cache management routines. All cache management functions should be
 * called with cache_lock held.
 */

/*
 * cache_lookup() - Get a cache node for a resource.
 *                  Call with cache lock held.
 *
 * This ensures that the cache is consistent with the system state and
 * returns a pointer to the cache element corresponding to the resource.
 */
static link_cache_t *
cache_lookup(rcm_handle_t *hd, char *rsrc, char options)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE2, "IBPART: cache lookup(%s)\n", rsrc);

        assert(MUTEX_HELD(&cache_lock));
        if (options & CACHE_REFRESH) {
                /* drop lock since update locks cache again */
                (void) mutex_unlock(&cache_lock);
                (void) cache_update(hd);
                (void) mutex_lock(&cache_lock);
        }

        node = cache_head.pc_next;
        for (; node != &cache_tail; node = node->pc_next) {
                if (strcmp(rsrc, node->pc_resource) == 0) {
                        rcm_log_message(RCM_TRACE2,
                            "IBPART: cache lookup succeeded(%s)\n", rsrc);
                        return (node);
                }
        }
        return (NULL);
}

/*
 * node_free - Free a node from the cache
 */
static void
node_free(link_cache_t *node)
{
        dl_ibpart_t *ibpart, *next;

        if (node != NULL) {
                free(node->pc_resource);

                /* free the IBPART list */
                for (ibpart = node->pc_ibpart; ibpart != NULL;
                    ibpart = next) {
                        next = ibpart->dlib_next;
                        free(ibpart);
                }
                free(node);
        }
}

/*
 * cache_insert - Insert a resource node in cache
 */
static void
cache_insert(link_cache_t *node)
{
        assert(MUTEX_HELD(&cache_lock));

        /* insert at the head for best performance */
        node->pc_next = cache_head.pc_next;
        node->pc_prev = &cache_head;

        node->pc_next->pc_prev = node;
        node->pc_prev->pc_next = node;
}

/*
 * cache_remove() - Remove a resource node from cache.
 */
static void
cache_remove(link_cache_t *node)
{
        assert(MUTEX_HELD(&cache_lock));
        node->pc_next->pc_prev = node->pc_prev;
        node->pc_prev->pc_next = node->pc_next;
        node->pc_next = NULL;
        node->pc_prev = NULL;
}

typedef struct ibpart_update_arg_s {
        rcm_handle_t    *hd;
        int             retval;
} ibpart_update_arg_t;

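/*
 * dladm_walk_datalink_id() passes only an opaque pointer to its callback,
 * so this struct carries the rcm_handle into ibpart_update() and the walk
 * result back out.
 */
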
/*
 * ibpart_update() - Update physical interface properties
 */
static int
ibpart_update(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
{
        ibpart_update_arg_t *ibpart_update_argp = arg;
        rcm_handle_t *hd = ibpart_update_argp->hd;
        link_cache_t *node;
        dl_ibpart_t *ibpart;
        char *rsrc;
        dladm_ib_attr_t ibpart_attr;
        dladm_status_t status;
        char errmsg[DLADM_STRSIZE];
        boolean_t newnode = B_FALSE;
        int ret = -1;

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update(%u)\n", ibpartid);

        assert(MUTEX_HELD(&cache_lock));
        status = dladm_part_info(handle, ibpartid, &ibpart_attr,
            DLADM_OPT_ACTIVE);
        if (status != DLADM_STATUS_OK) {
                rcm_log_message(RCM_TRACE1,
                    "IBPART: ibpart_update() cannot get ibpart information "
                    "for %u(%s)\n", ibpartid,
                    dladm_status2str(status, errmsg));
                return (DLADM_WALK_CONTINUE);
        }

        if (ibpart_attr.dia_physlinkid == DATALINK_INVALID_LINKID) {
                /*
                 * Skip the IB port nodes.
                 */
                rcm_log_message(RCM_TRACE1,
                    "IBPART: ibpart_update(): skip the PORT nodes %u\n",
                    ibpartid);
                return (DLADM_WALK_CONTINUE);
        }

        rsrc = malloc(RCM_LINK_RESOURCE_MAX);
        if (rsrc == NULL) {
                rcm_log_message(RCM_ERROR, _("IBPART: malloc error(%s): %u\n"),
                    strerror(errno), ibpartid);
                goto done;
        }

        (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
            RCM_LINK_PREFIX, ibpart_attr.dia_physlinkid);

        node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
        if (node != NULL) {
                rcm_log_message(RCM_DEBUG,
                    "IBPART: %s already registered (ibpartid:%d)\n",
                    rsrc, ibpart_attr.dia_partlinkid);
                free(rsrc);
        } else {
                rcm_log_message(RCM_DEBUG,
                    "IBPART: %s is a new resource (ibpartid:%d)\n",
                    rsrc, ibpart_attr.dia_partlinkid);
                if ((node = calloc(1, sizeof (link_cache_t))) == NULL) {
                        free(rsrc);
                        rcm_log_message(RCM_ERROR, _("IBPART: calloc: %s\n"),
                            strerror(errno));
                        goto done;
                }

                node->pc_resource = rsrc;
                node->pc_ibpart = NULL;
                node->pc_linkid = ibpart_attr.dia_physlinkid;
                node->pc_state |= CACHE_NODE_NEW;
                newnode = B_TRUE;
        }

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                if (ibpart->dlib_ibpart_id == ibpartid) {
                        ibpart->dlib_flags &= ~IBPART_STALE;
                        break;
                }
        }

        if (ibpart == NULL) {
                if ((ibpart = calloc(1, sizeof (dl_ibpart_t))) == NULL) {
                        rcm_log_message(RCM_ERROR, _("IBPART: malloc: %s\n"),
                            strerror(errno));
                        if (newnode) {
                                free(rsrc);
                                free(node);
                        }
                        goto done;
                }
                ibpart->dlib_ibpart_id = ibpartid;
                ibpart->dlib_next = node->pc_ibpart;
                ibpart->dlib_prev = NULL;
                if (node->pc_ibpart != NULL)
                        node->pc_ibpart->dlib_prev = ibpart;
                node->pc_ibpart = ibpart;
        }

        node->pc_state &= ~CACHE_NODE_STALE;

        if (newnode)
                cache_insert(node);

        rcm_log_message(RCM_TRACE3, "IBPART: ibpart_update: succeeded(%u)\n",
            ibpartid);
        ret = 0;
done:
        ibpart_update_argp->retval = ret;
        return (ret == 0 ? DLADM_WALK_CONTINUE : DLADM_WALK_TERMINATE);
}

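/*
 * ibpart_update() returns DLADM_WALK_TERMINATE only for fatal (allocation)
 * failures; per-link lookup failures above just skip that link and continue
 * the walk.
 */
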
/*
 * ibpart_update_all() - Determine all IBPART links in the system
 */
static int
ibpart_update_all(rcm_handle_t *hd)
{
        ibpart_update_arg_t arg = {NULL, 0};

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update_all\n");

        assert(MUTEX_HELD(&cache_lock));
        arg.hd = hd;
        (void) dladm_walk_datalink_id(ibpart_update, dld_handle, &arg,
            DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_ACTIVE);
        return (arg.retval);
}

/*
 * cache_update() - Update cache with latest interface info
 */
static int
cache_update(rcm_handle_t *hd)
{
        link_cache_t *node, *nnode;
        dl_ibpart_t *ibpart;
        int rv;

        rcm_log_message(RCM_TRACE2, "IBPART: cache_update\n");

        (void) mutex_lock(&cache_lock);

        /* first we walk the entire cache, marking each entry stale */
        node = cache_head.pc_next;
        for (; node != &cache_tail; node = node->pc_next) {
                node->pc_state |= CACHE_NODE_STALE;
                for (ibpart = node->pc_ibpart; ibpart != NULL;
                    ibpart = ibpart->dlib_next)
                        ibpart->dlib_flags |= IBPART_STALE;
        }

        rv = ibpart_update_all(hd);

        /*
         * Continue to delete all stale nodes from the cache even if
         * ibpart_update_all() failed. Unregister links that are not
         * offlined and still in the cache.
         */
        for (node = cache_head.pc_next; node != &cache_tail; node = nnode) {
                dl_ibpart_t *ibpart, *next;

                for (ibpart = node->pc_ibpart; ibpart != NULL;
                    ibpart = next) {
                        next = ibpart->dlib_next;

                        /* clear stale IBPARTs */
                        if (ibpart->dlib_flags & IBPART_STALE) {
                                if (ibpart->dlib_prev != NULL)
                                        ibpart->dlib_prev->dlib_next = next;
                                else
                                        node->pc_ibpart = next;

                                if (next != NULL)
                                        next->dlib_prev = ibpart->dlib_prev;
                                free(ibpart);
                        }
                }

                nnode = node->pc_next;
                if (node->pc_state & CACHE_NODE_STALE) {
                        (void) rcm_unregister_interest(hd, node->pc_resource,
                            0);
                        rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
                            node->pc_resource);
                        assert(node->pc_ibpart == NULL);
                        cache_remove(node);
                        node_free(node);
                        continue;
                }

                if (!(node->pc_state & CACHE_NODE_NEW))
                        continue;

                if (rcm_register_interest(hd, node->pc_resource, 0, NULL) !=
                    RCM_SUCCESS) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to register %s\n"),
                            node->pc_resource);
                        rv = -1;
                } else {
                        rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
                            node->pc_resource);
                        node->pc_state &= ~CACHE_NODE_NEW;
                }
        }

        (void) mutex_unlock(&cache_lock);
        return (rv);
}

/*
 * cache_free() - Empty the cache
 */
static void
cache_free()
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE2, "IBPART: cache_free\n");

        (void) mutex_lock(&cache_lock);
        node = cache_head.pc_next;
        while (node != &cache_tail) {
                cache_remove(node);
                node_free(node);
                node = cache_head.pc_next;
        }
        (void) mutex_unlock(&cache_lock);
}

/*
 * ibpart_log_err() - RCM error log wrapper
 */
static void
ibpart_log_err(datalink_id_t linkid, char **errorp, char *errmsg)
{
        char link[MAXLINKNAMELEN];
        char errstr[DLADM_STRSIZE];
        dladm_status_t status;
        int len;
        const char *errfmt;
        char *error;

        link[0] = '\0';
        if (linkid != DATALINK_INVALID_LINKID) {
                char rsrc[RCM_LINK_RESOURCE_MAX];

                (void) snprintf(rsrc, sizeof (rsrc), "%s/%u",
                    RCM_LINK_PREFIX, linkid);

                rcm_log_message(RCM_ERROR, _("IBPART: %s(%s)\n"), errmsg,
                    rsrc);
                if ((status = dladm_datalink_id2info(dld_handle, linkid, NULL,
                    NULL, NULL, link, sizeof (link))) != DLADM_STATUS_OK) {
                        rcm_log_message(RCM_WARNING,
                            _("IBPART: cannot get link name for (%s) %s\n"),
                            rsrc, dladm_status2str(status, errstr));
                }
        } else {
                rcm_log_message(RCM_ERROR, _("IBPART: %s\n"), errmsg);
        }

        errfmt = strlen(link) > 0 ? _("IBPART: %s(%s)") : _("IBPART: %s");
        len = strlen(errfmt) + strlen(errmsg) + MAXLINKNAMELEN + 1;
        if ((error = malloc(len)) != NULL) {
                if (strlen(link) > 0)
                        (void) snprintf(error, len, errfmt, errmsg, link);
                else
                        (void) snprintf(error, len, errfmt, errmsg);
        }

        if (errorp != NULL)
                *errorp = error;
}

/*
 * ibpart_consumer_online()
 *
 *      Notify online to IBPART consumers.
 */
/* ARGSUSED */
static void
ibpart_consumer_online(rcm_handle_t *hd, link_cache_t *node, char **errorp,
    uint_t flags, rcm_info_t **info)
{
        dl_ibpart_t *ibpart;
        char rsrc[RCM_LINK_RESOURCE_MAX];

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online (%s)\n",
            node->pc_resource);

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                if (!(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED))
                        continue;

                (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
                    RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

                if (rcm_notify_online(hd, rsrc, flags, info) == RCM_SUCCESS)
                        ibpart->dlib_flags &= ~IBPART_CONSUMER_OFFLINED;
        }

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online done\n");
}

/*
 * ibpart_consumer_offline()
 *
 *      Offline IBPART consumers.
 */
static int
ibpart_consumer_offline(rcm_handle_t *hd, link_cache_t *node, char **errorp,
    uint_t flags, rcm_info_t **info)
{
        dl_ibpart_t *ibpart;
        char rsrc[RCM_LINK_RESOURCE_MAX];
        int ret = RCM_SUCCESS;

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline (%s)\n",
            node->pc_resource);

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
                    RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

                ret = rcm_request_offline(hd, rsrc, flags, info);
                if (ret != RCM_SUCCESS)
                        break;

                ibpart->dlib_flags |= IBPART_CONSUMER_OFFLINED;
        }

        if (ibpart != NULL)
                ibpart_consumer_online(hd, node, errorp, flags, info);

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline done\n");
        return (ret);
}

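/*
 * If any consumer rejects the offline request, the loop above exits early
 * with ibpart != NULL, and the consumers that had already agreed are
 * rolled back online.
 */
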
/*
 * Send RCM_RESOURCE_LINK_NEW events to other modules about new IBPARTs.
 * Return 0 on success, -1 on failure.
 */
static int
ibpart_notify_new_ibpart(rcm_handle_t *hd, char *rsrc)
{
        link_cache_t *node;
        dl_ibpart_t *ibpart;
        nvlist_t *nvl = NULL;
        uint64_t id;
        int ret = -1;

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart (%s)\n",
            rsrc);

        (void) mutex_lock(&cache_lock);
        if ((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) == NULL) {
                (void) mutex_unlock(&cache_lock);
                return (0);
        }

        if (nvlist_alloc(&nvl, 0, 0) != 0) {
                (void) mutex_unlock(&cache_lock);
                rcm_log_message(RCM_WARNING,
                    _("IBPART: failed to allocate nvlist\n"));
                goto done;
        }

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart "
                    "add (%u)\n", ibpart->dlib_ibpart_id);

                id = ibpart->dlib_ibpart_id;
                if (nvlist_add_uint64(nvl, RCM_NV_LINKID, id) != 0) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to construct nvlist\n"));
                        (void) mutex_unlock(&cache_lock);
                        goto done;
                }
        }
        (void) mutex_unlock(&cache_lock);

        if (rcm_notify_event(hd, RCM_RESOURCE_LINK_NEW, 0, nvl, NULL) !=
            RCM_SUCCESS) {
                rcm_log_message(RCM_ERROR,
                    _("IBPART: failed to notify %s event for %s\n"),
                    RCM_RESOURCE_LINK_NEW, node->pc_resource);
                goto done;
        }

        ret = 0;
done:
        nvlist_free(nvl);
        return (ret);
}

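/*
 * nvlist_free() accepts a NULL pointer, so the single "done" label can
 * free the nvlist on the failure paths as well as on success.
 */
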
/*
 * ibpart_consumer_notify() - Notify consumers of IBPARTs coming back online.
 */
static int
ibpart_consumer_notify(rcm_handle_t *hd, datalink_id_t linkid, char **errorp,
    uint_t flags, rcm_info_t **info)
{
        char rsrc[RCM_LINK_RESOURCE_MAX];
        link_cache_t *node;

        /* Check for the interface in the cache */
        (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u", RCM_LINK_PREFIX,
            linkid);

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify(%s)\n",
            rsrc);

        /*
         * Inform IP consumers of the new link.
         */
        if (ibpart_notify_new_ibpart(hd, rsrc) != 0) {
                (void) mutex_lock(&cache_lock);
                if ((node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH))
                    != NULL) {
                        (void) ibpart_offline_ibpart(node, IBPART_STALE,
                            CACHE_NODE_STALE);
                }
                (void) mutex_unlock(&cache_lock);
                rcm_log_message(RCM_TRACE2,
                    "IBPART: ibpart_notify_new_ibpart failed(%s)\n", rsrc);
                return (-1);
        }

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify "
            "succeeded\n");
        return (0);
}

typedef struct ibpart_up_arg_s {
        datalink_id_t   linkid;
        int             retval;
} ibpart_up_arg_t;

static int
ibpart_up(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
{
        ibpart_up_arg_t *ibpart_up_argp = arg;
        dladm_status_t status;
        dladm_ib_attr_t ibpart_attr;
        char errmsg[DLADM_STRSIZE];

        status = dladm_part_info(handle, ibpartid, &ibpart_attr,
            DLADM_OPT_PERSIST);
        if (status != DLADM_STATUS_OK) {
                rcm_log_message(RCM_TRACE1,
                    "IBPART: ibpart_up(): cannot get information for IBPART "
                    "%u (%s)\n", ibpartid, dladm_status2str(status, errmsg));
                return (DLADM_WALK_CONTINUE);
        }

        if (ibpart_attr.dia_physlinkid != ibpart_up_argp->linkid)
                return (DLADM_WALK_CONTINUE);

        rcm_log_message(RCM_TRACE3, "IBPART: ibpart_up(%u)\n", ibpartid);
        if ((status = dladm_part_up(handle, ibpartid, 0)) == DLADM_STATUS_OK)
                return (DLADM_WALK_CONTINUE);

        /*
         * Log a warning message and continue to bring up other IBPARTs.
         */
        rcm_log_message(RCM_WARNING,
            _("IBPART: IBPART up failed (%u): %s\n"),
            ibpartid, dladm_status2str(status, errmsg));

        ibpart_up_argp->retval = -1;
        return (DLADM_WALK_CONTINUE);
}

/*
 * ibpart_configure() - Configure IBPARTs over a physical link after it
 *                      attaches.
 */
static int
ibpart_configure(rcm_handle_t *hd, datalink_id_t linkid)
{
        char rsrc[RCM_LINK_RESOURCE_MAX];
        link_cache_t *node;
        ibpart_up_arg_t arg = {DATALINK_INVALID_LINKID, 0};

        /* Check for the IBPARTs in the cache */
        (void) snprintf(rsrc, sizeof (rsrc), "%s/%u", RCM_LINK_PREFIX, linkid);

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_configure(%s)\n", rsrc);

        /* Check if the link is new or was previously offlined */
        (void) mutex_lock(&cache_lock);
        if (((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) != NULL) &&
            (!(node->pc_state & CACHE_NODE_OFFLINED))) {
                rcm_log_message(RCM_TRACE2,
                    "IBPART: Skipping configured interface(%s)\n", rsrc);
                (void) mutex_unlock(&cache_lock);
                return (0);
        }
        (void) mutex_unlock(&cache_lock);

        arg.linkid = linkid;
        (void) dladm_walk_datalink_id(ibpart_up, dld_handle, &arg,
            DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_PERSIST);

        if (arg.retval == 0) {
                rcm_log_message(RCM_TRACE2,
                    "IBPART: ibpart_configure succeeded(%s)\n", rsrc);
        }
        return (arg.retval);
}