/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * This RCM module adds support to the RCM framework for IBPART links.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <errno.h>
#include <assert.h>
#include <synch.h>
#include <libintl.h>
#include <sys/types.h>
#include "rcm_module.h"
#include <libdllink.h>
#include <libdlib.h>

/*
 * Definitions
 */
#define _(x)    gettext(x)
/* Some generic well-knowns and defaults used in this module */
#define RCM_LINK_PREFIX         "SUNW_datalink"  /* RCM datalink name prefix */
#define RCM_LINK_RESOURCE_MAX   (13 + LINKID_STR_WIDTH)
/* IBPART link flags */
typedef enum {
    IBPART_OFFLINED             = 0x1,
    IBPART_CONSUMER_OFFLINED    = 0x2,
    IBPART_STALE                = 0x4
} ibpart_flag_t;
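/*
 * Flag lifecycle: IBPART_OFFLINED and IBPART_CONSUMER_OFFLINED are set while
 * a link is held offline; IBPART_STALE marks entries at the start of a
 * cache_update() pass so that parts which have disappeared can be pruned.
 */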
/* link representation */
typedef struct dl_ibpart {
    struct dl_ibpart    *dlib_next;     /* next IBPART on this link */
    struct dl_ibpart    *dlib_prev;     /* prev IBPART on this link */
    datalink_id_t       dlib_ibpart_id;
    ibpart_flag_t       dlib_flags;     /* IBPART link flags */
} dl_ibpart_t;
/* IBPART Cache state flags */
typedef enum {
    CACHE_NODE_STALE    = 0x1,  /* stale cached data */
    CACHE_NODE_NEW      = 0x2,  /* new cached nodes */
    CACHE_NODE_OFFLINED = 0x4   /* nodes offlined */
} cache_node_state_t;
/* Network Cache lookup options */
#define CACHE_NO_REFRESH    0x1     /* cache refresh not needed */
#define CACHE_REFRESH       0x2     /* refresh cache */
/* Cache element */
typedef struct link_cache {
    struct link_cache   *pc_next;       /* next cached resource */
    struct link_cache   *pc_prev;       /* prev cached resource */
    char                *pc_resource;   /* resource name */
    datalink_id_t       pc_linkid;      /* linkid */
    dl_ibpart_t         *pc_ibpart;     /* IBPART list on this link */
    cache_node_state_t  pc_state;       /* cache state flags */
} link_cache_t;
/*
 * Global cache for network IBPARTs
 */
static link_cache_t     cache_head;
static link_cache_t     cache_tail;
static mutex_t          cache_lock;
static int              events_registered = 0;

static dladm_handle_t   dld_handle = NULL;
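/*
 * cache_head and cache_tail are sentinel nodes, so list walks and insertions
 * need no NULL checks at either end; every traversal or update of the list
 * must be done with cache_lock held.
 */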
/*
 * RCM module interface prototypes
 */
static int      ibpart_register(rcm_handle_t *);
static int      ibpart_unregister(rcm_handle_t *);
static int      ibpart_get_info(rcm_handle_t *, char *, id_t, uint_t,
                    char **, char **, nvlist_t *, rcm_info_t **);
static int      ibpart_suspend(rcm_handle_t *, char *, id_t,
                    timespec_t *, uint_t, char **, rcm_info_t **);
static int      ibpart_resume(rcm_handle_t *, char *, id_t, uint_t,
                    char **, rcm_info_t **);
static int      ibpart_offline(rcm_handle_t *, char *, id_t, uint_t,
                    char **, rcm_info_t **);
static int      ibpart_undo_offline(rcm_handle_t *, char *, id_t,
                    uint_t, char **, rcm_info_t **);
static int      ibpart_remove(rcm_handle_t *, char *, id_t, uint_t,
                    char **, rcm_info_t **);
static int      ibpart_notify_event(rcm_handle_t *, char *, id_t,
                    uint_t, char **, nvlist_t *, rcm_info_t **);
static int      ibpart_configure(rcm_handle_t *, datalink_id_t);
/* Module private routines */
static void     cache_free();
static int      cache_update(rcm_handle_t *);
static void     cache_remove(link_cache_t *);
static void     node_free(link_cache_t *);
static void     cache_insert(link_cache_t *);
static link_cache_t *cache_lookup(rcm_handle_t *, char *, char);
static int      ibpart_consumer_offline(rcm_handle_t *, link_cache_t *,
                    char **, uint_t, rcm_info_t **);
static void     ibpart_consumer_online(rcm_handle_t *, link_cache_t *,
                    char **, uint_t, rcm_info_t **);
static int      ibpart_offline_ibpart(link_cache_t *, uint32_t,
                    cache_node_state_t);
static void     ibpart_online_ibpart(link_cache_t *);
static char     *ibpart_usage(link_cache_t *);
static void     ibpart_log_err(datalink_id_t, char **, char *);
static int      ibpart_consumer_notify(rcm_handle_t *, datalink_id_t,
                    char **, uint_t, rcm_info_t **);
/* Module-Private data */
static struct rcm_mod_ops ibpart_ops =
{
    RCM_MOD_OPS_VERSION,
    ibpart_register,
    ibpart_unregister,
    ibpart_get_info,
    ibpart_suspend,
    ibpart_resume,
    ibpart_offline,
    ibpart_undo_offline,
    ibpart_remove,
    NULL,               /* request_capacity_change */
    NULL,               /* notify_capacity_change */
    ibpart_notify_event
};
/*
 * rcm_mod_init() - Update registrations, and return the ops structure.
 */
struct rcm_mod_ops *
rcm_mod_init(void)
{
    char errmsg[DLADM_STRSIZE];
    dladm_status_t status;

    rcm_log_message(RCM_TRACE1, "IBPART: mod_init\n");

    cache_head.pc_next = &cache_tail;
    cache_head.pc_prev = NULL;
    cache_tail.pc_prev = &cache_head;
    cache_tail.pc_next = NULL;
    (void) mutex_init(&cache_lock, 0, NULL);

    if ((status = dladm_open(&dld_handle)) != DLADM_STATUS_OK) {
        rcm_log_message(RCM_WARNING,
            "IBPART: mod_init failed: cannot open datalink "
            "handle: %s\n", dladm_status2str(status, errmsg));
        return (NULL);
    }

    /* Return the ops vectors */
    return (&ibpart_ops);
}
/*
 * rcm_mod_info() - Return a string describing this module.
 */
const char *
rcm_mod_info(void)
{
    rcm_log_message(RCM_TRACE1, "IBPART: mod_info\n");

    return ("IBPART module");
}
/*
 * rcm_mod_fini() - Destroy the network IBPART cache.
 */
int
rcm_mod_fini(void)
{
    rcm_log_message(RCM_TRACE1, "IBPART: mod_fini\n");

    /*
     * Note that ibpart_unregister() does not seem to be called anywhere,
     * so we free the cache nodes here. In theory we should call
     * rcm_unregister_interest() for each node before we free it, but the
     * framework does not provide the rcm_handle to allow us to do so.
     */
    cache_free();
    (void) mutex_destroy(&cache_lock);

    dladm_close(dld_handle);
    return (RCM_SUCCESS);
}
/*
 * ibpart_register() - Make sure the cache is properly sync'ed, and its
 *                     registrations are in order.
 */
static int
ibpart_register(rcm_handle_t *hd)
{
    rcm_log_message(RCM_TRACE1, "IBPART: register\n");

    if (cache_update(hd) < 0)
        return (RCM_FAILURE);

    /*
     * Need to register interest in all new resources
     * getting attached, so we get attach event notifications
     */
    if (!events_registered) {
        if (rcm_register_event(hd, RCM_RESOURCE_LINK_NEW, 0, NULL)
            != RCM_SUCCESS) {
            rcm_log_message(RCM_ERROR,
                _("IBPART: failed to register %s\n"),
                RCM_RESOURCE_LINK_NEW);
            return (RCM_FAILURE);
        } else {
            rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
                RCM_RESOURCE_LINK_NEW);
            events_registered++;
        }
    }

    return (RCM_SUCCESS);
}
/*
 * ibpart_unregister() - Walk the cache, unregistering all the networks.
 */
static int
ibpart_unregister(rcm_handle_t *hd)
{
    link_cache_t *node;

    rcm_log_message(RCM_TRACE1, "IBPART: unregister\n");

    /* Walk the cache, unregistering everything */
    (void) mutex_lock(&cache_lock);
    node = cache_head.pc_next;
    while (node != &cache_tail) {
        if (rcm_unregister_interest(hd, node->pc_resource, 0)
            != RCM_SUCCESS) {
            rcm_log_message(RCM_ERROR,
                _("IBPART: failed to unregister %s\n"),
                node->pc_resource);
            (void) mutex_unlock(&cache_lock);
            return (RCM_FAILURE);
        }
        cache_remove(node);
        node_free(node);
        node = cache_head.pc_next;
    }
    (void) mutex_unlock(&cache_lock);

    /*
     * Unregister interest in all new resources
     */
    if (events_registered) {
        if (rcm_unregister_event(hd, RCM_RESOURCE_LINK_NEW, 0)
            != RCM_SUCCESS) {
            rcm_log_message(RCM_ERROR,
                _("IBPART: failed to unregister %s\n"),
                RCM_RESOURCE_LINK_NEW);
            return (RCM_FAILURE);
        } else {
            rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
                RCM_RESOURCE_LINK_NEW);
            events_registered--;
        }
    }

    return (RCM_SUCCESS);
}
/*
 * ibpart_offline() - Offline IBPARTs on a specific node.
 */
static int
ibpart_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
    link_cache_t *node;

    rcm_log_message(RCM_TRACE1, "IBPART: offline(%s)\n", rsrc);

    /* Lock the cache and lookup the resource */
    (void) mutex_lock(&cache_lock);
    node = cache_lookup(hd, rsrc, CACHE_REFRESH);
    if (node == NULL) {
        /* should not happen because the resource is registered. */
        ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
            "unrecognized resource");
        (void) mutex_unlock(&cache_lock);
        return (RCM_SUCCESS);
    }

    /*
     * Inform consumers (IP interfaces) of associated IBPARTs to be offlined
     */
    if (ibpart_consumer_offline(hd, node, errorp, flags, info) ==
        RCM_SUCCESS) {
        rcm_log_message(RCM_DEBUG,
            "IBPART: consumers agreed on offline\n");
    } else {
        ibpart_log_err(node->pc_linkid, errorp,
            "consumers failed to offline");
        (void) mutex_unlock(&cache_lock);
        return (RCM_FAILURE);
    }

    /* Check if it's a query */
    if (flags & RCM_QUERY) {
        rcm_log_message(RCM_TRACE1,
            "IBPART: offline query succeeded(%s)\n", rsrc);
        (void) mutex_unlock(&cache_lock);
        return (RCM_SUCCESS);
    }

    if (ibpart_offline_ibpart(node, IBPART_OFFLINED, CACHE_NODE_OFFLINED) !=
        RCM_SUCCESS) {
        ibpart_online_ibpart(node);
        ibpart_log_err(node->pc_linkid, errorp, "offline failed");
        (void) mutex_unlock(&cache_lock);
        return (RCM_FAILURE);
    }

    rcm_log_message(RCM_TRACE1, "IBPART: offline succeeded(%s)\n", rsrc);
    (void) mutex_unlock(&cache_lock);
    return (RCM_SUCCESS);
}
/*
 * ibpart_undo_offline() - Undo offline of a previously offlined node.
 */
static int
ibpart_undo_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
    link_cache_t *node;

    rcm_log_message(RCM_TRACE1, "IBPART: online(%s)\n", rsrc);

    (void) mutex_lock(&cache_lock);
    node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
    if (node == NULL) {
        ibpart_log_err(DATALINK_INVALID_LINKID, errorp, "no such link");
        (void) mutex_unlock(&cache_lock);
        errno = ENOENT;
        return (RCM_FAILURE);
    }

    /* Check if no attempt should be made to online the link here */
    if (!(node->pc_state & CACHE_NODE_OFFLINED)) {
        ibpart_log_err(node->pc_linkid, errorp, "link not offlined");
        (void) mutex_unlock(&cache_lock);
        errno = ENOTSUP;
        return (RCM_SUCCESS);
    }

    ibpart_online_ibpart(node);

    /*
     * Inform IP interfaces on associated IBPARTs to be onlined
     */
    ibpart_consumer_online(hd, node, errorp, flags, info);

    node->pc_state &= ~CACHE_NODE_OFFLINED;
    rcm_log_message(RCM_TRACE1, "IBPART: online succeeded(%s)\n", rsrc);
    (void) mutex_unlock(&cache_lock);
    return (RCM_SUCCESS);
}
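/*
 * ibpart_online_ibpart() - Bring previously offlined IBPARTs on this link
 * back up via dladm_part_up(); failures are logged but do not stop the walk.
 */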
static void
ibpart_online_ibpart(link_cache_t *node)
{
    dl_ibpart_t *ibpart;
    dladm_status_t status;
    char errmsg[DLADM_STRSIZE];

    /*
     * Try to bring on all offlined IBPARTs
     */
    for (ibpart = node->pc_ibpart; ibpart != NULL;
        ibpart = ibpart->dlib_next) {
        if (!(ibpart->dlib_flags & IBPART_OFFLINED))
            continue;

        rcm_log_message(RCM_TRACE1, "IBPART: online DLID %d\n",
            ibpart->dlib_ibpart_id);
        if ((status = dladm_part_up(dld_handle,
            ibpart->dlib_ibpart_id, 0)) != DLADM_STATUS_OK) {
            /*
             * Print a warning message and continue to online
             * other IBPARTs.
             */
            rcm_log_message(RCM_WARNING,
                _("IBPART: IBPART online failed (%u): %s\n"),
                ibpart->dlib_ibpart_id,
                dladm_status2str(status, errmsg));
        } else {
            ibpart->dlib_flags &= ~IBPART_OFFLINED;
        }
    }
}
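/*
 * ibpart_offline_ibpart() - Tear down the active IBPARTs on a link with
 * dladm_part_delete(), marking each part with `flags' and the cache node
 * with `state' so the operation can be undone later.
 */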
static int
ibpart_offline_ibpart(link_cache_t *node, uint32_t flags,
    cache_node_state_t state)
{
    dl_ibpart_t *ibpart;
    dladm_status_t status;
    char errmsg[DLADM_STRSIZE];

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_offline_ibpart "
        "(%s %u %u)\n", node->pc_resource, flags, state);

    /*
     * Try to delete all explicitly created IBPARTs
     */
    for (ibpart = node->pc_ibpart; ibpart != NULL;
        ibpart = ibpart->dlib_next) {
        rcm_log_message(RCM_TRACE1, "IBPART: offline DLID %d\n",
            ibpart->dlib_ibpart_id);
        if ((status = dladm_part_delete(dld_handle,
            ibpart->dlib_ibpart_id, DLADM_OPT_ACTIVE)) !=
            DLADM_STATUS_OK) {
            rcm_log_message(RCM_WARNING,
                _("IBPART: IBPART offline failed (%u): %s\n"),
                ibpart->dlib_ibpart_id,
                dladm_status2str(status, errmsg));
            return (RCM_FAILURE);
        } else {
            rcm_log_message(RCM_TRACE1,
                "IBPART: IBPART offline succeeded(%u)\n",
                ibpart->dlib_ibpart_id);
            ibpart->dlib_flags |= flags;
        }
    }

    node->pc_state |= state;
    return (RCM_SUCCESS);
}
/*
 * ibpart_get_info() - Gather usage information for this resource.
 */
static int
ibpart_get_info(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **usagep, char **errorp, nvlist_t *props, rcm_info_t **info)
{
    link_cache_t *node;

    rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s)\n", rsrc);

    (void) mutex_lock(&cache_lock);
    node = cache_lookup(hd, rsrc, CACHE_REFRESH);
    if (node == NULL) {
        rcm_log_message(RCM_INFO,
            _("IBPART: get_info(%s) unrecognized resource\n"), rsrc);
        (void) mutex_unlock(&cache_lock);
        errno = ENOENT;
        return (RCM_FAILURE);
    }

    *usagep = ibpart_usage(node);
    (void) mutex_unlock(&cache_lock);
    if (*usagep == NULL) {
        /* most likely malloc failure */
        rcm_log_message(RCM_ERROR,
            _("IBPART: get_info(%s) malloc failure\n"), rsrc);
        errno = ENOMEM;
        return (RCM_FAILURE);
    }

    /* Set client/role properties */
    (void) nvlist_add_string(props, RCM_CLIENT_NAME, "IBPART");

    rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s) info = %s\n",
        rsrc, *usagep);
    return (RCM_SUCCESS);
}
/*
 * ibpart_suspend() - Nothing to do, always okay
 */
static int
ibpart_suspend(rcm_handle_t *hd, char *rsrc, id_t id, timespec_t *interval,
    uint_t flags, char **errorp, rcm_info_t **info)
{
    rcm_log_message(RCM_TRACE1, "IBPART: suspend(%s)\n", rsrc);
    return (RCM_SUCCESS);
}
/*
 * ibpart_resume() - Nothing to do, always okay
 */
static int
ibpart_resume(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
    rcm_log_message(RCM_TRACE1, "IBPART: resume(%s)\n", rsrc);
    return (RCM_SUCCESS);
}
/*
 * ibpart_consumer_remove()
 *
 *      Notify IBPART consumers to remove cache.
 */
static int
ibpart_consumer_remove(rcm_handle_t *hd, link_cache_t *node, uint_t flags,
    rcm_info_t **info)
{
    dl_ibpart_t *ibpart = NULL;
    char rsrc[RCM_LINK_RESOURCE_MAX];
    int ret = RCM_SUCCESS;

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove (%s)\n",
        node->pc_resource);

    for (ibpart = node->pc_ibpart; ibpart != NULL;
        ibpart = ibpart->dlib_next) {

        /*
         * This will only be called when the offline operation
         * succeeds, so the IBPART consumers must have been offlined
         * at this point.
         */
        assert(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED);

        (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
            RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

        ret = rcm_notify_remove(hd, rsrc, flags, info);
        if (ret != RCM_SUCCESS) {
            rcm_log_message(RCM_WARNING,
                _("IBPART: notify remove failed (%s)\n"), rsrc);
            break;
        }
    }

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove done\n");
    return (ret);
}
/*
 * ibpart_remove() - remove a resource from cache
 */
static int
ibpart_remove(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
    link_cache_t *node;
    int rv;

    rcm_log_message(RCM_TRACE1, "IBPART: remove(%s)\n", rsrc);

    (void) mutex_lock(&cache_lock);
    node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
    if (node == NULL) {
        rcm_log_message(RCM_INFO,
            _("IBPART: remove(%s) unrecognized resource\n"), rsrc);
        (void) mutex_unlock(&cache_lock);
        errno = ENOENT;
        return (RCM_FAILURE);
    }

    /* remove the cached entry for the resource */
    cache_remove(node);
    (void) mutex_unlock(&cache_lock);

    rv = ibpart_consumer_remove(hd, node, flags, info);
    node_free(node);
    return (rv);
}
/*
 * ibpart_notify_event - Project private implementation to receive new
 *                       resource events. It intercepts all new resource
 *                       events. If the new resource is a network resource,
 *                       a notify is passed up for it too. The new resource
 *                       need not be cached here, since that is done again
 *                       at register time.
 */
static int
ibpart_notify_event(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, nvlist_t *nvl, rcm_info_t **info)
{
    nvpair_t *nvp = NULL;
    datalink_id_t linkid;
    uint64_t id64;
    int rv = RCM_SUCCESS;

    rcm_log_message(RCM_TRACE1, "IBPART: notify_event(%s)\n", rsrc);

    if (strcmp(rsrc, RCM_RESOURCE_LINK_NEW) != 0) {
        ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
            "unrecognized event");
        errno = EINVAL;
        return (RCM_FAILURE);
    }

    /* Update cache to reflect latest IBPARTs */
    if (cache_update(hd) < 0) {
        ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
            "private Cache update failed");
        return (RCM_FAILURE);
    }

    /*
     * Try best to recover all configuration.
     */
    rcm_log_message(RCM_DEBUG, "IBPART: process_nvlist\n");
    while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
        if (strcmp(nvpair_name(nvp), RCM_NV_LINKID) != 0)
            continue;

        if (nvpair_value_uint64(nvp, &id64) != 0) {
            ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                "cannot get linkid");
            rv = RCM_FAILURE;
            continue;
        }

        linkid = (datalink_id_t)id64;
        if (ibpart_configure(hd, linkid) != 0) {
            ibpart_log_err(linkid, errorp, "configuring failed");
            rv = RCM_FAILURE;
            continue;
        }

        /* Notify all IBPART consumers */
        if (ibpart_consumer_notify(hd, linkid, errorp, flags,
            info) != 0) {
            ibpart_log_err(linkid, errorp,
                "consumer notify failed");
            rv = RCM_FAILURE;
        }
    }

    rcm_log_message(RCM_TRACE1,
        "IBPART: notify_event: link configuration complete\n");
    return (rv);
}
/*
 * ibpart_usage - Determine the usage of a link.
 *                The returned buffer is owned by the caller, and the
 *                caller must free it up when done.
 */
static char *
ibpart_usage(link_cache_t *node)
{
    dl_ibpart_t *ibpart;
    int nibpart;
    char *buf;
    const char *fmt;
    char *sep;
    char errmsg[DLADM_STRSIZE];
    char name[MAXLINKNAMELEN];
    dladm_status_t status;
    size_t bufsz;

    rcm_log_message(RCM_TRACE2, "IBPART: usage(%s)\n", node->pc_resource);

    assert(MUTEX_HELD(&cache_lock));
    if ((status = dladm_datalink_id2info(dld_handle, node->pc_linkid, NULL,
        NULL, NULL, name, sizeof (name))) != DLADM_STATUS_OK) {
        rcm_log_message(RCM_ERROR,
            _("IBPART: usage(%s) get link name failure(%s)\n"),
            node->pc_resource, dladm_status2str(status, errmsg));
        return (NULL);
    }

    if (node->pc_state & CACHE_NODE_OFFLINED)
        fmt = _("%1$s offlined");
    else
        fmt = _("%1$s IBPART: ");

    /* TRANSLATION_NOTE: separator used between IBPART linkids */
    sep = _(", ");

    nibpart = 0;
    for (ibpart = node->pc_ibpart; ibpart != NULL;
        ibpart = ibpart->dlib_next)
        nibpart++;

    /* space for IBPARTs and separators, plus message */
    bufsz = nibpart * (MAXLINKNAMELEN + strlen(sep)) +
        strlen(fmt) + MAXLINKNAMELEN + 1;
    if ((buf = malloc(bufsz)) == NULL) {
        rcm_log_message(RCM_ERROR,
            _("IBPART: usage(%s) malloc failure(%s)\n"),
            node->pc_resource, strerror(errno));
        return (NULL);
    }
    (void) snprintf(buf, bufsz, fmt, name);

    if (node->pc_state & CACHE_NODE_OFFLINED) {
        /* Nothing else to do */
        rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
            node->pc_resource, buf);
        return (buf);
    }

    for (ibpart = node->pc_ibpart; ibpart != NULL;
        ibpart = ibpart->dlib_next) {
        rcm_log_message(RCM_DEBUG, "IBPART: ibpart %u\n",
            ibpart->dlib_ibpart_id);

        if ((status = dladm_datalink_id2info(dld_handle,
            ibpart->dlib_ibpart_id, NULL, NULL, NULL, name,
            sizeof (name))) != DLADM_STATUS_OK) {
            rcm_log_message(RCM_ERROR,
                _("IBPART: usage(%s) get ibpart %u name "
                "failure(%s)\n"), node->pc_resource,
                ibpart->dlib_ibpart_id,
                dladm_status2str(status, errmsg));
            free(buf);
            return (NULL);
        }

        (void) strlcat(buf, name, bufsz);
        if (ibpart->dlib_next != NULL)
            (void) strlcat(buf, sep, bufsz);
    }

    rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
        node->pc_resource, buf);

    return (buf);
}
/*
 * Cache management routines. All cache management functions should be
 * called with cache_lock held.
 */

/*
 * cache_lookup() - Get a cache node for a resource.
 *                  Call with cache lock held.
 *
 * This ensures that the cache is consistent with the system state and
 * returns a pointer to the cache element corresponding to the resource.
 */
static link_cache_t *
cache_lookup(rcm_handle_t *hd, char *rsrc, char options)
{
    link_cache_t *node;

    rcm_log_message(RCM_TRACE2, "IBPART: cache lookup(%s)\n", rsrc);

    assert(MUTEX_HELD(&cache_lock));
    if (options & CACHE_REFRESH) {
        /* drop lock since update locks cache again */
        (void) mutex_unlock(&cache_lock);
        (void) cache_update(hd);
        (void) mutex_lock(&cache_lock);
    }

    node = cache_head.pc_next;
    for (; node != &cache_tail; node = node->pc_next) {
        if (strcmp(rsrc, node->pc_resource) == 0) {
            rcm_log_message(RCM_TRACE2,
                "IBPART: cache lookup succeeded(%s)\n", rsrc);
            return (node);
        }
    }
    return (NULL);
}
/*
 * node_free - Free a node from the cache
 */
static void
node_free(link_cache_t *node)
{
    dl_ibpart_t *ibpart, *next;

    if (node != NULL) {
        free(node->pc_resource);

        /* free the IBPART list */
        for (ibpart = node->pc_ibpart; ibpart != NULL; ibpart = next) {
            next = ibpart->dlib_next;
            free(ibpart);
        }
        free(node);
    }
}
/*
 * cache_insert - Insert a resource node in cache
 */
static void
cache_insert(link_cache_t *node)
{
    assert(MUTEX_HELD(&cache_lock));

    /* insert at the head for best performance */
    node->pc_next = cache_head.pc_next;
    node->pc_prev = &cache_head;

    node->pc_next->pc_prev = node;
    node->pc_prev->pc_next = node;
}
/*
 * cache_remove() - Remove a resource node from cache.
 */
static void
cache_remove(link_cache_t *node)
{
    assert(MUTEX_HELD(&cache_lock));
    node->pc_next->pc_prev = node->pc_prev;
    node->pc_prev->pc_next = node->pc_next;
    node->pc_next = NULL;
    node->pc_prev = NULL;
}
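/*
 * Argument block threaded through dladm_walk_datalink_id() into
 * ibpart_update(); it carries the RCM handle in and the result code out.
 */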
typedef struct ibpart_update_arg_s {
    rcm_handle_t    *hd;
    int             retval;
} ibpart_update_arg_t;
/*
 * ibpart_update() - Update physical interface properties
 */
static int
ibpart_update(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
{
    ibpart_update_arg_t *ibpart_update_argp = arg;
    rcm_handle_t *hd = ibpart_update_argp->hd;
    link_cache_t *node;
    dl_ibpart_t *ibpart;
    char *rsrc;
    dladm_ib_attr_t ibpart_attr;
    dladm_status_t status;
    char errmsg[DLADM_STRSIZE];
    boolean_t newnode = B_FALSE;
    int ret = -1;

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update(%u)\n", ibpartid);

    assert(MUTEX_HELD(&cache_lock));
    status = dladm_part_info(handle, ibpartid, &ibpart_attr,
        DLADM_OPT_ACTIVE);
    if (status != DLADM_STATUS_OK) {
        rcm_log_message(RCM_TRACE1,
            "IBPART: ibpart_update() cannot get ibpart information for "
            "%u(%s)\n", ibpartid, dladm_status2str(status, errmsg));
        return (DLADM_WALK_CONTINUE);
    }

    if (ibpart_attr.dia_physlinkid == DATALINK_INVALID_LINKID) {
        /*
         * Skip the IB port nodes.
         */
        rcm_log_message(RCM_TRACE1,
            "IBPART: ibpart_update(): skip the PORT nodes %u\n",
            ibpartid);
        return (DLADM_WALK_CONTINUE);
    }

    rsrc = malloc(RCM_LINK_RESOURCE_MAX);
    if (rsrc == NULL) {
        rcm_log_message(RCM_ERROR, _("IBPART: malloc error(%s): %u\n"),
            strerror(errno), ibpartid);
        goto done;
    }

    (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
        RCM_LINK_PREFIX, ibpart_attr.dia_physlinkid);

    node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
    if (node != NULL) {
        rcm_log_message(RCM_DEBUG,
            "IBPART: %s already registered (ibpartid:%d)\n",
            rsrc, ibpart_attr.dia_partlinkid);
        free(rsrc);
    } else {
        rcm_log_message(RCM_DEBUG,
            "IBPART: %s is a new resource (ibpartid:%d)\n",
            rsrc, ibpart_attr.dia_partlinkid);
        if ((node = calloc(1, sizeof (link_cache_t))) == NULL) {
            free(rsrc);
            rcm_log_message(RCM_ERROR, _("IBPART: calloc: %s\n"),
                strerror(errno));
            goto done;
        }

        node->pc_resource = rsrc;
        node->pc_ibpart = NULL;
        node->pc_linkid = ibpart_attr.dia_physlinkid;
        node->pc_state |= CACHE_NODE_NEW;
        newnode = B_TRUE;
    }

    for (ibpart = node->pc_ibpart; ibpart != NULL;
        ibpart = ibpart->dlib_next) {
        if (ibpart->dlib_ibpart_id == ibpartid) {
            ibpart->dlib_flags &= ~IBPART_STALE;
            break;
        }
    }

    if (ibpart == NULL) {
        if ((ibpart = calloc(1, sizeof (dl_ibpart_t))) == NULL) {
            rcm_log_message(RCM_ERROR, _("IBPART: malloc: %s\n"),
                strerror(errno));
            if (newnode) {
                free(rsrc);
                free(node);
            }
            goto done;
        }
        ibpart->dlib_ibpart_id = ibpartid;
        ibpart->dlib_next = node->pc_ibpart;
        ibpart->dlib_prev = NULL;
        if (node->pc_ibpart != NULL)
            node->pc_ibpart->dlib_prev = ibpart;
        node->pc_ibpart = ibpart;
    }

    node->pc_state &= ~CACHE_NODE_STALE;

    if (newnode)
        cache_insert(node);

    rcm_log_message(RCM_TRACE3, "IBPART: ibpart_update: succeeded(%u)\n",
        ibpartid);
    ret = 0;
done:
    ibpart_update_argp->retval = ret;
    return (ret == 0 ? DLADM_WALK_CONTINUE : DLADM_WALK_TERMINATE);
}
/*
 * ibpart_update_all() - Determine all IBPART links in the system
 */
static int
ibpart_update_all(rcm_handle_t *hd)
{
    ibpart_update_arg_t arg = {NULL, 0};

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update_all\n");

    assert(MUTEX_HELD(&cache_lock));
    arg.hd = hd;
    (void) dladm_walk_datalink_id(ibpart_update, dld_handle, &arg,
        DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_ACTIVE);
    return (arg.retval);
}
/*
 * cache_update() - Update cache with latest interface info
 */
static int
cache_update(rcm_handle_t *hd)
{
    link_cache_t *node, *nnode;
    dl_ibpart_t *ibpart;
    int rv;

    rcm_log_message(RCM_TRACE2, "IBPART: cache_update\n");

    (void) mutex_lock(&cache_lock);

    /* first we walk the entire cache, marking each entry stale */
    node = cache_head.pc_next;
    for (; node != &cache_tail; node = node->pc_next) {
        node->pc_state |= CACHE_NODE_STALE;
        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next)
            ibpart->dlib_flags |= IBPART_STALE;
    }

    rv = ibpart_update_all(hd);

    /*
     * Continue to delete all stale nodes from the cache even if
     * ibpart_update_all() failed. Unregister links that are not
     * offlined and still in the cache.
     */
    for (node = cache_head.pc_next; node != &cache_tail; node = nnode) {
        dl_ibpart_t *ibpart, *next;

        for (ibpart = node->pc_ibpart; ibpart != NULL; ibpart = next) {
            next = ibpart->dlib_next;

            /* clear stale IBPARTs */
            if (ibpart->dlib_flags & IBPART_STALE) {
                if (ibpart->dlib_prev != NULL)
                    ibpart->dlib_prev->dlib_next = next;
                else
                    node->pc_ibpart = next;

                if (next != NULL)
                    next->dlib_prev = ibpart->dlib_prev;
                free(ibpart);
            }
        }

        nnode = node->pc_next;
        if (node->pc_state & CACHE_NODE_STALE) {
            (void) rcm_unregister_interest(hd, node->pc_resource,
                0);
            rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
                node->pc_resource);
            assert(node->pc_ibpart == NULL);
            cache_remove(node);
            node_free(node);
            continue;
        }

        if (!(node->pc_state & CACHE_NODE_NEW))
            continue;

        if (rcm_register_interest(hd, node->pc_resource, 0, NULL) !=
            RCM_SUCCESS) {
            rcm_log_message(RCM_ERROR,
                _("IBPART: failed to register %s\n"),
                node->pc_resource);
            rv = -1;
        } else {
            rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
                node->pc_resource);
            node->pc_state &= ~CACHE_NODE_NEW;
        }
    }

    (void) mutex_unlock(&cache_lock);
    return (rv);
}
/*
 * cache_free() - Empty the cache
 */
static void
cache_free()
{
    link_cache_t *node;

    rcm_log_message(RCM_TRACE2, "IBPART: cache_free\n");

    (void) mutex_lock(&cache_lock);
    node = cache_head.pc_next;
    while (node != &cache_tail) {
        cache_remove(node);
        node_free(node);
        node = cache_head.pc_next;
    }
    (void) mutex_unlock(&cache_lock);
}
/*
 * ibpart_log_err() - RCM error log wrapper
 */
static void
ibpart_log_err(datalink_id_t linkid, char **errorp, char *errmsg)
{
    char link[MAXLINKNAMELEN];
    char errstr[DLADM_STRSIZE];
    dladm_status_t status;
    int len;
    const char *errfmt;
    char *error;

    link[0] = '\0';
    if (linkid != DATALINK_INVALID_LINKID) {
        char rsrc[RCM_LINK_RESOURCE_MAX];

        (void) snprintf(rsrc, sizeof (rsrc), "%s/%u",
            RCM_LINK_PREFIX, linkid);

        rcm_log_message(RCM_ERROR, _("IBPART: %s(%s)\n"), errmsg, rsrc);
        if ((status = dladm_datalink_id2info(dld_handle, linkid, NULL,
            NULL, NULL, link, sizeof (link))) != DLADM_STATUS_OK) {
            rcm_log_message(RCM_WARNING,
                _("IBPART: cannot get link name for (%s) %s\n"),
                rsrc, dladm_status2str(status, errstr));
        }
    } else {
        rcm_log_message(RCM_ERROR, _("IBPART: %s\n"), errmsg);
    }

    errfmt = strlen(link) > 0 ? _("IBPART: %s(%s)") : _("IBPART: %s");
    len = strlen(errfmt) + strlen(errmsg) + MAXLINKNAMELEN + 1;
    if ((error = malloc(len)) != NULL) {
        if (strlen(link) > 0)
            (void) snprintf(error, len, errfmt, errmsg, link);
        else
            (void) snprintf(error, len, errfmt, errmsg);
    }

    if (errorp != NULL)
        *errorp = error;
}
/*
 * ibpart_consumer_online()
 *
 *      Notify online to IBPART consumers.
 */
static void
ibpart_consumer_online(rcm_handle_t *hd, link_cache_t *node, char **errorp,
    uint_t flags, rcm_info_t **info)
{
    dl_ibpart_t *ibpart;
    char rsrc[RCM_LINK_RESOURCE_MAX];

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online (%s)\n",
        node->pc_resource);

    for (ibpart = node->pc_ibpart; ibpart != NULL;
        ibpart = ibpart->dlib_next) {
        if (!(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED))
            continue;

        (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
            RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

        if (rcm_notify_online(hd, rsrc, flags, info) == RCM_SUCCESS)
            ibpart->dlib_flags &= ~IBPART_CONSUMER_OFFLINED;
    }

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online done\n");
}
/*
 * ibpart_consumer_offline()
 *
 *      Offline IBPART consumers. If any consumer refuses, the consumers
 *      that were already offlined are brought back online.
 */
static int
ibpart_consumer_offline(rcm_handle_t *hd, link_cache_t *node, char **errorp,
    uint_t flags, rcm_info_t **info)
{
    dl_ibpart_t *ibpart;
    char rsrc[RCM_LINK_RESOURCE_MAX];
    int ret = RCM_SUCCESS;

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline (%s)\n",
        node->pc_resource);

    for (ibpart = node->pc_ibpart; ibpart != NULL;
        ibpart = ibpart->dlib_next) {
        (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
            RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

        ret = rcm_request_offline(hd, rsrc, flags, info);
        if (ret != RCM_SUCCESS)
            break;

        ibpart->dlib_flags |= IBPART_CONSUMER_OFFLINED;
    }

    if (ibpart != NULL)
        ibpart_consumer_online(hd, node, errorp, flags, info);

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline done\n");
    return (ret);
}
/*
 * Send RCM_RESOURCE_LINK_NEW events to other modules about new IBPARTs.
 * Return 0 on success, -1 on failure.
 */
static int
ibpart_notify_new_ibpart(rcm_handle_t *hd, char *rsrc)
{
    link_cache_t *node;
    dl_ibpart_t *ibpart;
    nvlist_t *nvl = NULL;
    uint64_t id;
    int ret = -1;

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart (%s)\n",
        rsrc);

    (void) mutex_lock(&cache_lock);
    if ((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) == NULL) {
        (void) mutex_unlock(&cache_lock);
        return (0);
    }

    if (nvlist_alloc(&nvl, 0, 0) != 0) {
        (void) mutex_unlock(&cache_lock);
        rcm_log_message(RCM_WARNING,
            _("IBPART: failed to allocate nvlist\n"));
        goto done;
    }

    for (ibpart = node->pc_ibpart; ibpart != NULL;
        ibpart = ibpart->dlib_next) {
        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart "
            "add (%u)\n", ibpart->dlib_ibpart_id);

        id = ibpart->dlib_ibpart_id;
        if (nvlist_add_uint64(nvl, RCM_NV_LINKID, id) != 0) {
            rcm_log_message(RCM_ERROR,
                _("IBPART: failed to construct nvlist\n"));
            (void) mutex_unlock(&cache_lock);
            goto done;
        }
    }
    (void) mutex_unlock(&cache_lock);

    if (rcm_notify_event(hd, RCM_RESOURCE_LINK_NEW, 0, nvl, NULL) !=
        RCM_SUCCESS) {
        rcm_log_message(RCM_ERROR,
            _("IBPART: failed to notify %s event for %s\n"),
            RCM_RESOURCE_LINK_NEW, node->pc_resource);
        goto done;
    }

    ret = 0;
done:
    nvlist_free(nvl);
    return (ret);
}
1259 ibpart_consumer_notify(rcm_handle_t
*hd
, datalink_id_t linkid
, char **errorp
,
1260 uint_t flags
, rcm_info_t
**info
)
1262 char rsrc
[RCM_LINK_RESOURCE_MAX
];
1265 /* Check for the interface in the cache */
1266 (void) snprintf(rsrc
, RCM_LINK_RESOURCE_MAX
, "%s/%u", RCM_LINK_PREFIX
,
1269 rcm_log_message(RCM_TRACE2
, "IBPART: ibpart_consumer_notify(%s)\n",
1273 * Inform IP consumers of the new link.
1275 if (ibpart_notify_new_ibpart(hd
, rsrc
) != 0) {
1276 (void) mutex_lock(&cache_lock
);
1277 if ((node
= cache_lookup(hd
, rsrc
, CACHE_NO_REFRESH
)) != NULL
) {
1278 (void) ibpart_offline_ibpart(node
, IBPART_STALE
,
1281 (void) mutex_unlock(&cache_lock
);
1282 rcm_log_message(RCM_TRACE2
,
1283 "IBPART: ibpart_notify_new_ibpart failed(%s)\n", rsrc
);
1287 rcm_log_message(RCM_TRACE2
, "IBPART: ibpart_consumer_notify "
typedef struct ibpart_up_arg_s {
    datalink_id_t   linkid;
    int             retval;
} ibpart_up_arg_t;
;
1298 ibpart_up(dladm_handle_t handle
, datalink_id_t ibpartid
, void *arg
)
1300 ibpart_up_arg_t
*ibpart_up_argp
= arg
;
1301 dladm_status_t status
;
1302 dladm_ib_attr_t ibpart_attr
;
1303 char errmsg
[DLADM_STRSIZE
];
1305 status
= dladm_part_info(handle
, ibpartid
, &ibpart_attr
,
1307 if (status
!= DLADM_STATUS_OK
) {
1308 rcm_log_message(RCM_TRACE1
,
1309 "IBPART: ibpart_up(): cannot get information for IBPART %u "
1310 "(%s)\n", ibpartid
, dladm_status2str(status
, errmsg
));
1311 return (DLADM_WALK_CONTINUE
);
1314 if (ibpart_attr
.dia_physlinkid
!= ibpart_up_argp
->linkid
)
1315 return (DLADM_WALK_CONTINUE
);
1317 rcm_log_message(RCM_TRACE3
, "IBPART: ibpart_up(%u)\n", ibpartid
);
1318 if ((status
= dladm_part_up(handle
, ibpartid
, 0)) == DLADM_STATUS_OK
)
1319 return (DLADM_WALK_CONTINUE
);
1322 * Prompt the warning message and continue to UP other IBPARTs.
1324 rcm_log_message(RCM_WARNING
,
1325 _("IBPART: IBPART up failed (%u): %s\n"),
1326 ibpartid
, dladm_status2str(status
, errmsg
));
1328 ibpart_up_argp
->retval
= -1;
1329 return (DLADM_WALK_CONTINUE
);
/*
 * ibpart_configure() - Configure IBPARTs over a physical link after it
 * attaches.
 */
static int
ibpart_configure(rcm_handle_t *hd, datalink_id_t linkid)
{
    char rsrc[RCM_LINK_RESOURCE_MAX];
    link_cache_t *node;
    ibpart_up_arg_t arg = {DATALINK_INVALID_LINKID, 0};

    /* Check for the IBPARTs in the cache */
    (void) snprintf(rsrc, sizeof (rsrc), "%s/%u", RCM_LINK_PREFIX, linkid);

    rcm_log_message(RCM_TRACE2, "IBPART: ibpart_configure(%s)\n", rsrc);

    /* Check if the link is new or was previously offlined */
    (void) mutex_lock(&cache_lock);
    if (((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) != NULL) &&
        (!(node->pc_state & CACHE_NODE_OFFLINED))) {
        rcm_log_message(RCM_TRACE2,
            "IBPART: Skipping configured interface(%s)\n", rsrc);
        (void) mutex_unlock(&cache_lock);
        return (0);
    }
    (void) mutex_unlock(&cache_lock);

    arg.linkid = linkid;
    (void) dladm_walk_datalink_id(ibpart_up, dld_handle, &arg,
        DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_PERSIST);

    if (arg.retval == 0) {
        rcm_log_message(RCM_TRACE2,
            "IBPART: ibpart_configure succeeded(%s)\n", rsrc);
    }
    return (arg.retval);
}