/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * This RCM module adds support to the RCM framework for IBPART links.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <synch.h>
#include <assert.h>
#include "rcm_module.h"
#include <libintl.h>
#include <libdllink.h>
#include <libdlib.h>

/* Message localization */
#define	_(x)	gettext(x)

/* Some generic well-knowns and defaults used in this module */
#define	RCM_LINK_PREFIX		"SUNW_datalink"	/* RCM datalink name prefix */
#define	RCM_LINK_RESOURCE_MAX	(13 + LINKID_STR_WIDTH)

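/*
 * Illustration (not part of the original source): resource names handled
 * by this module combine RCM_LINK_PREFIX with a decimal datalink id, as
 * the snprintf() calls below do, for example:
 *
 *	(void) snprintf(rsrc, sizeof (rsrc), "%s/%u", RCM_LINK_PREFIX, 5);
 *
 * which yields "SUNW_datalink/5". RCM_LINK_RESOURCE_MAX sizes such
 * buffers: 13 bytes for the "SUNW_datalink" prefix plus LINKID_STR_WIDTH
 * for the separator and linkid digits.
 */
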
/* IBPART link flags */
typedef enum {
	IBPART_OFFLINED			= 0x1,
	IBPART_CONSUMER_OFFLINED	= 0x2,
	IBPART_STALE			= 0x4
} ibpart_flag_t;

/* link representation */
typedef struct dl_ibpart {
	struct dl_ibpart	*dlib_next;	/* next IBPART on this link */
	struct dl_ibpart	*dlib_prev;	/* prev IBPART on this link */
	datalink_id_t		dlib_ibpart_id;
	ibpart_flag_t		dlib_flags;	/* IBPART link flags */
} dl_ibpart_t;

/* IBPART Cache state flags */
typedef enum {
	CACHE_NODE_STALE	= 0x1,	/* stale cached data */
	CACHE_NODE_NEW		= 0x2,	/* new cached nodes */
	CACHE_NODE_OFFLINED	= 0x4	/* nodes offlined */
} cache_node_state_t;

/* Network Cache lookup options */
#define	CACHE_NO_REFRESH	0x1	/* cache refresh not needed */
#define	CACHE_REFRESH		0x2	/* refresh cache */

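/*
 * Usage note (inferred from the callers below): paths that must observe
 * the current system state, such as ibpart_offline() and
 * ibpart_get_info(), pass CACHE_REFRESH so cache_lookup() re-walks the
 * datalinks first; paths that run after an offline or removal, such as
 * ibpart_undo_offline() and ibpart_remove(), pass CACHE_NO_REFRESH since
 * the link may no longer be walkable at that point.
 */
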
/* Cache element */
typedef struct link_cache {
	struct link_cache	*pc_next;	/* next cached resource */
	struct link_cache	*pc_prev;	/* prev cached resource */
	char			*pc_resource;	/* resource name */
	datalink_id_t		pc_linkid;	/* linkid */
	dl_ibpart_t		*pc_ibpart;	/* IBPART list on this link */
	cache_node_state_t	pc_state;	/* cache state flags */
} link_cache_t;

/*
 * Global cache for network IBPARTs
 */
static link_cache_t	cache_head;
static link_cache_t	cache_tail;
static mutex_t		cache_lock;
static int		events_registered = 0;

static dladm_handle_t	dld_handle = NULL;

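/*
 * Cache layout sketch (illustrative): cache_head and cache_tail are
 * sentinel nodes of a doubly-linked list protected by cache_lock,
 *
 *	cache_head <-> node <-> node <-> ... <-> cache_tail
 *
 * where each link_cache_t describes one physical link and carries its
 * own doubly-linked list of dl_ibpart_t, one entry per IBPART created
 * over that link.
 */
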
/*
 * RCM module interface prototypes
 */
static int		ibpart_register(rcm_handle_t *);
static int		ibpart_unregister(rcm_handle_t *);
static int		ibpart_get_info(rcm_handle_t *, char *, id_t, uint_t,
			    char **, char **, nvlist_t *, rcm_info_t **);
static int		ibpart_suspend(rcm_handle_t *, char *, id_t,
			    timespec_t *, uint_t, char **, rcm_info_t **);
static int		ibpart_resume(rcm_handle_t *, char *, id_t, uint_t,
			    char **, rcm_info_t **);
static int		ibpart_offline(rcm_handle_t *, char *, id_t, uint_t,
			    char **, rcm_info_t **);
static int		ibpart_undo_offline(rcm_handle_t *, char *, id_t,
			    uint_t, char **, rcm_info_t **);
static int		ibpart_remove(rcm_handle_t *, char *, id_t, uint_t,
			    char **, rcm_info_t **);
static int		ibpart_notify_event(rcm_handle_t *, char *, id_t,
			    uint_t, char **, nvlist_t *, rcm_info_t **);
static int		ibpart_configure(rcm_handle_t *, datalink_id_t);

/* Module private routines */
static void		cache_free();
static int		cache_update(rcm_handle_t *);
static void		cache_remove(link_cache_t *);
static void		node_free(link_cache_t *);
static void		cache_insert(link_cache_t *);
static link_cache_t	*cache_lookup(rcm_handle_t *, char *, char);
static int		ibpart_consumer_offline(rcm_handle_t *, link_cache_t *,
			    char **, uint_t, rcm_info_t **);
static void		ibpart_consumer_online(rcm_handle_t *, link_cache_t *,
			    char **, uint_t, rcm_info_t **);
static int		ibpart_offline_ibpart(link_cache_t *, uint32_t,
			    cache_node_state_t);
static void		ibpart_online_ibpart(link_cache_t *);
static char		*ibpart_usage(link_cache_t *);
static void		ibpart_log_err(datalink_id_t, char **, char *);
static int		ibpart_consumer_notify(rcm_handle_t *, datalink_id_t,
			    char **, uint_t, rcm_info_t **);

/* Module-Private data */
static struct rcm_mod_ops ibpart_ops =
{
	RCM_MOD_OPS_VERSION,
	ibpart_register,
	ibpart_unregister,
	ibpart_get_info,
	ibpart_suspend,
	ibpart_resume,
	ibpart_offline,
	ibpart_undo_offline,
	ibpart_remove,
	NULL,			/* request_capacity_change */
	NULL,			/* notify_capacity_change */
	ibpart_notify_event
};

/*
 * rcm_mod_init() - Update registrations, and return the ops structure.
 */
struct rcm_mod_ops *
rcm_mod_init(void)
{
	char errmsg[DLADM_STRSIZE];
	dladm_status_t status;

	rcm_log_message(RCM_TRACE1, "IBPART: mod_init\n");

	cache_head.pc_next = &cache_tail;
	cache_head.pc_prev = NULL;
	cache_tail.pc_prev = &cache_head;
	cache_tail.pc_next = NULL;
	(void) mutex_init(&cache_lock, 0, NULL);

	if ((status = dladm_open(&dld_handle)) != DLADM_STATUS_OK) {
		rcm_log_message(RCM_WARNING,
		    "IBPART: mod_init failed: cannot open datalink "
		    "handle: %s\n", dladm_status2str(status, errmsg));
		return (NULL);
	}

	/* Return the ops vectors */
	return (&ibpart_ops);
}

/*
 * rcm_mod_info() - Return a string describing this module.
 */
const char *
rcm_mod_info(void)
{
	rcm_log_message(RCM_TRACE1, "IBPART: mod_info\n");

	return ("IBPART module");
}

/*
 * rcm_mod_fini() - Destroy the network IBPART cache.
 */
int
rcm_mod_fini(void)
{
	rcm_log_message(RCM_TRACE1, "IBPART: mod_fini\n");

	/*
	 * Note that ibpart_unregister() does not seem to be called anywhere,
	 * so we free the cache nodes here. In theory we should call
	 * rcm_unregister_interest() for each node before we free it, but the
	 * framework does not provide the rcm_handle needed to do so.
	 */
	cache_free();

	(void) mutex_destroy(&cache_lock);

	dladm_close(dld_handle);
	return (RCM_SUCCESS);
}

/*
 * ibpart_register() - Make sure the cache is properly sync'ed, and its
 *		registrations are in order.
 */
static int
ibpart_register(rcm_handle_t *hd)
{
	rcm_log_message(RCM_TRACE1, "IBPART: register\n");

	if (cache_update(hd) < 0)
		return (RCM_FAILURE);

	/*
	 * Need to register interest in all new resources
	 * getting attached, so we get attach event notifications
	 */
	if (!events_registered) {
		if (rcm_register_event(hd, RCM_RESOURCE_LINK_NEW, 0, NULL)
		    != RCM_SUCCESS) {
			rcm_log_message(RCM_ERROR,
			    _("IBPART: failed to register %s\n"),
			    RCM_RESOURCE_LINK_NEW);
			return (RCM_FAILURE);
		} else {
			rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
			    RCM_RESOURCE_LINK_NEW);
			events_registered++;
		}
	}

	return (RCM_SUCCESS);
}

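/*
 * Note (inferred from the code above): registering for
 * RCM_RESOURCE_LINK_NEW is what makes the framework deliver new-resource
 * events to ibpart_notify_event() below; the events_registered counter
 * merely keeps the registration from being repeated on subsequent
 * ibpart_register() calls.
 */
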
/*
 * ibpart_unregister() - Walk the cache, unregistering all the networks.
 */
static int
ibpart_unregister(rcm_handle_t *hd)
{
	link_cache_t *node;

	rcm_log_message(RCM_TRACE1, "IBPART: unregister\n");

	/* Walk the cache, unregistering everything */
	(void) mutex_lock(&cache_lock);
	node = cache_head.pc_next;
	while (node != &cache_tail) {
		if (rcm_unregister_interest(hd, node->pc_resource, 0)
		    != RCM_SUCCESS) {
			rcm_log_message(RCM_ERROR,
			    _("IBPART: failed to unregister %s\n"),
			    node->pc_resource);
			(void) mutex_unlock(&cache_lock);
			return (RCM_FAILURE);
		}
		cache_remove(node);
		node_free(node);
		node = cache_head.pc_next;
	}
	(void) mutex_unlock(&cache_lock);

	/*
	 * Unregister interest in all new resources
	 */
	if (events_registered) {
		if (rcm_unregister_event(hd, RCM_RESOURCE_LINK_NEW, 0)
		    != RCM_SUCCESS) {
			rcm_log_message(RCM_ERROR,
			    _("IBPART: failed to unregister %s\n"),
			    RCM_RESOURCE_LINK_NEW);
			return (RCM_FAILURE);
		} else {
			rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
			    RCM_RESOURCE_LINK_NEW);
			events_registered--;
		}
	}

	return (RCM_SUCCESS);
}

/*
 * ibpart_offline() - Offline IBPARTs on a specific node.
 */
static int
ibpart_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
	link_cache_t *node;

	rcm_log_message(RCM_TRACE1, "IBPART: offline(%s)\n", rsrc);

	/* Lock the cache and lookup the resource */
	(void) mutex_lock(&cache_lock);
	node = cache_lookup(hd, rsrc, CACHE_REFRESH);
	if (node == NULL) {
		/* should not happen because the resource is registered. */
		ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
		    "unrecognized resource");
		(void) mutex_unlock(&cache_lock);
		return (RCM_SUCCESS);
	}

	/*
	 * Inform consumers (IP interfaces) of associated IBPARTs to be offlined
	 */
	if (ibpart_consumer_offline(hd, node, errorp, flags, info) ==
	    RCM_SUCCESS) {
		rcm_log_message(RCM_DEBUG,
		    "IBPART: consumers agreed on offline\n");
	} else {
		ibpart_log_err(node->pc_linkid, errorp,
		    "consumers failed to offline");
		(void) mutex_unlock(&cache_lock);
		return (RCM_FAILURE);
	}

	/* Check if it's a query */
	if (flags & RCM_QUERY) {
		rcm_log_message(RCM_TRACE1,
		    "IBPART: offline query succeeded(%s)\n", rsrc);
		(void) mutex_unlock(&cache_lock);
		return (RCM_SUCCESS);
	}

	if (ibpart_offline_ibpart(node, IBPART_OFFLINED,
	    CACHE_NODE_OFFLINED) != RCM_SUCCESS) {
		ibpart_online_ibpart(node);
		ibpart_log_err(node->pc_linkid, errorp, "offline failed");
		(void) mutex_unlock(&cache_lock);
		return (RCM_FAILURE);
	}

	rcm_log_message(RCM_TRACE1, "IBPART: Offline succeeded(%s)\n", rsrc);
	(void) mutex_unlock(&cache_lock);
	return (RCM_SUCCESS);
}

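/*
 * Offline sequence summary (derived from ibpart_offline() above): the IP
 * consumers of every IBPART on the link are asked to offline first; only
 * if they all agree, and the request is not a bare RCM_QUERY, are the
 * active IBPARTs actually deleted via ibpart_offline_ibpart(). On
 * failure the already-offlined parts are rolled back with
 * ibpart_online_ibpart().
 */
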
/*
 * ibpart_undo_offline() - Undo offline of a previously offlined node.
 */
static int
ibpart_undo_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
	link_cache_t *node;

	rcm_log_message(RCM_TRACE1, "IBPART: online(%s)\n", rsrc);

	(void) mutex_lock(&cache_lock);
	node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
	if (node == NULL) {
		ibpart_log_err(DATALINK_INVALID_LINKID, errorp, "no such link");
		(void) mutex_unlock(&cache_lock);
		errno = ENOENT;
		return (RCM_FAILURE);
	}

	/* Check if no attempt should be made to online the link here */
	if (!(node->pc_state & CACHE_NODE_OFFLINED)) {
		ibpart_log_err(node->pc_linkid, errorp, "link not offlined");
		(void) mutex_unlock(&cache_lock);
		errno = ENOTSUP;
		return (RCM_SUCCESS);
	}

	ibpart_online_ibpart(node);

	/*
	 * Inform IP interfaces on associated IBPARTs to be onlined
	 */
	ibpart_consumer_online(hd, node, errorp, flags, info);

	node->pc_state &= ~CACHE_NODE_OFFLINED;
	rcm_log_message(RCM_TRACE1, "IBPART: online succeeded(%s)\n", rsrc);
	(void) mutex_unlock(&cache_lock);
	return (RCM_SUCCESS);
}

static void
ibpart_online_ibpart(link_cache_t *node)
{
	dl_ibpart_t *ibpart;
	dladm_status_t status;
	char errmsg[DLADM_STRSIZE];

	/*
	 * Try to bring on all offlined IBPARTs
	 */
	for (ibpart = node->pc_ibpart; ibpart != NULL;
	    ibpart = ibpart->dlib_next) {
		if (!(ibpart->dlib_flags & IBPART_OFFLINED))
			continue;

		rcm_log_message(RCM_TRACE1, "IBPART: online DLID %d\n",
		    ibpart->dlib_ibpart_id);
		if ((status = dladm_part_up(dld_handle,
		    ibpart->dlib_ibpart_id, 0)) != DLADM_STATUS_OK) {
			/*
			 * Print a warning message and continue to online
			 * other IBPARTs.
			 */
			rcm_log_message(RCM_WARNING,
			    _("IBPART: IBPART online failed (%u): %s\n"),
			    ibpart->dlib_ibpart_id,
			    dladm_status2str(status, errmsg));
		} else {
			ibpart->dlib_flags &= ~IBPART_OFFLINED;
		}
	}
}

static int
ibpart_offline_ibpart(link_cache_t *node, uint32_t flags,
    cache_node_state_t state)
{
	dl_ibpart_t *ibpart;
	dladm_status_t status;
	char errmsg[DLADM_STRSIZE];

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_offline_ibpart "
	    "(%s %u %u)\n", node->pc_resource, flags, state);

	/*
	 * Try to delete all explicitly created IBPARTs
	 */
	for (ibpart = node->pc_ibpart; ibpart != NULL;
	    ibpart = ibpart->dlib_next) {
		rcm_log_message(RCM_TRACE1, "IBPART: offline DLID %d\n",
		    ibpart->dlib_ibpart_id);
		if ((status = dladm_part_delete(dld_handle,
		    ibpart->dlib_ibpart_id, DLADM_OPT_ACTIVE)) !=
		    DLADM_STATUS_OK) {
			rcm_log_message(RCM_WARNING,
			    _("IBPART: IBPART offline failed (%u): %s\n"),
			    ibpart->dlib_ibpart_id,
			    dladm_status2str(status, errmsg));
			return (RCM_FAILURE);
		} else {
			rcm_log_message(RCM_TRACE1,
			    "IBPART: IBPART offline succeeded(%u)\n",
			    ibpart->dlib_ibpart_id);
			ibpart->dlib_flags |= flags;
		}
	}

	node->pc_state |= state;
	return (RCM_SUCCESS);
}

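/*
 * Note (assumption, based on the DLADM_OPT_ACTIVE flag above): only the
 * active instance of each IBPART is deleted here; the persistent
 * configuration is left intact, which is what lets ibpart_online_ibpart()
 * restore the parts later with dladm_part_up().
 */
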
/*
 * ibpart_get_info() - Gather usage information for this resource.
 */
static int
ibpart_get_info(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **usagep, char **errorp, nvlist_t *props, rcm_info_t **info)
{
	link_cache_t *node;

	rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s)\n", rsrc);

	(void) mutex_lock(&cache_lock);
	node = cache_lookup(hd, rsrc, CACHE_REFRESH);
	if (node == NULL) {
		rcm_log_message(RCM_INFO,
		    _("IBPART: get_info(%s) unrecognized resource\n"), rsrc);
		(void) mutex_unlock(&cache_lock);
		errno = ENOENT;
		return (RCM_FAILURE);
	}

	*usagep = ibpart_usage(node);
	(void) mutex_unlock(&cache_lock);
	if (*usagep == NULL) {
		/* most likely malloc failure */
		rcm_log_message(RCM_ERROR,
		    _("IBPART: get_info(%s) malloc failure\n"), rsrc);
		errno = ENOMEM;
		return (RCM_FAILURE);
	}

	/* Set client/role properties */
	(void) nvlist_add_string(props, RCM_CLIENT_NAME, "IBPART");

	rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s) info = %s\n",
	    rsrc, *usagep);
	return (RCM_SUCCESS);
}

/*
 * ibpart_suspend() - Nothing to do, always okay
 */
static int
ibpart_suspend(rcm_handle_t *hd, char *rsrc, id_t id, timespec_t *interval,
    uint_t flags, char **errorp, rcm_info_t **info)
{
	rcm_log_message(RCM_TRACE1, "IBPART: suspend(%s)\n", rsrc);
	return (RCM_SUCCESS);
}

/*
 * ibpart_resume() - Nothing to do, always okay
 */
static int
ibpart_resume(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
	rcm_log_message(RCM_TRACE1, "IBPART: resume(%s)\n", rsrc);
	return (RCM_SUCCESS);
}

/*
 * ibpart_consumer_remove()
 *
 *	Notify IBPART consumers to remove the cache.
 */
static int
ibpart_consumer_remove(rcm_handle_t *hd, link_cache_t *node, uint_t flags,
    rcm_info_t **info)
{
	dl_ibpart_t *ibpart = NULL;
	char rsrc[RCM_LINK_RESOURCE_MAX];
	int ret = RCM_SUCCESS;

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove (%s)\n",
	    node->pc_resource);

	for (ibpart = node->pc_ibpart; ibpart != NULL;
	    ibpart = ibpart->dlib_next) {
		/*
		 * This will only be called when the offline operation
		 * succeeds, so the IBPART consumers must have been offlined
		 * at this point.
		 */
		assert(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED);

		(void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
		    RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

		ret = rcm_notify_remove(hd, rsrc, flags, info);
		if (ret != RCM_SUCCESS) {
			rcm_log_message(RCM_WARNING,
			    _("IBPART: notify remove failed (%s)\n"), rsrc);
			break;
		}
	}

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove done\n");
	return (ret);
}

/*
 * ibpart_remove() - remove a resource from cache
 */
static int
ibpart_remove(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
	link_cache_t *node;
	int rv;

	rcm_log_message(RCM_TRACE1, "IBPART: remove(%s)\n", rsrc);

	(void) mutex_lock(&cache_lock);
	node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
	if (node == NULL) {
		rcm_log_message(RCM_INFO,
		    _("IBPART: remove(%s) unrecognized resource\n"), rsrc);
		(void) mutex_unlock(&cache_lock);
		errno = ENOENT;
		return (RCM_FAILURE);
	}

	/* remove the cached entry for the resource */
	cache_remove(node);
	(void) mutex_unlock(&cache_lock);

	rv = ibpart_consumer_remove(hd, node, flags, info);
	node_free(node);
	return (rv);
}

/*
 * ibpart_notify_event - Project private implementation to receive new
 *		resource events. It intercepts all new resource events.
 *		If the new resource is a network resource, pass up a
 *		notify for it too. The new resource need not be cached,
 *		since that is done again at register time.
 */
static int
ibpart_notify_event(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, nvlist_t *nvl, rcm_info_t **info)
{
	nvpair_t *nvp = NULL;
	datalink_id_t linkid;
	uint64_t id64;
	int rv = RCM_SUCCESS;

	rcm_log_message(RCM_TRACE1, "IBPART: notify_event(%s)\n", rsrc);

	if (strcmp(rsrc, RCM_RESOURCE_LINK_NEW) != 0) {
		ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
		    "unrecognized event");
		errno = EINVAL;
		return (RCM_FAILURE);
	}

	/* Update cache to reflect latest IBPARTs */
	if (cache_update(hd) < 0) {
		ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
		    "private Cache update failed");
		return (RCM_FAILURE);
	}

	/*
	 * Try best to recover all configuration.
	 */
	rcm_log_message(RCM_DEBUG, "IBPART: process_nvlist\n");
	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
		if (strcmp(nvpair_name(nvp), RCM_NV_LINKID) != 0)
			continue;

		if (nvpair_value_uint64(nvp, &id64) != 0) {
			ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
			    "cannot get linkid");
			rv = RCM_FAILURE;
			continue;
		}

		linkid = (datalink_id_t)id64;
		if (ibpart_configure(hd, linkid) != 0) {
			ibpart_log_err(linkid, errorp, "configuring failed");
			rv = RCM_FAILURE;
			continue;
		}

		/* Notify all IBPART consumers */
		if (ibpart_consumer_notify(hd, linkid, errorp, flags,
		    info) != 0) {
			ibpart_log_err(linkid, errorp,
			    "consumer notify failed");
			rv = RCM_FAILURE;
		}
	}

	rcm_log_message(RCM_TRACE1,
	    "IBPART: notify_event: link configuration complete\n");
	return (rv);
}

/*
 * ibpart_usage - Determine the usage of a link.
 *	    The returned buffer is owned by the caller, and the caller
 *	    must free it up when done.
 */
static char *
ibpart_usage(link_cache_t *node)
{
	dl_ibpart_t *ibpart;
	int nibpart;
	char *buf;
	const char *fmt;
	char *sep;
	char errmsg[DLADM_STRSIZE];
	char name[MAXLINKNAMELEN];
	dladm_status_t status;
	size_t bufsz;

	rcm_log_message(RCM_TRACE2, "IBPART: usage(%s)\n", node->pc_resource);

	assert(MUTEX_HELD(&cache_lock));
	if ((status = dladm_datalink_id2info(dld_handle, node->pc_linkid, NULL,
	    NULL, NULL, name, sizeof (name))) != DLADM_STATUS_OK) {
		rcm_log_message(RCM_ERROR,
		    _("IBPART: usage(%s) get link name failure(%s)\n"),
		    node->pc_resource, dladm_status2str(status, errmsg));
		return (NULL);
	}

	if (node->pc_state & CACHE_NODE_OFFLINED)
		fmt = _("%1$s offlined");
	else
		fmt = _("%1$s IBPART: ");

	/* TRANSLATION_NOTE: separator used between IBPART linkids */
	sep = _(", ");

	nibpart = 0;
	for (ibpart = node->pc_ibpart; ibpart != NULL;
	    ibpart = ibpart->dlib_next)
		nibpart++;

	/* space for IBPARTs and separators, plus message */
	bufsz = nibpart * (MAXLINKNAMELEN + strlen(sep)) +
	    strlen(fmt) + MAXLINKNAMELEN + 1;
	if ((buf = malloc(bufsz)) == NULL) {
		rcm_log_message(RCM_ERROR,
		    _("IBPART: usage(%s) malloc failure(%s)\n"),
		    node->pc_resource, strerror(errno));
		return (NULL);
	}
	(void) snprintf(buf, bufsz, fmt, name);

	if (node->pc_state & CACHE_NODE_OFFLINED) {
		/* Nothing else to do */
		rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
		    node->pc_resource, buf);
		return (buf);
	}

	for (ibpart = node->pc_ibpart; ibpart != NULL;
	    ibpart = ibpart->dlib_next) {
		rcm_log_message(RCM_DEBUG, "IBPART: %u\n",
		    ibpart->dlib_ibpart_id);

		if ((status = dladm_datalink_id2info(dld_handle,
		    ibpart->dlib_ibpart_id, NULL, NULL, NULL, name,
		    sizeof (name))) != DLADM_STATUS_OK) {
			rcm_log_message(RCM_ERROR,
			    _("IBPART: usage(%s) get ibpart %u name "
			    "failure(%s)\n"), node->pc_resource,
			    ibpart->dlib_ibpart_id,
			    dladm_status2str(status, errmsg));
			free(buf);
			return (NULL);
		}

		(void) strlcat(buf, name, bufsz);
		if (ibpart->dlib_next != NULL)
			(void) strlcat(buf, sep, bufsz);
	}

	rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
	    node->pc_resource, buf);

	return (buf);
}

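/*
 * Example output (illustrative): for a physical link named "net0"
 * carrying IBPARTs "ibp0" and "ibp1", ibpart_usage() builds the string
 * "net0 IBPART: ibp0, ibp1"; for an offlined node it simply returns
 * "net0 offlined".
 */
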
/*
 * Cache management routines. All cache management functions should be
 * called with cache_lock held.
 */

/*
 * cache_lookup() - Get a cache node for a resource.
 *	      Call with cache lock held.
 *
 * This ensures that the cache is consistent with the system state and
 * returns a pointer to the cache element corresponding to the resource.
 */
static link_cache_t *
cache_lookup(rcm_handle_t *hd, char *rsrc, char options)
{
	link_cache_t *node;

	rcm_log_message(RCM_TRACE2, "IBPART: cache lookup(%s)\n", rsrc);

	assert(MUTEX_HELD(&cache_lock));
	if (options & CACHE_REFRESH) {
		/* drop lock since update locks cache again */
		(void) mutex_unlock(&cache_lock);
		(void) cache_update(hd);
		(void) mutex_lock(&cache_lock);
	}

	node = cache_head.pc_next;
	for (; node != &cache_tail; node = node->pc_next) {
		if (strcmp(rsrc, node->pc_resource) == 0) {
			rcm_log_message(RCM_TRACE2,
			    "IBPART: cache lookup succeeded(%s)\n", rsrc);
			return (node);
		}
	}
	return (NULL);
}

/*
 * node_free - Free a node from the cache
 */
static void
node_free(link_cache_t *node)
{
	dl_ibpart_t *ibpart, *next;

	if (node != NULL) {
		free(node->pc_resource);

		/* free the IBPART list */
		for (ibpart = node->pc_ibpart; ibpart != NULL;
		    ibpart = next) {
			next = ibpart->dlib_next;
			free(ibpart);
		}
		free(node);
	}
}

/*
 * cache_insert - Insert a resource node in cache
 */
static void
cache_insert(link_cache_t *node)
{
	assert(MUTEX_HELD(&cache_lock));

	/* insert at the head for best performance */
	node->pc_next = cache_head.pc_next;
	node->pc_prev = &cache_head;

	node->pc_next->pc_prev = node;
	node->pc_prev->pc_next = node;
}

/*
 * cache_remove() - Remove a resource node from cache.
 */
static void
cache_remove(link_cache_t *node)
{
	assert(MUTEX_HELD(&cache_lock));
	node->pc_next->pc_prev = node->pc_prev;
	node->pc_prev->pc_next = node->pc_next;
	node->pc_next = NULL;
	node->pc_prev = NULL;
}

typedef struct ibpart_update_arg_s {
	rcm_handle_t	*hd;
	int		retval;
} ibpart_update_arg_t;

/*
 * ibpart_update() - Update physical interface properties
 */
static int
ibpart_update(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
{
	ibpart_update_arg_t *ibpart_update_argp = arg;
	rcm_handle_t *hd = ibpart_update_argp->hd;
	link_cache_t *node;
	dl_ibpart_t *ibpart;
	char *rsrc;
	dladm_ib_attr_t ibpart_attr;
	dladm_status_t status;
	char errmsg[DLADM_STRSIZE];
	boolean_t newnode = B_FALSE;
	int ret = -1;

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update(%u)\n", ibpartid);

	assert(MUTEX_HELD(&cache_lock));
	status = dladm_part_info(handle, ibpartid, &ibpart_attr,
	    DLADM_OPT_ACTIVE);
	if (status != DLADM_STATUS_OK) {
		rcm_log_message(RCM_TRACE1,
		    "IBPART: ibpart_update() cannot get ibpart information "
		    "for %u(%s)\n", ibpartid, dladm_status2str(status, errmsg));
		return (DLADM_WALK_CONTINUE);
	}

	if (ibpart_attr.dia_physlinkid == DATALINK_INVALID_LINKID) {
		/*
		 * Skip the IB port nodes.
		 */
		rcm_log_message(RCM_TRACE1,
		    "IBPART: ibpart_update(): skip the PORT nodes %u\n",
		    ibpartid);
		return (DLADM_WALK_CONTINUE);
	}

	rsrc = malloc(RCM_LINK_RESOURCE_MAX);
	if (rsrc == NULL) {
		rcm_log_message(RCM_ERROR, _("IBPART: malloc error(%s): %u\n"),
		    strerror(errno), ibpartid);
		goto done;
	}

	(void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
	    RCM_LINK_PREFIX, ibpart_attr.dia_physlinkid);

	node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
	if (node != NULL) {
		rcm_log_message(RCM_DEBUG,
		    "IBPART: %s already registered (ibpartid:%d)\n",
		    rsrc, ibpart_attr.dia_partlinkid);
		free(rsrc);
	} else {
		rcm_log_message(RCM_DEBUG,
		    "IBPART: %s is a new resource (ibpartid:%d)\n",
		    rsrc, ibpart_attr.dia_partlinkid);
		if ((node = calloc(1, sizeof (link_cache_t))) == NULL) {
			free(rsrc);
			rcm_log_message(RCM_ERROR, _("IBPART: calloc: %s\n"),
			    strerror(errno));
			goto done;
		}

		node->pc_resource = rsrc;
		node->pc_ibpart = NULL;
		node->pc_linkid = ibpart_attr.dia_physlinkid;
		node->pc_state |= CACHE_NODE_NEW;
		newnode = B_TRUE;
	}

	for (ibpart = node->pc_ibpart; ibpart != NULL;
	    ibpart = ibpart->dlib_next) {
		if (ibpart->dlib_ibpart_id == ibpartid) {
			ibpart->dlib_flags &= ~IBPART_STALE;
			break;
		}
	}

	if (ibpart == NULL) {
		if ((ibpart = calloc(1, sizeof (dl_ibpart_t))) == NULL) {
			rcm_log_message(RCM_ERROR, _("IBPART: malloc: %s\n"),
			    strerror(errno));
			if (newnode) {
				free(rsrc);
				free(node);
			}
			goto done;
		}

		ibpart->dlib_ibpart_id = ibpartid;
		ibpart->dlib_next = node->pc_ibpart;
		ibpart->dlib_prev = NULL;
		if (node->pc_ibpart != NULL)
			node->pc_ibpart->dlib_prev = ibpart;
		node->pc_ibpart = ibpart;
	}

	node->pc_state &= ~CACHE_NODE_STALE;

	if (newnode)
		cache_insert(node);

	rcm_log_message(RCM_TRACE3, "IBPART: ibpart_update: succeeded(%u)\n",
	    ibpartid);
	ret = 0;
done:
	ibpart_update_argp->retval = ret;
	return (ret == 0 ? DLADM_WALK_CONTINUE : DLADM_WALK_TERMINATE);
}

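/*
 * Walker contract note (derived from the code above): ibpart_update() is
 * invoked once per partition link by dladm_walk_datalink_id();
 * DLADM_WALK_CONTINUE keeps the walk going, while DLADM_WALK_TERMINATE,
 * returned after recording a failure in retval, stops it early.
 */
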
/*
 * ibpart_update_all() - Determine all IBPART links in the system
 */
static int
ibpart_update_all(rcm_handle_t *hd)
{
	ibpart_update_arg_t arg = {NULL, 0};

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update_all\n");

	assert(MUTEX_HELD(&cache_lock));
	arg.hd = hd;
	(void) dladm_walk_datalink_id(ibpart_update, dld_handle, &arg,
	    DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_ACTIVE);
	return (arg.retval);
}

/*
 * cache_update() - Update cache with latest interface info
 */
static int
cache_update(rcm_handle_t *hd)
{
	link_cache_t *node, *nnode;
	dl_ibpart_t *ibpart;
	int rv;

	rcm_log_message(RCM_TRACE2, "IBPART: cache_update\n");

	(void) mutex_lock(&cache_lock);

	/* first we walk the entire cache, marking each entry stale */
	node = cache_head.pc_next;
	for (; node != &cache_tail; node = node->pc_next) {
		node->pc_state |= CACHE_NODE_STALE;
		for (ibpart = node->pc_ibpart; ibpart != NULL;
		    ibpart = ibpart->dlib_next)
			ibpart->dlib_flags |= IBPART_STALE;
	}

	rv = ibpart_update_all(hd);

	/*
	 * Continue to delete all stale nodes from the cache even if
	 * ibpart_update_all() failed. Unregister links that are not
	 * offlined and still in the cache.
	 */
	for (node = cache_head.pc_next; node != &cache_tail; node = nnode) {
		dl_ibpart_t *ibpart, *next;

		for (ibpart = node->pc_ibpart; ibpart != NULL; ibpart = next) {
			next = ibpart->dlib_next;

			/* clear stale IBPARTs */
			if (ibpart->dlib_flags & IBPART_STALE) {
				if (ibpart->dlib_prev != NULL)
					ibpart->dlib_prev->dlib_next = next;
				else
					node->pc_ibpart = next;

				if (next != NULL)
					next->dlib_prev = ibpart->dlib_prev;
				free(ibpart);
			}
		}

		nnode = node->pc_next;
		if (node->pc_state & CACHE_NODE_STALE) {
			(void) rcm_unregister_interest(hd, node->pc_resource,
			    0);
			rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
			    node->pc_resource);
			assert(node->pc_ibpart == NULL);
			cache_remove(node);
			node_free(node);
			continue;
		}

		if (!(node->pc_state & CACHE_NODE_NEW))
			continue;

		if (rcm_register_interest(hd, node->pc_resource, 0, NULL) !=
		    RCM_SUCCESS) {
			rcm_log_message(RCM_ERROR,
			    _("IBPART: failed to register %s\n"),
			    node->pc_resource);
			rv = -1;
		} else {
			rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
			    node->pc_resource);
			node->pc_state &= ~CACHE_NODE_NEW;
		}
	}

	(void) mutex_unlock(&cache_lock);
	return (rv);
}

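/*
 * Mark-and-sweep summary (derived from cache_update() above): every node
 * and IBPART is first flagged stale, ibpart_update_all() then clears the
 * flag on everything still present in the system, and whatever remains
 * stale afterwards has disappeared and is unregistered and freed. Nodes
 * still marked CACHE_NODE_NEW are registered with the framework for the
 * first time.
 */
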
/*
 * cache_free() - Empty the cache
 */
static void
cache_free()
{
	link_cache_t *node;

	rcm_log_message(RCM_TRACE2, "IBPART: cache_free\n");

	(void) mutex_lock(&cache_lock);
	node = cache_head.pc_next;
	while (node != &cache_tail) {
		cache_remove(node);
		node_free(node);
		node = cache_head.pc_next;
	}
	(void) mutex_unlock(&cache_lock);
}

/*
 * ibpart_log_err() - RCM error log wrapper
 */
static void
ibpart_log_err(datalink_id_t linkid, char **errorp, char *errmsg)
{
	char link[MAXLINKNAMELEN];
	char errstr[DLADM_STRSIZE];
	dladm_status_t status;
	int len;
	const char *errfmt;
	char *error;

	link[0] = '\0';
	if (linkid != DATALINK_INVALID_LINKID) {
		char rsrc[RCM_LINK_RESOURCE_MAX];

		(void) snprintf(rsrc, sizeof (rsrc), "%s/%u",
		    RCM_LINK_PREFIX, linkid);

		rcm_log_message(RCM_ERROR, _("IBPART: %s(%s)\n"), errmsg, rsrc);
		if ((status = dladm_datalink_id2info(dld_handle, linkid, NULL,
		    NULL, NULL, link, sizeof (link))) != DLADM_STATUS_OK) {
			rcm_log_message(RCM_WARNING,
			    _("IBPART: cannot get link name for (%s) %s\n"),
			    rsrc, dladm_status2str(status, errstr));
		}
	} else {
		rcm_log_message(RCM_ERROR, _("IBPART: %s\n"), errmsg);
	}

	errfmt = strlen(link) > 0 ? _("IBPART: %s(%s)") : _("IBPART: %s");
	len = strlen(errfmt) + strlen(errmsg) + MAXLINKNAMELEN + 1;
	if ((error = malloc(len)) != NULL) {
		if (strlen(link) > 0)
			(void) snprintf(error, len, errfmt, errmsg, link);
		else
			(void) snprintf(error, len, errfmt, errmsg);
	}

	if (errorp != NULL)
		*errorp = error;
}

/*
 * ibpart_consumer_online()
 *
 *	Notify online to IBPART consumers.
 */
static void
ibpart_consumer_online(rcm_handle_t *hd, link_cache_t *node, char **errorp,
    uint_t flags, rcm_info_t **info)
{
	dl_ibpart_t *ibpart;
	char rsrc[RCM_LINK_RESOURCE_MAX];

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online (%s)\n",
	    node->pc_resource);

	for (ibpart = node->pc_ibpart; ibpart != NULL;
	    ibpart = ibpart->dlib_next) {
		if (!(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED))
			continue;

		(void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
		    RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

		if (rcm_notify_online(hd, rsrc, flags, info) == RCM_SUCCESS)
			ibpart->dlib_flags &= ~IBPART_CONSUMER_OFFLINED;
	}

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online done\n");
}

/*
 * ibpart_consumer_offline()
 *
 *	Offline IBPART consumers.
 */
static int
ibpart_consumer_offline(rcm_handle_t *hd, link_cache_t *node, char **errorp,
    uint_t flags, rcm_info_t **info)
{
	dl_ibpart_t *ibpart;
	char rsrc[RCM_LINK_RESOURCE_MAX];
	int ret = RCM_SUCCESS;

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline (%s)\n",
	    node->pc_resource);

	for (ibpart = node->pc_ibpart; ibpart != NULL;
	    ibpart = ibpart->dlib_next) {
		(void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
		    RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

		ret = rcm_request_offline(hd, rsrc, flags, info);
		if (ret != RCM_SUCCESS)
			break;

		ibpart->dlib_flags |= IBPART_CONSUMER_OFFLINED;
	}

	if (ibpart != NULL)
		ibpart_consumer_online(hd, node, errorp, flags, info);

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline done\n");
	return (ret);
}

/*
 * Send RCM_RESOURCE_LINK_NEW events to other modules about new IBPARTs.
 * Return 0 on success, -1 on failure.
 */
static int
ibpart_notify_new_ibpart(rcm_handle_t *hd, char *rsrc)
{
	link_cache_t *node;
	dl_ibpart_t *ibpart;
	nvlist_t *nvl = NULL;
	uint64_t id;
	int ret = -1;

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart (%s)\n",
	    rsrc);

	(void) mutex_lock(&cache_lock);
	if ((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) == NULL) {
		(void) mutex_unlock(&cache_lock);
		return (0);
	}

	if (nvlist_alloc(&nvl, 0, 0) != 0) {
		(void) mutex_unlock(&cache_lock);
		rcm_log_message(RCM_WARNING,
		    _("IBPART: failed to allocate nvlist\n"));
		goto done;
	}

	for (ibpart = node->pc_ibpart; ibpart != NULL;
	    ibpart = ibpart->dlib_next) {
		rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart "
		    "add (%u)\n", ibpart->dlib_ibpart_id);

		id = ibpart->dlib_ibpart_id;
		if (nvlist_add_uint64(nvl, RCM_NV_LINKID, id) != 0) {
			rcm_log_message(RCM_ERROR,
			    _("IBPART: failed to construct nvlist\n"));
			(void) mutex_unlock(&cache_lock);
			goto done;
		}
	}
	(void) mutex_unlock(&cache_lock);

	if (rcm_notify_event(hd, RCM_RESOURCE_LINK_NEW, 0, nvl, NULL) !=
	    RCM_SUCCESS) {
		rcm_log_message(RCM_ERROR,
		    _("IBPART: failed to notify %s event for %s\n"),
		    RCM_RESOURCE_LINK_NEW, node->pc_resource);
		goto done;
	}

	ret = 0;
done:
	nvlist_free(nvl);
	return (ret);
}

/*
 * ibpart_consumer_notify() - Notify consumers of IBPARTs coming back online.
 */
static int
ibpart_consumer_notify(rcm_handle_t *hd, datalink_id_t linkid, char **errorp,
    uint_t flags, rcm_info_t **info)
{
	char rsrc[RCM_LINK_RESOURCE_MAX];
	link_cache_t *node;

	/* Check for the interface in the cache */
	(void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u", RCM_LINK_PREFIX,
	    linkid);

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify(%s)\n",
	    rsrc);

	/*
	 * Inform IP consumers of the new link.
	 */
	if (ibpart_notify_new_ibpart(hd, rsrc) != 0) {
		(void) mutex_lock(&cache_lock);
		if ((node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH)) !=
		    NULL) {
			(void) ibpart_offline_ibpart(node, IBPART_STALE,
			    CACHE_NODE_STALE);
		}
		(void) mutex_unlock(&cache_lock);
		rcm_log_message(RCM_TRACE2,
		    "IBPART: ibpart_notify_new_ibpart failed(%s)\n", rsrc);
		return (-1);
	}

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify "
	    "succeeded\n");
	return (0);
}

typedef struct ibpart_up_arg_s {
	datalink_id_t	linkid;
	int		retval;
} ibpart_up_arg_t;

static int
ibpart_up(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
{
	ibpart_up_arg_t *ibpart_up_argp = arg;
	dladm_status_t status;
	dladm_ib_attr_t ibpart_attr;
	char errmsg[DLADM_STRSIZE];

	status = dladm_part_info(handle, ibpartid, &ibpart_attr,
	    DLADM_OPT_PERSIST);
	if (status != DLADM_STATUS_OK) {
		rcm_log_message(RCM_TRACE1,
		    "IBPART: ibpart_up(): cannot get information for IBPART "
		    "%u (%s)\n", ibpartid, dladm_status2str(status, errmsg));
		return (DLADM_WALK_CONTINUE);
	}

	if (ibpart_attr.dia_physlinkid != ibpart_up_argp->linkid)
		return (DLADM_WALK_CONTINUE);

	rcm_log_message(RCM_TRACE3, "IBPART: ibpart_up(%u)\n", ibpartid);
	if ((status = dladm_part_up(handle, ibpartid, 0)) == DLADM_STATUS_OK)
		return (DLADM_WALK_CONTINUE);

	/*
	 * Print a warning message and continue to bring up other IBPARTs.
	 */
	rcm_log_message(RCM_WARNING,
	    _("IBPART: IBPART up failed (%u): %s\n"),
	    ibpartid, dladm_status2str(status, errmsg));

	ibpart_up_argp->retval = -1;
	return (DLADM_WALK_CONTINUE);
}

/*
 * ibpart_configure() - Configure IBPARTs over a physical link after it
 * attaches.
 */
static int
ibpart_configure(rcm_handle_t *hd, datalink_id_t linkid)
{
	char rsrc[RCM_LINK_RESOURCE_MAX];
	link_cache_t *node;
	ibpart_up_arg_t arg = {DATALINK_INVALID_LINKID, 0};

	/* Check for the IBPARTs in the cache */
	(void) snprintf(rsrc, sizeof (rsrc), "%s/%u", RCM_LINK_PREFIX, linkid);

	rcm_log_message(RCM_TRACE2, "IBPART: ibpart_configure(%s)\n", rsrc);

	/* Check if the link is new or was previously offlined */
	(void) mutex_lock(&cache_lock);
	if (((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) != NULL) &&
	    (!(node->pc_state & CACHE_NODE_OFFLINED))) {
		rcm_log_message(RCM_TRACE2,
		    "IBPART: Skipping configured interface(%s)\n", rsrc);
		(void) mutex_unlock(&cache_lock);
		return (0);
	}
	(void) mutex_unlock(&cache_lock);

	arg.linkid = linkid;
	(void) dladm_walk_datalink_id(ibpart_up, dld_handle, &arg,
	    DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_PERSIST);

	if (arg.retval == 0) {
		rcm_log_message(RCM_TRACE2,
		    "IBPART: ibpart_configure succeeded(%s)\n", rsrc);
	}
	return (arg.retval);
}

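/*
 * End-to-end flow sketch (illustrative): when a physical IB link
 * attaches, the framework posts an RCM_RESOURCE_LINK_NEW event to
 * ibpart_notify_event(), which calls ibpart_configure() to bring up the
 * persistent partitions over that link via ibpart_up(), and then
 * ibpart_consumer_notify() so that IP consumers can replumb themselves
 * on the restored IBPARTs.
 */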