/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * This RCM module adds support to the RCM framework for IBPART links.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <synch.h>
#include <assert.h>
#include <strings.h>
#include "rcm_module.h"
#include <libintl.h>
#include <libdllink.h>
#include <libdlib.h>
#include <libdlpi.h>
/*
 * Definitions
 */
#define _(x)    gettext(x)

/* Some generic well-knowns and defaults used in this module */
#define RCM_LINK_PREFIX         "SUNW_datalink"  /* RCM datalink name prefix */
#define RCM_LINK_RESOURCE_MAX   (13 + LINKID_STR_WIDTH)

/* IBPART link flags */
typedef enum {
        IBPART_OFFLINED          = 0x1,
        IBPART_CONSUMER_OFFLINED = 0x2,
        IBPART_STALE             = 0x4
} ibpart_flag_t;

/* link representation */
typedef struct dl_ibpart {
        struct dl_ibpart        *dlib_next;     /* next IBPART on this link */
        struct dl_ibpart        *dlib_prev;     /* prev IBPART on this link */
        datalink_id_t           dlib_ibpart_id;
        ibpart_flag_t           dlib_flags;     /* IBPART link flags */
} dl_ibpart_t;

/* IBPART Cache state flags */
typedef enum {
        CACHE_NODE_STALE        = 0x1,          /* stale cached data */
        CACHE_NODE_NEW          = 0x2,          /* new cached nodes */
        CACHE_NODE_OFFLINED     = 0x4           /* nodes offlined */
} cache_node_state_t;

/* Network Cache lookup options */
#define CACHE_NO_REFRESH        0x1             /* cache refresh not needed */
#define CACHE_REFRESH           0x2             /* refresh cache */

/* Cache element */
typedef struct link_cache {
        struct link_cache       *pc_next;       /* next cached resource */
        struct link_cache       *pc_prev;       /* prev cached resource */
        char                    *pc_resource;   /* resource name */
        datalink_id_t           pc_linkid;      /* linkid */
        dl_ibpart_t             *pc_ibpart;     /* IBPART list on this link */
        cache_node_state_t      pc_state;       /* cache state flags */
} link_cache_t;

/*
 * Global cache for network IBPARTs
 */
static link_cache_t     cache_head;
static link_cache_t     cache_tail;
static mutex_t          cache_lock;
static int              events_registered = 0;

static dladm_handle_t   dld_handle = NULL;
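/*
 * The cache above is a doubly-linked list anchored by the cache_head and
 * cache_tail sentinel nodes, so insertion and removal never have to
 * special-case an empty list.  Every traversal or modification of the
 * list, and of the per-node IBPART lists, must be done with cache_lock
 * held.
 */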
/*
 * RCM module interface prototypes
 */
static int              ibpart_register(rcm_handle_t *);
static int              ibpart_unregister(rcm_handle_t *);
static int              ibpart_get_info(rcm_handle_t *, char *, id_t, uint_t,
                            char **, char **, nvlist_t *, rcm_info_t **);
static int              ibpart_suspend(rcm_handle_t *, char *, id_t,
                            timespec_t *, uint_t, char **, rcm_info_t **);
static int              ibpart_resume(rcm_handle_t *, char *, id_t, uint_t,
                            char **, rcm_info_t **);
static int              ibpart_offline(rcm_handle_t *, char *, id_t, uint_t,
                            char **, rcm_info_t **);
static int              ibpart_undo_offline(rcm_handle_t *, char *, id_t,
                            uint_t, char **, rcm_info_t **);
static int              ibpart_remove(rcm_handle_t *, char *, id_t, uint_t,
                            char **, rcm_info_t **);
static int              ibpart_notify_event(rcm_handle_t *, char *, id_t,
                            uint_t, char **, nvlist_t *, rcm_info_t **);
static int              ibpart_configure(rcm_handle_t *, datalink_id_t);

/* Module private routines */
static void             cache_free();
static int              cache_update(rcm_handle_t *);
static void             cache_remove(link_cache_t *);
static void             node_free(link_cache_t *);
static void             cache_insert(link_cache_t *);
static link_cache_t     *cache_lookup(rcm_handle_t *, char *, char);
static int              ibpart_consumer_offline(rcm_handle_t *, link_cache_t *,
                            char **, uint_t, rcm_info_t **);
static void             ibpart_consumer_online(rcm_handle_t *, link_cache_t *,
                            char **, uint_t, rcm_info_t **);
static int              ibpart_offline_ibpart(link_cache_t *, uint32_t,
                            cache_node_state_t);
static void             ibpart_online_ibpart(link_cache_t *);
static char             *ibpart_usage(link_cache_t *);
static void             ibpart_log_err(datalink_id_t, char **, char *);
static int              ibpart_consumer_notify(rcm_handle_t *, datalink_id_t,
                            char **, uint_t, rcm_info_t **);
/* Module-Private data */
static struct rcm_mod_ops ibpart_ops =
{
        RCM_MOD_OPS_VERSION,
        ibpart_register,
        ibpart_unregister,
        ibpart_get_info,
        ibpart_suspend,
        ibpart_resume,
        ibpart_offline,
        ibpart_undo_offline,
        ibpart_remove,
        NULL,
        NULL,
        ibpart_notify_event
};
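/*
 * The two NULL slots above are the capacity-change entry points of
 * struct rcm_mod_ops, which this module does not implement.
 */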
/*
 * rcm_mod_init() - Update registrations, and return the ops structure.
 */
struct rcm_mod_ops *
rcm_mod_init(void)
{
        char errmsg[DLADM_STRSIZE];
        dladm_status_t status;

        rcm_log_message(RCM_TRACE1, "IBPART: mod_init\n");

        cache_head.pc_next = &cache_tail;
        cache_head.pc_prev = NULL;
        cache_tail.pc_prev = &cache_head;
        cache_tail.pc_next = NULL;
        (void) mutex_init(&cache_lock, 0, NULL);

        if ((status = dladm_open(&dld_handle)) != DLADM_STATUS_OK) {
                rcm_log_message(RCM_WARNING,
                    "IBPART: mod_init failed: cannot open datalink "
                    "handle: %s\n", dladm_status2str(status, errmsg));
                return (NULL);
        }

        /* Return the ops vectors */
        return (&ibpart_ops);
}
184 * rcm_mod_info() - Return a string describing this module.
186 const char *
187 rcm_mod_info(void)
189 rcm_log_message(RCM_TRACE1, "IBPART: mod_info\n");
191 return ("IBPART module");
/*
 * rcm_mod_fini() - Destroy the network IBPART cache.
 */
int
rcm_mod_fini(void)
{
        rcm_log_message(RCM_TRACE1, "IBPART: mod_fini\n");

        /*
         * Note that ibpart_unregister() does not seem to be called anywhere,
         * therefore we free the cache nodes here. In theory we should call
         * rcm_unregister_interest() for each node before we free it, but the
         * framework does not provide the rcm_handle to allow us to do so.
         */
        cache_free();
        (void) mutex_destroy(&cache_lock);

        dladm_close(dld_handle);
        return (RCM_SUCCESS);
}
/*
 * ibpart_register() - Make sure the cache is properly sync'ed, and its
 *                     registrations are in order.
 */
static int
ibpart_register(rcm_handle_t *hd)
{
        rcm_log_message(RCM_TRACE1, "IBPART: register\n");

        if (cache_update(hd) < 0)
                return (RCM_FAILURE);

        /*
         * Need to register interest in all new resources
         * getting attached, so we get attach event notifications
         */
        if (!events_registered) {
                if (rcm_register_event(hd, RCM_RESOURCE_LINK_NEW, 0, NULL)
                    != RCM_SUCCESS) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to register %s\n"),
                            RCM_RESOURCE_LINK_NEW);
                        return (RCM_FAILURE);
                } else {
                        rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
                            RCM_RESOURCE_LINK_NEW);
                        events_registered++;
                }
        }

        return (RCM_SUCCESS);
}
/*
 * ibpart_unregister() - Walk the cache, unregistering all the networks.
 */
static int
ibpart_unregister(rcm_handle_t *hd)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE1, "IBPART: unregister\n");

        /* Walk the cache, unregistering everything */
        (void) mutex_lock(&cache_lock);
        node = cache_head.pc_next;
        while (node != &cache_tail) {
                if (rcm_unregister_interest(hd, node->pc_resource, 0)
                    != RCM_SUCCESS) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to unregister %s\n"),
                            node->pc_resource);
                        (void) mutex_unlock(&cache_lock);
                        return (RCM_FAILURE);
                }
                cache_remove(node);
                node_free(node);
                node = cache_head.pc_next;
        }
        (void) mutex_unlock(&cache_lock);

        /*
         * Unregister interest in all new resources
         */
        if (events_registered) {
                if (rcm_unregister_event(hd, RCM_RESOURCE_LINK_NEW, 0)
                    != RCM_SUCCESS) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to unregister %s\n"),
                            RCM_RESOURCE_LINK_NEW);
                        return (RCM_FAILURE);
                } else {
                        rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
                            RCM_RESOURCE_LINK_NEW);
                        events_registered--;
                }
        }

        return (RCM_SUCCESS);
}
/*
 * ibpart_offline() - Offline IBPARTs on a specific node.
 */
static int
ibpart_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE1, "IBPART: offline(%s)\n", rsrc);

        /* Lock the cache and lookup the resource */
        (void) mutex_lock(&cache_lock);
        node = cache_lookup(hd, rsrc, CACHE_REFRESH);
        if (node == NULL) {
                /* should not happen because the resource is registered. */
                ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                    "unrecognized resource");
                (void) mutex_unlock(&cache_lock);
                return (RCM_SUCCESS);
        }

        /*
         * Inform consumers (IP interfaces) of associated IBPARTs to be
         * offlined
         */
        if (ibpart_consumer_offline(hd, node, errorp, flags, info) ==
            RCM_SUCCESS) {
                rcm_log_message(RCM_DEBUG,
                    "IBPART: consumers agreed on offline\n");
        } else {
                ibpart_log_err(node->pc_linkid, errorp,
                    "consumers failed to offline");
                (void) mutex_unlock(&cache_lock);
                return (RCM_FAILURE);
        }

        /* Check if it's a query */
        if (flags & RCM_QUERY) {
                rcm_log_message(RCM_TRACE1,
                    "IBPART: offline query succeeded(%s)\n", rsrc);
                (void) mutex_unlock(&cache_lock);
                return (RCM_SUCCESS);
        }

        if (ibpart_offline_ibpart(node, IBPART_OFFLINED, CACHE_NODE_OFFLINED) !=
            RCM_SUCCESS) {
                ibpart_online_ibpart(node);
                ibpart_log_err(node->pc_linkid, errorp, "offline failed");
                (void) mutex_unlock(&cache_lock);
                return (RCM_FAILURE);
        }

        rcm_log_message(RCM_TRACE1, "IBPART: Offline succeeded(%s)\n", rsrc);
        (void) mutex_unlock(&cache_lock);
        return (RCM_SUCCESS);
}
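/*
 * The offline sequence above is two-phase: the consumers (IP interfaces
 * plumbed on the partitions) are asked to offline first, and only if they
 * all agree are the partition links themselves deleted.  An RCM_QUERY
 * request stops after the first phase and leaves everything online.
 */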
/*
 * ibpart_undo_offline() - Undo offline of a previously offlined node.
 */
/*ARGSUSED*/
static int
ibpart_undo_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE1, "IBPART: online(%s)\n", rsrc);

        (void) mutex_lock(&cache_lock);
        node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
        if (node == NULL) {
                ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                    "no such link");
                (void) mutex_unlock(&cache_lock);
                errno = ENOENT;
                return (RCM_FAILURE);
        }

        /* Check if no attempt should be made to online the link here */
        if (!(node->pc_state & CACHE_NODE_OFFLINED)) {
                ibpart_log_err(node->pc_linkid, errorp, "link not offlined");
                (void) mutex_unlock(&cache_lock);
                errno = ENOTSUP;
                return (RCM_SUCCESS);
        }

        ibpart_online_ibpart(node);

        /*
         * Inform IP interfaces on associated IBPARTs to be onlined
         */
        ibpart_consumer_online(hd, node, errorp, flags, info);

        node->pc_state &= ~CACHE_NODE_OFFLINED;
        rcm_log_message(RCM_TRACE1, "IBPART: online succeeded(%s)\n", rsrc);
        (void) mutex_unlock(&cache_lock);
        return (RCM_SUCCESS);
}
static void
ibpart_online_ibpart(link_cache_t *node)
{
        dl_ibpart_t *ibpart;
        dladm_status_t status;
        char errmsg[DLADM_STRSIZE];

        /*
         * Try to bring on all offlined IBPARTs
         */
        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                if (!(ibpart->dlib_flags & IBPART_OFFLINED))
                        continue;

                rcm_log_message(RCM_TRACE1, "IBPART: online DLID %d\n",
                    ibpart->dlib_ibpart_id);
                if ((status = dladm_part_up(dld_handle,
                    ibpart->dlib_ibpart_id, 0)) != DLADM_STATUS_OK) {
                        /*
                         * Print a warning message and continue to online
                         * other IBPARTs.
                         */
                        rcm_log_message(RCM_WARNING,
                            _("IBPART: IBPART online failed (%u): %s\n"),
                            ibpart->dlib_ibpart_id,
                            dladm_status2str(status, errmsg));
                } else {
                        ibpart->dlib_flags &= ~IBPART_OFFLINED;
                }
        }
}
static int
ibpart_offline_ibpart(link_cache_t *node, uint32_t flags,
    cache_node_state_t state)
{
        dl_ibpart_t *ibpart;
        dladm_status_t status;
        char errmsg[DLADM_STRSIZE];

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_offline_ibpart "
            "(%s %u %u)\n", node->pc_resource, flags, state);

        /*
         * Try to delete all explicitly created IBPARTs
         */
        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                rcm_log_message(RCM_TRACE1, "IBPART: offline DLID %d\n",
                    ibpart->dlib_ibpart_id);
                if ((status = dladm_part_delete(dld_handle,
                    ibpart->dlib_ibpart_id, DLADM_OPT_ACTIVE)) !=
                    DLADM_STATUS_OK) {
                        rcm_log_message(RCM_WARNING,
                            _("IBPART: IBPART offline failed (%u): %s\n"),
                            ibpart->dlib_ibpart_id,
                            dladm_status2str(status, errmsg));
                        return (RCM_FAILURE);
                } else {
                        rcm_log_message(RCM_TRACE1,
                            "IBPART: IBPART offline succeeded(%u)\n",
                            ibpart->dlib_ibpart_id);
                        ibpart->dlib_flags |= flags;
                }
        }

        node->pc_state |= state;
        return (RCM_SUCCESS);
}
/*
 * ibpart_get_info() - Gather usage information for this resource.
 */
/*ARGSUSED*/
static int
ibpart_get_info(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **usagep, char **errorp, nvlist_t *props, rcm_info_t **info)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s)\n", rsrc);

        (void) mutex_lock(&cache_lock);
        node = cache_lookup(hd, rsrc, CACHE_REFRESH);
        if (node == NULL) {
                rcm_log_message(RCM_INFO,
                    _("IBPART: get_info(%s) unrecognized resource\n"), rsrc);
                (void) mutex_unlock(&cache_lock);
                errno = ENOENT;
                return (RCM_FAILURE);
        }

        *usagep = ibpart_usage(node);
        (void) mutex_unlock(&cache_lock);
        if (*usagep == NULL) {
                /* most likely malloc failure */
                rcm_log_message(RCM_ERROR,
                    _("IBPART: get_info(%s) malloc failure\n"), rsrc);
                errno = ENOMEM;
                return (RCM_FAILURE);
        }

        /* Set client/role properties */
        (void) nvlist_add_string(props, RCM_CLIENT_NAME, "IBPART");

        rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s) info = %s\n",
            rsrc, *usagep);
        return (RCM_SUCCESS);
}
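/*
 * For illustration only (the link names here are hypothetical): for a
 * physical link "ibd0" carrying partitions "part1" and "part2",
 * ibpart_usage() builds the string "ibd0 IBPART: part1, part2", or
 * "ibd0 offlined" once the node has been offlined.
 */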
/*
 * ibpart_suspend() - Nothing to do, always okay
 */
/*ARGSUSED*/
static int
ibpart_suspend(rcm_handle_t *hd, char *rsrc, id_t id, timespec_t *interval,
    uint_t flags, char **errorp, rcm_info_t **info)
{
        rcm_log_message(RCM_TRACE1, "IBPART: suspend(%s)\n", rsrc);
        return (RCM_SUCCESS);
}

/*
 * ibpart_resume() - Nothing to do, always okay
 */
/*ARGSUSED*/
static int
ibpart_resume(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
        rcm_log_message(RCM_TRACE1, "IBPART: resume(%s)\n", rsrc);
        return (RCM_SUCCESS);
}
/*
 * ibpart_consumer_remove()
 *
 *      Notify IBPART consumers to remove cache.
 */
static int
ibpart_consumer_remove(rcm_handle_t *hd, link_cache_t *node, uint_t flags,
    rcm_info_t **info)
{
        dl_ibpart_t *ibpart = NULL;
        char rsrc[RCM_LINK_RESOURCE_MAX];
        int ret = RCM_SUCCESS;

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove (%s)\n",
            node->pc_resource);

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {

                /*
                 * This will only be called when the offline operation
                 * succeeds, so the IBPART consumers must have been offlined
                 * at this point.
                 */
                assert(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED);

                (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
                    RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

                ret = rcm_notify_remove(hd, rsrc, flags, info);
                if (ret != RCM_SUCCESS) {
                        rcm_log_message(RCM_WARNING,
                            _("IBPART: notify remove failed (%s)\n"), rsrc);
                        break;
                }
        }

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove done\n");
        return (ret);
}
/*
 * ibpart_remove() - remove a resource from cache
 */
/*ARGSUSED*/
static int
ibpart_remove(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, rcm_info_t **info)
{
        link_cache_t *node;
        int rv;

        rcm_log_message(RCM_TRACE1, "IBPART: remove(%s)\n", rsrc);

        (void) mutex_lock(&cache_lock);
        node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
        if (node == NULL) {
                rcm_log_message(RCM_INFO,
                    _("IBPART: remove(%s) unrecognized resource\n"), rsrc);
                (void) mutex_unlock(&cache_lock);
                errno = ENOENT;
                return (RCM_FAILURE);
        }

        /* remove the cached entry for the resource */
        cache_remove(node);
        (void) mutex_unlock(&cache_lock);

        rv = ibpart_consumer_remove(hd, node, flags, info);
        node_free(node);
        return (rv);
}
/*
 * ibpart_notify_event - Project private implementation to receive new
 *                       resource events. It intercepts all new resource
 *                       events. If the new resource is a network resource,
 *                       pass up a notify for it too. The new resource need
 *                       not be cached, since caching is done again at
 *                       register time.
 */
/*ARGSUSED*/
static int
ibpart_notify_event(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
    char **errorp, nvlist_t *nvl, rcm_info_t **info)
{
        nvpair_t        *nvp = NULL;
        datalink_id_t   linkid;
        uint64_t        id64;
        int             rv = RCM_SUCCESS;

        rcm_log_message(RCM_TRACE1, "IBPART: notify_event(%s)\n", rsrc);

        if (strcmp(rsrc, RCM_RESOURCE_LINK_NEW) != 0) {
                ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                    "unrecognized event");
                errno = EINVAL;
                return (RCM_FAILURE);
        }

        /* Update cache to reflect latest IBPARTs */
        if (cache_update(hd) < 0) {
                ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                    "private Cache update failed");
                return (RCM_FAILURE);
        }

        /*
         * Try our best to recover all configuration.
         */
        rcm_log_message(RCM_DEBUG, "IBPART: process_nvlist\n");
        while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
                if (strcmp(nvpair_name(nvp), RCM_NV_LINKID) != 0)
                        continue;

                if (nvpair_value_uint64(nvp, &id64) != 0) {
                        ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
                            "cannot get linkid");
                        rv = RCM_FAILURE;
                        continue;
                }

                linkid = (datalink_id_t)id64;
                if (ibpart_configure(hd, linkid) != 0) {
                        ibpart_log_err(linkid, errorp, "configuring failed");
                        rv = RCM_FAILURE;
                        continue;
                }

                /* Notify all IBPART consumers */
                if (ibpart_consumer_notify(hd, linkid, errorp, flags,
                    info) != 0) {
                        ibpart_log_err(linkid, errorp,
                            "consumer notify failed");
                        rv = RCM_FAILURE;
                }
        }

        rcm_log_message(RCM_TRACE1,
            "IBPART: notify_event: link configuration complete\n");
        return (rv);
}
/*
 * ibpart_usage - Determine the usage of a link.
 *                The returned buffer is owned by the caller, and the caller
 *                must free it when done.
 */
static char *
ibpart_usage(link_cache_t *node)
{
        dl_ibpart_t *ibpart;
        int nibpart;
        char *buf;
        const char *fmt;
        char *sep;
        char errmsg[DLADM_STRSIZE];
        char name[MAXLINKNAMELEN];
        dladm_status_t status;
        size_t bufsz;

        rcm_log_message(RCM_TRACE2, "IBPART: usage(%s)\n", node->pc_resource);

        assert(MUTEX_HELD(&cache_lock));
        if ((status = dladm_datalink_id2info(dld_handle, node->pc_linkid, NULL,
            NULL, NULL, name, sizeof (name))) != DLADM_STATUS_OK) {
                rcm_log_message(RCM_ERROR,
                    _("IBPART: usage(%s) get link name failure(%s)\n"),
                    node->pc_resource, dladm_status2str(status, errmsg));
                return (NULL);
        }

        if (node->pc_state & CACHE_NODE_OFFLINED)
                fmt = _("%1$s offlined");
        else
                fmt = _("%1$s IBPART: ");

        /* TRANSLATION_NOTE: separator used between IBPART linkids */
        sep = _(", ");

        nibpart = 0;
        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next)
                nibpart++;

        /* space for IBPARTs and separators, plus message */
        bufsz = nibpart * (MAXLINKNAMELEN + strlen(sep)) +
            strlen(fmt) + MAXLINKNAMELEN + 1;
        if ((buf = malloc(bufsz)) == NULL) {
                rcm_log_message(RCM_ERROR,
                    _("IBPART: usage(%s) malloc failure(%s)\n"),
                    node->pc_resource, strerror(errno));
                return (NULL);
        }
        (void) snprintf(buf, bufsz, fmt, name);

        if (node->pc_state & CACHE_NODE_OFFLINED) {
                /* Nothing else to do */
                rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
                    node->pc_resource, buf);
                return (buf);
        }

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                rcm_log_message(RCM_DEBUG, "IBPART:= %u\n",
                    ibpart->dlib_ibpart_id);

                if ((status = dladm_datalink_id2info(dld_handle,
                    ibpart->dlib_ibpart_id, NULL, NULL, NULL, name,
                    sizeof (name))) != DLADM_STATUS_OK) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: usage(%s) get ibpart %u name "
                            "failure(%s)\n"), node->pc_resource,
                            ibpart->dlib_ibpart_id,
                            dladm_status2str(status, errmsg));
                        free(buf);
                        return (NULL);
                }

                (void) strlcat(buf, name, bufsz);
                if (ibpart->dlib_next != NULL)
                        (void) strlcat(buf, sep, bufsz);
        }

        rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
            node->pc_resource, buf);

        return (buf);
}
/*
 * Cache management routines. All cache management functions should be
 * called with cache_lock held.
 */

/*
 * cache_lookup() - Get a cache node for a resource.
 *                  Call with cache lock held.
 *
 * This ensures that the cache is consistent with the system state and
 * returns a pointer to the cache element corresponding to the resource.
 */
static link_cache_t *
cache_lookup(rcm_handle_t *hd, char *rsrc, char options)
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE2, "IBPART: cache lookup(%s)\n", rsrc);

        assert(MUTEX_HELD(&cache_lock));
        if (options & CACHE_REFRESH) {
                /* drop lock since update locks cache again */
                (void) mutex_unlock(&cache_lock);
                (void) cache_update(hd);
                (void) mutex_lock(&cache_lock);
        }

        node = cache_head.pc_next;
        for (; node != &cache_tail; node = node->pc_next) {
                if (strcmp(rsrc, node->pc_resource) == 0) {
                        rcm_log_message(RCM_TRACE2,
                            "IBPART: cache lookup succeeded(%s)\n", rsrc);
                        return (node);
                }
        }
        return (NULL);
}
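/*
 * Note that a CACHE_REFRESH lookup drops and reacquires cache_lock
 * around cache_update(), so any cache node pointers the caller obtained
 * before calling cache_lookup() may no longer be valid afterwards.
 */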
/*
 * node_free - Free a node from the cache
 */
static void
node_free(link_cache_t *node)
{
        dl_ibpart_t *ibpart, *next;

        if (node != NULL) {
                free(node->pc_resource);

                /* free the IBPART list */
                for (ibpart = node->pc_ibpart; ibpart != NULL; ibpart = next) {
                        next = ibpart->dlib_next;
                        free(ibpart);
                }
                free(node);
        }
}
/*
 * cache_insert - Insert a resource node in cache
 */
static void
cache_insert(link_cache_t *node)
{
        assert(MUTEX_HELD(&cache_lock));

        /* insert at the head for best performance */
        node->pc_next = cache_head.pc_next;
        node->pc_prev = &cache_head;

        node->pc_next->pc_prev = node;
        node->pc_prev->pc_next = node;
}

/*
 * cache_remove() - Remove a resource node from cache.
 */
static void
cache_remove(link_cache_t *node)
{
        assert(MUTEX_HELD(&cache_lock));
        node->pc_next->pc_prev = node->pc_prev;
        node->pc_prev->pc_next = node->pc_next;
        node->pc_next = NULL;
        node->pc_prev = NULL;
}
typedef struct ibpart_update_arg_s {
        rcm_handle_t    *hd;
        int             retval;
} ibpart_update_arg_t;
/*
 * ibpart_update() - Update physical interface properties
 */
static int
ibpart_update(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
{
        ibpart_update_arg_t *ibpart_update_argp = arg;
        rcm_handle_t *hd = ibpart_update_argp->hd;
        link_cache_t *node;
        dl_ibpart_t *ibpart;
        char *rsrc;
        dladm_ib_attr_t ibpart_attr;
        dladm_status_t status;
        char errmsg[DLADM_STRSIZE];
        boolean_t newnode = B_FALSE;
        int ret = -1;

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update(%u)\n", ibpartid);

        assert(MUTEX_HELD(&cache_lock));
        status = dladm_part_info(handle, ibpartid, &ibpart_attr,
            DLADM_OPT_ACTIVE);
        if (status != DLADM_STATUS_OK) {
                rcm_log_message(RCM_TRACE1,
                    "IBPART: ibpart_update() cannot get ibpart information "
                    "for %u(%s)\n", ibpartid,
                    dladm_status2str(status, errmsg));
                return (DLADM_WALK_CONTINUE);
        }

        if (ibpart_attr.dia_physlinkid == DATALINK_INVALID_LINKID) {
                /*
                 * Skip the IB port nodes.
                 */
                rcm_log_message(RCM_TRACE1,
                    "IBPART: ibpart_update(): skip the PORT nodes %u\n",
                    ibpartid);
                return (DLADM_WALK_CONTINUE);
        }

        rsrc = malloc(RCM_LINK_RESOURCE_MAX);
        if (rsrc == NULL) {
                rcm_log_message(RCM_ERROR, _("IBPART: malloc error(%s): %u\n"),
                    strerror(errno), ibpartid);
                goto done;
        }

        (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
            RCM_LINK_PREFIX, ibpart_attr.dia_physlinkid);

        node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
        if (node != NULL) {
                rcm_log_message(RCM_DEBUG,
                    "IBPART: %s already registered (ibpartid:%d)\n",
                    rsrc, ibpart_attr.dia_partlinkid);
                free(rsrc);
        } else {
                rcm_log_message(RCM_DEBUG,
                    "IBPART: %s is a new resource (ibpartid:%d)\n",
                    rsrc, ibpart_attr.dia_partlinkid);
                if ((node = calloc(1, sizeof (link_cache_t))) == NULL) {
                        free(rsrc);
                        rcm_log_message(RCM_ERROR, _("IBPART: calloc: %s\n"),
                            strerror(errno));
                        goto done;
                }

                node->pc_resource = rsrc;
                node->pc_ibpart = NULL;
                node->pc_linkid = ibpart_attr.dia_physlinkid;
                node->pc_state |= CACHE_NODE_NEW;
                newnode = B_TRUE;
        }

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                if (ibpart->dlib_ibpart_id == ibpartid) {
                        ibpart->dlib_flags &= ~IBPART_STALE;
                        break;
                }
        }

        if (ibpart == NULL) {
                if ((ibpart = calloc(1, sizeof (dl_ibpart_t))) == NULL) {
                        rcm_log_message(RCM_ERROR, _("IBPART: malloc: %s\n"),
                            strerror(errno));
                        if (newnode) {
                                free(rsrc);
                                free(node);
                        }
                        goto done;
                }
                ibpart->dlib_ibpart_id = ibpartid;
                ibpart->dlib_next = node->pc_ibpart;
                ibpart->dlib_prev = NULL;
                if (node->pc_ibpart != NULL)
                        node->pc_ibpart->dlib_prev = ibpart;
                node->pc_ibpart = ibpart;
        }

        node->pc_state &= ~CACHE_NODE_STALE;

        if (newnode)
                cache_insert(node);

        rcm_log_message(RCM_TRACE3, "IBPART: ibpart_update: succeeded(%u)\n",
            ibpartid);
        ret = 0;
done:
        ibpart_update_argp->retval = ret;
        return (ret == 0 ? DLADM_WALK_CONTINUE : DLADM_WALK_TERMINATE);
}
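/*
 * ibpart_update() is a dladm_walk_datalink_id() callback: it returns
 * DLADM_WALK_CONTINUE both on success and for links it can simply skip,
 * and DLADM_WALK_TERMINATE (with retval left at -1) only on allocation
 * failure, which aborts the walk.
 */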
/*
 * ibpart_update_all() - Determine all IBPART links in the system
 */
static int
ibpart_update_all(rcm_handle_t *hd)
{
        ibpart_update_arg_t arg = {NULL, 0};

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update_all\n");

        assert(MUTEX_HELD(&cache_lock));
        arg.hd = hd;
        (void) dladm_walk_datalink_id(ibpart_update, dld_handle, &arg,
            DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_ACTIVE);
        return (arg.retval);
}
/*
 * cache_update() - Update cache with latest interface info
 */
static int
cache_update(rcm_handle_t *hd)
{
        link_cache_t *node, *nnode;
        dl_ibpart_t *ibpart;
        int rv;

        rcm_log_message(RCM_TRACE2, "IBPART: cache_update\n");

        (void) mutex_lock(&cache_lock);

        /* first we walk the entire cache, marking each entry stale */
        node = cache_head.pc_next;
        for (; node != &cache_tail; node = node->pc_next) {
                node->pc_state |= CACHE_NODE_STALE;
                for (ibpart = node->pc_ibpart; ibpart != NULL;
                    ibpart = ibpart->dlib_next)
                        ibpart->dlib_flags |= IBPART_STALE;
        }

        rv = ibpart_update_all(hd);

        /*
         * Continue to delete all stale nodes from the cache even if
         * ibpart_update_all() failed. Unregister links that are not
         * offlined and still in the cache.
         */
        for (node = cache_head.pc_next; node != &cache_tail; node = nnode) {
                dl_ibpart_t *ibpart, *next;

                for (ibpart = node->pc_ibpart; ibpart != NULL; ibpart = next) {
                        next = ibpart->dlib_next;

                        /* clear stale IBPARTs */
                        if (ibpart->dlib_flags & IBPART_STALE) {
                                if (ibpart->dlib_prev != NULL)
                                        ibpart->dlib_prev->dlib_next = next;
                                else
                                        node->pc_ibpart = next;

                                if (next != NULL)
                                        next->dlib_prev = ibpart->dlib_prev;
                                free(ibpart);
                        }
                }

                nnode = node->pc_next;
                if (node->pc_state & CACHE_NODE_STALE) {
                        (void) rcm_unregister_interest(hd, node->pc_resource,
                            0);
                        rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
                            node->pc_resource);
                        assert(node->pc_ibpart == NULL);
                        cache_remove(node);
                        node_free(node);
                        continue;
                }

                if (!(node->pc_state & CACHE_NODE_NEW))
                        continue;

                if (rcm_register_interest(hd, node->pc_resource, 0, NULL) !=
                    RCM_SUCCESS) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to register %s\n"),
                            node->pc_resource);
                        rv = -1;
                } else {
                        rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
                            node->pc_resource);
                        node->pc_state &= ~CACHE_NODE_NEW;
                }
        }

        (void) mutex_unlock(&cache_lock);
        return (rv);
}
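/*
 * cache_update() is effectively a mark-and-sweep pass: every cached node
 * and IBPART is first marked stale, the dladm walk in ibpart_update_all()
 * then clears the mark on everything still present in the system, and
 * whatever remains stale afterwards is unregistered and freed.
 */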
/*
 * cache_free() - Empty the cache
 */
static void
cache_free()
{
        link_cache_t *node;

        rcm_log_message(RCM_TRACE2, "IBPART: cache_free\n");

        (void) mutex_lock(&cache_lock);
        node = cache_head.pc_next;
        while (node != &cache_tail) {
                cache_remove(node);
                node_free(node);
                node = cache_head.pc_next;
        }
        (void) mutex_unlock(&cache_lock);
}
/*
 * ibpart_log_err() - RCM error log wrapper
 */
static void
ibpart_log_err(datalink_id_t linkid, char **errorp, char *errmsg)
{
        char link[MAXLINKNAMELEN];
        char errstr[DLADM_STRSIZE];
        dladm_status_t status;
        int len;
        const char *errfmt;
        char *error;

        link[0] = '\0';
        if (linkid != DATALINK_INVALID_LINKID) {
                char rsrc[RCM_LINK_RESOURCE_MAX];

                (void) snprintf(rsrc, sizeof (rsrc), "%s/%u",
                    RCM_LINK_PREFIX, linkid);

                rcm_log_message(RCM_ERROR, _("IBPART: %s(%s)\n"), errmsg,
                    rsrc);
                if ((status = dladm_datalink_id2info(dld_handle, linkid, NULL,
                    NULL, NULL, link, sizeof (link))) != DLADM_STATUS_OK) {
                        rcm_log_message(RCM_WARNING,
                            _("IBPART: cannot get link name for (%s) %s\n"),
                            rsrc, dladm_status2str(status, errstr));
                }
        } else {
                rcm_log_message(RCM_ERROR, _("IBPART: %s\n"), errmsg);
        }

        errfmt = strlen(link) > 0 ? _("IBPART: %s(%s)") : _("IBPART: %s");
        len = strlen(errfmt) + strlen(errmsg) + MAXLINKNAMELEN + 1;
        if ((error = malloc(len)) != NULL) {
                if (strlen(link) > 0)
                        (void) snprintf(error, len, errfmt, errmsg, link);
                else
                        (void) snprintf(error, len, errfmt, errmsg);
        }

        if (errorp != NULL)
                *errorp = error;
}
/*
 * ibpart_consumer_online()
 *
 *      Notify online to IBPART consumers.
 */
/* ARGSUSED */
static void
ibpart_consumer_online(rcm_handle_t *hd, link_cache_t *node, char **errorp,
    uint_t flags, rcm_info_t **info)
{
        dl_ibpart_t *ibpart;
        char rsrc[RCM_LINK_RESOURCE_MAX];

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online (%s)\n",
            node->pc_resource);

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                if (!(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED))
                        continue;

                (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
                    RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

                if (rcm_notify_online(hd, rsrc, flags, info) == RCM_SUCCESS)
                        ibpart->dlib_flags &= ~IBPART_CONSUMER_OFFLINED;
        }

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online done\n");
}
/*
 * ibpart_consumer_offline()
 *
 *      Offline IBPART consumers.
 */
static int
ibpart_consumer_offline(rcm_handle_t *hd, link_cache_t *node, char **errorp,
    uint_t flags, rcm_info_t **info)
{
        dl_ibpart_t *ibpart;
        char rsrc[RCM_LINK_RESOURCE_MAX];
        int ret = RCM_SUCCESS;

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline (%s)\n",
            node->pc_resource);

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
                    RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);

                ret = rcm_request_offline(hd, rsrc, flags, info);
                if (ret != RCM_SUCCESS)
                        break;

                ibpart->dlib_flags |= IBPART_CONSUMER_OFFLINED;
        }

        if (ibpart != NULL)
                ibpart_consumer_online(hd, node, errorp, flags, info);

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline done\n");
        return (ret);
}
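/*
 * If any consumer refuses the offline request, the loop above stops
 * early and ibpart_consumer_online() rolls back the consumers that had
 * already agreed, so a failed offline attempt leaves the link fully
 * online.
 */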
/*
 * Send RCM_RESOURCE_LINK_NEW events to other modules about new IBPARTs.
 * Return 0 on success, -1 on failure.
 */
static int
ibpart_notify_new_ibpart(rcm_handle_t *hd, char *rsrc)
{
        link_cache_t *node;
        dl_ibpart_t *ibpart;
        nvlist_t *nvl = NULL;
        uint64_t id;
        int ret = -1;

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart (%s)\n",
            rsrc);

        (void) mutex_lock(&cache_lock);
        if ((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) == NULL) {
                (void) mutex_unlock(&cache_lock);
                return (0);
        }

        if (nvlist_alloc(&nvl, 0, 0) != 0) {
                (void) mutex_unlock(&cache_lock);
                rcm_log_message(RCM_WARNING,
                    _("IBPART: failed to allocate nvlist\n"));
                goto done;
        }

        for (ibpart = node->pc_ibpart; ibpart != NULL;
            ibpart = ibpart->dlib_next) {
                rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart "
                    "add (%u)\n", ibpart->dlib_ibpart_id);

                id = ibpart->dlib_ibpart_id;
                if (nvlist_add_uint64(nvl, RCM_NV_LINKID, id) != 0) {
                        rcm_log_message(RCM_ERROR,
                            _("IBPART: failed to construct nvlist\n"));
                        (void) mutex_unlock(&cache_lock);
                        goto done;
                }
        }
        (void) mutex_unlock(&cache_lock);

        if (rcm_notify_event(hd, RCM_RESOURCE_LINK_NEW, 0, nvl, NULL) !=
            RCM_SUCCESS) {
                rcm_log_message(RCM_ERROR,
                    _("IBPART: failed to notify %s event for %s\n"),
                    RCM_RESOURCE_LINK_NEW, node->pc_resource);
                goto done;
        }

        ret = 0;
done:
        nvlist_free(nvl);
        return (ret);
}
/*
 * ibpart_consumer_notify() - Notify consumers of IBPARTs coming back online.
 */
static int
ibpart_consumer_notify(rcm_handle_t *hd, datalink_id_t linkid, char **errorp,
    uint_t flags, rcm_info_t **info)
{
        char rsrc[RCM_LINK_RESOURCE_MAX];
        link_cache_t *node;

        /* Check for the interface in the cache */
        (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u", RCM_LINK_PREFIX,
            linkid);

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify(%s)\n",
            rsrc);

        /*
         * Inform IP consumers of the new link.
         */
        if (ibpart_notify_new_ibpart(hd, rsrc) != 0) {
                (void) mutex_lock(&cache_lock);
                if ((node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH)) !=
                    NULL) {
                        (void) ibpart_offline_ibpart(node, IBPART_STALE,
                            CACHE_NODE_STALE);
                }
                (void) mutex_unlock(&cache_lock);
                rcm_log_message(RCM_TRACE2,
                    "IBPART: ibpart_notify_new_ibpart failed(%s)\n", rsrc);
                return (-1);
        }

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify "
            "succeeded\n");
        return (0);
}
typedef struct ibpart_up_arg_s {
        datalink_id_t   linkid;
        int             retval;
} ibpart_up_arg_t;
static int
ibpart_up(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
{
        ibpart_up_arg_t *ibpart_up_argp = arg;
        dladm_status_t status;
        dladm_ib_attr_t ibpart_attr;
        char errmsg[DLADM_STRSIZE];

        status = dladm_part_info(handle, ibpartid, &ibpart_attr,
            DLADM_OPT_PERSIST);
        if (status != DLADM_STATUS_OK) {
                rcm_log_message(RCM_TRACE1,
                    "IBPART: ibpart_up(): cannot get information for IBPART "
                    "%u (%s)\n", ibpartid, dladm_status2str(status, errmsg));
                return (DLADM_WALK_CONTINUE);
        }

        if (ibpart_attr.dia_physlinkid != ibpart_up_argp->linkid)
                return (DLADM_WALK_CONTINUE);

        rcm_log_message(RCM_TRACE3, "IBPART: ibpart_up(%u)\n", ibpartid);
        if ((status = dladm_part_up(handle, ibpartid, 0)) == DLADM_STATUS_OK)
                return (DLADM_WALK_CONTINUE);

        /*
         * Print a warning message and continue to bring up other IBPARTs.
         */
        rcm_log_message(RCM_WARNING,
            _("IBPART: IBPART up failed (%u): %s\n"),
            ibpartid, dladm_status2str(status, errmsg));

        ibpart_up_argp->retval = -1;
        return (DLADM_WALK_CONTINUE);
}
/*
 * ibpart_configure() - Configure IBPARTs over a physical link after it
 *                      attaches.
 */
static int
ibpart_configure(rcm_handle_t *hd, datalink_id_t linkid)
{
        char rsrc[RCM_LINK_RESOURCE_MAX];
        link_cache_t *node;
        ibpart_up_arg_t arg = {DATALINK_INVALID_LINKID, 0};

        /* Check for the IBPARTs in the cache */
        (void) snprintf(rsrc, sizeof (rsrc), "%s/%u", RCM_LINK_PREFIX, linkid);

        rcm_log_message(RCM_TRACE2, "IBPART: ibpart_configure(%s)\n", rsrc);

        /* Check if the link is new or was previously offlined */
        (void) mutex_lock(&cache_lock);
        if (((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) != NULL) &&
            (!(node->pc_state & CACHE_NODE_OFFLINED))) {
                rcm_log_message(RCM_TRACE2,
                    "IBPART: Skipping configured interface(%s)\n", rsrc);
                (void) mutex_unlock(&cache_lock);
                return (0);
        }
        (void) mutex_unlock(&cache_lock);

        arg.linkid = linkid;
        (void) dladm_walk_datalink_id(ibpart_up, dld_handle, &arg,
            DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_PERSIST);

        if (arg.retval == 0) {
                rcm_log_message(RCM_TRACE2,
                    "IBPART: ibpart_configure succeeded(%s)\n", rsrc);
        }
        return (arg.retval);
}
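/*
 * A sketch of the module life cycle, as driven by the RCM framework (the
 * daemon-side details are outside this file): the daemon loads the module
 * and calls rcm_mod_init(), then ibpart_register() to sync the cache and
 * register interest in the datalinks it covers.  DR-initiated requests
 * then arrive through the ops vector (ibpart_offline(),
 * ibpart_undo_offline(), ibpart_remove(), ...), RCM_RESOURCE_LINK_NEW
 * events arrive through ibpart_notify_event(), and rcm_mod_fini() tears
 * everything down when the module is unloaded.
 */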