/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2012 Milan Jurik. All rights reserved.
 */
/*
 * This module provides for the management of interconnect adapters,
 * inter-node connections (aka paths), and IPC.  Adapter descriptors are
 * maintained on a linked list; one list per adapter devname.  Each
 * adapter descriptor heads a linked list of path descriptors.  There is
 * also a linked list of ipc_info descriptors; one for each node.  Each
 * ipc_info descriptor heads a circular list of ipc tokens (the tokens are
 * embedded within a path descriptor).  The tokens are used in round robin
 * fashion.
 *
 * The exported interface consists of the following functions:
 *	- rsmka_add_adapter
 *	- rsmka_remove_adapter
 *
 *	[add_path and remove_path only called for current adapters]
 *	- rsmka_add_path
 *	- rsmka_remove_path	[a path down request is implicit]
 *
 *	- rsmka_path_up		[called at clock ipl for Sun Cluster]
 *	- rsmka_path_down	[called at clock ipl for Sun Cluster]
 *	- rsmka_disconnect_node	[called at clock ipl for Sun Cluster;
 *				treat like path-down for all node paths;
 *				can be before node_alive; always before
 *				node_died]
 *
 *	[node_alive and node_died are always paired]
 *	- rsmka_node_alive	called after the first cluster path is up
 *				for a given node
 *	- rsmka_node_died
 *
 *	[set the local node id]
 *	- rsmka_set_my_nodeid	called to set the variable my_nodeid to the
 *				local node id
 *
 * Processing for these functions is set up as a state machine supported
 * by the data structures described above.
 *
 * For Sun Cluster these are called from the Path-Manager/Kernel-Agent
 * Interface (rsmka_pm_interface.cc).
 *
 * The functions rsmka_path_up, rsmka_path_down, and rsmka_disconnect_node
 * are called at clock interrupt level from the Path-Manager/Kernel-Agent
 * Interface, which precludes sleeping; so these functions may (optionally)
 * defer processing to an independent thread running at normal ipl.
 */
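
/*
 * Illustrative picture of the relationships described above (sketch
 * only; names abridged, not actual field definitions):
 *
 *	adapter_listhead_base -> listhead("dev0") -> listhead("dev1") ...
 *	                             |
 *	                             +-> adapter(inst 0) -> adapter(inst 1)
 *	                                     |
 *	                                     +-> path -> path -> ...
 *
 *	ipc_info_head -> ipc_info(node A) -> ipc_info(node B) -> ...
 *	                      |
 *	                      +-> circular list of sendq tokens, each
 *	                          embedded in a path descriptor for that
 *	                          node, serviced round robin
 */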
/*
 * lock definitions:
 *
 *	(mutex) work_queue.work_mutex
 *			protects linked list of work tokens and used
 *			with cv_wait/cv_signal thread synchronization.
 *			No other locks acquired when held.
 *
 *	(mutex) adapter_listhead_base.listlock
 *			protects linked list of adapter listheads
 *			Always acquired before listhead->mutex
 *
 *	(mutex) ipc_info_lock
 *			protects ipc_info list and sendq token lists
 *			Always acquired before listhead->mutex
 *
 *	(mutex) listhead->mutex
 *			protects adapter listhead, linked list of
 *			adapters, and linked list of paths.
 *
 *	(mutex) path->mutex
 *			protects the path descriptor.
 *			work_queue.work_mutex may be acquired when holding
 *			this lock.
 *
 *	(mutex) adapter->mutex
 *			protects adapter descriptor contents.  used
 *			mainly for ref_cnt update.
 */
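
/*
 * Taken together, these rules imply a fixed top-down acquisition order;
 * e.g. a thread touching both the listhead list and a listhead does
 * (as in link_adapter() and destroy_path() below):
 *
 *	mutex_enter(&adapter_listhead_base.listlock);
 *	mutex_enter(&listhead->mutex);
 *	...
 *	mutex_exit(&listhead->mutex);
 *	mutex_exit(&adapter_listhead_base.listlock);
 */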
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/devops.h>
#include <sys/ddi_impldefs.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/taskq.h>
#include <sys/callb.h>

#include <sys/rsm/rsm.h>
#include <sys/rsm/rsmka_path_int.h>
extern void _cplpl_init();
extern void _cplpl_fini();
extern pri_t maxclsyspri;
extern int rsm_hash_size;

extern rsm_node_id_t my_nodeid;
extern rsmhash_table_t rsm_import_segs;
extern rsm_intr_hand_ret_t rsm_srv_func(rsm_controller_object_t *,
    rsm_intr_q_op_t, rsm_addr_t, void *, size_t, rsm_intr_hand_arg_t);
extern void rsmseg_unload(rsmseg_t *);
extern void rsm_suspend_complete(rsm_node_id_t src_node, int flag);
extern int rsmipc_send_controlmsg(path_t *path, int msgtype);
extern void rsmka_path_monitor_initialize();
extern void rsmka_path_monitor_terminate();

extern adapter_t loopback_adapter;
/*
 * Lint errors and warnings are displayed; informational messages
 * are suppressed.
 */
/* lint -w2 */

/*
 * macros SQ_TOKEN_TO_PATH and WORK_TOKEN_TO_PATH use a null pointer
 * for computational purposes.  Ignore the lint warning.
 */
/* lint -save -e413 */
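
/*
 * For reference, macros of this kind conventionally recover the
 * enclosing path descriptor from an embedded member by subtracting the
 * member's offset, computed from a null path_t pointer.  A hypothetical
 * sketch (the real definitions live in the rsmka path header, not here):
 *
 *	#define	WORK_TOKEN_TO_PATH(wt, i)	\
 *	    ((path_t *)((char *)(wt) -		\
 *	    (char *)&((path_t *)0)->work_token[i]))
 *
 * The null-pointer arithmetic is what draws lint warning e413.
 */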
/* FUNCTION PROTOTYPES */
static adapter_t *init_adapter(char *, int, rsm_addr_t,
    rsm_controller_handle_t, rsm_ops_t *, srv_handler_arg_t *);
adapter_t *rsmka_lookup_adapter(char *, int);
static ipc_info_t *lookup_ipc_info(rsm_node_id_t);
static ipc_info_t *init_ipc_info(rsm_node_id_t, boolean_t);
static path_t *lookup_path(char *, int, rsm_node_id_t, rsm_addr_t);
static void pathup_to_pathactive(ipc_info_t *, rsm_node_id_t);
static void path_importer_disconnect(path_t *);
boolean_t rsmka_do_path_active(path_t *, int);
static boolean_t do_path_up(path_t *, int);
static void do_path_down(path_t *, int);
static void enqueue_work(work_token_t *);
static boolean_t cancel_work(work_token_t *);
static void link_path(path_t *);
static void destroy_path(path_t *);
static void link_sendq_token(sendq_token_t *, rsm_node_id_t);
static void unlink_sendq_token(sendq_token_t *, rsm_node_id_t);
boolean_t rsmka_check_node_alive(rsm_node_id_t);
static void do_deferred_work(caddr_t);
static int create_ipc_sendq(path_t *);
static void destroy_ipc_info(ipc_info_t *);
void rsmka_pathmanager_cleanup();
void rsmka_release_adapter(adapter_t *);
kt_did_t rsm_thread_id;
int rsmka_terminate_workthread_loop = 0;

static struct adapter_listhead_list adapter_listhead_base;
static work_queue_t work_queue;

/* protect ipc_info descriptor manipulation */
static kmutex_t ipc_info_lock;

static ipc_info_t *ipc_info_head = NULL;

static int category = RSM_PATH_MANAGER | RSM_KERNEL_AGENT;

/* for synchronization with rsmipc_send() in rsm.c */
kmutex_t ipc_info_cvlock;
kcondvar_t ipc_info_cv;
/*
 * RSMKA PATHMANAGER INITIALIZATION AND CLEANUP ROUTINES
 */

/*
 * Called from the rsm module (rsm.c) _init() routine
 */
void
rsmka_pathmanager_init()
{
	kthread_t *tp;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_pathmanager_init enter\n"));

	/* initialization for locks and condition variables */
	mutex_init(&work_queue.work_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ipc_info_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ipc_info_cvlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&adapter_listhead_base.listlock, NULL,
	    MUTEX_DEFAULT, NULL);

	cv_init(&work_queue.work_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&ipc_info_cv, NULL, CV_DEFAULT, NULL);

	tp = thread_create(NULL, 0, do_deferred_work, NULL, 0, &p0,
	    TS_RUN, maxclsyspri);
	rsm_thread_id = tp->t_did;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_pathmanager_init done\n"));
}
void
rsmka_pathmanager_cleanup()
{
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_pathmanager_cleanup enter\n"));

	ASSERT(work_queue.head == NULL);

	/*
	 * In processing the remove path callbacks from the path monitor
	 * object, all deferred work will have been completed. So
	 * awaken the deferred work thread to give it a chance to exit
	 * the loop.
	 */
	mutex_enter(&work_queue.work_mutex);
	rsmka_terminate_workthread_loop++;
	cv_signal(&work_queue.work_cv);
	mutex_exit(&work_queue.work_mutex);

	/*
	 * Wait for the deferred work thread to exit before
	 * destroying the locks and cleaning up other data
	 * structures.
	 */
	thread_join(rsm_thread_id);

	/*
	 * Destroy locks & condition variables
	 */
	mutex_destroy(&work_queue.work_mutex);
	cv_destroy(&work_queue.work_cv);

	mutex_enter(&ipc_info_lock);
	while (ipc_info_head)
		destroy_ipc_info(ipc_info_head);
	mutex_exit(&ipc_info_lock);

	mutex_destroy(&ipc_info_lock);

	mutex_destroy(&ipc_info_cvlock);
	cv_destroy(&ipc_info_cv);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_pathmanager_cleanup done\n"));
}
void
rsmka_set_my_nodeid(rsm_node_id_t local_nodeid)
{
	my_nodeid = local_nodeid;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsm: node %d \n", my_nodeid));
}
/*
 * DEFERRED WORK THREAD AND WORK QUEUE SUPPORT ROUTINES
 */

/*
 * This function is the executable code of the thread which handles
 * deferred work.  Work is deferred when a function is called at
 * clock ipl and processing may require blocking.
 *
 * The thread is created by a call to thread_create in
 * rsmka_pathmanager_init.  It loops forever - blocked until work is
 * enqueued from rsmka_do_path_active, do_path_down, or
 * rsmka_disconnect_node.  rsmka_pathmanager_cleanup (called from _fini)
 * will set rsmka_terminate_workthread_loop and the task processing will
 * terminate.
 */
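
/*
 * Shape of the deferral handshake (illustrative, condensed from
 * do_path_down below): a clock-ipl caller only links a token onto the
 * queue and signals; all blocking calls happen on this thread.
 *
 *	mutex_enter(&work_queue.work_mutex);
 *	down_token->opcode = RSMKA_IPC_DOWN;
 *	enqueue_work(down_token);	(enqueue_work signals work_cv)
 *	mutex_exit(&work_queue.work_mutex);
 */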
static void
do_deferred_work(caddr_t arg /*ARGSUSED*/)
{
	adapter_t		*adapter;
	path_t			*path;
	work_token_t		*work_token;
	int			work_opcode;
	rsm_send_q_handle_t	sendq_handle;
	int			error;
	timespec_t		tv;
	callb_cpr_t		cprinfo;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_deferred_work enter\n"));

	CALLB_CPR_INIT(&cprinfo, &work_queue.work_mutex, callb_generic_cpr,
	    "rsm_deferred_work");

	for (;;) {
		mutex_enter(&work_queue.work_mutex);

		if (rsmka_terminate_workthread_loop) {
			goto exit;
		}

		/* When there is no work to do, block here */
		while (work_queue.head == NULL) {
			/* Since no work to do, Safe to CPR */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&work_queue.work_cv, &work_queue.work_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &work_queue.work_mutex);

			if (rsmka_terminate_workthread_loop) {
				goto exit;
			}
		}

		/*
		 * Remove a work token and begin work
		 */
		work_token = work_queue.head;
		work_queue.head = work_token->next;
		if (work_queue.tail == work_token)
			work_queue.tail = NULL;

		work_opcode = work_token->opcode;
		path = WORK_TOKEN_TO_PATH(work_token, work_opcode - 1);
		work_token->next = NULL;
		mutex_exit(&work_queue.work_mutex);

		switch (work_opcode) {
		case RSMKA_IPC_UP:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work:up, path = %lx\n", path));
			error = create_ipc_sendq(path);
			mutex_enter(&path->mutex);
			if (path->state != RSMKA_PATH_UP) {
				/*
				 * path state has changed, if sendq was created,
				 * destroy it and return. Don't need to worry
				 * about sendq ref_cnt since no one starts
				 * using the sendq till path state becomes
				 * active
				 */
				if (error == RSM_SUCCESS) {
					sendq_handle = path->sendq_token.
					    rsmpi_sendq_handle;
					path->sendq_token.rsmpi_sendq_handle =
					    NULL;
					adapter = path->local_adapter;
					mutex_exit(&path->mutex);

					if (sendq_handle != NULL) {
						adapter->rsmpi_ops->
						    rsm_sendq_destroy(
						    sendq_handle);
					}
					mutex_enter(&path->mutex);
				}
				/* free up work token */
				work_token->opcode = 0;

				/*
				 * decrement reference count for the path
				 * descriptor and signal for synchronization
				 * with rsmka_remove_path. PATH_HOLD_NOLOCK was
				 * done by rsmka_path_up.
				 */
				PATH_RELE_NOLOCK(path);
				mutex_exit(&path->mutex);
				break;
			}

			if (error == RSM_SUCCESS) {
				DBG_PRINTF((category, RSM_DEBUG,
				    "do_deferred_work:success on up\n"));
				/* clear flag since sendq_create succeeded */
				path->flags &= ~RSMKA_SQCREATE_PENDING;
				path->state = RSMKA_PATH_ACTIVE;

				/*
				 * now that path is active we send the
				 * RSMIPC_MSG_SQREADY to the remote endpoint
				 */
				path->procmsg_cnt = 0;
				path->sendq_token.msgbuf_avail = 0;

				/* Calculate local incarnation number */
				gethrestime(&tv);
				if (tv.tv_sec == RSM_UNKNOWN_INCN)
					tv.tv_sec = 1;
				path->local_incn = (int64_t)tv.tv_sec;

				/*
				 * if send fails here its due to some
				 * non-transient error because QUEUE_FULL is
				 * not possible here since we are the first
				 * message on this sendq. The error will cause
				 * the path to go down anyways, so ignore
				 * the return value.
				 */
				(void) rsmipc_send_controlmsg(path,
				    RSMIPC_MSG_SQREADY);
				/* wait for SQREADY_ACK message */
				path->flags |= RSMKA_WAIT_FOR_SQACK;
			} else {
				/*
				 * sendq create failed possibly because
				 * the remote end is not yet ready eg.
				 * handler not registered, set a flag
				 * so that when there is an indication
				 * that the remote end is ready
				 * rsmka_do_path_active will be retried.
				 */
				path->flags |= RSMKA_SQCREATE_PENDING;
			}

			/* free up work token */
			work_token->opcode = 0;

			/*
			 * decrement reference count for the path
			 * descriptor and signal for synchronization with
			 * rsmka_remove_path. PATH_HOLD_NOLOCK was done
			 * by rsmka_path_up.
			 */
			PATH_RELE_NOLOCK(path);
			mutex_exit(&path->mutex);
			break;

		case RSMKA_IPC_DOWN:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work:down, path = %lx\n", path));

			/*
			 * Unlike the processing of path_down in the case
			 * where the RSMKA_NO_SLEEP flag is not set, here,
			 * the state of the path is changed directly to
			 * RSMKA_PATH_DOWN. This is because in this case
			 * where the RSMKA_NO_SLEEP flag is set, any other
			 * calls referring this path will just queue up
			 * and will be processed only after the path
			 * down processing has completed.
			 */
			mutex_enter(&path->mutex);
			path->state = RSMKA_PATH_DOWN;
			/*
			 * clear the WAIT_FOR_SQACK flag since path is down.
			 */
			path->flags &= ~RSMKA_WAIT_FOR_SQACK;

			/*
			 * this wakes up any thread waiting to receive credits
			 * in rsmipc_send to tell it that the path is down
			 * thus releasing the sendq.
			 */
			cv_broadcast(&path->sendq_token.sendq_cv);

			mutex_exit(&path->mutex);

			/* drain the messages from the receive msgbuf */
			taskq_wait(path->recv_taskq);

			/*
			 * The path_importer_disconnect function has to
			 * be called after releasing the mutex on the path
			 * in order to avoid any recursive mutex enter panics
			 */
			path_importer_disconnect(path);
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work: success on down\n"));
			/*
			 * decrement reference count for the path
			 * descriptor and signal for synchronization with
			 * rsmka_remove_path. PATH_HOLD_NOLOCK was done
			 * by rsmka_path_down.
			 */
			mutex_enter(&path->mutex);

			/*
			 * Some IPC messages left in the recv_buf,
			 * they'll be dropped
			 */
			if (path->msgbuf_cnt != 0)
				cmn_err(CE_NOTE,
				    "path=%lx msgbuf_cnt != 0\n",
				    (uintptr_t)path);

			/*
			 * Don't want to destroy a send queue when a token
			 * has been acquired; so wait 'til the token is
			 * no longer referenced (with a cv_wait).
			 */
			while (path->sendq_token.ref_cnt != 0)
				cv_wait(&path->sendq_token.sendq_cv,
				    &path->mutex);

			sendq_handle = path->sendq_token.rsmpi_sendq_handle;
			path->sendq_token.rsmpi_sendq_handle = NULL;

			/* destroy the send queue and release the handle */
			if (sendq_handle != NULL) {
				adapter = path->local_adapter;
				adapter->rsmpi_ops->rsm_sendq_destroy(
				    sendq_handle);
			}

			work_token->opcode = 0;
			PATH_RELE_NOLOCK(path);
			mutex_exit(&path->mutex);
			break;

		default:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work: bad work token opcode\n"));
			break;
		}
	}

exit:
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_deferred_work done\n"));
	/*
	 * CALLB_CPR_EXIT does a mutex_exit for
	 * the work_queue.work_mutex
	 */
	CALLB_CPR_EXIT(&cprinfo);
}
/*
 * Work is inserted at the tail of the list and processed from the
 * head of the list.
 */
static void
enqueue_work(work_token_t *token)
{
	work_token_t	*tail_token;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "enqueue_work enter\n"));

	ASSERT(MUTEX_HELD(&work_queue.work_mutex));

	token->next = NULL;
	if (work_queue.head == NULL) {
		work_queue.head = work_queue.tail = token;
	} else {
		tail_token = work_queue.tail;
		work_queue.tail = tail_token->next = token;
	}

	/* wake up deferred work thread */
	cv_signal(&work_queue.work_cv);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "enqueue_work done\n"));
}
/*
 * If the work_token is found on the work queue, the work is cancelled
 * by removing the token from the work queue.
 *
 * Return true if a work_token was found and cancelled, otherwise return false
 *
 * enqueue_work increments the path refcnt to make sure that the path doesn't
 * go away, callers of cancel_work need to decrement the refcnt of the path to
 * which this work_token belongs if a work_token is found in the work_queue
 * and cancelled ie. when the return value is B_TRUE.
 */
static boolean_t
cancel_work(work_token_t *work_token)
{
	work_token_t	*current_token;
	work_token_t	*prev_token = NULL;
	boolean_t	cancelled = B_FALSE;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "cancel_work enter\n"));

	ASSERT(MUTEX_HELD(&work_queue.work_mutex));

	current_token = work_queue.head;
	while (current_token != NULL) {
		if (current_token == work_token) {
			if (work_token == work_queue.head)
				work_queue.head = work_token->next;
			else
				prev_token->next = work_token->next;
			if (work_token == work_queue.tail)
				work_queue.tail = prev_token;

			current_token->opcode = 0;
			current_token->next = NULL;
			/* found and cancelled work */
			cancelled = B_TRUE;
			DBG_PRINTF((category, RSM_DEBUG,
			    "cancelled_work = 0x%p\n", work_token));
			break;
		}
		prev_token = current_token;
		current_token = current_token->next;
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "cancel_work done\n"));

	return (cancelled);
}
/*
 * EXTERNAL INTERFACES
 *
 * For Galileo Clustering, these routines are called from
 * rsmka_pm_interface.cc
 */
/*
 * If the adapter is supported by rsmpi then initialize an adapter descriptor
 * and link it to the list of adapters.  The adapter attributes are obtained
 * from rsmpi and stored in the descriptor.  Finally, a service handler
 * for incoming ipc on this adapter is registered with rsmpi.
 * A pointer for the adapter descriptor is returned as a cookie to the
 * caller.  The cookie may be used with subsequent calls to save the time of
 * adapter descriptor lookup.
 *
 * The adapter descriptor maintains a reference count which is initialized
 * to 1 and incremented on lookups; when a cookie is used in place of
 * a lookup, an explicit ADAPTER_HOLD is required.
 */
void *
rsmka_add_adapter(char *name, int instance, rsm_addr_t hwaddr)
{
	adapter_t		*adapter;
	rsm_controller_object_t	rsmpi_adapter_object;
	rsm_controller_handle_t	rsmpi_adapter_handle;
	rsm_ops_t		*rsmpi_ops_vector;
	int			adapter_is_supported;
	rsm_controller_attr_t	*attr;
	srv_handler_arg_t	*srv_hdlr_argp;
	int			result;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_adapter enter\n"));

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_add_adapter: name = %s instance = %d hwaddr = %llx \n",
	    name, instance, hwaddr));

	/* verify name length */
	if (strlen(name) >= MAXNAMELEN) {
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_add_adapter done: name too long\n"));
		return (NULL);
	}

	/* Check if rsmpi supports this adapter type */
	adapter_is_supported = rsm_get_controller(name, instance,
	    &rsmpi_adapter_object, RSM_VERSION);

	if (adapter_is_supported != RSM_SUCCESS) {
		DBG_PRINTF((category, RSM_ERR,
		    "rsmka_add_adapter done: adapter not supported\n"));
		return (NULL);
	}

	rsmpi_adapter_handle = rsmpi_adapter_object.handle;
	rsmpi_ops_vector = rsmpi_adapter_object.ops;

	/* Get adapter attributes */
	result = rsm_get_controller_attr(rsmpi_adapter_handle, &attr);
	if (result != RSM_SUCCESS) {
		DBG_PRINTF((category, RSM_ERR,
		    "rsm: get_controller_attr(%d) Failed %x\n",
		    instance, result));
		(void) rsm_release_controller(name, instance,
		    &rsmpi_adapter_object);
		return (NULL);
	}

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_add_adapter: register service offset = %d\n", hwaddr));

	/*
	 * create a srv_handler_arg_t object, initialize it and register
	 * it along with rsm_srv_func. This gets passed as the
	 * rsm_intr_hand_arg_t when the handler gets invoked.
	 */
	srv_hdlr_argp = kmem_zalloc(sizeof (srv_handler_arg_t), KM_SLEEP);

	(void) strcpy(srv_hdlr_argp->adapter_name, name);
	srv_hdlr_argp->adapter_instance = instance;
	srv_hdlr_argp->adapter_hwaddr = hwaddr;

	/* Have rsmpi register the ipc receive handler for this adapter */
	/*
	 * Currently, we need to pass in a separate service identifier for
	 * each adapter. In order to obtain a unique service identifier
	 * value for an adapter, we add the hardware address of the
	 * adapter to the base service identifier(RSM_SERVICE which is
	 * defined as RSM_INTR_T_KA as per the RSMPI specification).
	 * NOTE: This may result in using some of the service identifier
	 * values defined for RSM_INTR_T_XPORT(the Sun Cluster Transport).
	 */
	result = rsmpi_ops_vector->rsm_register_handler(
	    rsmpi_adapter_handle, &rsmpi_adapter_object,
	    RSM_SERVICE+(uint_t)hwaddr, rsm_srv_func,
	    (rsm_intr_hand_arg_t)srv_hdlr_argp, NULL, 0);

	if (result != RSM_SUCCESS) {
		DBG_PRINTF((category, RSM_ERR,
		    "rsmka_add_adapter done: rsm_register_handler"
		    " failed %d\n", instance));
		return (NULL);
	}

	/* Initialize an adapter descriptor and add it to the adapter list */
	adapter = init_adapter(name, instance, hwaddr,
	    rsmpi_adapter_handle, rsmpi_ops_vector, srv_hdlr_argp);

	/* Copy over the attributes from the pointer returned to us */
	adapter->rsm_attr = *attr;

	/*
	 * With the addition of the topology obtainment interface, applications
	 * now get the local nodeid from the topology data structure.
	 *
	 * adapter->rsm_attr.attr_node_id = my_nodeid;
	 */
	DBG_PRINTF((category, RSM_ERR,
	    "rsmka_add_adapter: adapter = %lx\n", adapter));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_adapter done\n"));

	/* return adapter pointer as a cookie for later fast access */
	return ((void *)adapter);
}
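
/*
 * Illustrative use of the returned cookie (hypothetical caller; the
 * actual callers live in the Path-Manager/Kernel-Agent interface):
 *
 *	void *cookie = rsmka_add_adapter(name, instance, hwaddr);
 *	...
 *	(void) rsmka_remove_adapter(name, instance, cookie,
 *	    RSMKA_USE_COOKIE);
 */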
/*
 * Unlink the adapter descriptor and call rsmka_release_adapter which
 * will decrement the reference count and possibly free the descriptor.
 */
boolean_t
rsmka_remove_adapter(char *name, uint_t instance, void *cookie, int flags)
{
	adapter_t		*adapter;
	adapter_listhead_t	*listhead;
	adapter_t		*prev, *current;
	rsm_controller_object_t	rsm_cntl_obj;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_remove_adapter enter\n"));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_remove_adapter: cookie = %lx\n", cookie));

	if (flags & RSMKA_USE_COOKIE) {
		adapter = (adapter_t *)cookie;
	} else {
		adapter = rsmka_lookup_adapter(name, instance);
		/*
		 * rsmka_lookup_adapter increments the ref_cnt; need
		 * to decrement here to get true count
		 */
		ADAPTER_RELE(adapter);
	}
	ASSERT(adapter->next_path == NULL);

	listhead = adapter->listhead;

	mutex_enter(&adapter_listhead_base.listlock);

	mutex_enter(&listhead->mutex);

	/* find the adapter in the list and remove it */
	prev = NULL;
	current = listhead->next_adapter;
	while (current != NULL) {
		if (adapter->instance == current->instance)
			break;
		else {
			prev = current;
			current = current->next;
		}
	}
	ASSERT(current != NULL);

	if (prev == NULL)
		listhead->next_adapter = current->next;
	else
		prev->next = current->next;

	listhead->adapter_count--;

	mutex_exit(&listhead->mutex);

	mutex_exit(&adapter_listhead_base.listlock);

	mutex_enter(&current->mutex);

	/*
	 * unregister the handler
	 */
	current->rsmpi_ops->rsm_unregister_handler(current->rsmpi_handle,
	    RSM_SERVICE+current->hwaddr, rsm_srv_func,
	    (rsm_intr_hand_arg_t)current->hdlr_argp);

	DBG_PRINTF((category, RSM_DEBUG, "rsmka_remove_adapter: unreg hdlr "
	    ":adapter=%lx, hwaddr=%lx\n", current, current->hwaddr));

	rsm_cntl_obj.handle = current->rsmpi_handle;
	rsm_cntl_obj.ops = current->rsmpi_ops;

	(void) rsm_release_controller(current->listhead->adapter_devname,
	    current->instance, &rsm_cntl_obj);

	mutex_exit(&current->mutex);

	rsmka_release_adapter(current);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_remove_adapter done\n"));

	return (B_TRUE);
}
/*
 * An adapter descriptor will exist from an earlier add_adapter.  This
 * function does:
 *		initialize the path descriptor
 *		initialize the ipc descriptor (it may already exist)
 *		initialize and link a sendq token for this path
 */
void *
rsmka_add_path(char *adapter_name, int adapter_instance,
    rsm_node_id_t remote_node,
    rsm_addr_t remote_hwaddr, int rem_adapt_instance,
    void *cookie, int flags)
{
	path_t		*path;
	adapter_t	*adapter;
	char		tq_name[TASKQ_NAMELEN];

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_path enter\n"));

	/* allocate new path descriptor */
	path = kmem_zalloc(sizeof (path_t), KM_SLEEP);

	if (flags & RSMKA_USE_COOKIE) {
		adapter = (adapter_t *)cookie;
		ADAPTER_HOLD(adapter);
	} else {
		adapter = rsmka_lookup_adapter(adapter_name, adapter_instance);
	}

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_add_path: adapter = %lx\n", adapter));

	/*
	 * initialize path descriptor
	 * don't need to increment adapter reference count because
	 * it can't be removed if paths exist for it.
	 */
	mutex_init(&path->mutex, NULL, MUTEX_DEFAULT, NULL);

	PATH_HOLD(path);
	path->state = RSMKA_PATH_DOWN;
	path->remote_node = remote_node;
	path->remote_hwaddr = remote_hwaddr;
	path->remote_devinst = rem_adapt_instance;
	path->local_adapter = adapter;

	/* taskq is for sendq on adapter with remote_hwaddr on remote_node */
	(void) snprintf(tq_name, sizeof (tq_name), "%x_%llx",
	    remote_node, (unsigned long long) remote_hwaddr);

	path->recv_taskq = taskq_create_instance(tq_name, adapter_instance,
	    RSMKA_ONE_THREAD, maxclsyspri, RSMIPC_MAX_MESSAGES,
	    RSMIPC_MAX_MESSAGES, TASKQ_PREPOPULATE);

	/* allocate the message buffer array */
	path->msgbuf_queue = (msgbuf_elem_t *)kmem_zalloc(
	    RSMIPC_MAX_MESSAGES * sizeof (msgbuf_elem_t), KM_SLEEP);

	/*
	 * init cond variables for synch with rsmipc_send()
	 * and rsmka_remove_path
	 */
	cv_init(&path->sendq_token.sendq_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&path->hold_cv, NULL, CV_DEFAULT, NULL);

	/* link path descriptor on adapter path list */
	link_path(path);

	/* link the path sendq token on the ipc_info token list */
	link_sendq_token(&path->sendq_token, remote_node);

	/* ADAPTER_HOLD done above by rsmka_lookup_adapter */
	ADAPTER_RELE(adapter);

	DBG_PRINTF((category, RSM_DEBUG, "rsmka_add_path: path = %lx\n", path));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_path done\n"));

	return ((void *)path);
}
/*
 * Wait for the path descriptor reference count to become zero then
 * directly call path down processing.  Finally, unlink the sendq token and
 * free the path descriptor memory.
 *
 * Note: lookup_path locks the path and increments the path hold count
 */
void
rsmka_remove_path(char *adapter_name, int instance, rsm_node_id_t remote_node,
    rsm_addr_t remote_hwaddr, void *path_cookie, int flags)
{
	path_t		*path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_remove_path enter\n"));

	if (flags & RSMKA_USE_COOKIE) {
		path = (path_t *)path_cookie;
		mutex_enter(&path->mutex);
	} else {
		path = lookup_path(adapter_name, instance, remote_node,
		    remote_hwaddr);

		/*
		 * remember, lookup_path increments the reference
		 * count - so decrement now so we can get to zero
		 */
		PATH_RELE_NOLOCK(path);
	}

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_remove_path: path = %lx\n", path));

	while (path->state == RSMKA_PATH_GOING_DOWN)
		cv_wait(&path->hold_cv, &path->mutex);

	/* attempt to cancel any possibly pending work */
	mutex_enter(&work_queue.work_mutex);
	if (cancel_work(&path->work_token[RSMKA_IPC_UP_INDEX])) {
		PATH_RELE_NOLOCK(path);
	}
	if (cancel_work(&path->work_token[RSMKA_IPC_DOWN_INDEX])) {
		PATH_RELE_NOLOCK(path);
	}
	mutex_exit(&work_queue.work_mutex);

	/*
	 * The path descriptor ref cnt was set to 1 initially when
	 * the path was added.  So we need to do a decrement here to
	 * mark the removal of the path.
	 */
	PATH_RELE_NOLOCK(path);

	switch (path->state) {
	case RSMKA_PATH_UP:
		/* clear the flag */
		path->flags &= ~RSMKA_SQCREATE_PENDING;
		path->state = RSMKA_PATH_DOWN;
		break;
	case RSMKA_PATH_DOWN:
		break;

	case RSMKA_PATH_ACTIVE:
		/*
		 * rsmka_remove_path should not call do_path_down
		 * with the RSMKA_NO_SLEEP flag set since for
		 * this code path, the deferred work would
		 * incorrectly do a PATH_RELE_NOLOCK.
		 */
		do_path_down(path, 0);
		break;
	default:
		mutex_exit(&path->mutex);
		DBG_PRINTF((category, RSM_ERR,
		    "rsm_remove_path: invalid path state %d\n",
		    path->state));
		return;
	}

	/*
	 * wait for all references to the path to be released. If a thread
	 * was waiting to receive credits do_path_down should wake it up
	 * since the path is going down and that will cause the sleeping
	 * thread to release its hold on the path.
	 */
	while (path->ref_cnt != 0) {
		cv_wait(&path->hold_cv, &path->mutex);
	}

	mutex_exit(&path->mutex);

	/*
	 * remove from ipc token list
	 * NOTE: use the remote_node value from the path structure
	 * since for RSMKA_USE_COOKIE being set, the remote_node
	 * value passed into rsmka_remove_path is 0.
	 */
	unlink_sendq_token(&path->sendq_token, path->remote_node);

	/* unlink from adapter path list and free path descriptor */
	destroy_path(path);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_remove_path done\n"));
}
/*
 * lookup_path locks the path and increments the path hold count. If the remote
 * node is not in the alive state, do_path_up will release the lock and
 * decrement the hold count.  Otherwise rsmka_do_path_active will release the
 * lock prior to waking up the work thread.
 *
 * The path descriptor ref_cnt is incremented here; it will be decremented
 * when path up processing is completed in do_path_up or by the work thread
 * if the path up is deferred.
 */
boolean_t
rsmka_path_up(char *adapter_name, uint_t adapter_instance,
    rsm_node_id_t remote_node, rsm_addr_t remote_hwaddr,
    void *path_cookie, int flags)
{
	path_t		*path;
	boolean_t	rval = B_TRUE;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_up enter\n"));

	if (flags & RSMKA_USE_COOKIE) {
		path = (path_t *)path_cookie;
		mutex_enter(&path->mutex);
		PATH_HOLD_NOLOCK(path);
	} else {
		path = lookup_path(adapter_name, adapter_instance,
		    remote_node, remote_hwaddr);
	}

	while (path->state == RSMKA_PATH_GOING_DOWN)
		cv_wait(&path->hold_cv, &path->mutex);

	DBG_PRINTF((category, RSM_DEBUG, "rsmka_path_up: path = %lx\n", path));

	rval = do_path_up(path, flags);
	mutex_exit(&path->mutex);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_up done\n"));

	return (rval);
}
/*
 * lookup_path locks the path and increments the path hold count. If the
 * current state is ACTIVE the path lock is released prior to waking up
 * the work thread in do_path_down.  The work thread will decrement the hold
 * count when the work for this is finished.
 *
 * The path descriptor ref_cnt is incremented here; it will be decremented
 * when path down processing is completed in do_path_down or by the work thread
 * if the path down is deferred.
 */
boolean_t
rsmka_path_down(char *adapter_devname, int instance, rsm_node_id_t remote_node,
    rsm_addr_t remote_hwaddr, void *path_cookie, int flags)
{
	path_t		*path;
	boolean_t	rval = B_TRUE;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_down enter\n"));

	if (flags & RSMKA_USE_COOKIE) {
		path = (path_t *)path_cookie;
		mutex_enter(&path->mutex);
		PATH_HOLD_NOLOCK(path);
	} else {
		path = lookup_path(adapter_devname, instance, remote_node,
		    remote_hwaddr);
	}

	while (path->state == RSMKA_PATH_GOING_DOWN)
		cv_wait(&path->hold_cv, &path->mutex);

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_path_down: path = %lx\n", path));

	switch (path->state) {
	case RSMKA_PATH_UP:
		/* clear the flag */
		path->flags &= ~RSMKA_SQCREATE_PENDING;
		path->state = RSMKA_PATH_GOING_DOWN;
		mutex_exit(&path->mutex);

		/*
		 * release path->mutex since enqueued tasks acquire it.
		 * Drain all the enqueued tasks.
		 */
		taskq_wait(path->recv_taskq);

		mutex_enter(&path->mutex);
		path->state = RSMKA_PATH_DOWN;
		PATH_RELE_NOLOCK(path);
		break;
	case RSMKA_PATH_DOWN:
		PATH_RELE_NOLOCK(path);
		break;
	case RSMKA_PATH_ACTIVE:
		do_path_down(path, flags);
		/*
		 * Need to release the path refcnt. Either done in do_path_down
		 * or do_deferred_work for RSMKA_NO_SLEEP being set. Has to be
		 * done here for RSMKA_NO_SLEEP not set.
		 */
		if (!(flags & RSMKA_NO_SLEEP))
			PATH_RELE_NOLOCK(path);
		break;
	default:
		DBG_PRINTF((category, RSM_ERR,
		    "rsm_path_down: invalid path state %d\n", path->state));
		rval = B_FALSE;
	}

	mutex_exit(&path->mutex);
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_down done\n"));

	return (rval);
}
/*
 * Paths cannot become active until node_is_alive is marked true
 * in the ipc_info descriptor for the node
 *
 * In the event this is called before any paths have been added,
 * init_ipc_info is called here.
 */
boolean_t
rsmka_node_alive(rsm_node_id_t remote_node)
{
	ipc_info_t *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_alive enter\n"));

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_node_alive: remote_node = %x\n", remote_node));

	ipc_info = lookup_ipc_info(remote_node);

	if (ipc_info == NULL) {
		ipc_info = init_ipc_info(remote_node, B_TRUE);
		DBG_PRINTF((category, RSM_DEBUG,
		    "rsmka_node_alive: new ipc_info = %lx\n", ipc_info));
	} else {
		ASSERT(ipc_info->node_is_alive == B_FALSE);
		ipc_info->node_is_alive = B_TRUE;
	}

	pathup_to_pathactive(ipc_info, remote_node);

	mutex_exit(&ipc_info_lock);

	/* rsmipc_send() may be waiting for a sendq_token */
	mutex_enter(&ipc_info_cvlock);
	cv_broadcast(&ipc_info_cv);
	mutex_exit(&ipc_info_cvlock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_alive done\n"));

	return (B_TRUE);
}
/*
 * Paths cannot become active when node_is_alive is marked false
 * in the ipc_info descriptor for the node
 */
boolean_t
rsmka_node_died(rsm_node_id_t remote_node)
{
	ipc_info_t *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_died enter\n"));

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_node_died: remote_node = %x\n", remote_node));

	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL)
		return (B_FALSE);

	ASSERT(ipc_info->node_is_alive == B_TRUE);
	ipc_info->node_is_alive = B_FALSE;

	rsm_suspend_complete(remote_node, RSM_SUSPEND_NODEDEAD);

	mutex_exit(&ipc_info_lock);

	/* rsmipc_send() may be waiting for a sendq_token */
	mutex_enter(&ipc_info_cvlock);
	cv_broadcast(&ipc_info_cv);
	mutex_exit(&ipc_info_cvlock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_died done\n"));

	return (B_TRUE);
}
/*
 * Treat like path_down for all paths for the specified remote node.
 * Always invoked before node died.
 *
 * NOTE: This routine is not called from the cluster path interface; the
 * rsmka_path_down is called directly for each path.
 */
void
rsmka_disconnect_node(rsm_node_id_t remote_node, int flags)
{
	ipc_info_t	*ipc_info;
	path_t		*path;
	sendq_token_t	*sendq_token;
	work_token_t	*up_token;
	work_token_t	*down_token;
	boolean_t	do_work = B_FALSE;
	boolean_t	cancelled = B_FALSE;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_disconnect_node enter\n"));

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_disconnect_node: node = %d\n", remote_node));

	if (flags & RSMKA_NO_SLEEP) {
		ipc_info = lookup_ipc_info(remote_node);

		sendq_token = ipc_info->token_list;

		while (sendq_token != NULL) {
			path = SQ_TOKEN_TO_PATH(sendq_token);
			PATH_HOLD(path);
			up_token = &path->work_token[RSMKA_IPC_UP_INDEX];
			down_token = &path->work_token[RSMKA_IPC_DOWN_INDEX];

			mutex_enter(&work_queue.work_mutex);

			/* if an up token is enqueued, remove it */
			cancelled = cancel_work(up_token);

			/*
			 * If the path is active and down work hasn't
			 * already been setup then down work is needed.
			 * else
			 * if up work wasn't canceled because it was
			 * already being processed then down work is needed
			 */
			if (path->state == RSMKA_PATH_ACTIVE) {
				if (down_token->opcode == 0)
					do_work = B_TRUE;
			} else
				if (up_token->opcode == RSMKA_IPC_UP)
					do_work = B_TRUE;

			if (do_work == B_TRUE) {
				down_token->opcode = RSMKA_IPC_DOWN;
				enqueue_work(down_token);
			}
			mutex_exit(&work_queue.work_mutex);

			if (do_work == B_FALSE)
				PATH_RELE(path);
			else
				do_work = B_FALSE;

			if (cancelled)
				PATH_RELE(path);

			sendq_token = sendq_token->next;
			if (sendq_token == ipc_info->token_list)
				break;
		}

		/*
		 * Now that all the work is enqueued, wakeup the work
		 * thread.
		 */
		mutex_enter(&work_queue.work_mutex);
		cv_signal(&work_queue.work_cv);
		mutex_exit(&work_queue.work_mutex);

		IPCINFO_RELE_NOLOCK(ipc_info);
		mutex_exit(&ipc_info_lock);

	} else {
		/* get locked ipc_info descriptor */
		ipc_info = lookup_ipc_info(remote_node);

		sendq_token = ipc_info->token_list;
		while (sendq_token != NULL) {
			path = SQ_TOKEN_TO_PATH(sendq_token);
			DBG_PRINTF((category, RSM_DEBUG,
			    "rsmka_disconnect_node: path_down "
			    "for path = %lx\n", path));
			(void) rsmka_path_down(0, 0, 0, 0,
			    path, RSMKA_USE_COOKIE);
			sendq_token = sendq_token->next;
			if (sendq_token == ipc_info->token_list)
				break;
		}
		mutex_exit(&ipc_info_lock);
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_disconnect_node done\n"));
}
/*
 * Called from rsm_node_alive - if a path to a remote node is in
 * state RSMKA_PATH_UP, transition the state to RSMKA_PATH_ACTIVE with a
 * call to rsmka_do_path_active.
 *
 * The path descriptor ref_cnt is incremented here; it will be decremented
 * when path up processing is completed in rsmka_do_path_active or by the work
 * thread if the path up is deferred.
 */
static void
pathup_to_pathactive(ipc_info_t *ipc_info, rsm_node_id_t remote_node)
{
	path_t		*path;
	sendq_token_t	*token;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "pathup_to_pathactive enter\n"));

	remote_node = remote_node;

	ASSERT(MUTEX_HELD(&ipc_info_lock));

	token = ipc_info->token_list;
	while (token != NULL) {
		path = SQ_TOKEN_TO_PATH(token);
		mutex_enter(&path->mutex);
		if (path->state == RSMKA_PATH_UP) {
			PATH_HOLD_NOLOCK(path);
			(void) rsmka_do_path_active(path, 0);
		}
		mutex_exit(&path->mutex);
		token = token->next;
		if (token == ipc_info->token_list)
			break;
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "pathup_to_pathactive done\n"));
}
/*
 * Called from pathup_to_pathactive and do_path_up. The objective is to
 * create an ipc send queue and transition to state RSMKA_PATH_ACTIVE.
 * For the no sleep case we may need to defer the work using a token.
 */
boolean_t
rsmka_do_path_active(path_t *path, int flags)
{
	work_token_t	*up_token = &path->work_token[RSMKA_IPC_UP_INDEX];
	work_token_t	*down_token = &path->work_token[RSMKA_IPC_DOWN_INDEX];
	boolean_t	do_work = B_FALSE;
	int		error;
	timespec_t	tv;
	adapter_t	*adapter;
	rsm_send_q_handle_t	sqhdl;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_do_path_active enter\n"));

	ASSERT(MUTEX_HELD(&path->mutex));

	if (flags & RSMKA_NO_SLEEP) {
		mutex_enter(&work_queue.work_mutex);

		/* if a down token is enqueued, remove it */
		if (cancel_work(down_token)) {
			PATH_RELE_NOLOCK(path);
		}

		/*
		 * If the path is not active and up work hasn't
		 * already been setup then up work is needed.
		 * else
		 * if down work wasn't canceled because it was
		 * already being processed then up work is needed
		 */
		if (path->state != RSMKA_PATH_ACTIVE) {
			if (up_token->opcode == 0)
				do_work = B_TRUE;
		} else
			if (down_token->opcode == RSMKA_IPC_DOWN)
				do_work = B_TRUE;

		if (do_work == B_TRUE) {
			up_token->opcode = RSMKA_IPC_UP;
			enqueue_work(up_token);
		} else
			PATH_RELE_NOLOCK(path);

		mutex_exit(&work_queue.work_mutex);
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_do_path_active done\n"));

		return (B_TRUE);
	} else {
		/*
		 * Drop the path lock before calling create_ipc_sendq, shouldn't
		 * hold locks across calls to RSMPI routines.
		 */
		mutex_exit(&path->mutex);

		error = create_ipc_sendq(path);

		mutex_enter(&path->mutex);
		if (path->state != RSMKA_PATH_UP) {
			/*
			 * path state has changed, if sendq was created,
			 * destroy it and return
			 */
			if (error == RSM_SUCCESS) {
				sqhdl = path->sendq_token.rsmpi_sendq_handle;
				path->sendq_token.rsmpi_sendq_handle = NULL;
				adapter = path->local_adapter;
				mutex_exit(&path->mutex);

				if (sqhdl != NULL) {
					adapter->rsmpi_ops->rsm_sendq_destroy(
					    sqhdl);
				}
				mutex_enter(&path->mutex);
			}
			PATH_RELE_NOLOCK(path);

			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "rsmka_do_path_active done: path=%lx not UP\n",
			    (uintptr_t)path));

			return (error ? B_FALSE : B_TRUE);
		}

		if (error == RSM_SUCCESS) {
			/* clear flag since sendq_create succeeded */
			path->flags &= ~RSMKA_SQCREATE_PENDING;
			path->state = RSMKA_PATH_ACTIVE;
			/*
			 * now that path is active we send the
			 * RSMIPC_MSG_SQREADY to the remote endpoint
			 */
			path->procmsg_cnt = 0;
			path->sendq_token.msgbuf_avail = 0;

			/* Calculate local incarnation number */
			gethrestime(&tv);
			if (tv.tv_sec == RSM_UNKNOWN_INCN)
				tv.tv_sec = 1;
			path->local_incn = (int64_t)tv.tv_sec;

			/*
			 * if send fails here its due to some non-transient
			 * error because QUEUE_FULL is not possible here since
			 * we are the first message on this sendq. The error
			 * will cause the path to go down anyways so ignore
			 * the return value.
			 */
			(void) rsmipc_send_controlmsg(path, RSMIPC_MSG_SQREADY);
			/* wait for SQREADY_ACK message */
			path->flags |= RSMKA_WAIT_FOR_SQACK;

			DBG_PRINTF((category, RSM_DEBUG,
			    "rsmka_do_path_active success\n"));
		} else {
			/*
			 * sendq create failed possibly because
			 * the remote end is not yet ready eg.
			 * handler not registered, set a flag
			 * so that when there is an indication
			 * that the remote end is ready rsmka_do_path_active
			 * will be retried.
			 */
			path->flags |= RSMKA_SQCREATE_PENDING;
		}

		PATH_RELE_NOLOCK(path);

		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_do_path_active done\n"));

		return (error ? B_FALSE : B_TRUE);
	}
}
/*
 * Called from rsm_path_up.
 * If the remote node state is "alive" then call rsmka_do_path_active
 * otherwise just transition path state to RSMKA_PATH_UP.
 */
static boolean_t
do_path_up(path_t *path, int flags)
{
	boolean_t	rval;
	boolean_t	node_alive;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_up enter\n"));

	ASSERT(MUTEX_HELD(&path->mutex));

	/* path moved to ACTIVE by rsm_sqcreateop_callback - just return */
	if (path->state == RSMKA_PATH_ACTIVE) {
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "do_path_up done: already ACTIVE\n"));
		PATH_RELE_NOLOCK(path);
		return (B_TRUE);
	}

	path->state = RSMKA_PATH_UP;

	/* initialize the receive msgbuf counters */
	path->msgbuf_head = 0;
	path->msgbuf_tail = RSMIPC_MAX_MESSAGES - 1;
	path->msgbuf_cnt = 0;
	path->procmsg_cnt = 0;

	/*
	 * rsmka_check_node_alive acquires ipc_info_lock, in order to maintain
	 * correct lock ordering drop the path lock before calling it.
	 */
	mutex_exit(&path->mutex);

	node_alive = rsmka_check_node_alive(path->remote_node);

	mutex_enter(&path->mutex);
	if (node_alive == B_TRUE)
		rval = rsmka_do_path_active(path, flags);
	else {
		PATH_RELE_NOLOCK(path);
		rval = B_TRUE;
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_up done\n"));

	return (rval);
}
/*
 * Called from rsm_remove_path, rsm_path_down, deferred_work.
 * Destroy the send queue on this path.
 * Disconnect segments being imported from the remote node
 * Disconnect segments being imported by the remote node
 */
static void
do_path_down(path_t *path, int flags)
{
	work_token_t	*up_token = &path->work_token[RSMKA_IPC_UP_INDEX];
	work_token_t	*down_token = &path->work_token[RSMKA_IPC_DOWN_INDEX];
	boolean_t	do_work = B_FALSE;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_down enter\n"));

	ASSERT(MUTEX_HELD(&path->mutex));

	if (flags & RSMKA_NO_SLEEP) {
		mutex_enter(&work_queue.work_mutex);
		DBG_PRINTF((category, RSM_DEBUG,
		    "do_path_down: after work_mutex\n"));

		/* if an up token is enqueued, remove it */
		if (cancel_work(up_token)) {
			PATH_RELE_NOLOCK(path);
		}

		/*
		 * If the path is active and down work hasn't
		 * already been setup then down work is needed.
		 * else
		 * if up work wasn't canceled because it was
		 * already being processed then down work is needed
		 */
		if (path->state == RSMKA_PATH_ACTIVE) {
			if (down_token->opcode == 0)
				do_work = B_TRUE;
		} else
			if (up_token->opcode == RSMKA_IPC_UP)
				do_work = B_TRUE;

		if (do_work == B_TRUE) {
			down_token->opcode = RSMKA_IPC_DOWN;
			enqueue_work(down_token);
		} else
			PATH_RELE_NOLOCK(path);

		mutex_exit(&work_queue.work_mutex);

	} else {
		/*
		 * Change state of the path to RSMKA_PATH_GOING_DOWN and
		 * release the path mutex. Any other thread referring
		 * this path would cv_wait till the state of the path
		 * remains RSMKA_PATH_GOING_DOWN.
		 * On completing the path down processing, change the
		 * state of RSMKA_PATH_DOWN indicating that the path
		 * is indeed down.
		 */
		path->state = RSMKA_PATH_GOING_DOWN;

		/*
		 * clear the WAIT_FOR_SQACK flag since path is going down.
		 */
		path->flags &= ~RSMKA_WAIT_FOR_SQACK;

		/*
		 * this wakes up any thread waiting to receive credits
		 * in rsmipc_send to tell it that the path is going down
		 */
		cv_broadcast(&path->sendq_token.sendq_cv);

		mutex_exit(&path->mutex);

		/*
		 * drain the messages from the receive msgbuf, the
		 * tasks in the taskq_thread acquire the path->mutex
		 * so we drop the path mutex before taskq_wait.
		 */
		taskq_wait(path->recv_taskq);

		/*
		 * Disconnect segments being imported from the remote node
		 * The path_importer_disconnect function needs to be called
		 * only after releasing the mutex on the path. This is to
		 * avoid a recursive mutex enter when doing the
		 * rsmka_get_sendq_token.
		 */
		path_importer_disconnect(path);

		/*
		 * Get the path mutex, change the state of the path to
		 * RSMKA_PATH_DOWN since the path down processing has
		 * completed and cv_signal anyone who was waiting since
		 * the state was RSMKA_PATH_GOING_DOWN.
		 * NOTE: Do not do a mutex_exit here. We entered this
		 * routine with the path lock held by the caller. The
		 * caller eventually releases the path lock by doing a
		 * mutex_exit.
		 */
		mutex_enter(&path->mutex);

		/*
		 * Some IPC messages left in the recv_buf,
		 * they'll be dropped
		 */
		if (path->msgbuf_cnt != 0)
			cmn_err(CE_NOTE, "path=%lx msgbuf_cnt != 0\n",
			    (uintptr_t)path);

		/*
		 * Don't want to destroy a send queue when a token
		 * has been acquired; so wait 'til the token is
		 * no longer referenced (with a cv_wait).
		 */
		while (path->sendq_token.ref_cnt != 0)
			cv_wait(&path->sendq_token.sendq_cv,
			    &path->mutex);

		/* release the rsmpi handle */
		if (path->sendq_token.rsmpi_sendq_handle != NULL)
			path->local_adapter->rsmpi_ops->rsm_sendq_destroy(
			    path->sendq_token.rsmpi_sendq_handle);

		path->sendq_token.rsmpi_sendq_handle = NULL;

		path->state = RSMKA_PATH_DOWN;

		cv_signal(&path->hold_cv);
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_down done\n"));
}
/*
 * Search through the list of imported segments for segments using this path
 * and unload the memory mappings for each one.  The application will
 * get an error return when a barrier close is invoked.
 * NOTE: This function has to be called only after releasing the mutex on
 * the path. This is to avoid any recursive mutex panics on the path mutex
 * since the path_importer_disconnect function would end up calling
 * rsmka_get_sendq_token which requires the path mutex.
 */
static void
path_importer_disconnect(path_t *path)
{
	int		i;
	adapter_t	*adapter = path->local_adapter;
	rsm_node_id_t	remote_node = path->remote_node;
	rsmresource_t	*p = NULL;
	rsmseg_t	*seg;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "path_importer_disconnect enter\n"));

	rw_enter(&rsm_import_segs.rsmhash_rw, RW_READER);

	if (rsm_import_segs.bucket != NULL) {
		for (i = 0; i < rsm_hash_size; i++) {
			p = rsm_import_segs.bucket[i];
			for (; p; p = p->rsmrc_next) {
				if ((p->rsmrc_node == remote_node) &&
				    (p->rsmrc_adapter == adapter)) {
					seg = (rsmseg_t *)p;
			/*
			 * In order to make rsmseg_unload and
			 * path_importer_disconnect thread safe, acquire the
			 * segment lock here. rsmseg_unload is responsible for
			 * releasing the lock. rsmseg_unload releases the lock
			 * just before a call to rsmipc_send or in case of an
			 * early exit which occurs if the segment was in the
			 * state RSM_STATE_CONNECTING or RSM_STATE_NEW.
			 */
					rsmseglock_acquire(seg);
					seg->s_flags |= RSM_FORCE_DISCONNECT;
					rsmseg_unload(seg);
				}
			}
		}
	}
	rw_exit(&rsm_import_segs.rsmhash_rw);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "path_importer_disconnect done\n"));
}
/*
 * ADAPTER UTILITY FUNCTIONS
 */

/*
 * Allocate new adapter list head structure and add it to the beginning of
 * the list of adapter list heads.  There is one list for each adapter
 * device name (or type).
 */
static adapter_listhead_t *
init_listhead(char *name)
{
	adapter_listhead_t *listhead;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_listhead enter\n"));

	/* allocation and initialization */
	listhead = kmem_zalloc(sizeof (adapter_listhead_t), KM_SLEEP);
	mutex_init(&listhead->mutex, NULL, MUTEX_DEFAULT, NULL);
	(void) strcpy(listhead->adapter_devname, name);

	/* link into list of listheads */
	mutex_enter(&adapter_listhead_base.listlock);
	if (adapter_listhead_base.next == NULL) {
		adapter_listhead_base.next = listhead;
		listhead->next_listhead = NULL;
	} else {
		listhead->next_listhead = adapter_listhead_base.next;
		adapter_listhead_base.next = listhead;
	}
	mutex_exit(&adapter_listhead_base.listlock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_listhead done\n"));

	return (listhead);
}
/*
 * Search the list of adapter list heads for a match on name.
 */
static adapter_listhead_t *
lookup_adapter_listhead(char *name)
{
	adapter_listhead_t *listhead;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "lookup_adapter_listhead enter\n"));

	mutex_enter(&adapter_listhead_base.listlock);
	listhead = adapter_listhead_base.next;
	while (listhead != NULL) {
		if (strcmp(name, listhead->adapter_devname) == 0)
			break;
		listhead = listhead->next_listhead;
	}
	mutex_exit(&adapter_listhead_base.listlock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "lookup_adapter_listhead done\n"));

	return (listhead);
}
/*
 * Get the adapter list head corresponding to devname and search for
 * an adapter descriptor with a match on the instance number.  If
 * successful, increment the descriptor reference count and return
 * the descriptor pointer to the caller.
 */
adapter_t *
rsmka_lookup_adapter(char *devname, int instance)
{
	adapter_listhead_t	*listhead;
	adapter_t		*current = NULL;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_lookup_adapter enter\n"));

	listhead = lookup_adapter_listhead(devname);
	if (listhead != NULL) {
		mutex_enter(&listhead->mutex);

		current = listhead->next_adapter;
		while (current != NULL) {
			if (current->instance == instance) {
				ADAPTER_HOLD(current);
				break;
			} else
				current = current->next;
		}

		mutex_exit(&listhead->mutex);
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_lookup_adapter done\n"));

	return (current);
}
/*
 * Called from rsmka_remove_adapter or rsmseg_free.
 * rsm_bind() and rsm_connect() store the adapter pointer returned
 * from rsmka_getadapter.  The pointer is kept in the segment descriptor.
 * When the segment is freed, this routine is called by rsmseg_free to decrement
 * the adapter descriptor reference count and possibly free the
 * descriptor.
 */
void
rsmka_release_adapter(adapter_t *adapter)
{
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_release_adapter enter\n"));

	if (adapter == &loopback_adapter) {
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_release_adapter done\n"));
		return;
	}

	mutex_enter(&adapter->mutex);

	/* decrement reference count */
	ADAPTER_RELE_NOLOCK(adapter);

	/*
	 * if the adapter descriptor reference count is equal to the
	 * initialization value of one, then the descriptor has been
	 * unlinked and can now be freed.
	 */
	if (adapter->ref_cnt == 1) {
		mutex_exit(&adapter->mutex);

		mutex_destroy(&adapter->mutex);
		kmem_free(adapter->hdlr_argp, sizeof (srv_handler_arg_t));
		kmem_free(adapter, sizeof (adapter_t));
	} else
		mutex_exit(&adapter->mutex);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_release_adapter done\n"));
}
/*
 * Singly linked list. Add to the front.
 */
static void
link_adapter(adapter_t *adapter)
{
	adapter_listhead_t	*listhead;
	adapter_t		*current;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_adapter enter\n"));

	mutex_enter(&adapter_listhead_base.listlock);

	mutex_enter(&adapter->listhead->mutex);

	listhead = adapter->listhead;
	current = listhead->next_adapter;
	listhead->next_adapter = adapter;
	adapter->next = current;
	ADAPTER_HOLD(adapter);

	adapter->listhead->adapter_count++;

	mutex_exit(&adapter->listhead->mutex);

	mutex_exit(&adapter_listhead_base.listlock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_adapter done\n"));
}
/*
 * Allocate and initialize an adapter descriptor.
 * Return the adapter descriptor.
 *
 * lookup_adapter_listhead returns with the list of adapter listheads
 * locked. After adding the adapter descriptor, the adapter listhead list
 * lock is dropped.
 */
static adapter_t *
init_adapter(char *name, int instance, rsm_addr_t hwaddr,
    rsm_controller_handle_t handle, rsm_ops_t *ops,
    srv_handler_arg_t *hdlr_argp)
{
	adapter_t		*adapter;
	adapter_listhead_t	*listhead;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_adapter enter\n"));

	adapter = kmem_zalloc(sizeof (adapter_t), KM_SLEEP);
	adapter->instance = instance;
	adapter->hwaddr = hwaddr;
	adapter->rsmpi_handle = handle;
	adapter->rsmpi_ops = ops;
	adapter->hdlr_argp = hdlr_argp;
	mutex_init(&adapter->mutex, NULL, MUTEX_DEFAULT, NULL);
	ADAPTER_HOLD(adapter);

	listhead = lookup_adapter_listhead(name);
	if (listhead == NULL) {
		listhead = init_listhead(name);
	}

	adapter->listhead = listhead;

	link_adapter(adapter);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_adapter done\n"));

	return (adapter);
}
/*
 * PATH UTILITY FUNCTIONS
 */

/*
 * Search the per adapter path list for a match on remote node and
 * hwaddr.  The path ref_cnt must be greater than zero or the path
 * is in the process of being removed.
 *
 * Acquire the path lock and increment the path hold count.
 */
static path_t *
lookup_path(char *adapter_devname, int adapter_instance,
    rsm_node_id_t remote_node, rsm_addr_t hwaddr)
{
	path_t		*current;
	adapter_t	*adapter;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_path enter\n"));

	adapter = rsmka_lookup_adapter(adapter_devname, adapter_instance);
	ASSERT(adapter != NULL);

	mutex_enter(&adapter->listhead->mutex);

	/* start at the list head */
	current = adapter->next_path;

	while (current != NULL) {
		if ((current->remote_node == remote_node) &&
		    (current->remote_hwaddr == hwaddr) &&
		    (current->ref_cnt > 0))
			break;
		else
			current = current->next_path;
	}
	if (current != NULL) {
		mutex_enter(&current->mutex);
		PATH_HOLD_NOLOCK(current);
	}

	mutex_exit(&adapter->listhead->mutex);
	ADAPTER_RELE(adapter);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_path done\n"));

	return (current);
}
/*
 * This interface is similar to lookup_path but takes only the local
 * adapter name, instance and remote adapters hwaddr to identify the
 * path.  This is used in the interrupt handler routines where nodeid
 * is not always available.
 */
path_t *
rsm_find_path(char *adapter_devname, int adapter_instance, rsm_addr_t hwaddr)
{
	path_t		*current;
	adapter_t	*adapter;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_find_path enter\n"));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsm_find_path:adapter=%s:%d,rem=%llx\n",
	    adapter_devname, adapter_instance, hwaddr));

	adapter = rsmka_lookup_adapter(adapter_devname, adapter_instance);

	/*
	 * its possible that we are here due to an interrupt but the adapter
	 * has been removed after we received the callback.
	 */
	if (adapter == NULL)
		return (NULL);

	mutex_enter(&adapter->listhead->mutex);

	/* start at the list head */
	current = adapter->next_path;

	while (current != NULL) {
		if ((current->remote_hwaddr == hwaddr) &&
		    (current->ref_cnt > 0))
			break;
		else
			current = current->next_path;
	}
	if (current != NULL) {
		mutex_enter(&current->mutex);
		PATH_HOLD_NOLOCK(current);
	}

	mutex_exit(&adapter->listhead->mutex);

	rsmka_release_adapter(adapter);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_find_path done\n"));

	return (current);
}
/*
 * Add the path to the head of the (per adapter) list of paths
 */
static void
link_path(path_t *path)
{
	adapter_t	*adapter = path->local_adapter;
	path_t		*first_path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_path enter\n"));

	mutex_enter(&adapter_listhead_base.listlock);

	mutex_enter(&adapter->listhead->mutex);

	first_path = adapter->next_path;
	adapter->next_path = path;
	path->next_path = first_path;

	adapter->listhead->path_count++;

	mutex_exit(&adapter->listhead->mutex);

	mutex_exit(&adapter_listhead_base.listlock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_path done\n"));
}

/*
 * Search the per-adapter list of paths for the specified path, beginning
 * at the head of the list.  Unlink the path and free the descriptor
 * memory.
 */
static void
destroy_path(path_t *path)
{
	adapter_t	*adapter = path->local_adapter;
	path_t		*prev, *current;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_path enter\n"));

	mutex_enter(&adapter_listhead_base.listlock);

	mutex_enter(&path->local_adapter->listhead->mutex);

	ASSERT(path->ref_cnt == 0);

	/* start at the list head */
	prev = NULL;
	current = adapter->next_path;

	while (current != NULL) {
		if (path->remote_node == current->remote_node &&
		    path->remote_hwaddr == current->remote_hwaddr)
			break;
		else {
			prev = current;
			current = current->next_path;
		}
	}

	if (prev == NULL)
		adapter->next_path = current->next_path;
	else
		prev->next_path = current->next_path;

	path->local_adapter->listhead->path_count--;

	mutex_exit(&path->local_adapter->listhead->mutex);

	mutex_exit(&adapter_listhead_base.listlock);

	taskq_destroy(path->recv_taskq);

	kmem_free(path->msgbuf_queue,
	    RSMIPC_MAX_MESSAGES * sizeof (msgbuf_elem_t));

	mutex_destroy(&current->mutex);
	cv_destroy(&current->sendq_token.sendq_cv);
	cv_destroy(&path->hold_cv);
	kmem_free(current, sizeof (path_t));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_path done\n"));
}

void
rsmka_enqueue_msgbuf(path_t *path, void *data)
{
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_enqueue_msgbuf enter\n"));

	ASSERT(MUTEX_HELD(&path->mutex));

	ASSERT(path->msgbuf_cnt < RSMIPC_MAX_MESSAGES);

	/* increment the count and advance the tail */

	path->msgbuf_cnt++;

	if (path->msgbuf_tail == RSMIPC_MAX_MESSAGES - 1) {
		path->msgbuf_tail = 0;
	} else {
		path->msgbuf_tail++;
	}

	path->msgbuf_queue[path->msgbuf_tail].active = B_TRUE;

	bcopy(data, &(path->msgbuf_queue[path->msgbuf_tail].msg),
	    sizeof (rsmipc_request_t));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_enqueue_msgbuf done\n"));
}

/*
 * get the head of the queue using rsmka_gethead_msgbuf and then call
 * rsmka_dequeue_msgbuf to remove it.
 */
void
rsmka_dequeue_msgbuf(path_t *path)
{
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_dequeue_msgbuf enter\n"));

	ASSERT(MUTEX_HELD(&path->mutex));

	if (path->msgbuf_cnt == 0)
		return;

	path->msgbuf_cnt--;

	path->msgbuf_queue[path->msgbuf_head].active = B_FALSE;

	if (path->msgbuf_head == RSMIPC_MAX_MESSAGES - 1) {
		path->msgbuf_head = 0;
	} else {
		path->msgbuf_head++;
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_dequeue_msgbuf done\n"));
}

msgbuf_elem_t *
rsmka_gethead_msgbuf(path_t *path)
{
	msgbuf_elem_t	*head;

	ASSERT(MUTEX_HELD(&path->mutex));

	if (path->msgbuf_cnt == 0)
		return (NULL);

	head = &path->msgbuf_queue[path->msgbuf_head];

	return (head);
}
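
/*
 * Editor's illustration (a sketch, not part of the driver): the message
 * buffer is a fixed-size ring indexed by msgbuf_head/msgbuf_tail, so a
 * consumer peeks at the head element and only then removes it, all under
 * the path mutex.  process_msg() below is a hypothetical callee.
 *
 *	msgbuf_elem_t *elem;
 *
 *	mutex_enter(&path->mutex);
 *	while ((elem = rsmka_gethead_msgbuf(path)) != NULL) {
 *		process_msg(&elem->msg);	 hypothetical callee
 *		rsmka_dequeue_msgbuf(path);
 *	}
 *	mutex_exit(&path->mutex);
 */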

/*
 * Called by rsm_connect which needs the node id of the remote adapter.
 * A search is done through the paths for the local adapter for a match
 * on the specified remote hardware address.
 */
rsm_node_id_t
get_remote_nodeid(adapter_t *adapter, rsm_addr_t remote_hwaddr)
{
	rsm_node_id_t	remote_node;
	path_t		*current = adapter->next_path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_nodeid enter\n"));

	mutex_enter(&adapter->listhead->mutex);
	while (current != NULL) {
		if (current->remote_hwaddr == remote_hwaddr) {
			remote_node = current->remote_node;
			break;
		}
		current = current->next_path;
	}

	if (current == NULL)
		remote_node = (rsm_node_id_t)-1;

	mutex_exit(&adapter->listhead->mutex);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_nodeid done\n"));

	return (remote_node);
}

/*
 * Called by rsm_connect which needs the hardware address of the
 * remote adapter.  A search is done through the paths for the local
 * adapter for a match on the specified remote node.
 */
rsm_addr_t
get_remote_hwaddr(adapter_t *adapter, rsm_node_id_t remote_node)
{
	rsm_addr_t	remote_hwaddr;
	path_t		*current = adapter->next_path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_hwaddr enter\n"));

	mutex_enter(&adapter->listhead->mutex);
	while (current != NULL) {
		if (current->remote_node == remote_node) {
			remote_hwaddr = current->remote_hwaddr;
			break;
		}
		current = current->next_path;
	}

	if (current == NULL)
		remote_hwaddr = (rsm_addr_t)-1;

	mutex_exit(&adapter->listhead->mutex);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_hwaddr done\n"));

	return (remote_hwaddr);
}

/*
 * IPC UTILITY FUNCTIONS
 */

/*
 * If an entry exists, return with the ipc_info_lock held
 */
static ipc_info_t *
lookup_ipc_info(rsm_node_id_t remote_node)
{
	ipc_info_t	*ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_ipc_info enter\n"));

	mutex_enter(&ipc_info_lock);

	ipc_info = ipc_info_head;
	if (ipc_info == NULL) {
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "lookup_ipc_info done: ipc_info is NULL\n"));
		return (NULL);
	}

	while (ipc_info->remote_node != remote_node) {
		ipc_info = ipc_info->next;
		if (ipc_info == NULL) {
			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "lookup_ipc_info: ipc_info not found\n"));
			mutex_exit(&ipc_info_lock);
			break;
		}
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_ipc_info done\n"));

	return (ipc_info);
}

/*
 * Create an ipc_info descriptor and return with ipc_info_lock held
 */
static ipc_info_t *
init_ipc_info(rsm_node_id_t remote_node, boolean_t state)
{
	ipc_info_t	*ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_ipc_info enter\n"));

	/*
	 * allocate an ipc_info descriptor and add it to a
	 * singly linked list
	 */
	ipc_info = kmem_zalloc(sizeof (ipc_info_t), KM_SLEEP);
	ipc_info->remote_node = remote_node;
	ipc_info->node_is_alive = state;

	mutex_enter(&ipc_info_lock);
	if (ipc_info_head == NULL) {
		DBG_PRINTF((category, RSM_DEBUG,
		    "init_ipc_info:ipc_info_head = %lx\n", ipc_info));
		ipc_info_head = ipc_info;
		ipc_info->next = NULL;
	} else {
		ipc_info->next = ipc_info_head;
		ipc_info_head = ipc_info;
	}

	ipc_info->remote_node = remote_node;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_ipc_info done\n"));

	return (ipc_info);
}

static void
destroy_ipc_info(ipc_info_t *ipc_info)
{
	ipc_info_t	*current = ipc_info_head;
	ipc_info_t	*prev;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_ipc_info enter\n"));

	ASSERT(MUTEX_HELD(&ipc_info_lock));

	while (current != ipc_info) {
		prev = current;
		current = current->next;
	}
	ASSERT(current != NULL);

	if (current != ipc_info_head)
		prev->next = current->next;
	else
		ipc_info_head = current->next;

	kmem_free(current, sizeof (ipc_info_t));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_ipc_info done\n"));
}

/*
 * Sendq tokens are kept on a circular list.  If tokens A, B, C, & D are
 * on the list headed by ipc_info, then ipc_info points to A, A points to
 * D, D to C, C to B, and B to A.
 */
static void
link_sendq_token(sendq_token_t *token, rsm_node_id_t remote_node)
{
	ipc_info_t	*ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_sendq_token enter\n"));

	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		ipc_info = init_ipc_info(remote_node, B_FALSE);
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: new ipc_info = %lx\n", ipc_info));
	} else
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: ipc_info = %lx\n", ipc_info));

	if (ipc_info->token_list == NULL) {
		ipc_info->token_list = token;
		ipc_info->current_token = token;
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: current = %lx\n", token));
		token->next = token;
	} else {
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: token = %lx\n", token));
		token->next = ipc_info->token_list->next;
		ipc_info->token_list->next = token;
		ipc_info->token_list = token;
	}

	mutex_exit(&ipc_info_lock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_sendq_token done\n"));
}

static void
unlink_sendq_token(sendq_token_t *token, rsm_node_id_t remote_node)
{
	sendq_token_t	*prev, *start, *current;
	ipc_info_t	*ipc_info;
	path_t		*path = SQ_TOKEN_TO_PATH(token);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "unlink_sendq_token enter\n"));

	ASSERT(path->ref_cnt == 0);

	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		DBG_PRINTF((category, RSM_DEBUG,
		    "ipc_info for %d not found\n", remote_node));
		return;
	}

	prev = ipc_info->token_list;
	start = current = ipc_info->token_list->next;

	for (;;) {
		if (current == token) {
			if (current->next != current) {
				DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
				    "found token, removed it\n"));
				prev->next = token->next;
				if (ipc_info->token_list == token)
					ipc_info->token_list = prev;
				ipc_info->current_token = token->next;
			} else {
				/* list will be empty */
				DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
				    "removed token, list empty\n"));
				ipc_info->token_list = NULL;
				ipc_info->current_token = NULL;
			}
			break;
		}
		prev = current;
		current = current->next;
		if (current == start) {
			DBG_PRINTF((category, RSM_DEBUG,
			    "unlink_sendq_token: token not found\n"));
			break;
		}
	}

	mutex_exit(&ipc_info_lock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "unlink_sendq_token done\n"));
}

void
rele_sendq_token(sendq_token_t *token)
{
	path_t	*path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rele_sendq_token enter\n"));

	path = SQ_TOKEN_TO_PATH(token);
	mutex_enter(&path->mutex);
	PATH_RELE_NOLOCK(path);
	SENDQ_TOKEN_RELE(path);
	mutex_exit(&path->mutex);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rele_sendq_token done\n"));
}

/*
 * A valid ipc token can only be returned if the remote node is alive.
 * Tokens are on a circular list.  Starting with the current token
 * search for a token with an endpoint in state RSMKA_PATH_ACTIVE.
 * rsmipc_send, which calls rsmka_get_sendq_token, expects that if there are
 * multiple paths available between a node-pair then consecutive calls from
 * a particular invocation of rsmipc_send will return a sendq that is
 * different from the one that was used in the previous iteration.  When
 * prev_used is NULL it indicates that this is the first iteration in a
 * specific rsmipc_send invocation.
 *
 * Updating the current token provides round robin selection and this
 * is done only in the first iteration, i.e. when prev_used is NULL.
 */
sendq_token_t *
rsmka_get_sendq_token(rsm_node_id_t remote_node, sendq_token_t *prev_used)
{
	sendq_token_t	*token, *first_token;
	path_t		*path;
	ipc_info_t	*ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_get_sendq_token enter\n"));

	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_get_sendq_token done: ipc_info is NULL\n"));
		return (NULL);
	}

	if (ipc_info->node_is_alive == B_TRUE) {
		token = first_token = ipc_info->current_token;
		if (token == NULL) {
			mutex_exit(&ipc_info_lock);
			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "rsmka_get_sendq_token done: token=NULL\n"));
			return (NULL);
		}

		for (;;) {
			path = SQ_TOKEN_TO_PATH(token);
			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "path %lx\n", path));
			mutex_enter(&path->mutex);
			if (path->state != RSMKA_PATH_ACTIVE ||
			    path->ref_cnt == 0) {
				mutex_exit(&path->mutex);
			} else {
				if (token != prev_used) {
					/* found a new token */
					break;
				}
				mutex_exit(&path->mutex);
			}

			token = token->next;
			if (token == first_token) {
				/*
				 * we didn't find a new token, reuse prev_used
				 * if the corresponding path is still up
				 */
				if (prev_used) {
					path = SQ_TOKEN_TO_PATH(prev_used);
					DBG_PRINTF((category,
					    RSM_DEBUG_VERBOSE,
					    "path %lx\n", path));
					mutex_enter(&path->mutex);
					if (path->state != RSMKA_PATH_ACTIVE ||
					    path->ref_cnt == 0) {
						mutex_exit(&path->mutex);
					} else {
						token = prev_used;
						break;
					}
				}
				mutex_exit(&ipc_info_lock);
				DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
				    "rsmka_get_sendq_token: token=NULL\n"));
				return (NULL);
			}
		}

		PATH_HOLD_NOLOCK(path);
		SENDQ_TOKEN_HOLD(path);
		if (prev_used == NULL) {
			/* change current_token only the first time */
			ipc_info->current_token = token->next;
		}

		mutex_exit(&path->mutex);
		mutex_exit(&ipc_info_lock);

		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_get_sendq_token done\n"));
		return (token);
	} else {
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_get_sendq_token done\n"));
		return (NULL);
	}
}

int
create_ipc_sendq(path_t *path)
{
	int		rval;
	sendq_token_t	*token;
	adapter_t	*adapter;
	int64_t		srvc_offset;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "create_ipc_sendq enter\n"));

	DBG_PRINTF((category, RSM_DEBUG, "create_ipc_sendq: path = %lx\n",
	    path));

	adapter = path->local_adapter;
	token = &path->sendq_token;

	srvc_offset = path->remote_hwaddr;

	DBG_PRINTF((category, RSM_DEBUG,
	    "create_ipc_sendq: srvc_offset = %lld\n",
	    srvc_offset));

	rval = adapter->rsmpi_ops->rsm_sendq_create(adapter->rsmpi_handle,
	    path->remote_hwaddr,
	    (rsm_intr_service_t)(RSM_SERVICE+srvc_offset),
	    (rsm_intr_pri_t)RSM_PRI, (size_t)RSM_QUEUE_SZ,
	    RSM_INTR_SEND_Q_NO_FENCE,
	    RSM_RESOURCE_SLEEP, NULL, &token->rsmpi_sendq_handle);
	if (rval == RSM_SUCCESS) {
		/* rsmipc_send() may be waiting for a sendq_token */
		mutex_enter(&ipc_info_cvlock);
		cv_broadcast(&ipc_info_cv);
		mutex_exit(&ipc_info_cvlock);
	}

	DBG_PRINTF((category, RSM_DEBUG, "create_ipc_sendq: handle = %lx\n",
	    token->rsmpi_sendq_handle));
	DBG_PRINTF((category, RSM_DEBUG, "create_ipc_sendq: rval = %d\n",
	    rval));
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "create_ipc_sendq done\n"));

	return (rval);
}

boolean_t
rsmka_check_node_alive(rsm_node_id_t remote_node)
{
	ipc_info_t	*ipc_info;

	DBG_PRINTF((category, RSM_DEBUG, "rsmka_check_node_alive enter\n"));

	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		DBG_PRINTF((category, RSM_DEBUG,
		    "rsmka_check_node_alive done: ipc_info NULL\n"));
		return (B_FALSE);
	}

	if (ipc_info->node_is_alive == B_TRUE) {
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG,
		    "rsmka_check_node_alive done: node is alive\n"));
		return (B_TRUE);
	} else {
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG,
		    "rsmka_check_node_alive done: node is not alive\n"));
		return (B_FALSE);
	}
}

/*
 * TOPOLOGY IOCTL SUPPORT
 */

static uint32_t
get_topology_size(int mode)
{
	uint32_t	topology_size;
	int		pointer_area_size;
	adapter_listhead_t	*listhead;
	int		total_num_of_adapters;
	int		total_num_of_paths;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology_size enter\n"));

	/*
	 * Find the total number of adapters and paths by adding up the
	 * individual adapter and path counts from all the listheads
	 */
	total_num_of_adapters = 0;
	total_num_of_paths = 0;
	listhead = adapter_listhead_base.next;
	while (listhead != NULL) {
		total_num_of_adapters += listhead->adapter_count;
		total_num_of_paths += listhead->path_count;
		listhead = listhead->next_listhead;
	}

#ifdef	_MULTI_DATAMODEL
	if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32)
		/*
		 * Add extra 4-bytes to make sure connections header
		 * is double-word aligned
		 */
		pointer_area_size =
		    (total_num_of_adapters + total_num_of_adapters%2) *
		    sizeof (caddr32_t);
	else
		pointer_area_size = total_num_of_adapters * sizeof (caddr_t);
#else	/* _MULTI_DATAMODEL */
	pointer_area_size = total_num_of_adapters * sizeof (caddr_t);
#endif	/* _MULTI_DATAMODEL */

	topology_size = sizeof (rsmka_topology_hdr_t) +
	    pointer_area_size +
	    (total_num_of_adapters * sizeof (rsmka_connections_hdr_t)) +
	    (total_num_of_paths * sizeof (rsmka_remote_cntlr_t));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology_size done\n"));

	return (topology_size);
}
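
/*
 * Editor's illustration (a sketch, not part of the driver): the buffer
 * sized above and filled in by get_topology() below is laid out as one
 * topology header, a pointer area holding one user-space pointer per
 * adapter (padded to a double-word boundary for ILP32 callers), and
 * then, per adapter, a connections header followed by one remote
 * controller record per path:
 *
 *	+---------------------------+
 *	| rsmka_topology_hdr_t      |
 *	+---------------------------+
 *	| connections[] pointers    |  one per adapter (+ ILP32 pad)
 *	+---------------------------+
 *	| rsmka_connections_hdr_t   |  adapter 0
 *	| rsmka_remote_cntlr_t ...  |  one per path of adapter 0
 *	+---------------------------+
 *	| rsmka_connections_hdr_t   |  adapter 1, and so on
 *	+---------------------------+
 */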

static void
get_topology(caddr_t arg, char *bufp, int mode)
{
	rsmka_topology_t	*tp = (rsmka_topology_t *)bufp;
	adapter_listhead_t	*listhead;
	adapter_t		*adapter;
	path_t			*path;
	int			cntlr = 0;
	rsmka_connections_t	*connection;
	rsmka_remote_cntlr_t	*rem_cntlr;
	int			total_num_of_adapters;

#ifdef	_MULTI_DATAMODEL
	rsmka_topology32_t	*tp32 = (rsmka_topology32_t *)bufp;
#endif	/* _MULTI_DATAMODEL */

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology enter\n"));

	/*
	 * Find the total number of adapters by adding up the
	 * individual adapter counts from all the listheads
	 */
	total_num_of_adapters = 0;
	listhead = adapter_listhead_base.next;
	while (listhead != NULL) {
		total_num_of_adapters += listhead->adapter_count;
		listhead = listhead->next_listhead;
	}

	/* fill topology header and adjust bufp */
	tp->topology_hdr.local_nodeid = my_nodeid;
	tp->topology_hdr.local_cntlr_count = total_num_of_adapters;
	bufp = (char *)&tp->connections[0];

	/* leave room for connection pointer area */
#ifdef	_MULTI_DATAMODEL
	if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32)
		/* make sure bufp is double-word aligned */
		bufp += (total_num_of_adapters + total_num_of_adapters%2) *
		    sizeof (caddr32_t);
	else
		bufp += total_num_of_adapters * sizeof (caddr_t);
#else	/* _MULTI_DATAMODEL */
	bufp += total_num_of_adapters * sizeof (caddr_t);
#endif	/* _MULTI_DATAMODEL */

	/* fill topology from the adapter and path data */
	listhead = adapter_listhead_base.next;
	while (listhead != NULL) {
		adapter = listhead->next_adapter;
		while (adapter != NULL) {
			/* fill in user based connection pointer */
#ifdef	_MULTI_DATAMODEL
			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
				ulong_t delta = (ulong_t)bufp - (ulong_t)tp32;
				caddr32_t userbase = (caddr32_t)((ulong_t)arg &
				    0xffffffff);
				tp32->connections[cntlr++] = userbase + delta;
			} else {
				tp->connections[cntlr++] = arg +
				    ((ulong_t)bufp - (ulong_t)tp);
			}
#else	/* _MULTI_DATAMODEL */
			tp->connections[cntlr++] = arg +
			    ((ulong_t)bufp - (ulong_t)tp);
#endif	/* _MULTI_DATAMODEL */
			connection = (rsmka_connections_t *)bufp;
			(void) snprintf(connection->hdr.cntlr_name,
			    MAXNAMELEN, "%s%d",
			    listhead->adapter_devname,
			    adapter->instance);
			connection->hdr.local_hwaddr = adapter->hwaddr;
			connection->hdr.remote_cntlr_count = 0;
			bufp += sizeof (rsmka_connections_hdr_t);
			rem_cntlr = (rsmka_remote_cntlr_t *)bufp;
			path = adapter->next_path;
			while (path != NULL) {
				connection->hdr.remote_cntlr_count++;
				rem_cntlr->remote_nodeid = path->remote_node;
				(void) snprintf(rem_cntlr->remote_cntlrname,
				    MAXNAMELEN, "%s%d",
				    listhead->adapter_devname,
				    path->remote_devinst);
				rem_cntlr->remote_hwaddr = path->remote_hwaddr;
				rem_cntlr->connection_state = path->state;
				rem_cntlr++;
				path = path->next_path;
			}
			adapter = adapter->next;
			bufp = (char *)rem_cntlr;
		}
		listhead = listhead->next_listhead;
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology done\n"));
}

/*
 * Called from rsm_ioctl() in rsm.c
 * Make sure there is no possibility of blocking while holding
 * adapter_listhead_base.listlock
 */
int
rsmka_topology_ioctl(caddr_t arg, int cmd, int mode)
{
	uint32_t	topology_size;
	uint32_t	request_size;
	char		*bufp;
	int		error = RSM_SUCCESS;
	size_t		max_toposize;

	DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
	    "rsmka_topology_ioctl enter\n"));

	switch (cmd) {
	case RSM_IOCTL_TOPOLOGY_SIZE:
		mutex_enter(&adapter_listhead_base.listlock);
		topology_size = get_topology_size(mode);
		mutex_exit(&adapter_listhead_base.listlock);
		if (ddi_copyout((caddr_t)&topology_size,
		    (caddr_t)arg, sizeof (uint32_t), mode))
			error = RSMERR_BAD_ADDR;
		break;
	case RSM_IOCTL_TOPOLOGY_DATA:
		/*
		 * The size of the buffer which the caller has allocated
		 * is passed in.  If the size needed for the topology data
		 * is not sufficient, E2BIG is returned
		 */
		if (ddi_copyin(arg, &request_size, sizeof (uint32_t), mode)) {
			DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
			    "rsmka_topology_ioctl done: BAD_ADDR\n"));
			return (RSMERR_BAD_ADDR);
		}
		/* calculate the max size of the topology structure */
		max_toposize = sizeof (rsmka_topology_hdr_t) +
		    RSM_MAX_CTRL * (sizeof (caddr_t) +
		    sizeof (rsmka_connections_hdr_t)) +
		    RSM_MAX_NODE * sizeof (rsmka_remote_cntlr_t);

		if (request_size > max_toposize) { /* validate request_size */
			DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
			    "rsmka_topology_ioctl done: size too large\n"));
			return (EINVAL);
		}

		bufp = kmem_zalloc(request_size, KM_SLEEP);
		mutex_enter(&adapter_listhead_base.listlock);
		topology_size = get_topology_size(mode);
		if (request_size < topology_size) {
			kmem_free(bufp, request_size);
			mutex_exit(&adapter_listhead_base.listlock);
			DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
			    "rsmka_topology_ioctl done: E2BIG\n"));
			return (E2BIG);
		}

		/* get the topology data and copyout to the caller */
		get_topology(arg, bufp, mode);
		mutex_exit(&adapter_listhead_base.listlock);
		if (ddi_copyout((caddr_t)bufp, (caddr_t)arg,
		    topology_size, mode))
			error = RSMERR_BAD_ADDR;

		kmem_free(bufp, request_size);
		break;
	default:
		DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG,
		    "rsmka_topology_ioctl: cmd not supported\n"));
		error = DDI_FAILURE;
	}

	DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
	    "rsmka_topology_ioctl done: %d\n", error));

	return (error);
}
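
/*
 * Editor's illustration (a sketch, not part of the driver): a user-level
 * consumer of these ioctls would first ask for the required size, then
 * allocate a buffer, seed its first word with that size (matching the
 * ddi_copyin of a uint32_t above), and fetch the data.  The device path
 * below is an assumption for illustration only.
 *
 *	uint32_t size;
 *	rsmka_topology_t *tp;
 *	int fd = open("/dev/rsm", O_RDONLY);	 assumed device path
 *
 *	if (ioctl(fd, RSM_IOCTL_TOPOLOGY_SIZE, &size) == 0) {
 *		tp = malloc(size);
 *		*(uint32_t *)tp = size;		 pass in the buffer size
 *		if (ioctl(fd, RSM_IOCTL_TOPOLOGY_DATA, tp) == 0)
 *			printf("local node %u has %u controllers\n",
 *			    tp->topology_hdr.local_nodeid,
 *			    tp->topology_hdr.local_cntlr_count);
 *	}
 */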