1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/ib/ibtl/impl/ibtl.h>
26 #include <sys/ib/ibtl/impl/ibtl_cm.h>
27 #include <sys/taskq.h>
28 #include <sys/disp.h>
29 #include <sys/callb.h>
30 #include <sys/proc.h>
33 * ibtl_handlers.c
37 * What's in this file?
39 * This file started as an implementation of Asynchronous Event/Error
40 * handling and Completion Queue handling. As the implementation
41 * evolved, code has been added for other ibc_* interfaces (resume,
42 * predetach, etc.) that use the same mechanisms as used for asyncs.
44 * Async and CQ handling at interrupt level.
46 * CQ handling is normally done at interrupt level using the CQ callback
47 * handler to call the appropriate IBT Client (owner of the CQ). For
48 * clients that would prefer a fully flexible non-interrupt context to
49 * do their CQ handling, a CQ can be created so that its handler is
50 * called from a non-interrupt thread. CQ handling is done frequently
51 * whereas Async handling is expected to occur very infrequently.
53 * Async handling is done by marking (or'ing an async_code into) the
54 * pertinent IBTL data structure, and then notifying the async_thread(s)
55 * that the data structure has async work to be done. The notification
56 * occurs by linking the data structure through its async_link onto a
57 * list of like data structures and waking up an async_thread. This
58 * list append is not done if there is already async work pending on
59 * this data structure (IBTL_ASYNC_PENDING).
61 * Async Mutex and CQ Mutex
63 * The global ibtl_async_mutex is "the" mutex used to control access
64 * to all the data needed by ibc_async_handler. All the threads that
65 * use this mutex are written so that the mutex is held for very short
66 * periods of time, and never held while making calls to functions
67 * that may block.
69 * The global ibtl_cq_mutex is used similarly by ibc_cq_handler and
70 * the ibtl_cq_thread(s).
72 * Mutex hierarchy
74 * The ibtl_clnt_list_mutex is above the ibtl_async_mutex.
75 * ibtl_clnt_list_mutex protects all of the various lists.
76 * The ibtl_async_mutex is below this in the hierarchy.
78 * The ibtl_cq_mutex is independent of the above mutexes.
80 * Threads
82 * "ibtl_cq_threads" threads are created for handling
83 * Completion Queues in threads. If this feature really gets used,
84 * then we will want to do some suitable tuning. Similarly, we may
85 * want to tune the number of "ibtl_async_thread_init".
87 * The function ibtl_cq_thread is the main loop for handling a CQ in a
88 * thread. There can be multiple threads executing this same code.
89 * The code sleeps when there is no work to be done (list is empty),
90 * otherwise it pulls the first CQ structure off the list and performs
91 * the CQ handler callback to the client. After that returns, a check
92 * is made, and if another ibc_cq_handler call was made for this CQ,
93 * the client is called again.
95 * The function ibtl_async_thread is the main loop for handling async
96 * events/errors. There can be multiple threads executing this same code.
97 * The code sleeps when there is no work to be done (lists are empty),
98 * otherwise it pulls the first structure off one of the lists and
99 * performs the async callback(s) to the client(s). Note that HCA
100 * async handling is done by calling each of the clients using the HCA.
101 * When the async handling completes, the data structure having the async
102 * event/error is checked for more work before it's considered "done".
104 * Taskq
106 * The async_taskq is used here for allowing async handler callbacks to
107 * occur simultaneously to multiple clients of an HCA. This taskq could
108 * be used for other purposes, e.g., if all the async_threads are in
109 * use, but this is deemed as overkill since asyncs should occur rarely.
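/*
 * For illustration only: a minimal client-side async handler might look
 * like the sketch below.  The signature matches the way
 * ibtl_async_client_call() invokes a client's mi_async_handler; the
 * my_* names are hypothetical.
 *
 *	static void
 *	my_async_handler(void *clnt_private, ibt_hca_hdl_t hca_hdl,
 *	    ibt_async_code_t code, ibt_async_event_t *event)
 *	{
 *		my_state_t *statep = clnt_private;
 *
 *		switch (code) {
 *		case IBT_ERROR_CATASTROPHIC_CHAN:
 *			my_teardown_chan(statep, event->ev_chan_hdl);
 *			break;
 *		case IBT_EVENT_PORT_UP:
 *		case IBT_ERROR_PORT_DOWN:
 *			my_update_port(statep, event->ev_port);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */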
112 /* Globals */
113 static char ibtf_handlers[] = "ibtl_handlers";
115 /* priority for IBTL threads (async, cq, and taskq) */
116 static pri_t ibtl_pri = MAXCLSYSPRI - 1; /* maybe override in /etc/system */
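/*
 * For example, assuming the "ibtl" misc module name, the priority could
 * presumably be overridden with an /etc/system line such as:
 *
 *	set ibtl:ibtl_pri = 97
 */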
118 /* taskq used for HCA asyncs */
119 #define ibtl_async_taskq system_taskq
121 /* data for async handling by threads */
122 static kmutex_t ibtl_async_mutex; /* protects most *_async_* data */
123 static kcondvar_t ibtl_async_cv; /* async_threads wait on this */
124 static kcondvar_t ibtl_clnt_cv; /* ibt_detach might wait on this */
125 static void ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp);
126 static void ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp);
128 static kt_did_t *ibtl_async_did; /* for thread_join() */
129 int ibtl_async_thread_init = 4; /* total # of async_threads to create */
130 static int ibtl_async_thread_exit = 0; /* set if/when thread(s) should exit */
132 /* async lists for various structures */
133 static ibtl_hca_devinfo_t *ibtl_async_hca_list_start, *ibtl_async_hca_list_end;
134 static ibtl_eec_t *ibtl_async_eec_list_start, *ibtl_async_eec_list_end;
135 static ibtl_qp_t *ibtl_async_qp_list_start, *ibtl_async_qp_list_end;
136 static ibtl_cq_t *ibtl_async_cq_list_start, *ibtl_async_cq_list_end;
137 static ibtl_srq_t *ibtl_async_srq_list_start, *ibtl_async_srq_list_end;
139 /* data for CQ completion handling by threads */
140 static kmutex_t ibtl_cq_mutex; /* protects the cv and the list below */
141 static kcondvar_t ibtl_cq_cv;
142 static ibtl_cq_t *ibtl_cq_list_start, *ibtl_cq_list_end;
144 static int ibtl_cq_threads = 0; /* total # of cq threads */
145 static int ibtl_cqs_using_threads = 0; /* total # of cqs using threads */
146 static int ibtl_cq_thread_exit = 0; /* set if/when thread(s) should exit */
148 /* value used to tell IBTL threads to exit */
149 #define IBTL_THREAD_EXIT 0x1b7fdead /* IBTF DEAD */
150 /* Cisco Topspin Vendor ID for Rereg hack */
151 #define IBT_VENDOR_CISCO 0x05ad
153 int ibtl_eec_not_supported = 1;
155 char *ibtl_last_client_name; /* may help debugging */
156 typedef ibt_status_t (*ibtl_node_info_cb_t)(ib_guid_t, uint8_t, ib_lid_t,
157 ibt_node_info_t *);
159 ibtl_node_info_cb_t ibtl_node_info_cb;
161 _NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))
163 void
164 ibtl_cm_set_node_info_cb(ibt_status_t (*node_info_cb)(ib_guid_t, uint8_t,
165 ib_lid_t, ibt_node_info_t *))
167 mutex_enter(&ibtl_clnt_list_mutex);
168 ibtl_node_info_cb = node_info_cb;
169 mutex_exit(&ibtl_clnt_list_mutex);
173 * ibc_async_handler()
175 * Asynchronous Event/Error Handler.
177 * This is the function called by HCA drivers to post the various async
178 * events and errors mentioned in the IB architecture spec. See
179 * ibtl_types.h for additional details.
181 * This function marks the pertinent IBTF object with the async_code,
182 * and queues the object for handling by an ibtl_async_thread. If
183 * the object is NOT already marked for async processing, it is added
184 * to the associated list for that type of object, and an
185 * ibtl_async_thread is signaled to finish the async work.
187 void
188 ibc_async_handler(ibc_clnt_hdl_t hca_devp, ibt_async_code_t code,
189 ibc_async_event_t *event_p)
191 ibtl_qp_t *ibtl_qp;
192 ibtl_cq_t *ibtl_cq;
193 ibtl_srq_t *ibtl_srq;
194 ibtl_eec_t *ibtl_eec;
195 uint8_t port_minus1;
197 ibtl_async_port_event_t *portp;
199 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler(%p, 0x%x, %p)",
200 hca_devp, code, event_p);
202 mutex_enter(&ibtl_async_mutex);
204 switch (code) {
205 case IBT_EVENT_PATH_MIGRATED_QP:
206 case IBT_EVENT_SQD:
207 case IBT_ERROR_CATASTROPHIC_QP:
208 case IBT_ERROR_PATH_MIGRATE_REQ_QP:
209 case IBT_EVENT_COM_EST_QP:
210 case IBT_ERROR_INVALID_REQUEST_QP:
211 case IBT_ERROR_ACCESS_VIOLATION_QP:
212 case IBT_EVENT_EMPTY_QP:
213 case IBT_FEXCH_ERROR:
214 ibtl_qp = event_p->ev_qp_hdl;
215 if (ibtl_qp == NULL) {
216 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
217 "bad qp handle");
218 break;
220 switch (code) {
221 case IBT_ERROR_CATASTROPHIC_QP:
222 ibtl_qp->qp_cat_fma_ena = event_p->ev_fma_ena; break;
223 case IBT_ERROR_PATH_MIGRATE_REQ_QP:
224 ibtl_qp->qp_pth_fma_ena = event_p->ev_fma_ena; break;
225 case IBT_ERROR_INVALID_REQUEST_QP:
226 ibtl_qp->qp_inv_fma_ena = event_p->ev_fma_ena; break;
227 case IBT_ERROR_ACCESS_VIOLATION_QP:
228 ibtl_qp->qp_acc_fma_ena = event_p->ev_fma_ena; break;
231 ibtl_qp->qp_async_codes |= code;
232 if ((ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) == 0) {
233 ibtl_qp->qp_async_flags |= IBTL_ASYNC_PENDING;
234 ibtl_qp->qp_async_link = NULL;
235 if (ibtl_async_qp_list_end == NULL)
236 ibtl_async_qp_list_start = ibtl_qp;
237 else
238 ibtl_async_qp_list_end->qp_async_link = ibtl_qp;
239 ibtl_async_qp_list_end = ibtl_qp;
240 cv_signal(&ibtl_async_cv);
242 break;
244 case IBT_ERROR_CQ:
245 ibtl_cq = event_p->ev_cq_hdl;
246 if (ibtl_cq == NULL) {
247 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
248 "bad cq handle");
249 break;
251 ibtl_cq->cq_async_codes |= code;
252 ibtl_cq->cq_fma_ena = event_p->ev_fma_ena;
253 if ((ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) == 0) {
254 ibtl_cq->cq_async_flags |= IBTL_ASYNC_PENDING;
255 ibtl_cq->cq_async_link = NULL;
256 if (ibtl_async_cq_list_end == NULL)
257 ibtl_async_cq_list_start = ibtl_cq;
258 else
259 ibtl_async_cq_list_end->cq_async_link = ibtl_cq;
260 ibtl_async_cq_list_end = ibtl_cq;
261 cv_signal(&ibtl_async_cv);
263 break;
265 case IBT_ERROR_CATASTROPHIC_SRQ:
266 case IBT_EVENT_LIMIT_REACHED_SRQ:
267 ibtl_srq = event_p->ev_srq_hdl;
268 if (ibtl_srq == NULL) {
269 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
270 "bad srq handle");
271 break;
273 ibtl_srq->srq_async_codes |= code;
274 ibtl_srq->srq_fma_ena = event_p->ev_fma_ena;
275 if ((ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) == 0) {
276 ibtl_srq->srq_async_flags |= IBTL_ASYNC_PENDING;
277 ibtl_srq->srq_async_link = NULL;
278 if (ibtl_async_srq_list_end == NULL)
279 ibtl_async_srq_list_start = ibtl_srq;
280 else
281 ibtl_async_srq_list_end->srq_async_link =
282 ibtl_srq;
283 ibtl_async_srq_list_end = ibtl_srq;
284 cv_signal(&ibtl_async_cv);
286 break;
288 case IBT_EVENT_PATH_MIGRATED_EEC:
289 case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
290 case IBT_ERROR_CATASTROPHIC_EEC:
291 case IBT_EVENT_COM_EST_EEC:
292 if (ibtl_eec_not_supported) {
293 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
294 "EEC events are disabled.");
295 break;
297 ibtl_eec = event_p->ev_eec_hdl;
298 if (ibtl_eec == NULL) {
299 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
300 "bad eec handle");
301 break;
303 switch (code) {
304 case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
305 ibtl_eec->eec_pth_fma_ena = event_p->ev_fma_ena; break;
306 case IBT_ERROR_CATASTROPHIC_EEC:
307 ibtl_eec->eec_cat_fma_ena = event_p->ev_fma_ena; break;
309 ibtl_eec->eec_async_codes |= code;
310 if ((ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) == 0) {
311 ibtl_eec->eec_async_flags |= IBTL_ASYNC_PENDING;
312 ibtl_eec->eec_async_link = NULL;
313 if (ibtl_async_eec_list_end == NULL)
314 ibtl_async_eec_list_start = ibtl_eec;
315 else
316 ibtl_async_eec_list_end->eec_async_link =
317 ibtl_eec;
318 ibtl_async_eec_list_end = ibtl_eec;
319 cv_signal(&ibtl_async_cv);
321 break;
323 case IBT_ERROR_LOCAL_CATASTROPHIC:
324 hca_devp->hd_async_codes |= code;
325 hca_devp->hd_fma_ena = event_p->ev_fma_ena;
326 /* FALLTHROUGH */
328 case IBT_EVENT_PORT_UP:
329 case IBT_PORT_CHANGE_EVENT:
330 case IBT_CLNT_REREG_EVENT:
331 case IBT_ERROR_PORT_DOWN:
332 if ((code & IBT_PORT_EVENTS) != 0) {
333 if ((port_minus1 = event_p->ev_port - 1) >=
334 hca_devp->hd_hca_attr->hca_nports) {
335 IBTF_DPRINTF_L2(ibtf_handlers,
336 "ibc_async_handler: bad port #: %d",
337 event_p->ev_port);
338 break;
340 portp = &hca_devp->hd_async_port[port_minus1];
341 if (code == IBT_EVENT_PORT_UP) {
343 * The port is just coming UP; we can't have any
344 * valid older events.
346 portp->status = IBTL_HCA_PORT_UP;
347 } else if (code == IBT_ERROR_PORT_DOWN) {
349 * The port is going DOWN; older events don't
350 * count.
352 portp->status = IBTL_HCA_PORT_DOWN;
353 } else if (code == IBT_PORT_CHANGE_EVENT) {
355 * For port UP and DOWN events, only the latest
356 * event counts. If we get an UP after a DOWN, it
357 * is sufficient to send just the UP, and vice versa.
358 * In the case of a port CHANGE event, it is valid
359 * only when the port is already UP; but if we
360 * receive it after UP but before the UP is
361 * delivered, we still need to deliver the CHANGE
362 * after we deliver the UP event.
364 * We will not get a CHANGE event when the port
365 * is down or a DOWN event is pending.
367 portp->flags |= event_p->ev_port_flags;
368 portp->status |= IBTL_HCA_PORT_CHG;
369 } else if (code == IBT_CLNT_REREG_EVENT) {
371 * The SM has requested that clients re-register
372 * their subscriptions to SM event notifications.
374 portp->status |= IBTL_HCA_PORT_ASYNC_CLNT_REREG;
377 hca_devp->hd_async_codes |= code;
380 if ((hca_devp->hd_async_flags & IBTL_ASYNC_PENDING) == 0) {
381 hca_devp->hd_async_flags |= IBTL_ASYNC_PENDING;
382 hca_devp->hd_async_link = NULL;
383 if (ibtl_async_hca_list_end == NULL)
384 ibtl_async_hca_list_start = hca_devp;
385 else
386 ibtl_async_hca_list_end->hd_async_link =
387 hca_devp;
388 ibtl_async_hca_list_end = hca_devp;
389 cv_signal(&ibtl_async_cv);
392 break;
394 default:
395 IBTF_DPRINTF_L1(ibtf_handlers, "ibc_async_handler: "
396 "invalid code (0x%x)", code);
399 mutex_exit(&ibtl_async_mutex);
403 /* Finally, make the async call to the client. */
405 static void
406 ibtl_async_client_call(ibtl_hca_t *ibt_hca, ibt_async_code_t code,
407 ibt_async_event_t *event_p)
409 ibtl_clnt_t *clntp;
410 void *client_private;
411 ibt_async_handler_t async_handler;
412 char *client_name;
414 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call(%p, 0x%x, %p)",
415 ibt_hca, code, event_p);
417 clntp = ibt_hca->ha_clnt_devp;
419 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
420 /* Record who is being called (just a debugging aid) */
421 ibtl_last_client_name = client_name = clntp->clnt_name;
422 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
424 client_private = clntp->clnt_private;
425 async_handler = clntp->clnt_modinfop->mi_async_handler;
427 if (code & (IBT_EVENT_COM_EST_QP | IBT_EVENT_COM_EST_EEC)) {
428 mutex_enter(&ibtl_clnt_list_mutex);
429 async_handler = ibtl_cm_async_handler;
430 client_private = ibtl_cm_clnt_private;
431 mutex_exit(&ibtl_clnt_list_mutex);
432 ibt_hca = NULL;
433 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
434 "calling CM for COM_EST");
435 } else {
436 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
437 "calling client '%s'", client_name);
439 if (async_handler != NULL)
440 async_handler(client_private, ibt_hca, code, event_p);
441 else
442 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
443 "client '%s' has no async handler", client_name);
447 * Inform CM or DM about HCA events.
449 * We use taskqs to allow simultaneous notification, with sleeping.
450 * Since taskqs only allow one argument, we define a structure
451 * because we need to pass in more than one argument.
454 struct ibtl_mgr_s {
455 ibtl_hca_devinfo_t *mgr_hca_devp;
456 ibt_async_handler_t mgr_async_handler;
457 void *mgr_clnt_private;
461 * Asyncs of HCA level events for CM and DM. Call CM or DM and tell them
462 * about the HCA for the event recorded in the ibtl_hca_devinfo_t.
464 static void
465 ibtl_do_mgr_async_task(void *arg)
467 struct ibtl_mgr_s *mgrp = (struct ibtl_mgr_s *)arg;
468 ibtl_hca_devinfo_t *hca_devp = mgrp->mgr_hca_devp;
470 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_mgr_async_task(0x%x)",
471 hca_devp->hd_async_code);
473 mgrp->mgr_async_handler(mgrp->mgr_clnt_private, NULL,
474 hca_devp->hd_async_code, &hca_devp->hd_async_event);
475 kmem_free(mgrp, sizeof (*mgrp));
477 mutex_enter(&ibtl_clnt_list_mutex);
478 if (--hca_devp->hd_async_task_cnt == 0)
479 cv_signal(&hca_devp->hd_async_task_cv);
480 mutex_exit(&ibtl_clnt_list_mutex);
483 static void
484 ibt_cisco_embedded_sm_rereg_fix(void *arg)
486 struct ibtl_mgr_s *mgrp = arg;
487 ibtl_hca_devinfo_t *hca_devp;
488 ibt_node_info_t node_info;
489 ibt_status_t ibt_status;
490 ibtl_async_port_event_t *portp;
491 ib_lid_t sm_lid;
492 ib_guid_t hca_guid;
493 ibt_async_event_t *event_p;
494 ibt_hca_portinfo_t *pinfop;
495 uint8_t port;
497 hca_devp = mgrp->mgr_hca_devp;
499 mutex_enter(&ibtl_clnt_list_mutex);
500 event_p = &hca_devp->hd_async_event;
501 port = event_p->ev_port;
502 portp = &hca_devp->hd_async_port[port - 1];
503 pinfop = &hca_devp->hd_portinfop[port - 1];
504 sm_lid = pinfop->p_sm_lid;
505 hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
506 mutex_exit(&ibtl_clnt_list_mutex);
508 ibt_status = ((ibtl_node_info_cb_t)mgrp->mgr_async_handler)(hca_guid,
509 port, sm_lid, &node_info);
510 if (ibt_status == IBT_SUCCESS) {
511 if ((node_info.n_vendor_id == IBT_VENDOR_CISCO) &&
512 (node_info.n_node_type == IBT_NODE_TYPE_SWITCH)) {
513 mutex_enter(&ibtl_async_mutex);
514 portp->status |= IBTL_HCA_PORT_ASYNC_CLNT_REREG;
515 hca_devp->hd_async_codes |= IBT_CLNT_REREG_EVENT;
516 mutex_exit(&ibtl_async_mutex);
519 kmem_free(mgrp, sizeof (*mgrp));
521 mutex_enter(&ibtl_clnt_list_mutex);
522 if (--hca_devp->hd_async_task_cnt == 0)
523 cv_signal(&hca_devp->hd_async_task_cv);
524 mutex_exit(&ibtl_clnt_list_mutex);
527 static void
528 ibtl_cm_get_node_info(ibtl_hca_devinfo_t *hca_devp,
529 ibt_async_handler_t async_handler)
531 struct ibtl_mgr_s *mgrp;
533 if (async_handler == NULL)
534 return;
536 _NOTE(NO_COMPETING_THREADS_NOW)
537 mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
538 mgrp->mgr_hca_devp = hca_devp;
539 mgrp->mgr_async_handler = async_handler;
540 mgrp->mgr_clnt_private = NULL;
541 hca_devp->hd_async_task_cnt++;
543 (void) taskq_dispatch(ibtl_async_taskq,
544 ibt_cisco_embedded_sm_rereg_fix, mgrp, TQ_SLEEP);
545 _NOTE(COMPETING_THREADS_NOW)
548 static void
549 ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
550 void *clnt_private)
552 struct ibtl_mgr_s *mgrp;
554 if (async_handler == NULL)
555 return;
557 _NOTE(NO_COMPETING_THREADS_NOW)
558 mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
559 mgrp->mgr_hca_devp = hca_devp;
560 mgrp->mgr_async_handler = async_handler;
561 mgrp->mgr_clnt_private = clnt_private;
562 hca_devp->hd_async_task_cnt++;
564 (void) taskq_dispatch(ibtl_async_taskq, ibtl_do_mgr_async_task, mgrp,
565 TQ_SLEEP);
566 _NOTE(COMPETING_THREADS_NOW)
570 * Per client-device asyncs for HCA level events. Call each client that is
571 * using the HCA for the event recorded in the ibtl_hca_devinfo_t.
573 static void
574 ibtl_hca_client_async_task(void *arg)
576 ibtl_hca_t *ibt_hca = (ibtl_hca_t *)arg;
577 ibtl_hca_devinfo_t *hca_devp = ibt_hca->ha_hca_devp;
578 ibtl_clnt_t *clntp = ibt_hca->ha_clnt_devp;
579 ibt_async_event_t async_event;
581 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_hca_client_async_task(%p, 0x%x)",
582 ibt_hca, hca_devp->hd_async_code);
584 bcopy(&hca_devp->hd_async_event, &async_event, sizeof (async_event));
585 ibtl_async_client_call(ibt_hca, hca_devp->hd_async_code, &async_event);
587 mutex_enter(&ibtl_async_mutex);
588 if (--ibt_hca->ha_async_cnt == 0 &&
589 (ibt_hca->ha_async_flags & IBTL_ASYNC_FREE_OBJECT)) {
590 mutex_exit(&ibtl_async_mutex);
591 kmem_free(ibt_hca, sizeof (ibtl_hca_t));
592 } else
593 mutex_exit(&ibtl_async_mutex);
595 mutex_enter(&ibtl_clnt_list_mutex);
596 if (--hca_devp->hd_async_task_cnt == 0)
597 cv_signal(&hca_devp->hd_async_task_cv);
598 if (--clntp->clnt_async_cnt == 0)
599 cv_broadcast(&ibtl_clnt_cv);
601 mutex_exit(&ibtl_clnt_list_mutex);
605 * Asyncs for HCA level events.
607 * The function continues to run until there are no more async
608 * events/errors for this HCA. An event is chosen for dispatch
609 * to all clients of this HCA. This thread dispatches them via
610 * the ibtl_async_taskq, then sleeps until all tasks are done.
612 * This thread records the async_code and async_event in the
613 * ibtl_hca_devinfo_t for all client taskq threads to reference.
615 * This is called from an async or taskq thread with ibtl_async_mutex held.
617 static void
618 ibtl_do_hca_asyncs(ibtl_hca_devinfo_t *hca_devp)
620 ibtl_hca_t *ibt_hca;
621 ibt_async_event_t *eventp;
622 ibt_async_code_t code;
623 ibtl_async_port_status_t temp;
624 uint8_t nports;
625 uint8_t port_minus1;
626 ibtl_async_port_event_t *portp;
628 mutex_exit(&ibtl_async_mutex);
630 mutex_enter(&ibtl_clnt_list_mutex);
631 while (hca_devp->hd_async_busy)
632 cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
633 hca_devp->hd_async_busy = 1;
634 mutex_enter(&ibtl_async_mutex);
636 bzero(&hca_devp->hd_async_event, sizeof (hca_devp->hd_async_event));
637 for (;;) {
639 hca_devp->hd_async_event.ev_fma_ena = 0;
641 code = hca_devp->hd_async_codes;
642 if (code & IBT_ERROR_LOCAL_CATASTROPHIC) {
643 code = IBT_ERROR_LOCAL_CATASTROPHIC;
644 hca_devp->hd_async_event.ev_fma_ena =
645 hca_devp->hd_fma_ena;
646 } else if (code & IBT_ERROR_PORT_DOWN) {
647 code = IBT_ERROR_PORT_DOWN;
648 temp = IBTL_HCA_PORT_DOWN;
649 } else if (code & IBT_EVENT_PORT_UP) {
650 code = IBT_EVENT_PORT_UP;
651 temp = IBTL_HCA_PORT_UP;
652 } else if (code & IBT_PORT_CHANGE_EVENT) {
653 code = IBT_PORT_CHANGE_EVENT;
654 temp = IBTL_HCA_PORT_CHG;
655 } else if (code & IBT_CLNT_REREG_EVENT) {
656 code = IBT_CLNT_REREG_EVENT;
657 temp = IBTL_HCA_PORT_ASYNC_CLNT_REREG;
658 } else {
659 hca_devp->hd_async_codes = 0;
660 code = 0;
663 if (code == 0) {
664 hca_devp->hd_async_flags &= ~IBTL_ASYNC_PENDING;
665 break;
667 hca_devp->hd_async_codes &= ~code;
669 /* PORT_UP, PORT_CHANGE, PORT_DOWN or ASYNC_REREG */
670 if ((code & IBT_PORT_EVENTS) != 0) {
671 portp = hca_devp->hd_async_port;
672 nports = hca_devp->hd_hca_attr->hca_nports;
673 for (port_minus1 = 0; port_minus1 < nports;
674 port_minus1++) {
676 * Matching event on this port; let's go handle
677 * it.
679 if ((portp[port_minus1].status & temp) != 0)
680 break;
682 if (port_minus1 >= nports) {
683 /* we checked again, but found nothing */
684 continue;
686 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_do_hca_asyncs: "
687 "async: port# %x code %x", port_minus1 + 1, code);
688 /* mark it to check for other ports after we're done */
689 hca_devp->hd_async_codes |= code;
692 * Copy the event information into hca_devp and clear
693 * event information from the per port data.
695 hca_devp->hd_async_event.ev_port = port_minus1 + 1;
696 if (temp == IBTL_HCA_PORT_CHG) {
697 hca_devp->hd_async_event.ev_port_flags =
698 hca_devp->hd_async_port[port_minus1].flags;
699 hca_devp->hd_async_port[port_minus1].flags = 0;
701 hca_devp->hd_async_port[port_minus1].status &= ~temp;
703 mutex_exit(&ibtl_async_mutex);
704 ibtl_reinit_hca_portinfo(hca_devp, port_minus1 + 1);
705 mutex_enter(&ibtl_async_mutex);
706 eventp = &hca_devp->hd_async_event;
707 eventp->ev_hca_guid =
708 hca_devp->hd_hca_attr->hca_node_guid;
711 hca_devp->hd_async_code = code;
712 hca_devp->hd_async_event.ev_hca_guid =
713 hca_devp->hd_hca_attr->hca_node_guid;
714 mutex_exit(&ibtl_async_mutex);
717 * Make sure to inform CM, DM, and IBMA if we know of them.
718 * Also, make sure not to inform them a second time, which
719 * would occur if they have the HCA open.
722 if (ibtl_ibma_async_handler)
723 ibtl_tell_mgr(hca_devp, ibtl_ibma_async_handler,
724 ibtl_ibma_clnt_private);
725 /* wait for all tasks to complete */
726 while (hca_devp->hd_async_task_cnt != 0)
727 cv_wait(&hca_devp->hd_async_task_cv,
728 &ibtl_clnt_list_mutex);
731 * Hack Alert:
732 * The ibmf handler would have updated the Master SM LID if it
733 * was an SM LID change event. Now let's check whether the new
734 * Master SM is an embedded Cisco Topspin SM.
736 if ((code == IBT_PORT_CHANGE_EVENT) &&
737 eventp->ev_port_flags & IBT_PORT_CHANGE_SM_LID)
738 ibtl_cm_get_node_info(hca_devp,
739 (ibt_async_handler_t)ibtl_node_info_cb);
740 /* wait for node info task to complete */
741 while (hca_devp->hd_async_task_cnt != 0)
742 cv_wait(&hca_devp->hd_async_task_cv,
743 &ibtl_clnt_list_mutex);
745 if (ibtl_dm_async_handler)
746 ibtl_tell_mgr(hca_devp, ibtl_dm_async_handler,
747 ibtl_dm_clnt_private);
748 if (ibtl_cm_async_handler)
749 ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
750 ibtl_cm_clnt_private);
751 /* wait for all tasks to complete */
752 while (hca_devp->hd_async_task_cnt != 0)
753 cv_wait(&hca_devp->hd_async_task_cv,
754 &ibtl_clnt_list_mutex);
756 for (ibt_hca = hca_devp->hd_clnt_list;
757 ibt_hca != NULL;
758 ibt_hca = ibt_hca->ha_clnt_link) {
760 /* Managers are handled above */
761 if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
762 ibtl_cm_async_handler)
763 continue;
764 if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
765 ibtl_dm_async_handler)
766 continue;
767 if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
768 ibtl_ibma_async_handler)
769 continue;
770 ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
772 mutex_enter(&ibtl_async_mutex);
773 ibt_hca->ha_async_cnt++;
774 mutex_exit(&ibtl_async_mutex);
775 hca_devp->hd_async_task_cnt++;
776 (void) taskq_dispatch(ibtl_async_taskq,
777 ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
780 /* wait for all tasks to complete */
781 while (hca_devp->hd_async_task_cnt != 0)
782 cv_wait(&hca_devp->hd_async_task_cv,
783 &ibtl_clnt_list_mutex);
785 mutex_enter(&ibtl_async_mutex);
787 hca_devp->hd_async_code = 0;
788 hca_devp->hd_async_busy = 0;
789 cv_broadcast(&hca_devp->hd_async_busy_cv);
790 mutex_exit(&ibtl_clnt_list_mutex);
794 * Asyncs for QP objects.
796 * The function continues to run until there are no more async
797 * events/errors for this object.
799 static void
800 ibtl_do_qp_asyncs(ibtl_qp_t *ibtl_qp)
802 ibt_async_code_t code;
803 ibt_async_event_t async_event;
805 ASSERT(MUTEX_HELD(&ibtl_async_mutex));
806 bzero(&async_event, sizeof (async_event));
807 async_event.ev_chan_hdl = IBTL_QP2CHAN(ibtl_qp);
809 while ((code = ibtl_qp->qp_async_codes) != 0) {
810 async_event.ev_fma_ena = 0;
811 if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT)
812 code = 0; /* fallthrough to "kmem_free" */
813 else if (code & IBT_ERROR_CATASTROPHIC_QP) {
814 code = IBT_ERROR_CATASTROPHIC_QP;
815 async_event.ev_fma_ena = ibtl_qp->qp_cat_fma_ena;
816 } else if (code & IBT_ERROR_INVALID_REQUEST_QP) {
817 code = IBT_ERROR_INVALID_REQUEST_QP;
818 async_event.ev_fma_ena = ibtl_qp->qp_inv_fma_ena;
819 } else if (code & IBT_ERROR_ACCESS_VIOLATION_QP) {
820 code = IBT_ERROR_ACCESS_VIOLATION_QP;
821 async_event.ev_fma_ena = ibtl_qp->qp_acc_fma_ena;
822 } else if (code & IBT_ERROR_PATH_MIGRATE_REQ_QP) {
823 code = IBT_ERROR_PATH_MIGRATE_REQ_QP;
824 async_event.ev_fma_ena = ibtl_qp->qp_pth_fma_ena;
825 } else if (code & IBT_EVENT_PATH_MIGRATED_QP)
826 code = IBT_EVENT_PATH_MIGRATED_QP;
827 else if (code & IBT_EVENT_SQD)
828 code = IBT_EVENT_SQD;
829 else if (code & IBT_EVENT_COM_EST_QP)
830 code = IBT_EVENT_COM_EST_QP;
831 else if (code & IBT_EVENT_EMPTY_QP)
832 code = IBT_EVENT_EMPTY_QP;
833 else {
834 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_qp_asyncs: "
835 "async: unexpected QP async code 0x%x", code);
836 ibtl_qp->qp_async_codes = 0;
837 code = 0;
839 ibtl_qp->qp_async_codes &= ~code;
841 if (code) {
842 mutex_exit(&ibtl_async_mutex);
843 ibtl_async_client_call(ibtl_qp->qp_hca,
844 code, &async_event);
845 mutex_enter(&ibtl_async_mutex);
848 if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT) {
849 mutex_exit(&ibtl_async_mutex);
850 cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
851 mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
852 kmem_free(IBTL_QP2CHAN(ibtl_qp),
853 sizeof (ibtl_channel_t));
854 mutex_enter(&ibtl_async_mutex);
855 return;
858 ibtl_qp->qp_async_flags &= ~IBTL_ASYNC_PENDING;
862 * Asyncs for SRQ objects.
864 * The function continues to run until there are no more async
865 * events/errors for this object.
867 static void
868 ibtl_do_srq_asyncs(ibtl_srq_t *ibtl_srq)
870 ibt_async_code_t code;
871 ibt_async_event_t async_event;
873 ASSERT(MUTEX_HELD(&ibtl_async_mutex));
874 bzero(&async_event, sizeof (async_event));
875 async_event.ev_srq_hdl = ibtl_srq;
876 async_event.ev_fma_ena = ibtl_srq->srq_fma_ena;
878 while ((code = ibtl_srq->srq_async_codes) != 0) {
879 if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT)
880 code = 0; /* fallthrough to "kmem_free" */
881 else if (code & IBT_ERROR_CATASTROPHIC_SRQ)
882 code = IBT_ERROR_CATASTROPHIC_SRQ;
883 else if (code & IBT_EVENT_LIMIT_REACHED_SRQ)
884 code = IBT_EVENT_LIMIT_REACHED_SRQ;
885 else {
886 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_srq_asyncs: "
887 "async: unexpected SRQ async code 0x%x", code);
888 ibtl_srq->srq_async_codes = 0;
889 code = 0;
891 ibtl_srq->srq_async_codes &= ~code;
893 if (code) {
894 mutex_exit(&ibtl_async_mutex);
895 ibtl_async_client_call(ibtl_srq->srq_hca,
896 code, &async_event);
897 mutex_enter(&ibtl_async_mutex);
900 if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
901 mutex_exit(&ibtl_async_mutex);
902 kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
903 mutex_enter(&ibtl_async_mutex);
904 return;
907 ibtl_srq->srq_async_flags &= ~IBTL_ASYNC_PENDING;
911 * Asyncs for CQ objects.
913 * The function continues to run until there are no more async
914 * events/errors for this object.
916 static void
917 ibtl_do_cq_asyncs(ibtl_cq_t *ibtl_cq)
919 ibt_async_code_t code;
920 ibt_async_event_t async_event;
922 ASSERT(MUTEX_HELD(&ibtl_async_mutex));
923 bzero(&async_event, sizeof (async_event));
924 async_event.ev_cq_hdl = ibtl_cq;
925 async_event.ev_fma_ena = ibtl_cq->cq_fma_ena;
927 while ((code = ibtl_cq->cq_async_codes) != 0) {
928 if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT)
929 code = 0; /* fallthrough to "kmem_free" */
930 else if (code & IBT_ERROR_CQ)
931 code = IBT_ERROR_CQ;
932 else {
933 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_cq_asyncs: "
934 "async: unexpected CQ async code 0x%x", code);
935 ibtl_cq->cq_async_codes = 0;
936 code = 0;
938 ibtl_cq->cq_async_codes &= ~code;
940 if (code) {
941 mutex_exit(&ibtl_async_mutex);
942 ibtl_async_client_call(ibtl_cq->cq_hca,
943 code, &async_event);
944 mutex_enter(&ibtl_async_mutex);
947 if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
948 mutex_exit(&ibtl_async_mutex);
949 mutex_destroy(&ibtl_cq->cq_mutex);
950 kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
951 mutex_enter(&ibtl_async_mutex);
952 return;
955 ibtl_cq->cq_async_flags &= ~IBTL_ASYNC_PENDING;
959 * Asyncs for EEC objects.
961 * The function continues to run until there are no more async
962 * events/errors for this object.
964 static void
965 ibtl_do_eec_asyncs(ibtl_eec_t *ibtl_eec)
967 ibt_async_code_t code;
968 ibt_async_event_t async_event;
970 ASSERT(MUTEX_HELD(&ibtl_async_mutex));
971 bzero(&async_event, sizeof (async_event));
972 async_event.ev_chan_hdl = ibtl_eec->eec_channel;
974 while ((code = ibtl_eec->eec_async_codes) != 0) {
975 async_event.ev_fma_ena = 0;
976 if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT)
977 code = 0; /* fallthrough to "kmem_free" */
978 else if (code & IBT_ERROR_CATASTROPHIC_EEC) {
979 code = IBT_ERROR_CATASTROPHIC_CHAN;
980 async_event.ev_fma_ena = ibtl_eec->eec_cat_fma_ena;
981 } else if (code & IBT_ERROR_PATH_MIGRATE_REQ_EEC) {
982 code = IBT_ERROR_PATH_MIGRATE_REQ;
983 async_event.ev_fma_ena = ibtl_eec->eec_pth_fma_ena;
984 } else if (code & IBT_EVENT_PATH_MIGRATED_EEC)
985 code = IBT_EVENT_PATH_MIGRATED;
986 else if (code & IBT_EVENT_COM_EST_EEC)
987 code = IBT_EVENT_COM_EST;
988 else {
989 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_eec_asyncs: "
990 "async: unexpected code 0x%x", code);
991 ibtl_eec->eec_async_codes = 0;
992 code = 0;
994 ibtl_eec->eec_async_codes &= ~code;
996 if (code) {
997 mutex_exit(&ibtl_async_mutex);
998 ibtl_async_client_call(ibtl_eec->eec_hca,
999 code, &async_event);
1000 mutex_enter(&ibtl_async_mutex);
1003 if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT) {
1004 mutex_exit(&ibtl_async_mutex);
1005 kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
1006 mutex_enter(&ibtl_async_mutex);
1007 return;
1010 ibtl_eec->eec_async_flags &= ~IBTL_ASYNC_PENDING;
1013 #ifdef __lock_lint
1014 kmutex_t cpr_mutex;
1015 #endif
1018 * Loop forever, calling async_handlers until all of the async lists
1019 * are empty.
1022 static void
1023 ibtl_async_thread(void)
1025 #ifndef __lock_lint
1026 kmutex_t cpr_mutex;
1027 #endif
1028 callb_cpr_t cprinfo;
1030 _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
1031 _NOTE(NO_COMPETING_THREADS_NOW)
1032 mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
1033 CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
1034 "ibtl_async_thread");
1035 _NOTE(COMPETING_THREADS_NOW)
1037 mutex_enter(&ibtl_async_mutex);
1039 for (;;) {
1040 if (ibtl_async_hca_list_start) {
1041 ibtl_hca_devinfo_t *hca_devp;
1043 /* remove first entry from list */
1044 hca_devp = ibtl_async_hca_list_start;
1045 ibtl_async_hca_list_start = hca_devp->hd_async_link;
1046 hca_devp->hd_async_link = NULL;
1047 if (ibtl_async_hca_list_start == NULL)
1048 ibtl_async_hca_list_end = NULL;
1050 ibtl_do_hca_asyncs(hca_devp);
1052 } else if (ibtl_async_qp_list_start) {
1053 ibtl_qp_t *ibtl_qp;
1055 /* remove from list */
1056 ibtl_qp = ibtl_async_qp_list_start;
1057 ibtl_async_qp_list_start = ibtl_qp->qp_async_link;
1058 ibtl_qp->qp_async_link = NULL;
1059 if (ibtl_async_qp_list_start == NULL)
1060 ibtl_async_qp_list_end = NULL;
1062 ibtl_do_qp_asyncs(ibtl_qp);
1064 } else if (ibtl_async_srq_list_start) {
1065 ibtl_srq_t *ibtl_srq;
1067 /* remove from list */
1068 ibtl_srq = ibtl_async_srq_list_start;
1069 ibtl_async_srq_list_start = ibtl_srq->srq_async_link;
1070 ibtl_srq->srq_async_link = NULL;
1071 if (ibtl_async_srq_list_start == NULL)
1072 ibtl_async_srq_list_end = NULL;
1074 ibtl_do_srq_asyncs(ibtl_srq);
1076 } else if (ibtl_async_eec_list_start) {
1077 ibtl_eec_t *ibtl_eec;
1079 /* remove from list */
1080 ibtl_eec = ibtl_async_eec_list_start;
1081 ibtl_async_eec_list_start = ibtl_eec->eec_async_link;
1082 ibtl_eec->eec_async_link = NULL;
1083 if (ibtl_async_eec_list_start == NULL)
1084 ibtl_async_eec_list_end = NULL;
1086 ibtl_do_eec_asyncs(ibtl_eec);
1088 } else if (ibtl_async_cq_list_start) {
1089 ibtl_cq_t *ibtl_cq;
1091 /* remove from list */
1092 ibtl_cq = ibtl_async_cq_list_start;
1093 ibtl_async_cq_list_start = ibtl_cq->cq_async_link;
1094 ibtl_cq->cq_async_link = NULL;
1095 if (ibtl_async_cq_list_start == NULL)
1096 ibtl_async_cq_list_end = NULL;
1098 ibtl_do_cq_asyncs(ibtl_cq);
1100 } else {
1101 if (ibtl_async_thread_exit == IBTL_THREAD_EXIT)
1102 break;
1103 mutex_enter(&cpr_mutex);
1104 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1105 mutex_exit(&cpr_mutex);
1107 cv_wait(&ibtl_async_cv, &ibtl_async_mutex);
1109 mutex_exit(&ibtl_async_mutex);
1110 mutex_enter(&cpr_mutex);
1111 CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
1112 mutex_exit(&cpr_mutex);
1113 mutex_enter(&ibtl_async_mutex);
1117 mutex_exit(&ibtl_async_mutex);
1119 #ifndef __lock_lint
1120 mutex_enter(&cpr_mutex);
1121 CALLB_CPR_EXIT(&cprinfo);
1122 #endif
1123 mutex_destroy(&cpr_mutex);
1127 void
1128 ibtl_free_qp_async_check(ibtl_qp_t *ibtl_qp)
1130 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_qp_async_check(%p)", ibtl_qp);
1132 mutex_enter(&ibtl_async_mutex);
1135 * If there is an active async, mark this object to be freed
1136 * by the async_thread when it's done.
1138 if (ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) {
1139 ibtl_qp->qp_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1140 mutex_exit(&ibtl_async_mutex);
1141 } else { /* free the object now */
1142 mutex_exit(&ibtl_async_mutex);
1143 cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
1144 mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
1145 kmem_free(IBTL_QP2CHAN(ibtl_qp), sizeof (ibtl_channel_t));
1149 void
1150 ibtl_free_cq_async_check(ibtl_cq_t *ibtl_cq)
1152 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_cq_async_check(%p)", ibtl_cq);
1154 mutex_enter(&ibtl_async_mutex);
1156 /* if there is an active async, mark this object to be freed */
1157 if (ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) {
1158 ibtl_cq->cq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1159 mutex_exit(&ibtl_async_mutex);
1160 } else { /* free the object now */
1161 mutex_exit(&ibtl_async_mutex);
1162 mutex_destroy(&ibtl_cq->cq_mutex);
1163 kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
1167 void
1168 ibtl_free_srq_async_check(ibtl_srq_t *ibtl_srq)
1170 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_srq_async_check(%p)",
1171 ibtl_srq);
1173 mutex_enter(&ibtl_async_mutex);
1175 /* if there is an active async, mark this object to be freed */
1176 if (ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) {
1177 ibtl_srq->srq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1178 mutex_exit(&ibtl_async_mutex);
1179 } else { /* free the object now */
1180 mutex_exit(&ibtl_async_mutex);
1181 kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
1185 void
1186 ibtl_free_eec_async_check(ibtl_eec_t *ibtl_eec)
1188 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_eec_async_check(%p)",
1189 ibtl_eec);
1191 mutex_enter(&ibtl_async_mutex);
1193 /* if there is an active async, mark this object to be freed */
1194 if (ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) {
1195 ibtl_eec->eec_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1196 mutex_exit(&ibtl_async_mutex);
1197 } else { /* free the object now */
1198 mutex_exit(&ibtl_async_mutex);
1199 kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
1204 * This function differs from above in that we assume this is called
1205 * from non-interrupt context, and never called from the async_thread.
1208 void
1209 ibtl_free_hca_async_check(ibtl_hca_t *ibt_hca)
1211 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_hca_async_check(%p)",
1212 ibt_hca);
1214 mutex_enter(&ibtl_async_mutex);
1216 /* if there is an active async, mark this object to be freed */
1217 if (ibt_hca->ha_async_cnt > 0) {
1218 ibt_hca->ha_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1219 mutex_exit(&ibtl_async_mutex);
1220 } else { /* free the object now */
1221 mutex_exit(&ibtl_async_mutex);
1222 kmem_free(ibt_hca, sizeof (ibtl_hca_t));
1227 * Completion Queue Handling.
1229 * A completion queue can be handled through a simple callback
1230 * at interrupt level, or it may be queued for an ibtl_cq_thread
1231 * to handle. The latter is chosen during ibt_alloc_cq when the
1232 * IBTF_CQ_HANDLER_IN_THREAD is specified.
1235 static void
1236 ibtl_cq_handler_call(ibtl_cq_t *ibtl_cq)
1238 ibt_cq_handler_t cq_handler;
1239 void *arg;
1241 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_cq_handler_call(%p)", ibtl_cq);
1243 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
1244 cq_handler = ibtl_cq->cq_comp_handler;
1245 arg = ibtl_cq->cq_arg;
1246 if (cq_handler != NULL)
1247 cq_handler(ibtl_cq, arg);
1248 else
1249 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_cq_handler_call: "
1250 "no cq_handler for cq %p", ibtl_cq);
1254 * Before ibt_free_cq can continue, we need to ensure no more cq_handler
1255 * callbacks can occur. When we get the mutex, we know there are no
1256 * outstanding cq_handler callbacks. We set the cq_handler to NULL to
1257 * prohibit future callbacks.
1259 void
1260 ibtl_free_cq_check(ibtl_cq_t *ibtl_cq)
1262 mutex_enter(&ibtl_cq->cq_mutex);
1263 ibtl_cq->cq_comp_handler = NULL;
1264 mutex_exit(&ibtl_cq->cq_mutex);
1265 if (ibtl_cq->cq_in_thread) {
1266 mutex_enter(&ibtl_cq_mutex);
1267 --ibtl_cqs_using_threads;
1268 while (ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) {
1269 ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
1270 ibtl_cq->cq_impl_flags |= IBTL_CQ_FREE;
1271 cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);
1273 mutex_exit(&ibtl_cq_mutex);
1278 * Loop forever, calling cq_handlers until the cq list
1279 * is empty.
1282 static void
1283 ibtl_cq_thread(void)
1285 #ifndef __lock_lint
1286 kmutex_t cpr_mutex;
1287 #endif
1288 callb_cpr_t cprinfo;
1290 _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
1291 _NOTE(NO_COMPETING_THREADS_NOW)
1292 mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
1293 CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
1294 "ibtl_cq_thread");
1295 _NOTE(COMPETING_THREADS_NOW)
1297 mutex_enter(&ibtl_cq_mutex);
1299 for (;;) {
1300 if (ibtl_cq_list_start) {
1301 ibtl_cq_t *ibtl_cq;
1303 ibtl_cq = ibtl_cq_list_start;
1304 ibtl_cq_list_start = ibtl_cq->cq_link;
1305 ibtl_cq->cq_link = NULL;
1306 if (ibtl_cq == ibtl_cq_list_end)
1307 ibtl_cq_list_end = NULL;
1309 while (ibtl_cq->cq_impl_flags & IBTL_CQ_CALL_CLIENT) {
1310 ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
1311 mutex_exit(&ibtl_cq_mutex);
1312 ibtl_cq_handler_call(ibtl_cq);
1313 mutex_enter(&ibtl_cq_mutex);
1315 ibtl_cq->cq_impl_flags &= ~IBTL_CQ_PENDING;
1316 if (ibtl_cq->cq_impl_flags & IBTL_CQ_FREE)
1317 cv_broadcast(&ibtl_cq_cv);
1318 } else {
1319 if (ibtl_cq_thread_exit == IBTL_THREAD_EXIT)
1320 break;
1321 mutex_enter(&cpr_mutex);
1322 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1323 mutex_exit(&cpr_mutex);
1325 cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);
1327 mutex_exit(&ibtl_cq_mutex);
1328 mutex_enter(&cpr_mutex);
1329 CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
1330 mutex_exit(&cpr_mutex);
1331 mutex_enter(&ibtl_cq_mutex);
1335 mutex_exit(&ibtl_cq_mutex);
1336 #ifndef __lock_lint
1337 mutex_enter(&cpr_mutex);
1338 CALLB_CPR_EXIT(&cprinfo);
1339 #endif
1340 mutex_destroy(&cpr_mutex);
1345 * ibc_cq_handler()
1347 * Completion Queue Notification Handler.
1350 /*ARGSUSED*/
1351 void
1352 ibc_cq_handler(ibc_clnt_hdl_t ibc_hdl, ibt_cq_hdl_t ibtl_cq)
1354 IBTF_DPRINTF_L4(ibtf_handlers, "ibc_cq_handler(%p, %p)",
1355 ibc_hdl, ibtl_cq);
1357 if (ibtl_cq->cq_in_thread) {
1358 mutex_enter(&ibtl_cq_mutex);
1359 ibtl_cq->cq_impl_flags |= IBTL_CQ_CALL_CLIENT;
1360 if ((ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) == 0) {
1361 ibtl_cq->cq_impl_flags |= IBTL_CQ_PENDING;
1362 ibtl_cq->cq_link = NULL;
1363 if (ibtl_cq_list_end == NULL)
1364 ibtl_cq_list_start = ibtl_cq;
1365 else
1366 ibtl_cq_list_end->cq_link = ibtl_cq;
1367 ibtl_cq_list_end = ibtl_cq;
1368 cv_signal(&ibtl_cq_cv);
1370 mutex_exit(&ibtl_cq_mutex);
1371 return;
1372 } else
1373 ibtl_cq_handler_call(ibtl_cq);
1378 * ibt_enable_cq_notify()
1379 * Enable Notification requests on the specified CQ.
1381 * ibt_cq The CQ handle.
1383 * notify_type Enable notifications for all (IBT_NEXT_COMPLETION)
1384 * completions, or the next Solicited completion
1385 * (IBT_NEXT_SOLICITED) only.
1387 * Completion notifications are disabled by setting the completion
1388 * handler to NULL by calling ibt_set_cq_handler().
1390 ibt_status_t
1391 ibt_enable_cq_notify(ibt_cq_hdl_t ibtl_cq, ibt_cq_notify_flags_t notify_type)
1393 IBTF_DPRINTF_L3(ibtf_handlers, "ibt_enable_cq_notify(%p, %d)",
1394 ibtl_cq, notify_type);
1396 return (IBTL_CQ2CIHCAOPS_P(ibtl_cq)->ibc_notify_cq(
1397 IBTL_CQ2CIHCA(ibtl_cq), ibtl_cq->cq_ibc_cq_hdl, notify_type));
1402 * ibt_set_cq_handler()
1403 * Register a work request completion handler with the IBTF.
1405 * ibt_cq The CQ handle.
1407 * completion_handler The completion handler.
1409 * arg The IBTF client private argument to be passed
1410 * back to the client when calling the CQ
1411 * completion handler.
1413 * Completion notifications are disabled by setting the completion
1414 * handler to NULL. When setting the handler to NULL, no additional
1415 * calls to the previous CQ handler will be initiated, but there may
1416 * be one in progress.
1418 * This function does not otherwise change the state of previous
1419 * calls to ibt_enable_cq_notify().
1421 void
1422 ibt_set_cq_handler(ibt_cq_hdl_t ibtl_cq, ibt_cq_handler_t completion_handler,
1423 void *arg)
1425 IBTF_DPRINTF_L3(ibtf_handlers, "ibt_set_cq_handler(%p, %p, %p)",
1426 ibtl_cq, completion_handler, arg);
1428 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
1429 ibtl_cq->cq_comp_handler = completion_handler;
1430 ibtl_cq->cq_arg = arg;
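/*
 * For illustration only: a hypothetical client might wire up CQ
 * completion handling roughly as follows (my_* names are made up;
 * ibt_poll_cq() is assumed to take a work-completion array and a
 * returned count):
 *
 *	static void
 *	my_cq_handler(ibt_cq_hdl_t cq_hdl, void *arg)
 *	{
 *		my_state_t *statep = arg;
 *		ibt_wc_t wc;
 *		uint_t polled;
 *
 *		while (ibt_poll_cq(cq_hdl, &wc, 1, &polled) == IBT_SUCCESS)
 *			my_handle_completion(statep, &wc);
 *		(void) ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION);
 *	}
 *
 *	ibt_set_cq_handler(cq_hdl, my_cq_handler, statep);
 *	(void) ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION);
 */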
1435 * Inform IBT clients about New HCAs.
1437 * We use taskqs to allow simultaneous notification, with sleeping.
1438 * Since taskqs only allow one argument, we define a structure
1439 * because we need to pass in two arguments.
1442 struct ibtl_new_hca_s {
1443 ibtl_clnt_t *nh_clntp;
1444 ibtl_hca_devinfo_t *nh_hca_devp;
1445 ibt_async_code_t nh_code;
1448 static void
1449 ibtl_tell_client_about_new_hca(void *arg)
1451 struct ibtl_new_hca_s *new_hcap = (struct ibtl_new_hca_s *)arg;
1452 ibtl_clnt_t *clntp = new_hcap->nh_clntp;
1453 ibt_async_event_t async_event;
1454 ibtl_hca_devinfo_t *hca_devp = new_hcap->nh_hca_devp;
1456 bzero(&async_event, sizeof (async_event));
1457 async_event.ev_hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
1458 clntp->clnt_modinfop->mi_async_handler(
1459 clntp->clnt_private, NULL, new_hcap->nh_code, &async_event);
1460 kmem_free(new_hcap, sizeof (*new_hcap));
1461 #ifdef __lock_lint
1463 ibt_hca_hdl_t hca_hdl;
1464 (void) ibt_open_hca(clntp, 0ULL, &hca_hdl);
1466 #endif
1467 mutex_enter(&ibtl_clnt_list_mutex);
1468 if (--hca_devp->hd_async_task_cnt == 0)
1469 cv_signal(&hca_devp->hd_async_task_cv);
1470 if (--clntp->clnt_async_cnt == 0)
1471 cv_broadcast(&ibtl_clnt_cv);
1472 mutex_exit(&ibtl_clnt_list_mutex);
1476 * ibtl_announce_new_hca:
1478 * o First attach these clients in the given order
1479 *	IBMA
 *	IBDM
1480 *	IBCM
1482 * o Next attach all other clients in parallel.
1484 * NOTE: Use the taskq to simultaneously notify all clients of the new HCA.
1485 * Retval from clients is ignored.
1487 void
1488 ibtl_announce_new_hca(ibtl_hca_devinfo_t *hca_devp)
1490 ibtl_clnt_t *clntp;
1491 struct ibtl_new_hca_s *new_hcap;
1493 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_announce_new_hca(%p, %llX)",
1494 hca_devp, hca_devp->hd_hca_attr->hca_node_guid);
1496 mutex_enter(&ibtl_clnt_list_mutex);
1498 clntp = ibtl_clnt_list;
1499 while (clntp != NULL) {
1500 if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
1501 IBTF_DPRINTF_L4(ibtf_handlers,
1502 "ibtl_announce_new_hca: calling IBMF");
1503 if (clntp->clnt_modinfop->mi_async_handler) {
1504 _NOTE(NO_COMPETING_THREADS_NOW)
1505 new_hcap = kmem_alloc(sizeof (*new_hcap),
1506 KM_SLEEP);
1507 new_hcap->nh_clntp = clntp;
1508 new_hcap->nh_hca_devp = hca_devp;
1509 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1510 _NOTE(COMPETING_THREADS_NOW)
1511 clntp->clnt_async_cnt++;
1512 hca_devp->hd_async_task_cnt++;
1514 (void) taskq_dispatch(ibtl_async_taskq,
1515 ibtl_tell_client_about_new_hca, new_hcap,
1516 TQ_SLEEP);
1518 break;
1520 clntp = clntp->clnt_list_link;
1522 if (clntp != NULL)
1523 while (clntp->clnt_async_cnt > 0)
1524 cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1525 clntp = ibtl_clnt_list;
1526 while (clntp != NULL) {
1527 if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
1528 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
1529 "calling %s", clntp->clnt_modinfop->mi_clnt_name);
1530 if (clntp->clnt_modinfop->mi_async_handler) {
1531 _NOTE(NO_COMPETING_THREADS_NOW)
1532 new_hcap = kmem_alloc(sizeof (*new_hcap),
1533 KM_SLEEP);
1534 new_hcap->nh_clntp = clntp;
1535 new_hcap->nh_hca_devp = hca_devp;
1536 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1537 _NOTE(COMPETING_THREADS_NOW)
1538 clntp->clnt_async_cnt++;
1539 hca_devp->hd_async_task_cnt++;
1541 mutex_exit(&ibtl_clnt_list_mutex);
1542 (void) ibtl_tell_client_about_new_hca(
1543 new_hcap);
1544 mutex_enter(&ibtl_clnt_list_mutex);
1546 break;
1548 clntp = clntp->clnt_list_link;
1551 clntp = ibtl_clnt_list;
1552 while (clntp != NULL) {
1553 if (clntp->clnt_modinfop->mi_clnt_class == IBT_CM) {
1554 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
1555 "calling %s", clntp->clnt_modinfop->mi_clnt_name);
1556 if (clntp->clnt_modinfop->mi_async_handler) {
1557 _NOTE(NO_COMPETING_THREADS_NOW)
1558 new_hcap = kmem_alloc(sizeof (*new_hcap),
1559 KM_SLEEP);
1560 new_hcap->nh_clntp = clntp;
1561 new_hcap->nh_hca_devp = hca_devp;
1562 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1563 _NOTE(COMPETING_THREADS_NOW)
1564 clntp->clnt_async_cnt++;
1565 hca_devp->hd_async_task_cnt++;
1567 (void) taskq_dispatch(ibtl_async_taskq,
1568 ibtl_tell_client_about_new_hca, new_hcap,
1569 TQ_SLEEP);
1571 break;
1573 clntp = clntp->clnt_list_link;
1575 if (clntp != NULL)
1576 while (clntp->clnt_async_cnt > 0)
1577 cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1578 clntp = ibtl_clnt_list;
1579 while (clntp != NULL) {
1580 if ((clntp->clnt_modinfop->mi_clnt_class != IBT_DM) &&
1581 (clntp->clnt_modinfop->mi_clnt_class != IBT_CM) &&
1582 (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA)) {
1583 IBTF_DPRINTF_L4(ibtf_handlers,
1584 "ibtl_announce_new_hca: Calling %s ",
1585 clntp->clnt_modinfop->mi_clnt_name);
1586 if (clntp->clnt_modinfop->mi_async_handler) {
1587 _NOTE(NO_COMPETING_THREADS_NOW)
1588 new_hcap = kmem_alloc(sizeof (*new_hcap),
1589 KM_SLEEP);
1590 new_hcap->nh_clntp = clntp;
1591 new_hcap->nh_hca_devp = hca_devp;
1592 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1593 _NOTE(COMPETING_THREADS_NOW)
1594 clntp->clnt_async_cnt++;
1595 hca_devp->hd_async_task_cnt++;
1597 (void) taskq_dispatch(ibtl_async_taskq,
1598 ibtl_tell_client_about_new_hca, new_hcap,
1599 TQ_SLEEP);
1602 clntp = clntp->clnt_list_link;
1605 /* wait for all tasks to complete */
1606 while (hca_devp->hd_async_task_cnt != 0)
1607 cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
1609 /* wakeup thread that may be waiting to send an HCA async */
1610 ASSERT(hca_devp->hd_async_busy == 1);
1611 hca_devp->hd_async_busy = 0;
1612 cv_broadcast(&hca_devp->hd_async_busy_cv);
1613 mutex_exit(&ibtl_clnt_list_mutex);
1617 * ibtl_detach_all_clients:
1619 * Return value - 0 for Success, 1 for Failure
1621 * o First detach general clients.
1623 * o Next detach these clients
1624 * IBCM
1625 * IBDM
1627 * o Finally, detach this client
1628 * IBMA
1631 ibtl_detach_all_clients(ibtl_hca_devinfo_t *hca_devp)
1633 ib_guid_t hcaguid = hca_devp->hd_hca_attr->hca_node_guid;
1634 ibtl_hca_t *ibt_hca;
1635 ibtl_clnt_t *clntp;
1636 int retval;
1638 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_detach_all_clients(%llX)",
1639 hcaguid);
1641 ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));
1643 while (hca_devp->hd_async_busy)
1644 cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
1645 hca_devp->hd_async_busy = 1;
1647 /* First inform general clients asynchronously */
1648 hca_devp->hd_async_event.ev_hca_guid = hcaguid;
1649 hca_devp->hd_async_event.ev_fma_ena = 0;
1650 hca_devp->hd_async_event.ev_chan_hdl = NULL;
1651 hca_devp->hd_async_event.ev_cq_hdl = NULL;
1652 hca_devp->hd_async_code = IBT_HCA_DETACH_EVENT;
1654 ibt_hca = hca_devp->hd_clnt_list;
1655 while (ibt_hca != NULL) {
1656 clntp = ibt_hca->ha_clnt_devp;
1657 if (IBTL_GENERIC_CLIENT(clntp)) {
1658 ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
1659 mutex_enter(&ibtl_async_mutex);
1660 ibt_hca->ha_async_cnt++;
1661 mutex_exit(&ibtl_async_mutex);
1662 hca_devp->hd_async_task_cnt++;
1664 (void) taskq_dispatch(ibtl_async_taskq,
1665 ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
1667 ibt_hca = ibt_hca->ha_clnt_link;
1670 /* wait for all clients to complete */
1671 while (hca_devp->hd_async_task_cnt != 0) {
1672 cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
1674 /* Go thru the clients and check if any have not closed this HCA. */
1675 retval = 0;
1676 ibt_hca = hca_devp->hd_clnt_list;
1677 while (ibt_hca != NULL) {
1678 clntp = ibt_hca->ha_clnt_devp;
1679 if (IBTL_GENERIC_CLIENT(clntp)) {
1680 IBTF_DPRINTF_L2(ibtf_handlers,
1681 "ibtl_detach_all_clients: "
1682 "client '%s' failed to close the HCA.",
1683 ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
1684 retval = 1;
1686 ibt_hca = ibt_hca->ha_clnt_link;
1688 if (retval == 1)
1689 goto bailout;
1691 /* Next inform IBDM asynchronously */
1692 ibt_hca = hca_devp->hd_clnt_list;
1693 while (ibt_hca != NULL) {
1694 clntp = ibt_hca->ha_clnt_devp;
1695 if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
1696 ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
1697 mutex_enter(&ibtl_async_mutex);
1698 ibt_hca->ha_async_cnt++;
1699 mutex_exit(&ibtl_async_mutex);
1700 hca_devp->hd_async_task_cnt++;
1702 mutex_exit(&ibtl_clnt_list_mutex);
1703 ibtl_hca_client_async_task(ibt_hca);
1704 mutex_enter(&ibtl_clnt_list_mutex);
1705 break;
1707 ibt_hca = ibt_hca->ha_clnt_link;
1711 * Next inform IBCM.
1712 * As IBCM doesn't perform ibt_open_hca(), IBCM will not be
1713 * accessible via hca_devp->hd_clnt_list.
1714 * ibtl_cm_async_handler will NOT be NULL, if IBCM is registered.
1716 if (ibtl_cm_async_handler) {
1717 ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
1718 ibtl_cm_clnt_private);
1720 /* wait for all tasks to complete */
1721 while (hca_devp->hd_async_task_cnt != 0)
1722 cv_wait(&hca_devp->hd_async_task_cv,
1723 &ibtl_clnt_list_mutex);
1726 /* Go thru the clients and check if any have not closed this HCA. */
1727 retval = 0;
1728 ibt_hca = hca_devp->hd_clnt_list;
1729 while (ibt_hca != NULL) {
1730 clntp = ibt_hca->ha_clnt_devp;
1731 if (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA) {
1732 IBTF_DPRINTF_L2(ibtf_handlers,
1733 "ibtl_detach_all_clients: "
1734 "client '%s' failed to close the HCA.",
1735 ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
1736 retval = 1;
1738 ibt_hca = ibt_hca->ha_clnt_link;
1740 if (retval == 1)
1741 goto bailout;
1743 /* Finally, inform IBMA */
1744 ibt_hca = hca_devp->hd_clnt_list;
1745 while (ibt_hca != NULL) {
1746 clntp = ibt_hca->ha_clnt_devp;
1747 if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
1748 ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
1749 mutex_enter(&ibtl_async_mutex);
1750 ibt_hca->ha_async_cnt++;
1751 mutex_exit(&ibtl_async_mutex);
1752 hca_devp->hd_async_task_cnt++;
1754 (void) taskq_dispatch(ibtl_async_taskq,
1755 ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
1756 } else
1757 IBTF_DPRINTF_L2(ibtf_handlers,
1758 "ibtl_detach_all_clients: "
1759 "client '%s' is unexpectedly on the client list",
1760 ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
1761 ibt_hca = ibt_hca->ha_clnt_link;
1764 /* wait for IBMA to complete */
1765 while (hca_devp->hd_async_task_cnt != 0) {
1766 cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
1769 /* Check if this HCA's client list is empty. */
1770 ibt_hca = hca_devp->hd_clnt_list;
1771 if (ibt_hca != NULL) {
1772 IBTF_DPRINTF_L2(ibtf_handlers,
1773 "ibtl_detach_all_clients: "
1774 "client '%s' failed to close the HCA.",
1775 ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
1776 retval = 1;
1777 } else
1778 retval = 0;
1780 bailout:
1781 if (retval) {
1782 hca_devp->hd_state = IBTL_HCA_DEV_ATTACHED; /* fix hd_state */
1783 mutex_exit(&ibtl_clnt_list_mutex);
1784 ibtl_announce_new_hca(hca_devp);
1785 mutex_enter(&ibtl_clnt_list_mutex);
1786 } else {
1787 hca_devp->hd_async_busy = 0;
1788 cv_broadcast(&hca_devp->hd_async_busy_cv);
1791 return (retval);
1794 void
1795 ibtl_free_clnt_async_check(ibtl_clnt_t *clntp)
1797 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_clnt_async_check(%p)", clntp);
1799 ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));
1801 /* wait for all asyncs based on "ibtl_clnt_list" to complete */
1802 while (clntp->clnt_async_cnt != 0) {
1803 cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1807 static void
1808 ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp)
1810 mutex_enter(&ibtl_clnt_list_mutex);
1811 if (--clntp->clnt_async_cnt == 0) {
1812 cv_broadcast(&ibtl_clnt_cv);
1814 mutex_exit(&ibtl_clnt_list_mutex);
1817 static void
1818 ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp)
1820 mutex_enter(&ibtl_clnt_list_mutex);
1821 ++clntp->clnt_async_cnt;
1822 mutex_exit(&ibtl_clnt_list_mutex);
1827 * Functions and data structures to inform clients of notifications
1828 * about Multicast Groups that might interest them.
1830 struct ibtl_sm_notice {
1831 ibt_clnt_hdl_t np_ibt_hdl;
1832 ib_gid_t np_sgid;
1833 ibt_subnet_event_code_t np_code;
1834 ibt_subnet_event_t np_event;
1837 static void
1838 ibtl_sm_notice_task(void *arg)
1840 struct ibtl_sm_notice *noticep = (struct ibtl_sm_notice *)arg;
1841 ibt_clnt_hdl_t ibt_hdl = noticep->np_ibt_hdl;
1842 ibt_sm_notice_handler_t sm_notice_handler;
1844 sm_notice_handler = ibt_hdl->clnt_sm_trap_handler;
1845 if (sm_notice_handler != NULL)
1846 sm_notice_handler(ibt_hdl->clnt_sm_trap_handler_arg,
1847 noticep->np_sgid, noticep->np_code, &noticep->np_event);
1848 kmem_free(noticep, sizeof (*noticep));
1849 ibtl_dec_clnt_async_cnt(ibt_hdl);
1853 * Inform the client that MCG notices are not working at this time.
1855 void
1856 ibtl_cm_sm_notice_init_failure(ibtl_cm_sm_init_fail_t *ifail)
1858 ibt_clnt_hdl_t ibt_hdl = ifail->smf_ibt_hdl;
1859 struct ibtl_sm_notice *noticep;
1860 ib_gid_t *sgidp = &ifail->smf_sgid[0];
1861 int i;
1863 for (i = 0; i < ifail->smf_num_sgids; i++) {
1864 _NOTE(NO_COMPETING_THREADS_NOW)
1865 noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
1866 noticep->np_ibt_hdl = ibt_hdl;
1867 noticep->np_sgid = *sgidp++;
1868 noticep->np_code = IBT_SM_EVENT_UNAVAILABLE;
1869 _NOTE(COMPETING_THREADS_NOW)
1870 ibtl_inc_clnt_async_cnt(ibt_hdl);
1871 (void) taskq_dispatch(ibtl_async_taskq,
1872 ibtl_sm_notice_task, noticep, TQ_SLEEP);
1877 * Inform all clients of the event.
1879 void
1880 ibtl_cm_sm_notice_handler(ib_gid_t sgid, ibt_subnet_event_code_t code,
1881 ibt_subnet_event_t *event)
1883 _NOTE(NO_COMPETING_THREADS_NOW)
1884 struct ibtl_sm_notice *noticep;
1885 ibtl_clnt_t *clntp;
1887 mutex_enter(&ibtl_clnt_list_mutex);
1888 clntp = ibtl_clnt_list;
1889 while (clntp != NULL) {
1890 if (clntp->clnt_sm_trap_handler) {
1891 noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
1892 noticep->np_ibt_hdl = clntp;
1893 noticep->np_sgid = sgid;
1894 noticep->np_code = code;
1895 noticep->np_event = *event;
1896 ++clntp->clnt_async_cnt;
1897 (void) taskq_dispatch(ibtl_async_taskq,
1898 ibtl_sm_notice_task, noticep, TQ_SLEEP);
1900 clntp = clntp->clnt_list_link;
1902 mutex_exit(&ibtl_clnt_list_mutex);
1903 _NOTE(COMPETING_THREADS_NOW)
1907 * Record the handler for this client.
1909 void
1910 ibtl_cm_set_sm_notice_handler(ibt_clnt_hdl_t ibt_hdl,
1911 ibt_sm_notice_handler_t sm_notice_handler, void *private)
1913 _NOTE(NO_COMPETING_THREADS_NOW)
1914 ibt_hdl->clnt_sm_trap_handler = sm_notice_handler;
1915 ibt_hdl->clnt_sm_trap_handler_arg = private;
1916 _NOTE(COMPETING_THREADS_NOW)
1921 * ibtl_another_cq_handler_in_thread()
1923 * Conditionally increase the number of cq_threads.
1924 * The number of threads grows, based on the number of cqs using threads.
1926 * The table below controls the number of threads as follows:
1928 * Number of CQs Number of cq_threads
1929 * 0 0
1930 * 1 1
1931 * 2-3 2
1932 * 4-5 3
1933 * 6-9 4
1934 * 10-15 5
1935 * 16-23 6
1936 * 24-31 7
1937 * 32+ 8
1940 #define IBTL_CQ_MAXTHREADS 8
1941 static uint8_t ibtl_cq_scaling[IBTL_CQ_MAXTHREADS] = {
1942 1, 2, 4, 6, 10, 16, 24, 32
1945 static kt_did_t ibtl_cq_did[IBTL_CQ_MAXTHREADS];
1947 void
1948 ibtl_another_cq_handler_in_thread(void)
1950 kthread_t *t;
1951 int my_idx;
1953 mutex_enter(&ibtl_cq_mutex);
1954 if ((ibtl_cq_threads == IBTL_CQ_MAXTHREADS) ||
1955 (++ibtl_cqs_using_threads < ibtl_cq_scaling[ibtl_cq_threads])) {
1956 mutex_exit(&ibtl_cq_mutex);
1957 return;
1959 my_idx = ibtl_cq_threads++;
1960 mutex_exit(&ibtl_cq_mutex);
1961 t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0, TS_RUN,
1962 ibtl_pri - 1);
1963 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
1964 ibtl_cq_did[my_idx] = t->t_did; /* save for thread_join() */
1965 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
1968 void
1969 ibtl_thread_init(void)
1971 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init()");
1973 mutex_init(&ibtl_async_mutex, NULL, MUTEX_DEFAULT, NULL);
1974 cv_init(&ibtl_async_cv, NULL, CV_DEFAULT, NULL);
1975 cv_init(&ibtl_clnt_cv, NULL, CV_DEFAULT, NULL);
1977 mutex_init(&ibtl_cq_mutex, NULL, MUTEX_DEFAULT, NULL);
1978 cv_init(&ibtl_cq_cv, NULL, CV_DEFAULT, NULL);
1981 void
1982 ibtl_thread_init2(void)
1984 int i;
1985 static int initted = 0;
1986 kthread_t *t;
1988 mutex_enter(&ibtl_async_mutex);
1989 if (initted == 1) {
1990 mutex_exit(&ibtl_async_mutex);
1991 return;
1993 initted = 1;
1994 mutex_exit(&ibtl_async_mutex);
1995 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_async_did))
1996 ibtl_async_did = kmem_zalloc(ibtl_async_thread_init * sizeof (kt_did_t),
1997 KM_SLEEP);
1999 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init2()");
2001 for (i = 0; i < ibtl_async_thread_init; i++) {
2002 t = thread_create(NULL, 0, ibtl_async_thread, NULL, 0, &p0,
2003 TS_RUN, ibtl_pri - 1);
2004 ibtl_async_did[i] = t->t_did; /* thread_join() */
2006 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_async_did))
2007 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2008 for (i = 0; i < ibtl_cq_threads; i++) {
2009 t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0,
2010 TS_RUN, ibtl_pri - 1);
2011 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
2012 ibtl_cq_did[i] = t->t_did; /* save for thread_join() */
2013 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
2015 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2018 void
2019 ibtl_thread_fini(void)
2021 int i;
2023 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_fini()");
2025 /* undo the work done by ibtl_thread_init() */
2027 mutex_enter(&ibtl_cq_mutex);
2028 ibtl_cq_thread_exit = IBTL_THREAD_EXIT;
2029 cv_broadcast(&ibtl_cq_cv);
2030 mutex_exit(&ibtl_cq_mutex);
2032 mutex_enter(&ibtl_async_mutex);
2033 ibtl_async_thread_exit = IBTL_THREAD_EXIT;
2034 cv_broadcast(&ibtl_async_cv);
2035 mutex_exit(&ibtl_async_mutex);
2037 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2038 for (i = 0; i < ibtl_cq_threads; i++)
2039 thread_join(ibtl_cq_did[i]);
2040 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2042 if (ibtl_async_did) {
2043 for (i = 0; i < ibtl_async_thread_init; i++)
2044 thread_join(ibtl_async_did[i]);
2046 kmem_free(ibtl_async_did,
2047 ibtl_async_thread_init * sizeof (kt_did_t));
2049 mutex_destroy(&ibtl_cq_mutex);
2050 cv_destroy(&ibtl_cq_cv);
2052 mutex_destroy(&ibtl_async_mutex);
2053 cv_destroy(&ibtl_async_cv);
2054 cv_destroy(&ibtl_clnt_cv);
2057 /* ARGSUSED */
2058 ibt_status_t ibtl_dummy_node_info_cb(ib_guid_t hca_guid, uint8_t port,
2059 ib_lid_t lid, ibt_node_info_t *node_info)
2061 return (IBT_SUCCESS);