/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/ib/ibtl/impl/ibtl.h>
#include <sys/ib/ibtl/impl/ibtl_cm.h>
/*
 * These routines implement (most of) the verbs related to
 * Queue Pairs.
 */

static char ibtf_qp[] = "ibtl";
/* This table indirectly initializes the ibt_cep_next_state[] table. */
typedef struct ibt_cep_next_state_s {
	ibt_cep_state_t		next_state;
	ibt_cep_modify_flags_t	modify_flags;
} ibt_cep_next_state_t;
static struct {
	ibt_cep_state_t		current_state;
	ibt_cep_state_t		next_state;
	ibt_cep_modify_flags_t	modify_flags;
} ibt_cep_next_state_inits[] = {
	{ IBT_STATE_RESET, IBT_STATE_INIT, IBT_CEP_SET_RESET_INIT },
	{ IBT_STATE_INIT, IBT_STATE_RTR, IBT_CEP_SET_INIT_RTR },
	{ IBT_STATE_RTR, IBT_STATE_RTS, IBT_CEP_SET_RTR_RTS }
};
ibt_cep_next_state_t ibt_cep_next_state[IBT_STATE_NUM];

_NOTE(SCHEME_PROTECTS_DATA("unique", ibt_cep_next_state))
/*
 * The following data and functions limit the number of simultaneous QP
 * verb calls made to the CI, which can increase system stability.
 */
int ibtl_qp_calls_curr;
int ibtl_qp_calls_max = 128;	/* limit on # of simultaneous QP verb calls */
kmutex_t ibtl_qp_mutex;
kcondvar_t ibtl_qp_cv;
void
ibtl_qp_flow_control_enter(void)
{
	mutex_enter(&ibtl_qp_mutex);
	while (ibtl_qp_calls_curr >= ibtl_qp_calls_max) {
		cv_wait(&ibtl_qp_cv, &ibtl_qp_mutex);
	}
	++ibtl_qp_calls_curr;		/* count this caller as active */
	mutex_exit(&ibtl_qp_mutex);
}
void
ibtl_qp_flow_control_exit(void)
{
	mutex_enter(&ibtl_qp_mutex);
	cv_signal(&ibtl_qp_cv);
	--ibtl_qp_calls_curr;		/* this QP verb call is done */
	mutex_exit(&ibtl_qp_mutex);
}
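
/*
 * A minimal sketch of the intended usage pattern (for illustration only):
 * every CI entry point invoked by the verbs in this file is bracketed by
 * the enter/exit pair above, e.g.
 *
 *	ibtl_qp_flow_control_enter();
 *	status = (hca_ops_p->ibc_modify_qp)(ibc_hca_hdl, ibc_qp_hdl,
 *	    modify_flags, modify_attrp, NULL);
 *	ibtl_qp_flow_control_exit();
 *
 * At most ibtl_qp_calls_max such calls run concurrently; additional
 * callers block in cv_wait() until an active call exits.
 */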
/*
 * ibt_alloc_qp()
 *	type		Specifies the type of QP to alloc in ibt_alloc_qp()
 *	qp_attrp	Specifies the ibt_qp_alloc_attr_t that are needed to
 *			allocate a QP and transition it to the RTS state for
 *			UDs and INIT state for all other QPs.
 *	queue_sizes_p	Returned sizes for SQ, RQ, SQ WR SGL elements & RQ
 *			WR SGL elements.
 *	qpn_p		Returned QP Number of the allocated QP.
 *	ibt_qp_p	The ibt_qp_hdl_t of the allocated QP.
 *
 *	Allocate a QP with specified attributes.
 */
ibt_status_t
ibt_alloc_qp(ibt_hca_hdl_t hca_hdl, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *qp_attrp, ibt_chan_sizes_t *queue_sizes_p,
    ib_qpn_t *qpn_p, ibt_qp_hdl_t *ibt_qp_p)
{
	ibt_status_t		retval;
	ibtl_channel_t		*chanp;
	ibt_tran_srv_t		qp_type;
	IBTF_DPRINTF_L3(ibtf_qp, "ibt_alloc_qp(%p, %d, %p, %p, %p, %p) ",
	    hca_hdl, type, qp_attrp, queue_sizes_p, qpn_p, ibt_qp_p);
	switch (type) {
	case IBT_UD_RQP:
		qp_type = IBT_UD_SRV;
		break;
	case IBT_RC_RQP:
		qp_type = IBT_RC_SRV;
		break;
	case IBT_UC_RQP:
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Unreliable Connected "
		    "Transport Type is not supported.");
		return (IBT_NOT_SUPPORTED);
	case IBT_RD_RQP:
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Reliable Datagram "
		    "Transport Type is not supported.");
		return (IBT_NOT_SUPPORTED);
	default:
		/* shouldn't happen ILLEGAL Type */
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Illegal Transport Type "
		    "%d", type);
		return (IBT_QP_SRV_TYPE_INVALID);
	}
	/* Get CI CQ handles */
	qp_attrp->qp_ibc_scq_hdl = (qp_attrp->qp_scq_hdl == NULL) ? NULL :
	    qp_attrp->qp_scq_hdl->cq_ibc_cq_hdl;
	qp_attrp->qp_ibc_rcq_hdl = (qp_attrp->qp_rcq_hdl == NULL) ? NULL :
	    qp_attrp->qp_rcq_hdl->cq_ibc_cq_hdl;

	/* Get CI SRQ handle */
	if ((qp_attrp->qp_alloc_flags & IBT_QP_USES_SRQ) &&
	    (qp_attrp->qp_srq_hdl != NULL))
		qp_attrp->qp_ibc_srq_hdl =
		    qp_attrp->qp_srq_hdl->srq_ibc_srq_hdl;
	else
		qp_attrp->qp_ibc_srq_hdl = NULL;
	/* Allocate Channel structure */
	chanp = kmem_zalloc(sizeof (*chanp), KM_SLEEP);

	ibtl_qp_flow_control_enter();
	retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_qp)(
	    IBTL_HCA2CIHCA(hca_hdl), &chanp->ch_qp, type, qp_attrp,
	    queue_sizes_p, qpn_p, &chanp->ch_qp.qp_ibc_qp_hdl);
	ibtl_qp_flow_control_exit();
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: "
		    "Failed to allocate QP: %d", retval);
		kmem_free(chanp, sizeof (*chanp));
		return (retval);
	}
	/* Initialize the internal QP struct. */
	chanp->ch_qp.qp_type = qp_type;
	chanp->ch_qp.qp_hca = hca_hdl;
	chanp->ch_qp.qp_send_cq = qp_attrp->qp_scq_hdl;
	chanp->ch_qp.qp_recv_cq = qp_attrp->qp_rcq_hdl;
	chanp->ch_current_state = IBT_STATE_RESET;

	/*
	 * The IBTA spec does not include the signal type or PD on a QP
	 * query operation. In order to implement the "CLONE" feature,
	 * we need to cache these values. Mostly used by TI client.
	 */
	chanp->ch_qp.qp_flags = qp_attrp->qp_flags;
	chanp->ch_qp.qp_pd_hdl = qp_attrp->qp_pd_hdl;
	mutex_init(&chanp->ch_cm_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&chanp->ch_cm_cv, NULL, CV_DEFAULT, NULL);

	atomic_inc_32(&hca_hdl->ha_qp_cnt);

	IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: SUCCESS: qp %p owned by '%s'",
	    chanp, hca_hdl->ha_clnt_devp->clnt_name);

	*ibt_qp_p = chanp;

	return (IBT_SUCCESS);
}
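
/*
 * A minimal usage sketch of ibt_alloc_qp() as seen from a client driver
 * (illustration only; hca_hdl, send_cq_hdl, recv_cq_hdl and pd_hdl are
 * assumed to be handles the client obtained earlier, and error handling
 * is elided):
 *
 *	ibt_qp_alloc_attr_t	qp_attr;
 *	ibt_chan_sizes_t	sizes;
 *	ib_qpn_t		qpn;
 *	ibt_qp_hdl_t		qp_hdl;
 *
 *	bzero(&qp_attr, sizeof (qp_attr));
 *	qp_attr.qp_scq_hdl = send_cq_hdl;
 *	qp_attr.qp_rcq_hdl = recv_cq_hdl;
 *	qp_attr.qp_pd_hdl = pd_hdl;
 *	(void) ibt_alloc_qp(hca_hdl, IBT_UD_RQP, &qp_attr, &sizes,
 *	    &qpn, &qp_hdl);
 *
 * On success the returned qp_hdl refers to a channel in the RESET state;
 * ibt_initialize_qp() below moves it to a usable state.
 */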
/*
 * ibt_initialize_qp()
 *	ibt_qp		The previously allocated IBT QP Handle.
 *	modify_attrp	Specifies the QP Modify attributes needed to transition
 *			the QP to the RTS state for UDs (including special QPs)
 *			and INIT state for all other QPs.
 *
 *	Transition the QP to the RTS state for UDs (including special QPs)
 *	and INIT state for all other QPs.
 */
ibt_status_t
ibt_initialize_qp(ibt_qp_hdl_t ibt_qp, ibt_qp_info_t *modify_attrp)
{
	ibt_status_t		status;
	ibt_cep_state_t		state;
	ibc_hca_hdl_t		ibc_hca_hdl = IBTL_CHAN2CIHCA(ibt_qp);
	ibc_qp_hdl_t		ibc_qp_hdl = IBTL_CHAN2CIQP(ibt_qp);
	ibc_operations_t	*hca_ops_p = IBTL_CHAN2CIHCAOPS_P(ibt_qp);
	ibt_cep_modify_flags_t	modify_flags;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_initialize_qp(%p, %p)",
	    ibt_qp, modify_attrp);
	/*
	 * Validate the QP Type from the channel with QP Type from the
	 * modify attribute struct.
	 */
	if (ibt_qp->ch_qp.qp_type != modify_attrp->qp_trans) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: "
		    "QP Type mismatch: Chan QP Type<%d>, Modify QP Type<%d>",
		    ibt_qp->ch_qp.qp_type, modify_attrp->qp_trans);
		return (IBT_QP_SRV_TYPE_INVALID);
	}
	if (ibt_qp->ch_current_state != IBT_STATE_RESET) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: "
		    "QP needs to be in RESET state: Chan QP State<%d>",
		    ibt_qp->ch_current_state);
		return (IBT_CHAN_STATE_INVALID);
	}
	/*
	 * Initialize the QP to the RTS state for UDs
	 * and INIT state for all other QPs.
	 */
	switch (modify_attrp->qp_trans) {
	case IBT_UD_SRV:
		/*
		 * Bring the QP to the RTS state.
		 */
		state = IBT_STATE_RESET;
		ibtl_qp_flow_control_enter();
		do {
			modify_attrp->qp_current_state = state;
			modify_flags = ibt_cep_next_state[state].modify_flags;
			modify_attrp->qp_state = state =
			    ibt_cep_next_state[state].next_state;

			IBTF_DPRINTF_L3(ibtf_qp, "ibt_initialize_qp: "
			    "modifying qp state to 0x%x", state);
			status = (hca_ops_p->ibc_modify_qp)(ibc_hca_hdl,
			    ibc_qp_hdl, modify_flags, modify_attrp, NULL);
		} while ((state != IBT_STATE_RTS) && (status == IBT_SUCCESS));
		ibtl_qp_flow_control_exit();

		if (status == IBT_SUCCESS) {
			ibt_qp->ch_current_state = state;
			ibt_qp->ch_transport.ud.ud_port_num =
			    modify_attrp->qp_transport.ud.ud_port;
			ibt_qp->ch_transport.ud.ud_qkey =
			    modify_attrp->qp_transport.ud.ud_qkey;
		}
		break;
	case IBT_RC_SRV:
	case IBT_UC_SRV:
		/*
		 * Bring the QP to the INIT state.
		 */
		modify_attrp->qp_state = IBT_STATE_INIT;

		ibtl_qp_flow_control_enter();
		status = (hca_ops_p->ibc_modify_qp)(ibc_hca_hdl, ibc_qp_hdl,
		    IBT_CEP_SET_RESET_INIT, modify_attrp, NULL);
		ibtl_qp_flow_control_exit();
		if (status == IBT_SUCCESS)
			ibt_qp->ch_current_state = IBT_STATE_INIT;
		break;
	default:
		/* shouldn't happen ILLEGAL Type */
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: Illegal Type %d",
		    modify_attrp->qp_trans);
		return (IBT_QP_SRV_TYPE_INVALID);
	}

	return (status);
}
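
/*
 * A hypothetical follow-on sketch (illustration only): after
 * ibt_alloc_qp() a UD client fills in the UD transport attributes and
 * lets ibt_initialize_qp() walk the QP from RESET through INIT and RTR
 * to RTS using the ibt_cep_next_state[] table above:
 *
 *	ibt_qp_info_t	qp_info;
 *
 *	bzero(&qp_info, sizeof (qp_info));
 *	qp_info.qp_trans = IBT_UD_SRV;
 *	qp_info.qp_transport.ud.ud_port = port_num;
 *	qp_info.qp_transport.ud.ud_qkey = qkey;
 *	qp_info.qp_transport.ud.ud_pkey_ix = pkey_ix;
 *	(void) ibt_initialize_qp(qp_hdl, &qp_info);
 *
 * port_num, qkey and pkey_ix stand in for values the client obtained
 * elsewhere.
 */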
/*
 * ibt_alloc_special_qp
 *	hca_hdl		HCA Handle.
 *	type		Specifies the type of Special QP to be allocated.
 *	qp_attrp	Specifies the ibt_qp_alloc_attr_t that are needed to
 *			allocate a special QP.
 *	queue_sizes_p	Returned sizes for SQ, RQ, SQ WR SGL elements & RQ
 *			WR SGL elements.
 *	qpn_p		Returned qpn of the allocated QP.
 *	ibt_qp_p	The ibt_qp_hdl_t of the allocated QP.
 *
 *	Allocate a special QP with specified attributes.
 */
ibt_status_t
ibt_alloc_special_qp(ibt_hca_hdl_t hca_hdl, uint8_t port, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *qp_attrp, ibt_chan_sizes_t *queue_sizes_p,
    ibt_qp_hdl_t *ibt_qp_p)
{
	ibt_status_t		retval;
	ibtl_channel_t		*chanp;
	ibt_tran_srv_t		sqp_type;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_alloc_special_qp(%p, %d, %x, %p, %p, %p)",
	    hca_hdl, port, type, qp_attrp, queue_sizes_p, ibt_qp_p);
	switch (type) {
	case IBT_SMI_SQP:
	case IBT_GSI_SQP:
		sqp_type = IBT_UD_SRV;
		break;

	case IBT_RAWIP_SQP:
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: Raw IP "
		    "Transport Type is not supported.");
		return (IBT_NOT_SUPPORTED);

	case IBT_RAWETHER_SQP:
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: Raw Ethernet "
		    "Transport Type is not supported.");
		return (IBT_NOT_SUPPORTED);

	default:
		/* Shouldn't happen */
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: "
		    "Illegal Type 0x%x", type);
		return (IBT_QP_SPECIAL_TYPE_INVALID);
	}
	/* convert the CQ handles for the CI */
	qp_attrp->qp_ibc_scq_hdl = qp_attrp->qp_scq_hdl->cq_ibc_cq_hdl;
	qp_attrp->qp_ibc_rcq_hdl = qp_attrp->qp_rcq_hdl->cq_ibc_cq_hdl;
	/* Allocate Channel structure */
	chanp = kmem_zalloc(sizeof (*chanp), KM_SLEEP);

	ibtl_qp_flow_control_enter();
	retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_special_qp)(
	    IBTL_HCA2CIHCA(hca_hdl), port, &chanp->ch_qp, type, qp_attrp,
	    queue_sizes_p, &chanp->ch_qp.qp_ibc_qp_hdl);
	ibtl_qp_flow_control_exit();
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: "
		    "Failed to allocate Special QP: %d", retval);
		kmem_free(chanp, sizeof (*chanp));
		return (retval);
	}
	/* Initialize the internal QP struct. */
	chanp->ch_qp.qp_type = sqp_type;
	chanp->ch_qp.qp_hca = hca_hdl;
	chanp->ch_qp.qp_send_cq = qp_attrp->qp_scq_hdl;
	chanp->ch_qp.qp_recv_cq = qp_attrp->qp_rcq_hdl;
	chanp->ch_current_state = IBT_STATE_RESET;
	mutex_init(&chanp->ch_cm_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&chanp->ch_cm_cv, NULL, CV_DEFAULT, NULL);

	/* Update these variables so that the debugger shows correct values. */
	chanp->ch_qp.qp_flags = qp_attrp->qp_flags;
	chanp->ch_qp.qp_pd_hdl = qp_attrp->qp_pd_hdl;

	atomic_inc_32(&hca_hdl->ha_qp_cnt);

	*ibt_qp_p = chanp;

	return (retval);
}
/*
 * ibt_flush_qp()
 *	ibt_qp		Handle for QP that needs to be flushed.
 *
 *	Put the QP into error state to flush out work requests.
 */
ibt_status_t
ibt_flush_qp(ibt_qp_hdl_t ibt_qp)
{
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_flush_qp(%p)", ibt_qp);

	if (ibt_qp->ch_qp.qp_type == IBT_RC_SRV) {
		mutex_enter(&ibtl_free_qp_mutex);
		if ((ibt_qp->ch_transport.rc.rc_free_flags &
		    (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING)) ==
		    IBTL_RC_QP_CONNECTED) {
			mutex_exit(&ibtl_free_qp_mutex);
			IBTF_DPRINTF_L2(ibtf_qp, "ibt_flush_qp(%p): "
			    "called with a connected RC QP", ibt_qp);
			return (IBT_CHAN_STATE_INVALID);
		}
		mutex_exit(&ibtl_free_qp_mutex);
	}

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	/*
	 * Set the QP state to error to flush any uncompleted WRs.
	 */
	modify_attr.qp_state = IBT_STATE_ERROR;
	modify_attr.qp_trans = ibt_qp->ch_qp.qp_type;

	retval = ibt_modify_qp(ibt_qp, IBT_CEP_SET_STATE, &modify_attr, NULL);

	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_flush_qp: "
		    "failed on chan %p: %d", ibt_qp, retval);
	}

	return (retval);
}
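
/*
 * Teardown sketch (illustration only, based on the checks in ibt_free_qp()
 * below): for an unconnected QP a client would typically flush outstanding
 * work requests and then free the channel:
 *
 *	(void) ibt_flush_qp(qp_hdl);
 *	(void) ibt_free_qp(qp_hdl);
 *
 * A connected RC channel must instead go through ibt_close_rc_channel()
 * so that the CM TIMEWAIT handling (see the ibtl_cm_chan_is_*() functions
 * below) runs before the QP/QPN is released.
 */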
/*
 * ibtl_cm_chan_is_opening()
 *
 *	Inform IBTL that the connection establishment process is in progress
 *	on this channel, so that care is taken while freeing it when the
 *	open is NOT yet complete.
 *
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_is_opening(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_opening(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	ASSERT(chan->ch_transport.rc.rc_free_flags == 0);
	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CONNECTING;
	mutex_exit(&ibtl_free_qp_mutex);
}
/*
 * ibtl_cm_chan_open_is_aborted()
 *
 *	Inform IBTL that the connection establishment on this channel has
 *	been aborted, undoing what was done in ibtl_cm_chan_is_opening().
 *
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_open_is_aborted(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_open_is_aborted(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CONNECTING;
	mutex_exit(&ibtl_free_qp_mutex);
}
/*
 * ibtl_cm_chan_is_open()
 *
 *	Inform IBTL that the connection has been established on this
 *	channel so that a later call to ibtl_cm_chan_is_closed()
 *	will be required to free the QPN used by this channel.
 *
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_is_open(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_open(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CONNECTING;
	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CONNECTED;
	mutex_exit(&ibtl_free_qp_mutex);
}
/*
 * ibtl_cm_is_chan_closing()
 *
 *	Returns 1 if the connection that has been started for this channel
 *	has moved to the TIMEWAIT state; otherwise returns 0.
 *
 *	chan	Channel Handle
 */
int
ibtl_cm_is_chan_closing(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_is_chan_closing(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	if (chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CLOSING) {
		mutex_exit(&ibtl_free_qp_mutex);
		return (1);
	}
	mutex_exit(&ibtl_free_qp_mutex);
	return (0);
}
/*
 * ibtl_cm_is_chan_closed()
 *
 *	Returns 1 if the connection that has been started for this channel
 *	has completed the TIMEWAIT state; otherwise returns 0.
 *
 *	chan	Channel Handle
 */
int
ibtl_cm_is_chan_closed(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_is_chan_closed(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	if (chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CLOSED) {
		mutex_exit(&ibtl_free_qp_mutex);
		return (1);
	}
	mutex_exit(&ibtl_free_qp_mutex);
	return (0);
}
/*
 * ibtl_cm_chan_is_closing()
 *
 *	Inform IBTL that the TIMEWAIT delay for the connection has been
 *	started for this channel so that the QP can be freed.
 *
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_is_closing(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_closing(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	ASSERT(chan->ch_transport.rc.rc_free_flags == IBTL_RC_QP_CONNECTED);
	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CLOSING;
	mutex_exit(&ibtl_free_qp_mutex);
}
/*
 * ibtl_cm_chan_is_closed()
 *
 *	Inform IBTL that the TIMEWAIT delay for the connection has been
 *	reached for this channel so that the QPN can be reused.
 *
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_is_closed(ibt_channel_hdl_t chan)
{
	ibt_status_t	status;
	ibtl_hca_t	*ibtl_hca = chan->ch_qp.qp_hca;

	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_closed(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	ASSERT((chan->ch_transport.rc.rc_free_flags &
	    (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING)) ==
	    (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING));

	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CONNECTED;
	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CLOSING;
	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CLOSED;

	ibtl_cm_set_chan_private(chan, NULL);

	if ((chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_FREED) == 0) {
		mutex_exit(&ibtl_free_qp_mutex);
		return;
	}
	mutex_exit(&ibtl_free_qp_mutex);
	ibtl_qp_flow_control_enter();
	if ((status = (IBTL_CHAN2CIHCAOPS_P(chan)->ibc_release_qpn)
	    (IBTL_CHAN2CIHCA(chan), chan->ch_transport.rc.rc_qpn_hdl)) ==
	    IBT_SUCCESS) {
		/* effectively, this is kmem_free(chan); */
		ibtl_free_qp_async_check(&chan->ch_qp);

		/* decrement ha_qpn_cnt and check for close in progress */
		ibtl_close_hca_check(ibtl_hca);
	} else
		IBTF_DPRINTF_L2(ibtf_qp, "ibtl_cm_chan_is_closed: "
		    "ibc_release_qpn failed: status = %d\n", status);
	ibtl_qp_flow_control_exit();
}
/*
 * ibtl_cm_chan_is_reused()
 *
 *	Inform IBTL that the channel is going to be re-used.
 *
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_is_reused(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_reused(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	ASSERT(((chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CONNECTED) !=
	    IBTL_RC_QP_CONNECTED));

	/* channel is no longer in closed state, shall be re-used */
	chan->ch_transport.rc.rc_free_flags = 0;

	mutex_exit(&ibtl_free_qp_mutex);
}
/*
 * Function:	ibt_free_qp()
 *
 * Input:	ibt_qp	Handle for Channel(QP) that needs to be freed.
 *
 * Returns:	IBT_SUCCESS
 *		IBT_QP_STATE_INVALID
 *
 * Description:
 *		Free a previously allocated QP.
 */
ibt_status_t
ibt_free_qp(ibt_qp_hdl_t ibt_qp)
{
	ibt_status_t		status;
	ibtl_hca_t		*ibtl_hca = ibt_qp->ch_qp.qp_hca;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p)", ibt_qp);

	if (ibt_qp->ch_qp.qp_type == IBT_RC_SRV) {
		ibtl_qp_flow_control_enter();
		mutex_enter(&ibtl_free_qp_mutex);
		if (ibt_qp->ch_transport.rc.rc_free_flags &
		    IBTL_RC_QP_CONNECTING) {
			IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: ERROR - "
			    "Channel establishment is still in PROGRESS.");
			mutex_exit(&ibtl_free_qp_mutex);
			ibtl_qp_flow_control_exit();
			return (IBT_CHAN_STATE_INVALID);
		}
		if (ibt_qp->ch_transport.rc.rc_free_flags &
		    IBTL_RC_QP_CONNECTED) {
			if ((ibt_qp->ch_transport.rc.rc_free_flags &
			    IBTL_RC_QP_CLOSING) == 0) {
				IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: ERROR - "
				    "need to call ibt_close_rc_channel");
				mutex_exit(&ibtl_free_qp_mutex);
				ibtl_qp_flow_control_exit();
				return (IBT_CHAN_STATE_INVALID);
			}
			ibt_qp->ch_transport.rc.rc_free_flags |=
			    IBTL_RC_QP_FREED;
			status = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_free_qp)
			    (IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp),
			    IBC_FREE_QP_ONLY,
			    &ibt_qp->ch_transport.rc.rc_qpn_hdl);
			mutex_exit(&ibtl_free_qp_mutex);
			ibtl_qp_flow_control_exit();

			if (status == IBT_SUCCESS) {
				mutex_enter(&ibtl_clnt_list_mutex);
				ibtl_hca->ha_qpn_cnt++;
				mutex_exit(&ibtl_clnt_list_mutex);
				atomic_dec_32(&ibtl_hca->ha_qp_cnt);
				IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p) - "
				    "SUCCESS", ibt_qp);
			} else
				IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: "
				    "ibc_free_qp failed: status = %d", status);
			return (status);
		}
		mutex_exit(&ibtl_free_qp_mutex);
	} else
		ibtl_qp_flow_control_enter();

	status = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_free_qp)
	    (IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp),
	    IBC_FREE_QP_AND_QPN, NULL);
	ibtl_qp_flow_control_exit();

	if (status == IBT_SUCCESS) {
		/* effectively, this is kmem_free(ibt_qp); */
		ibtl_free_qp_async_check(&ibt_qp->ch_qp);

		atomic_dec_32(&ibtl_hca->ha_qp_cnt);
		IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p) - SUCCESS", ibt_qp);
	} else
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: "
		    "ibc_free_qp failed with error %d", status);

	return (status);
}
/* helper function for ibt_query_qp */
static void
ibtl_fillin_sgid(ibt_cep_path_t *pathp, ibtl_hca_devinfo_t *hca_devp)
{
	uint8_t		port;
	uint_t		sgid_ix;
	ib_gid_t	*sgidp;

	port = pathp->cep_hca_port_num;
	sgid_ix = pathp->cep_adds_vect.av_sgid_ix;
	if (port == 0 || port > hca_devp->hd_hca_attr->hca_nports ||
	    sgid_ix >= IBTL_HDIP2SGIDTBLSZ(hca_devp)) {
		pathp->cep_adds_vect.av_sgid.gid_prefix = 0;
		pathp->cep_adds_vect.av_sgid.gid_guid = 0;
	} else {
		mutex_enter(&ibtl_clnt_list_mutex);
		sgidp = hca_devp->hd_portinfop[port-1].p_sgid_tbl;
		pathp->cep_adds_vect.av_sgid = sgidp[sgid_ix];
		mutex_exit(&ibtl_clnt_list_mutex);
	}
}
/*
 * Function:	ibt_query_qp
 *
 * Input:	ibt_qp			- The IBT QP Handle.
 *
 * Output:	ibt_qp_query_attrp	- Points to a ibt_qp_query_attr_t
 *					  that on return contains all the
 *					  attributes of the specified qp.
 *
 * Returns:	IBT_SUCCESS
 *
 * Description:
 *		Query QP attributes.
 */
ibt_status_t
ibt_query_qp(ibt_qp_hdl_t ibt_qp, ibt_qp_query_attr_t *qp_query_attrp)
{
	ibt_status_t		retval;
	ibtl_hca_devinfo_t	*hca_devp;
	ibt_qp_info_t		*qp_infop;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_query_qp(%p, %p)",
	    ibt_qp, qp_query_attrp);

	ibtl_qp_flow_control_enter();
	retval = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_query_qp(
	    IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp), qp_query_attrp));
	ibtl_qp_flow_control_exit();
	if (retval == IBT_SUCCESS) {
		ibt_qp->ch_current_state = qp_query_attrp->qp_info.qp_state;

		/* need to fill in sgid from port and sgid_ix for RC and UC */
		hca_devp = ibt_qp->ch_qp.qp_hca->ha_hca_devp;
		qp_infop = &qp_query_attrp->qp_info;

		switch (qp_infop->qp_trans) {
		case IBT_RC_SRV:
			ibtl_fillin_sgid(&qp_infop->qp_transport.rc.rc_path,
			    hca_devp);
			ibtl_fillin_sgid(&qp_infop->qp_transport.rc.rc_alt_path,
			    hca_devp);
			break;
		case IBT_UC_SRV:
			ibtl_fillin_sgid(&qp_infop->qp_transport.uc.uc_path,
			    hca_devp);
			ibtl_fillin_sgid(&qp_infop->qp_transport.uc.uc_alt_path,
			    hca_devp);
			break;
		default:
			break;
		}
	} else {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_query_qp: "
		    "failed on chan %p: %d", ibt_qp, retval);
	}

	return (retval);
}
/*
 * ibt_modify_qp()
 *	ibt_qp		The IBT QP Handle.
 *	flags		Specifies which attributes in ibt_qp_mod_attr_t
 *			are to be modified.
 *	qp_attrp	Points to an ibt_qp_mod_attr_t struct that contains all
 *			the attributes of the specified QP that a client is
 *			allowed to modify after a QP has been allocated.
 *	actual_sz	Returned actual queue sizes.
 *
 *	Modify the attributes of an existing QP.
 */
ibt_status_t
ibt_modify_qp(ibt_qp_hdl_t ibt_qp, ibt_cep_modify_flags_t flags,
    ibt_qp_info_t *modify_attrp, ibt_queue_sizes_t *actual_sz)
{
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_modify_qp(%p, %d, %p, %p)",
	    ibt_qp, flags, modify_attrp, actual_sz);

	ibtl_qp_flow_control_enter();
	retval = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_modify_qp)(
	    IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp), flags,
	    modify_attrp, actual_sz);
	ibtl_qp_flow_control_exit();
	if (retval == IBT_SUCCESS) {
		ibt_qp->ch_current_state = modify_attrp->qp_state;
		if (ibt_qp->ch_qp.qp_type == IBT_UD_SRV) {
			if (flags & (IBT_CEP_SET_PORT | IBT_CEP_SET_RESET_INIT))
				ibt_qp->ch_transport.ud.ud_port_num =
				    modify_attrp->qp_transport.ud.ud_port;
			if (flags & (IBT_CEP_SET_QKEY | IBT_CEP_SET_RESET_INIT))
				ibt_qp->ch_transport.ud.ud_qkey =
				    modify_attrp->qp_transport.ud.ud_qkey;
		}
	} else {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_qp: failed on chan %p: %d",
		    ibt_qp, retval);

		if (retval == IBT_CHAN_STATE_INVALID) {
			/* That means our cache had invalid QP state value. */
			ibt_qp_query_attr_t	qp_attr;

			/* Query the channel (QP) */
			if (ibt_query_qp(ibt_qp, &qp_attr) == IBT_SUCCESS)
				ibt_qp->ch_current_state =
				    qp_attr.qp_info.qp_state;
		}
	}
	return (retval);
}
/*
 * ibt_migrate_path()
 *	rc_chan		A previously allocated RC channel handle.
 *
 *	Returns IBT_SUCCESS on success, else an appropriate error.
 *
 *	Force the CI to use the alternate path. The alternate path becomes
 *	the primary path. A new alternate path should be loaded and enabled.
 *	Assumes that the given channel is in the RTS/SQD state.
 */
ibt_status_t
ibt_migrate_path(ibt_channel_hdl_t rc_chan)
{
	ibt_status_t		retval;
	ibt_qp_info_t		qp_info;
	ibt_qp_query_attr_t	qp_attr;
	ibt_cep_modify_flags_t	cep_flags;
	int			retries = 1;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_migrate_path: channel %p", rc_chan);

	if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path: "
		    "Invalid Channel type: Applicable only to RC Channel");
		return (IBT_CHAN_SRV_TYPE_INVALID);
	}

	if (rc_chan->ch_current_state != IBT_STATE_RTS &&
	    rc_chan->ch_current_state != IBT_STATE_SQD) {
		if (ibt_query_qp(rc_chan, &qp_attr) == IBT_SUCCESS) {
			/* ch_current_state is fixed by ibt_query_qp */
			if (rc_chan->ch_current_state != IBT_STATE_RTS &&
			    rc_chan->ch_current_state != IBT_STATE_SQD)
				return (IBT_CHAN_STATE_INVALID);
		} else	/* query_qp should never really fail */
			return (IBT_CHAN_STATE_INVALID);
	}

retry:
	cep_flags = IBT_CEP_SET_MIG | IBT_CEP_SET_STATE;
	qp_info.qp_state = rc_chan->ch_current_state;
	qp_info.qp_current_state = rc_chan->ch_current_state;
	qp_info.qp_trans = IBT_RC_SRV;
	qp_info.qp_transport.rc.rc_mig_state = IBT_STATE_MIGRATED;
	retval = ibt_modify_qp(rc_chan, cep_flags, &qp_info, NULL);

	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path:"
		    " ibt_modify_qp() returned = %d", retval);
		if (rc_chan->ch_current_state != qp_info.qp_state &&
		    retries-- > 0) {
			/*
			 * That means our cached 'state' was invalid.
			 * We know ibt_modify_qp() fixed it up, so it
			 * might be worth retrying.
			 */
			if (rc_chan->ch_current_state != IBT_STATE_RTS &&
			    rc_chan->ch_current_state != IBT_STATE_SQD)
				return (IBT_CHAN_STATE_INVALID);
			IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path:"
			    " retrying after 'state' fixed");
			goto retry;
		}
	}
	return (retval);
}
/*
 * ibt_set_qp_private()
 *	ibt_qp		The ibt_qp_hdl_t of the allocated QP.
 *	clnt_private	The client private data.
 *
 *	Set the client private data.
 */
void
ibt_set_qp_private(ibt_qp_hdl_t ibt_qp, void *clnt_private)
{
	ibt_qp->ch_clnt_private = clnt_private;
}
/*
 * ibt_get_qp_private()
 *	ibt_qp		The ibt_qp_hdl_t of the allocated QP.
 *
 *	Returns:	The client private data.
 *
 *	Get the client private data.
 */
void *
ibt_get_qp_private(ibt_qp_hdl_t ibt_qp)
{
	return (ibt_qp->ch_clnt_private);
}
/*
 * ibt_qp_to_hca_guid()
 *	ibt_qp		The ibt_qp_hdl_t of the allocated QP.
 *	hca_guid	Returned HCA GUID on which the specified QP is
 *			allocated. Valid if it is non-NULL on return.
 *
 *	A helper function to retrieve the HCA GUID for the specified QP.
 */
ib_guid_t
ibt_qp_to_hca_guid(ibt_qp_hdl_t ibt_qp)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibt_qp_to_hca_guid(%p)", ibt_qp);

	return (IBTL_HCA2HCAGUID(IBTL_CHAN2HCA(ibt_qp)));
}
/*
 * ibt_recover_ud_qp()
 *	ibt_qp		A QP Handle which is in the SQ Error state.
 *
 *	IBT_QP_SRV_TYPE_INVALID
 *	IBT_QP_STATE_INVALID
 *
 *	Recover a UD QP which has transitioned to the SQ Error state. The
 *	ibt_recover_ud_qp() transitions the QP from the SQ Error state to
 *	the Ready-To-Send QP state.
 *
 *	If a work request posted to a UD QP's send queue completes with an
 *	error (see ibt_wc_status_t), the QP gets transitioned to the SQ Error
 *	state. In order to reuse this QP, ibt_recover_ud_qp() can be used to
 *	recover the QP to a usable (Ready-to-Send) state.
 */
ibt_status_t
ibt_recover_ud_qp(ibt_qp_hdl_t ibt_qp)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibt_recover_ud_qp(%p)", ibt_qp);

	return (ibt_recover_ud_channel(IBTL_QP2CHAN(ibt_qp)));
}
/*
 * ibt_recycle_ud()
 *	ud_chan		The IBT UD QP Handle.
 *	various attributes
 *
 *	IBT_CHAN_SRV_TYPE_INVALID
 *	IBT_CHAN_STATE_INVALID
 *
 *	Revert the UD QP back to a usable state.
 */
ibt_status_t
ibt_recycle_ud(ibt_channel_hdl_t ud_chan, uint8_t hca_port_num,
    uint16_t pkey_ix, ib_qkey_t qkey)
{
	ibt_qp_query_attr_t	qp_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_recycle_ud(%p, %d, %x, %x): ",
	    ud_chan, hca_port_num, pkey_ix, qkey);

	if (ud_chan->ch_qp.qp_type != IBT_UD_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "chan %p is not a UD channel", ud_chan);
		return (IBT_CHAN_SRV_TYPE_INVALID);
	}

	retval = ibt_query_qp(ud_chan, &qp_attr);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "ibt_query_qp failed on chan %p: %d", ud_chan, retval);
		return (retval);
	}
	if (qp_attr.qp_info.qp_state != IBT_STATE_ERROR) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "chan %p is in state %d (not in ERROR state)",
		    ud_chan, qp_attr.qp_info.qp_state);
		ud_chan->ch_current_state = qp_attr.qp_info.qp_state;
		return (IBT_CHAN_STATE_INVALID);
	}

	/* transition the QP from ERROR to RESET */
	qp_attr.qp_info.qp_state = IBT_STATE_RESET;
	qp_attr.qp_info.qp_trans = ud_chan->ch_qp.qp_type;
	retval = ibt_modify_qp(ud_chan, IBT_CEP_SET_STATE, &qp_attr.qp_info,
	    NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "ibt_modify_qp(ERROR=>RESET) failed on chan %p: %d",
		    ud_chan, retval);
		return (retval);
	}
	ud_chan->ch_current_state = IBT_STATE_RESET;

	/* transition the QP back to RTS */
	qp_attr.qp_info.qp_transport.ud.ud_port = hca_port_num;
	qp_attr.qp_info.qp_transport.ud.ud_qkey = qkey;
	qp_attr.qp_info.qp_transport.ud.ud_pkey_ix = pkey_ix;
	retval = ibt_initialize_qp(ud_chan, &qp_attr.qp_info);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "ibt_initialize_qp failed on chan %p: %d", ud_chan, retval);
		/* the man page says the QP should be left in ERROR state */
		(void) ibt_flush_qp(ud_chan);
	}
	return (retval);
}
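
/*
 * Hypothetical recovery sketch (illustration only): a client that sees
 * its UD channel drop into the ERROR state can re-arm it with the same
 * port, P_Key index, and Q_Key it used at setup time:
 *
 *	if (ibt_recycle_ud(ud_chan, port_num, pkey_ix, qkey) !=
 *	    IBT_SUCCESS) {
 *		the channel was left in the ERROR state; tear it down
 *	}
 *
 * port_num, pkey_ix, and qkey above stand in for whatever the client
 * originally passed to ibt_initialize_qp() for this channel.
 */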
/*
 * ibt_pause_sendq()
 *	chan		The IBT QP Handle.
 *	modify_flags	IBT_CEP_SET_NOTHING or IBT_CEP_SET_SQD_EVENT
 *
 *	IBT_CHAN_HDL_INVALID
 *	IBT_CHAN_STATE_INVALID
 *
 *	Place the send queue of the specified channel into the send queue
 *	drained (SQD) state.
 */
ibt_status_t
ibt_pause_sendq(ibt_channel_hdl_t chan, ibt_cep_modify_flags_t modify_flags)
{
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_pause_sendq(%p, %x)", chan, modify_flags);

	modify_flags &= IBT_CEP_SET_SQD_EVENT;	/* ignore other bits */
	modify_flags |= IBT_CEP_SET_STATE;

	bzero(&modify_attr, sizeof (ibt_qp_info_t));
	/*
	 * Set the QP state to SQD.
	 */
	modify_attr.qp_state = IBT_STATE_SQD;
	modify_attr.qp_trans = chan->ch_qp.qp_type;

	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);

	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_pause_sendq: "
		    "failed on chan %p: %d", chan, retval);
	}
	return (retval);
}
/*
 * ibt_unpause_sendq()
 *	chan		The IBT Channel Handle.
 *
 *	IBT_CHAN_HDL_INVALID
 *	IBT_CHAN_STATE_INVALID
 *
 *	Un-pause the previously paused channel. This call transitions the
 *	QP from the SQD to the RTS state.
 */
ibt_status_t
ibt_unpause_sendq(ibt_channel_hdl_t chan)
{
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_unpause_sendq(%p)", chan);

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	/*
	 * Set the QP state to RTS.
	 */
	modify_attr.qp_current_state = IBT_STATE_SQD;
	modify_attr.qp_state = IBT_STATE_RTS;
	modify_attr.qp_trans = chan->ch_qp.qp_type;

	retval = ibt_modify_qp(chan, IBT_CEP_SET_STATE, &modify_attr, NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_unpause_sendq: "
		    "failed on chan %p: %d", chan, retval);
	}
	return (retval);
}
/*
 * ibt_resize_queues()
 *	chan		A previously allocated channel handle.
 *	request_sz	Requested new sizes.
 *	actual_sz	Returned actual sizes.
 *
 *	Resize the SendQ/RecvQ sizes of a channel. Can only be called on
 *	a previously opened channel.
 */
ibt_status_t
ibt_resize_queues(ibt_channel_hdl_t chan, ibt_qflags_t flags,
    ibt_queue_sizes_t *request_sz, ibt_queue_sizes_t *actual_sz)
{
	ibt_cep_modify_flags_t	modify_flags = IBT_CEP_SET_STATE;
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_resize_queues(%p, 0x%X, %p, %p)",
	    chan, flags, request_sz, actual_sz);

	if ((flags & (IBT_SEND_Q | IBT_RECV_Q)) == 0) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_resize_queues: "
		    "Flags <0x%X> not set", flags);
		return (IBT_INVALID_PARAM);
	}

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	modify_attr.qp_current_state = chan->ch_current_state;
	modify_attr.qp_trans = chan->ch_qp.qp_type;
	modify_attr.qp_state = chan->ch_current_state;

	if (flags & IBT_SEND_Q) {
		modify_attr.qp_sq_sz = request_sz->qs_sq;
		modify_flags |= IBT_CEP_SET_SQ_SIZE;
	}

	if (flags & IBT_RECV_Q) {
		modify_attr.qp_rq_sz = request_sz->qs_rq;
		modify_flags |= IBT_CEP_SET_RQ_SIZE;
	}

	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, actual_sz);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_resize_queues: "
		    "failed on QP %p: %d", chan, retval);
	}
	return (retval);
}
/*
 * ibt_query_queues()
 *	chan		A previously allocated channel handle.
 *	actual_sz	Returned actual sizes.
 *
 *	Query the SendQ/RecvQ sizes of a channel.
 */
ibt_status_t
ibt_query_queues(ibt_channel_hdl_t chan, ibt_queue_sizes_t *actual_sz)
{
	ibt_status_t		retval;
	ibt_qp_query_attr_t	qp_query_attr;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_query_queues(%p)", chan);

	/* Perform Query QP and retrieve QP sizes. */
	retval = ibt_query_qp(chan, &qp_query_attr);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_query_queues: "
		    "ibt_query_qp failed: qp %p: %d", chan, retval);
		return (retval);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(actual_sz->qs_rq,
	    actual_sz->qs_sq))
	actual_sz->qs_sq = qp_query_attr.qp_info.qp_sq_sz;
	actual_sz->qs_rq = qp_query_attr.qp_info.qp_rq_sz;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(actual_sz->qs_rq,
	    actual_sz->qs_sq))
	chan->ch_current_state = qp_query_attr.qp_info.qp_state;

	return (retval);
}
/*
 * ibt_modify_rdma()
 *	rc_chan		A previously allocated channel handle.
 *
 *	modify_flags	Bitwise "or" of any of the following:
 *			IBT_CEP_SET_RDMA_R	Enable/Disable RDMA RD
 *			IBT_CEP_SET_RDMA_W	Enable/Disable RDMA WR
 *			IBT_CEP_SET_ATOMIC	Enable/Disable Atomics
 *
 *	flags		Channel End Point (CEP) Disable Flags (0 => enable).
 *			IBT_CEP_NO_RDMA_RD	Disable incoming RDMA RD's
 *			IBT_CEP_NO_RDMA_WR	Disable incoming RDMA WR's
 *			IBT_CEP_NO_ATOMIC	Disable incoming Atomics.
 *
 *	IBT_QP_SRV_TYPE_INVALID
 *	IBT_CHAN_HDL_INVALID
 *	IBT_CHAN_ATOMICS_NOT_SUPPORTED
 *	IBT_CHAN_STATE_INVALID
 *
 *	Enable/disable RDMA operations. To enable an operation, clear the
 *	"disable" flag. This function can be called when the channel is in
 *	the INIT, RTS or SQD state; if called in any other state,
 *	IBT_CHAN_STATE_INVALID is returned. When the operation completes,
 *	the channel state is left unchanged.
 */
ibt_status_t
ibt_modify_rdma(ibt_channel_hdl_t rc_chan,
    ibt_cep_modify_flags_t modify_flags, ibt_cep_flags_t flags)
{
	ibt_status_t		retval;
	ibt_qp_info_t		modify_attr;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_modify_rdma(%p, 0x%x, 0x%x)",
	    rc_chan, modify_flags, flags);

	if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: "
		    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
		    rc_chan->ch_qp.qp_type);
		return (IBT_QP_SRV_TYPE_INVALID);
	}

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	/*
	 * Can only call this function when the channel is in the INIT,
	 * RTS or SQD state.
	 */
	if ((rc_chan->ch_current_state != IBT_STATE_INIT) &&
	    (rc_chan->ch_current_state != IBT_STATE_RTS) &&
	    (rc_chan->ch_current_state != IBT_STATE_SQD)) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: Invalid Channel "
		    "state: 0x%X", rc_chan->ch_current_state);
		return (IBT_CHAN_STATE_INVALID);
	}

	modify_attr.qp_state = modify_attr.qp_current_state =
	    rc_chan->ch_current_state;
	modify_attr.qp_trans = rc_chan->ch_qp.qp_type;
	modify_attr.qp_flags = flags;

	modify_flags &= (IBT_CEP_SET_RDMA_R | IBT_CEP_SET_RDMA_W |
	    IBT_CEP_SET_ATOMIC);
	modify_flags |= IBT_CEP_SET_STATE;

	retval = ibt_modify_qp(rc_chan, modify_flags, &modify_attr, NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: "
		    "failed on chan %p: %d", rc_chan, retval);
	}
	return (retval);
}
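
/*
 * Hypothetical example (illustration only): disable incoming RDMA writes
 * on an RC channel while leaving RDMA reads and atomics alone:
 *
 *	(void) ibt_modify_rdma(rc_chan, IBT_CEP_SET_RDMA_W,
 *	    IBT_CEP_NO_RDMA_WR);
 *
 * Passing 0 for the flags argument with the same modify_flags would
 * re-enable incoming RDMA writes, since the flags are "disable" bits.
 */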
/*
 * ibt_set_rdma_resource
 *	chan		A previously allocated RC channel handle.
 *
 *	modify_flags	Bitwise "or" of any of the following:
 *			IBT_CEP_SET_RDMARA_OUT	Initiator depth (rdma_ra_out)
 *			IBT_CEP_SET_RDMARA_IN	Responder Resources
 *						(rdma_ra_in)
 *
 *	rdma_ra_out	Outgoing RDMA Reads/Atomics
 *	rdma_ra_in	Incoming RDMA Reads/Atomics
 *
 *	Change the number of resources to be used for incoming and outgoing
 *	RDMA reads & Atomics. Can only be called on a previously opened
 *	RC channel. Can only be called on a paused channel, and this will
 *	un-pause that channel.
 */
ibt_status_t
ibt_set_rdma_resource(ibt_channel_hdl_t chan,
    ibt_cep_modify_flags_t modify_flags, uint8_t rdma_ra_out,
    uint8_t resp_rdma_ra_out)
{
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_set_rdma_resource(%p, 0x%x, %d, %d)",
	    chan, modify_flags, rdma_ra_out, resp_rdma_ra_out);

	if (chan->ch_qp.qp_type != IBT_RC_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_set_rdma_resource: "
		    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
		    chan->ch_qp.qp_type);
		return (IBT_CHAN_SRV_TYPE_INVALID);
	}

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	modify_attr.qp_trans = chan->ch_qp.qp_type;
	modify_attr.qp_state = IBT_STATE_SQD;

	modify_attr.qp_transport.rc.rc_rdma_ra_out = rdma_ra_out;
	modify_attr.qp_transport.rc.rc_rdma_ra_in = resp_rdma_ra_out;
	modify_flags &= (IBT_CEP_SET_RDMARA_OUT | IBT_CEP_SET_RDMARA_IN);
	modify_flags |= IBT_CEP_SET_STATE;

	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_set_rdma_resource: "
		    "failed on chan %p: %d", chan, retval);
	}
	return (retval);
}
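
/*
 * Usage sketch (illustration only): since ibt_set_rdma_resource() must be
 * called on a paused channel and itself un-pauses it, a client would
 * typically do:
 *
 *	(void) ibt_pause_sendq(rc_chan, IBT_CEP_SET_NOTHING);
 *	(void) ibt_set_rdma_resource(rc_chan,
 *	    IBT_CEP_SET_RDMARA_OUT | IBT_CEP_SET_RDMARA_IN,
 *	    new_ra_out, new_ra_in);
 *
 * new_ra_out and new_ra_in are hypothetical names for the desired
 * outgoing/incoming RDMA Read/Atomic depths.
 */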
/*
 * ibt_change_port()
 *	rc_chan		A previously allocated RC channel handle.
 *	port_num	New HCA port.
 *
 *	Change the primary physical port of a channel. (This is done only if
 *	the HCA supports this capability.)
 */
ibt_status_t
ibt_change_port(ibt_channel_hdl_t chan, uint8_t port_num)
{
	ibt_cep_modify_flags_t	modify_flags;
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_change_port(%p, %d)", chan, port_num);

	if (chan->ch_qp.qp_type != IBT_RC_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_change_port: "
		    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
		    chan->ch_qp.qp_type);
		return (IBT_CHAN_SRV_TYPE_INVALID);
	}
	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	modify_attr.qp_state = IBT_STATE_SQD;
	modify_attr.qp_trans = chan->ch_qp.qp_type;
	modify_attr.qp_transport.rc.rc_path.cep_hca_port_num = port_num;

	modify_flags = IBT_CEP_SET_STATE | IBT_CEP_SET_PORT;

	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_change_port: "
		    "failed on chan %p: %d", chan, retval);
	}
	return (retval);
}
void
ibtl_init_cep_states(void)
{
	int	index;
	int	ibt_nstate_inits;

	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_init_cep_states()");

	ibt_nstate_inits = sizeof (ibt_cep_next_state_inits) /
	    sizeof (ibt_cep_next_state_inits[0]);

	/*
	 * Initialize CEP next state table, using an indirect lookup table so
	 * that this code isn't dependent on the ibt_cep_state_t enum values.
	 */
	for (index = 0; index < ibt_nstate_inits; index++) {
		ibt_cep_state_t	state;

		state = ibt_cep_next_state_inits[index].current_state;

		ibt_cep_next_state[state].next_state =
		    ibt_cep_next_state_inits[index].next_state;

		ibt_cep_next_state[state].modify_flags =
		    ibt_cep_next_state_inits[index].modify_flags;
	}
}