1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
26 * ibtl_chan.c
28 * This file contains Transport API functions related to Channel Functions
29 * and internal Protection Domain and Address Handle Verbs functions.
32 #include <sys/ib/ibtl/impl/ibtl.h>
33 #include <sys/ib/ibtl/impl/ibtl_cm.h>
34 #include <sys/ib/ib_pkt_hdrs.h>
36 static char ibtl_chan[] = "ibtl_chan";
39 * RC Channel.
42 * Function:
43 * ibt_alloc_rc_channel
44 * Input:
45 * hca_hdl HCA Handle.
46 * flags Channel allocate flags.
47 * args A pointer to an ibt_rc_chan_alloc_args_t struct that
48 * specifies required channel attributes.
49 * Output:
50 * rc_chan_p The returned RC Channel handle.
51 * sizes NULL or a pointer to ibt_chan_sizes_s struct where
52 * new SendQ/RecvQ, and WR SGL sizes are returned.
53 * Returns:
54 * IBT_SUCCESS
55 * IBT_INVALID_PARAM
56 * Description:
57 * Allocates an RC communication channel that satisfies the specified
58 * channel attributes.
60 ibt_status_t
61 ibt_alloc_rc_channel(ibt_hca_hdl_t hca_hdl, ibt_chan_alloc_flags_t flags,
62 ibt_rc_chan_alloc_args_t *args, ibt_channel_hdl_t *rc_chan_p,
63 ibt_chan_sizes_t *sizes)
65 ibt_status_t retval;
66 ibt_qp_alloc_attr_t qp_attr;
67 ibt_qp_info_t qp_modify_attr;
68 ibt_channel_hdl_t chanp;
70 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_rc_channel(%p, %x, %p, %p)",
71 hca_hdl, flags, args, sizes);
73 bzero(&qp_modify_attr, sizeof (ibt_qp_info_t));
75 qp_attr.qp_alloc_flags = IBT_QP_NO_FLAGS;
76 if (flags & IBT_ACHAN_USER_MAP)
77 qp_attr.qp_alloc_flags |= IBT_QP_USER_MAP;
79 if (flags & IBT_ACHAN_DEFER_ALLOC)
80 qp_attr.qp_alloc_flags |= IBT_QP_DEFER_ALLOC;
82 if (flags & IBT_ACHAN_USES_SRQ) {
83 if (args->rc_srq == NULL) {
84 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_rc_channel: "
85 "NULL SRQ Handle specified.");
86 return (IBT_INVALID_PARAM);
88 qp_attr.qp_alloc_flags |= IBT_QP_USES_SRQ;
92 * Check if this request is to clone the channel, or to allocate a
93 * fresh one.
95 if (flags & IBT_ACHAN_CLONE) {
97 ibt_rc_chan_query_attr_t chan_attrs;
99 if (args->rc_clone_chan == NULL) {
100 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_rc_channel: "
101 "Clone Channel info not available.");
102 return (IBT_INVALID_PARAM);
103 } else if (args->rc_clone_chan->ch_qp.qp_hca != hca_hdl) {
104 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_rc_channel: "
105 "Clone Channel's & requested HCA Handle mismatch");
106 return (IBT_INVALID_PARAM);
109 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_rc_channel: "
110 "Clone <%p> - RC Channel", args->rc_clone_chan);
113 * Query the source channel to obtain the attributes
114 * so that the new channel shares the same attributes.
116 retval = ibt_query_rc_channel(args->rc_clone_chan, &chan_attrs);
117 if (retval != IBT_SUCCESS) {
118 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_rc_channel: "
119 "Failed to query the source channel: %d", retval);
120 return (retval);
123 /* Setup QP alloc attributes. */
124 qp_attr.qp_scq_hdl = chan_attrs.rc_scq;
125 qp_attr.qp_rcq_hdl = chan_attrs.rc_rcq;
126 qp_attr.qp_pd_hdl = chan_attrs.rc_pd;
127 qp_attr.qp_flags = chan_attrs.rc_flags;
128 qp_attr.qp_srq_hdl = chan_attrs.rc_srq;
130 bcopy(&chan_attrs.rc_chan_sizes, &qp_attr.qp_sizes,
131 sizeof (ibt_chan_sizes_t));
133 qp_modify_attr.qp_flags = chan_attrs.rc_control;
134 qp_modify_attr.qp_transport.rc.rc_path.cep_hca_port_num =
135 chan_attrs.rc_prim_path.cep_hca_port_num;
136 qp_modify_attr.qp_transport.rc.rc_path.cep_pkey_ix =
137 chan_attrs.rc_prim_path.cep_pkey_ix;
139 } else {
141 /* Setup QP alloc attributes. */
142 qp_attr.qp_scq_hdl = args->rc_scq;
143 qp_attr.qp_rcq_hdl = args->rc_rcq;
144 qp_attr.qp_pd_hdl = args->rc_pd;
145 qp_attr.qp_flags = args->rc_flags;
146 qp_attr.qp_srq_hdl = args->rc_srq;
148 bcopy(&args->rc_sizes, &qp_attr.qp_sizes,
149 sizeof (ibt_chan_sizes_t));
151 qp_modify_attr.qp_flags = args->rc_control;
153 if ((args->rc_hca_port_num == 0) ||
154 (args->rc_hca_port_num > IBTL_HCA2NPORTS(hca_hdl))) {
155 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_rc_channel: "
156 "Invalid port_num %d, range is (1 to %d)",
157 args->rc_hca_port_num, IBTL_HCA2NPORTS(hca_hdl));
158 return (IBT_HCA_PORT_INVALID);
160 qp_modify_attr.qp_transport.rc.rc_path.cep_hca_port_num =
161 args->rc_hca_port_num;
164 * We allocate the Channel initially with the default PKey,
165 * and the client can update this later when the channel is opened
166 * with the pkey returned from a path record lookup.
168 mutex_enter(&ibtl_clnt_list_mutex);
169 qp_modify_attr.qp_transport.rc.rc_path.cep_pkey_ix =
170 hca_hdl->ha_hca_devp->
171 hd_portinfop[args->rc_hca_port_num - 1].p_def_pkey_ix;
172 mutex_exit(&ibtl_clnt_list_mutex);
175 /* Allocate Channel and Initialize the channel. */
176 retval = ibt_alloc_qp(hca_hdl, IBT_RC_RQP, &qp_attr, sizes, NULL,
177 &chanp);
178 if (retval != IBT_SUCCESS) {
179 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_rc_channel: "
180 "Failed to allocate QP: %d", retval);
181 *rc_chan_p = NULL;
182 return (retval);
185 qp_modify_attr.qp_trans = IBT_RC_SRV;
187 /* Initialize RC Channel by transitioning it to INIT State. */
188 retval = ibt_initialize_qp(chanp, &qp_modify_attr);
189 if (retval != IBT_SUCCESS) {
190 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_rc_channel: "
191 "Failed to Initialize QP: %d", retval);
193 /* Free the QP as we failed to initialize it. */
194 (void) ibt_free_qp(chanp);
196 *rc_chan_p = NULL;
197 return (retval);
200 *rc_chan_p = chanp;
202 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_rc_channel(%p): - SUCCESS (%p)",
203 hca_hdl, chanp);
205 return (IBT_SUCCESS);
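
/*
 * Illustrative sketch (editor's example, not part of this file): a minimal
 * client-side call of ibt_alloc_rc_channel().  The HCA handle, PD and the
 * two CQ handles are assumed to have been set up elsewhere (for example via
 * ibt_open_hca(), ibt_alloc_pd() and ibt_alloc_cq()); the function and
 * variable names below are hypothetical, and IBT_ACHAN_NO_FLAGS is assumed
 * from the ibtl headers (it does not appear in this file).
 */
static ibt_status_t
example_alloc_rc_chan(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_cq_hdl_t scq, ibt_cq_hdl_t rcq, uint8_t port,
    ibt_channel_hdl_t *chan_p)
{
	ibt_rc_chan_alloc_args_t args;
	ibt_chan_sizes_t real_sizes;

	bzero(&args, sizeof (args));
	args.rc_scq = scq;			/* send completion queue */
	args.rc_rcq = rcq;			/* receive completion queue */
	args.rc_pd = pd;
	args.rc_hca_port_num = port;		/* must be 1..nports */
	args.rc_sizes.cs_sq = 128;		/* requested WQE counts */
	args.rc_sizes.cs_rq = 128;
	args.rc_sizes.cs_sq_sgl = 1;		/* SGL entries per WR */
	args.rc_sizes.cs_rq_sgl = 1;

	/* the sizes actually granted by the HCA come back in real_sizes */
	return (ibt_alloc_rc_channel(hca_hdl, IBT_ACHAN_NO_FLAGS, &args,
	    chan_p, &real_sizes));
}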
210 * Function:
211 * ibt_query_rc_channel
212 * Input:
213 * rc_chan A previously allocated channel handle.
216 * Output:
217 * chan_attrs A pointer to an ibt_rc_chan_query_attr_t struct where
218 * Channel's current attributes are returned.
219 * Returns:
220 * IBT_SUCCESS
221 * Description:
222 * Query an RC channel's attributes.
224 ibt_status_t
225 ibt_query_rc_channel(ibt_channel_hdl_t rc_chan,
226 ibt_rc_chan_query_attr_t *chan_attrs)
228 ibt_status_t retval;
229 ibt_qp_query_attr_t qp_attr;
231 IBTF_DPRINTF_L3(ibtl_chan, "ibt_query_rc_channel(%p, %p)",
232 rc_chan, chan_attrs);
234 if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
235 IBTF_DPRINTF_L2(ibtl_chan, "ibt_query_rc_channel: "
236 "type of channel (%d) is not RC", rc_chan->ch_qp.qp_type);
237 return (IBT_CHAN_SRV_TYPE_INVALID);
240 bzero(&qp_attr, sizeof (ibt_qp_query_attr_t));
242 /* Query the channel (QP) */
243 retval = ibt_query_qp(rc_chan, &qp_attr);
244 if (retval != IBT_SUCCESS) {
245 IBTF_DPRINTF_L2(ibtl_chan, "ibt_query_rc_channel: "
246 "ibt_query_qp failed on QP %p: %d", rc_chan, retval);
247 return (retval);
250 chan_attrs->rc_hca_guid = IBTL_HCA2HCAGUID(IBTL_CHAN2HCA(rc_chan));
252 chan_attrs->rc_scq = qp_attr.qp_sq_cq;
253 chan_attrs->rc_rcq = qp_attr.qp_rq_cq;
254 chan_attrs->rc_pd = rc_chan->ch_qp.qp_pd_hdl;
255 chan_attrs->rc_state = qp_attr.qp_info.qp_state;
256 chan_attrs->rc_path_mtu = qp_attr.qp_info.qp_transport.rc.rc_path_mtu;
257 chan_attrs->rc_path_retry_cnt =
258 qp_attr.qp_info.qp_transport.rc.rc_retry_cnt;
259 chan_attrs->rc_path_rnr_retry_cnt =
260 qp_attr.qp_info.qp_transport.rc.rc_rnr_retry_cnt;
261 chan_attrs->rc_min_rnr_nak =
262 qp_attr.qp_info.qp_transport.rc.rc_min_rnr_nak;
264 chan_attrs->rc_prim_path = qp_attr.qp_info.qp_transport.rc.rc_path;
265 chan_attrs->rc_alt_path = qp_attr.qp_info.qp_transport.rc.rc_alt_path;
267 chan_attrs->rc_chan_sizes.cs_sq = qp_attr.qp_info.qp_sq_sz;
268 chan_attrs->rc_chan_sizes.cs_rq = qp_attr.qp_info.qp_rq_sz;
269 chan_attrs->rc_chan_sizes.cs_sq_sgl = qp_attr.qp_sq_sgl;
270 chan_attrs->rc_chan_sizes.cs_rq_sgl = qp_attr.qp_rq_sgl;
271 chan_attrs->rc_srq = qp_attr.qp_srq;
273 chan_attrs->rc_rdma_ra_out =
274 qp_attr.qp_info.qp_transport.rc.rc_rdma_ra_out;
275 chan_attrs->rc_rdma_ra_in =
276 qp_attr.qp_info.qp_transport.rc.rc_rdma_ra_in;
278 chan_attrs->rc_flags = rc_chan->ch_qp.qp_flags;
279 chan_attrs->rc_control = qp_attr.qp_info.qp_flags;
280 chan_attrs->rc_mig_state = qp_attr.qp_info.qp_transport.rc.rc_mig_state;
282 chan_attrs->rc_qpn = qp_attr.qp_qpn & IB_QPN_MASK;
283 chan_attrs->rc_dst_qpn =
284 qp_attr.qp_info.qp_transport.rc.rc_dst_qpn & IB_QPN_MASK;
286 return (retval);
291 * Function:
292 * ibt_modify_rc_channel
293 * Input:
294 * rc_chan A previously allocated channel handle.
295 * flags Specifies which attributes in ibt_rc_chan_modify_attr_t
296 * are to be modified.
297 * attrs Attributes to be modified.
298 * Output:
299 * actual_sz On return contains the new send and receive queue sizes.
300 * Returns:
301 * IBT_SUCCESS
302 * Description:
303 * Modifies an RC channel's attributes, as specified by an
304 * ibt_cep_modify_flags_t parameter to those specified in the
305 * ibt_rc_chan_modify_attr_t structure.
307 ibt_status_t
308 ibt_modify_rc_channel(ibt_channel_hdl_t rc_chan, ibt_cep_modify_flags_t flags,
309 ibt_rc_chan_modify_attr_t *attrs, ibt_queue_sizes_t *actual_sz)
311 ibt_status_t retval;
312 ibt_qp_info_t qp_info;
313 int retries = 1;
315 IBTF_DPRINTF_L3(ibtl_chan, "ibt_modify_rc_channel(%p, %x, %p, %p)",
316 rc_chan, flags, attrs, actual_sz);
318 if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
319 IBTF_DPRINTF_L2(ibtl_chan, "ibt_modify_rc_channel: "
320 "type of channel (%d) is not RC", rc_chan->ch_qp.qp_type);
321 return (IBT_CHAN_SRV_TYPE_INVALID);
324 retry:
325 bzero(&qp_info, sizeof (ibt_qp_info_t));
327 if (flags & IBT_CEP_SET_ADDS_VECT) {
328 bcopy(&attrs->rc_prim_adds_vect,
329 &qp_info.qp_transport.rc.rc_path.cep_adds_vect,
330 sizeof (ibt_adds_vect_t));
333 qp_info.qp_trans = IBT_RC_SRV;
334 qp_info.qp_transport.rc.rc_path.cep_hca_port_num =
335 attrs->rc_prim_port_num;
336 qp_info.qp_transport.rc.rc_retry_cnt = attrs->rc_path_retry_cnt;
337 qp_info.qp_transport.rc.rc_rnr_retry_cnt =
338 attrs->rc_path_rnr_retry_cnt;
339 qp_info.qp_transport.rc.rc_rdma_ra_out = attrs->rc_rdma_ra_out;
340 qp_info.qp_transport.rc.rc_rdma_ra_in = attrs->rc_rdma_ra_in;
342 /* Current channel state must be either SQD or RTS. */
343 qp_info.qp_current_state = rc_chan->ch_current_state;
344 qp_info.qp_state = rc_chan->ch_current_state; /* No Change in State */
346 qp_info.qp_flags = attrs->rc_control;
347 qp_info.qp_sq_sz = attrs->rc_sq_sz;
348 qp_info.qp_rq_sz = attrs->rc_rq_sz;
349 qp_info.qp_transport.rc.rc_min_rnr_nak = attrs->rc_min_rnr_nak;
351 if (flags & IBT_CEP_SET_ALT_PATH) {
352 bcopy(&attrs->rc_alt_adds_vect,
353 &qp_info.qp_transport.rc.rc_alt_path.cep_adds_vect,
354 sizeof (ibt_adds_vect_t));
355 qp_info.qp_transport.rc.rc_alt_path.cep_hca_port_num =
356 attrs->rc_alt_port_num;
359 flags |= IBT_CEP_SET_STATE;
361 retval = ibt_modify_qp(rc_chan, flags, &qp_info, actual_sz);
362 if (retval != IBT_SUCCESS) {
363 IBTF_DPRINTF_L2(ibtl_chan, "ibt_modify_rc_channel: "
364 "ibt_modify_qp failed on QP %p: %d", rc_chan, retval);
365 /* give it one more shot if the old current state was stale */
366 if (qp_info.qp_current_state != rc_chan->ch_current_state &&
367 --retries >= 0 &&
368 (qp_info.qp_current_state == IBT_STATE_RTS ||
369 qp_info.qp_current_state == IBT_STATE_SQD))
370 goto retry;
373 return (retval);
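
/*
 * Illustrative sketch (editor's example, not part of this file): resizing an
 * RC channel's work queues with ibt_modify_rc_channel().  Only the
 * IBT_CEP_SET_SQ_SIZE and IBT_CEP_SET_RQ_SIZE attributes are modified; the
 * channel is assumed to be in RTS or SQD state (see the comment in the
 * function above), and the names below are hypothetical.
 */
static ibt_status_t
example_resize_rc_chan(ibt_channel_hdl_t rc_chan, uint_t new_sq, uint_t new_rq)
{
	ibt_rc_chan_modify_attr_t attrs;
	ibt_queue_sizes_t actual;

	bzero(&attrs, sizeof (attrs));
	attrs.rc_sq_sz = new_sq;
	attrs.rc_rq_sz = new_rq;

	/* 'actual' receives the queue sizes the HCA actually granted */
	return (ibt_modify_rc_channel(rc_chan,
	    IBT_CEP_SET_SQ_SIZE | IBT_CEP_SET_RQ_SIZE, &attrs, &actual));
}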
378 * UD Channel.
381 * Function:
382 * ibt_alloc_ud_channel
383 * Input:
384 * hca_hdl HCA Handle.
385 * flags Channel allocate flags.
386 * args A pointer to an ibt_ud_chan_alloc_args_t struct that
387 * specifies required channel attributes.
388 * Output:
389 * ud_chan_p The returned UD Channel handle.
390 * sizes NULL or a pointer to ibt_chan_sizes_s struct where
391 * new SendQ/RecvQ, and WR SGL sizes are returned.
392 * Returns:
393 * IBT_SUCCESS
394 * IBT_INVALID_PARAM
395 * Description:
396 * Allocates a UD channel that satisfies the specified channel attributes.
398 ibt_status_t
399 ibt_alloc_ud_channel(ibt_hca_hdl_t hca_hdl, ibt_chan_alloc_flags_t flags,
400 ibt_ud_chan_alloc_args_t *args, ibt_channel_hdl_t *ud_chan_p,
401 ibt_chan_sizes_t *sizes)
403 ibt_status_t retval;
404 ibt_qp_alloc_attr_t qp_attr;
405 ibt_qp_info_t qp_modify_attr;
406 ibt_channel_hdl_t chanp;
407 ibt_chan_alloc_flags_t variant_flags;
409 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_ud_channel(%p, %x, %p, %p)",
410 hca_hdl, flags, args, sizes);
412 if (flags & IBT_ACHAN_USES_FEXCH) {
413 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel: "
414 "FEXCH QPs are allocated by ibt_alloc_ud_channel_range()");
415 return (IBT_CHAN_SRV_TYPE_INVALID);
418 bzero(&qp_modify_attr, sizeof (ibt_qp_info_t));
419 bzero(&qp_attr, sizeof (ibt_qp_alloc_attr_t));
420 qp_attr.qp_alloc_flags = IBT_QP_NO_FLAGS;
422 /* allow at most one of these flags */
423 variant_flags = flags & (IBT_ACHAN_USER_MAP | IBT_ACHAN_USES_RSS |
424 IBT_ACHAN_USES_RFCI | IBT_ACHAN_USES_FCMD | IBT_ACHAN_CLONE);
425 switch (variant_flags) {
426 case IBT_ACHAN_USER_MAP:
427 qp_attr.qp_alloc_flags |= IBT_QP_USER_MAP;
428 break;
429 case IBT_ACHAN_USES_RSS:
430 qp_attr.qp_alloc_flags |= IBT_QP_USES_RSS;
431 qp_modify_attr.qp_transport.ud.ud_rss = args->ud_rss;
432 break;
433 case IBT_ACHAN_USES_RFCI:
434 qp_attr.qp_alloc_flags |= IBT_QP_USES_RFCI;
435 qp_modify_attr.qp_transport.ud.ud_fc = qp_attr.qp_fc =
436 args->ud_fc;
437 break;
438 case IBT_ACHAN_USES_FCMD:
439 qp_attr.qp_alloc_flags |= IBT_QP_USES_FCMD;
440 qp_modify_attr.qp_transport.ud.ud_fc = qp_attr.qp_fc =
441 args->ud_fc;
442 break;
443 case IBT_ACHAN_CLONE:
444 case 0:
445 break;
446 default:
447 return (IBT_INVALID_PARAM);
450 if (flags & IBT_ACHAN_DEFER_ALLOC)
451 qp_attr.qp_alloc_flags |= IBT_QP_DEFER_ALLOC;
453 if (flags & IBT_ACHAN_USES_SRQ) {
454 if (args->ud_srq == NULL) {
455 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel: "
456 "NULL SRQ Handle specified.");
457 return (IBT_INVALID_PARAM);
459 if (flags & IBT_ACHAN_USES_RSS) {
460 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel: "
461 "SRQ not allowed with RSS.");
462 return (IBT_INVALID_PARAM);
464 qp_attr.qp_alloc_flags |= IBT_QP_USES_SRQ;
468 * Check if this request is to clone the channel, or to allocate a
469 * fresh one.
471 if (flags & IBT_ACHAN_CLONE) {
473 ibt_ud_chan_query_attr_t chan_attrs;
475 if (args->ud_clone_chan == NULL) {
476 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel: "
477 "Clone Channel info not available.");
478 return (IBT_INVALID_PARAM);
479 } else if (args->ud_clone_chan->ch_qp.qp_hca != hca_hdl) {
480 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel: "
481 "Clone Channel and HCA Handle mismatch");
482 return (IBT_INVALID_PARAM);
485 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_ud_channel: "
486 "Clone <%p> - UD Channel", args->ud_clone_chan);
488 retval = ibt_query_ud_channel(args->ud_clone_chan, &chan_attrs);
489 if (retval != IBT_SUCCESS) {
490 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel: "
491 "Failed to Query the source channel: %d", retval);
492 return (retval);
495 /* Setup QP alloc attributes. */
496 qp_attr.qp_scq_hdl = chan_attrs.ud_scq;
497 qp_attr.qp_rcq_hdl = chan_attrs.ud_rcq;
498 qp_attr.qp_pd_hdl = chan_attrs.ud_pd;
499 qp_attr.qp_flags = chan_attrs.ud_flags;
500 qp_attr.qp_srq_hdl = chan_attrs.ud_srq;
502 bcopy(&chan_attrs.ud_chan_sizes, &qp_attr.qp_sizes,
503 sizeof (ibt_chan_sizes_t));
505 qp_modify_attr.qp_transport.ud.ud_port =
506 chan_attrs.ud_hca_port_num;
507 qp_modify_attr.qp_transport.ud.ud_qkey = chan_attrs.ud_qkey;
508 qp_modify_attr.qp_transport.ud.ud_pkey_ix =
509 chan_attrs.ud_pkey_ix;
510 } else {
511 ib_pkey_t tmp_pkey;
513 /* Setup QP alloc attributes. */
514 qp_attr.qp_scq_hdl = args->ud_scq;
515 qp_attr.qp_rcq_hdl = args->ud_rcq;
516 qp_attr.qp_pd_hdl = args->ud_pd;
517 qp_attr.qp_flags = args->ud_flags;
518 qp_attr.qp_srq_hdl = args->ud_srq;
520 bcopy(&args->ud_sizes, &qp_attr.qp_sizes,
521 sizeof (ibt_chan_sizes_t));
523 qp_modify_attr.qp_transport.ud.ud_port = args->ud_hca_port_num;
524 qp_modify_attr.qp_transport.ud.ud_qkey = args->ud_qkey;
526 /* Validate input hca_port_num and pkey_ix values. */
527 if ((retval = ibt_index2pkey(hca_hdl, args->ud_hca_port_num,
528 args->ud_pkey_ix, &tmp_pkey)) != IBT_SUCCESS) {
529 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel: "
530 "ibt_index2pkey failed, status: %d", retval);
531 *ud_chan_p = NULL;
532 return (retval);
534 qp_modify_attr.qp_transport.ud.ud_pkey_ix = args->ud_pkey_ix;
537 /* Allocate Channel and Initialize the channel. */
538 retval = ibt_alloc_qp(hca_hdl, IBT_UD_RQP, &qp_attr, sizes, NULL,
539 &chanp);
540 if (retval != IBT_SUCCESS) {
541 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel: "
542 "Failed to allocate QP: %d", retval);
543 *ud_chan_p = NULL;
544 return (retval);
547 /* Initialize UD Channel by transitioning it to RTS State. */
548 qp_modify_attr.qp_trans = IBT_UD_SRV;
549 qp_modify_attr.qp_flags = IBT_CEP_NO_FLAGS;
550 qp_modify_attr.qp_transport.ud.ud_sq_psn = 0;
552 retval = ibt_initialize_qp(chanp, &qp_modify_attr);
553 if (retval != IBT_SUCCESS) {
554 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel: "
555 "Failed to Initialize QP: %d", retval);
557 /* Free the QP as we failed to initialize it. */
558 (void) ibt_free_qp(chanp);
560 *ud_chan_p = NULL;
561 return (retval);
564 *ud_chan_p = chanp;
566 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_ud_channel(%p): - SUCCESS (%p)",
567 hca_hdl, chanp);
569 return (IBT_SUCCESS);
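
/*
 * Illustrative sketch (editor's example, not part of this file): a minimal
 * client-side call of ibt_alloc_ud_channel().  The HCA handle, PD and CQ
 * handles are assumed to have been obtained elsewhere; the pkey index is
 * assumed valid for the port (it is checked via ibt_index2pkey() above).
 * The names below are hypothetical, and IBT_ACHAN_NO_FLAGS is assumed from
 * the ibtl headers.
 */
static ibt_status_t
example_alloc_ud_chan(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_cq_hdl_t scq, ibt_cq_hdl_t rcq, uint8_t port, uint16_t pkey_ix,
    ib_qkey_t qkey, ibt_channel_hdl_t *chan_p)
{
	ibt_ud_chan_alloc_args_t args;
	ibt_chan_sizes_t real_sizes;

	bzero(&args, sizeof (args));
	args.ud_scq = scq;
	args.ud_rcq = rcq;
	args.ud_pd = pd;
	args.ud_hca_port_num = port;
	args.ud_pkey_ix = pkey_ix;
	args.ud_qkey = qkey;
	args.ud_sizes.cs_sq = 64;
	args.ud_sizes.cs_rq = 64;
	args.ud_sizes.cs_sq_sgl = 1;
	args.ud_sizes.cs_rq_sgl = 1;

	return (ibt_alloc_ud_channel(hca_hdl, IBT_ACHAN_NO_FLAGS, &args,
	    chan_p, &real_sizes));
}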
574 * Function:
575 * ibt_alloc_ud_channel_range
576 * Input:
577 * hca_hdl HCA Handle.
578 * log2 Log (base 2) of the number of QPs to allocate.
579 * flags Channel allocate flags.
580 * args A pointer to an ibt_ud_chan_alloc_args_t struct that
581 * specifies required channel attributes.
582 * send_cq A pointer to an array of CQ handles.
583 * recv_cq A pointer to an array of CQ handles.
584 * Output:
585 * base_qpn_p The returned QP number of the base QP.
586 * ud_chan_p The returned UD Channel handle.
587 * sizes NULL or a pointer to ibt_chan_sizes_s struct where
588 * new SendQ/RecvQ, and WR SGL sizes are returned.
589 * Returns:
590 * IBT_SUCCESS
591 * IBT_INVALID_PARAM
592 * Description:
593 * Allocate a range of UD channels that satisfy the specified channel attributes.
595 ibt_status_t
596 ibt_alloc_ud_channel_range(ibt_hca_hdl_t hca_hdl, uint_t log2,
597 ibt_chan_alloc_flags_t flags, ibt_ud_chan_alloc_args_t *args,
598 ibt_cq_hdl_t *send_cq, ibt_cq_hdl_t *recv_cq, ib_qpn_t *base_qpn_p,
599 ibt_channel_hdl_t *ud_chan_p, ibt_chan_sizes_t *sizes)
601 ibt_status_t retval;
602 ibt_qp_alloc_attr_t qp_attr;
603 ibt_qp_info_t qp_modify_attr;
604 ibtl_channel_t *chanp;
605 ibt_cq_hdl_t ibt_cq_hdl;
606 ibc_cq_hdl_t *ibc_send_cq, *ibc_recv_cq;
607 ibc_qp_hdl_t *ibc_qp_hdl_p;
608 int i, n = 1 << log2;
609 ib_pkey_t tmp_pkey;
612 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_ud_channel_range(%p, %x, %p, %p)",
613 hca_hdl, flags, args, sizes);
615 bzero(&qp_modify_attr, sizeof (ibt_qp_info_t));
617 qp_attr.qp_alloc_flags = IBT_QP_NO_FLAGS;
619 if (flags & IBT_ACHAN_CLONE)
620 return (IBT_INVALID_PARAM);
622 if (flags & IBT_ACHAN_USER_MAP)
623 qp_attr.qp_alloc_flags |= IBT_QP_USER_MAP;
625 if (flags & IBT_ACHAN_DEFER_ALLOC)
626 qp_attr.qp_alloc_flags |= IBT_QP_DEFER_ALLOC;
628 if (flags & IBT_ACHAN_USES_SRQ) {
629 if (args->ud_srq == NULL) {
630 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel_range: "
631 "NULL SRQ Handle specified.");
632 return (IBT_INVALID_PARAM);
634 qp_attr.qp_alloc_flags |= IBT_QP_USES_SRQ;
637 if (flags & IBT_ACHAN_USES_FEXCH) {
638 qp_attr.qp_alloc_flags |= IBT_QP_USES_FEXCH;
639 qp_attr.qp_fc = args->ud_fc;
640 qp_modify_attr.qp_transport.ud.ud_fc = qp_attr.qp_fc =
641 args->ud_fc;
643 if (flags & IBT_ACHAN_USES_RSS) {
644 if (log2 >
645 hca_hdl->ha_hca_devp->hd_hca_attr->hca_rss_max_log2_table)
646 return (IBT_INSUFF_RESOURCE);
647 qp_attr.qp_alloc_flags |= IBT_QP_USES_RSS;
650 ibc_send_cq = kmem_alloc(sizeof (ibc_cq_hdl_t) << log2, KM_SLEEP);
651 ibc_recv_cq = kmem_alloc(sizeof (ibc_cq_hdl_t) << log2, KM_SLEEP);
652 ibc_qp_hdl_p = kmem_alloc(sizeof (ibc_qp_hdl_t) << log2, KM_SLEEP);
654 for (i = 0; i < 1 << log2; i++) {
655 ud_chan_p[i] = kmem_zalloc(sizeof (ibtl_channel_t), KM_SLEEP);
656 ibt_cq_hdl = send_cq[i];
657 ibc_send_cq[i] = ibt_cq_hdl ? ibt_cq_hdl->cq_ibc_cq_hdl : NULL;
658 ibt_cq_hdl = recv_cq[i];
659 ibc_recv_cq[i] = ibt_cq_hdl ? ibt_cq_hdl->cq_ibc_cq_hdl : NULL;
662 /* Setup QP alloc attributes. */
663 qp_attr.qp_pd_hdl = args->ud_pd;
664 qp_attr.qp_flags = args->ud_flags;
665 qp_attr.qp_srq_hdl = args->ud_srq;
667 bcopy(&args->ud_sizes, &qp_attr.qp_sizes,
668 sizeof (ibt_chan_sizes_t));
670 qp_modify_attr.qp_transport.ud.ud_port = args->ud_hca_port_num;
671 qp_modify_attr.qp_transport.ud.ud_qkey = args->ud_qkey;
673 /* Validate input hca_port_num and pkey_ix values. */
674 if ((retval = ibt_index2pkey(hca_hdl, args->ud_hca_port_num,
675 args->ud_pkey_ix, &tmp_pkey)) != IBT_SUCCESS) {
676 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel_range:"
677 " ibt_index2pkey failed, status: %d", retval);
678 goto fail;
680 qp_modify_attr.qp_transport.ud.ud_pkey_ix = args->ud_pkey_ix;
682 /* Allocate Channel and Initialize the channel. */
683 retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_qp_range)(
684 IBTL_HCA2CIHCA(hca_hdl), log2, (ibtl_qp_hdl_t *)ud_chan_p,
685 IBT_UD_RQP, &qp_attr, sizes, ibc_send_cq, ibc_recv_cq,
686 base_qpn_p, ibc_qp_hdl_p);
687 if (retval != IBT_SUCCESS) {
688 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel_range: "
689 "Failed to allocate QPs: %d", retval);
690 goto fail;
693 /* Initialize UD Channel by transitioning it to RTS State. */
694 qp_modify_attr.qp_trans = IBT_UD_SRV;
695 qp_modify_attr.qp_flags = IBT_CEP_NO_FLAGS;
696 qp_modify_attr.qp_transport.ud.ud_sq_psn = 0;
698 for (i = 0; i < n; i++) {
699 /* Initialize the internal QP struct. */
700 chanp = ud_chan_p[i];
701 chanp->ch_qp.qp_type = IBT_UD_SRV;
702 chanp->ch_qp.qp_hca = hca_hdl;
703 chanp->ch_qp.qp_ibc_qp_hdl = ibc_qp_hdl_p[i];
704 chanp->ch_qp.qp_send_cq = send_cq[i];
705 chanp->ch_qp.qp_recv_cq = recv_cq[i];
706 chanp->ch_current_state = IBT_STATE_RESET;
707 mutex_init(&chanp->ch_cm_mutex, NULL, MUTEX_DEFAULT, NULL);
708 cv_init(&chanp->ch_cm_cv, NULL, CV_DEFAULT, NULL);
710 retval = ibt_initialize_qp(chanp, &qp_modify_attr);
711 if (retval != IBT_SUCCESS) {
712 int j;
714 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel_range:"
715 " Failed to Initialize QP: %d", retval);
717 /* Free the QP as we failed to initialize it. */
718 (void) ibt_free_qp(chanp);
719 for (j = 0; j < i; j++) {
720 chanp = ud_chan_p[j];
721 (void) ibt_free_qp(chanp);
723 goto fail;
727 * The IBTA spec does not include the signal type or PD on a QP
728 * query operation. In order to implement the "CLONE" feature
729 * we need to cache these values.
731 chanp->ch_qp.qp_flags = qp_attr.qp_flags;
732 chanp->ch_qp.qp_pd_hdl = qp_attr.qp_pd_hdl;
736 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel_range(%p): SUCCESS", hca_hdl);
738 atomic_add_32(&hca_hdl->ha_qp_cnt, n);
740 retval = IBT_SUCCESS;
742 fail:
743 kmem_free(ibc_send_cq, sizeof (ibc_cq_hdl_t) << log2);
744 kmem_free(ibc_recv_cq, sizeof (ibc_cq_hdl_t) << log2);
745 kmem_free(ibc_qp_hdl_p, sizeof (ibc_qp_hdl_t) << log2);
746 if (retval != IBT_SUCCESS) {
747 for (i = 0; i < 1 << log2; i++) {
748 kmem_free(ud_chan_p[i], sizeof (ibtl_channel_t));
749 ud_chan_p[i] = NULL;
751 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_channel_range(%p): "
752 "failed: %d", retval);
754 return (retval);
759 * Function:
760 * ibt_query_ud_channel
761 * Input:
762 * ud_chan A previously allocated UD channel handle.
763 * Output:
764 * chan_attrs Channel's current attributes.
765 * Returns:
766 * IBT_SUCCESS
767 * Description:
768 * Query a UD channel's attributes.
770 ibt_status_t
771 ibt_query_ud_channel(ibt_channel_hdl_t ud_chan,
772 ibt_ud_chan_query_attr_t *ud_chan_attrs)
774 ibt_status_t retval;
775 ibt_qp_query_attr_t qp_attr;
777 IBTF_DPRINTF_L3(ibtl_chan, "ibt_query_ud_channel(%p, %p)",
778 ud_chan, ud_chan_attrs);
780 if (ud_chan->ch_qp.qp_type != IBT_UD_SRV) {
781 IBTF_DPRINTF_L2(ibtl_chan, "ibt_query_ud_channel: "
782 "type of channel (%d) is not UD", ud_chan->ch_qp.qp_type);
783 return (IBT_CHAN_SRV_TYPE_INVALID);
786 bzero(&qp_attr, sizeof (ibt_qp_query_attr_t));
788 /* Query the channel (QP) */
789 retval = ibt_query_qp(ud_chan, &qp_attr);
790 if (retval != IBT_SUCCESS) {
791 IBTF_DPRINTF_L2(ibtl_chan, "ibt_query_ud_channel: "
792 "ibt_query_qp failed on QP %p: %d", ud_chan, retval);
793 return (retval);
796 ud_chan_attrs->ud_qpn = qp_attr.qp_qpn & IB_QPN_MASK;
797 ud_chan_attrs->ud_hca_guid = IBTL_HCA2HCAGUID(IBTL_CHAN2HCA(ud_chan));
799 ud_chan_attrs->ud_scq = qp_attr.qp_sq_cq;
800 ud_chan_attrs->ud_rcq = qp_attr.qp_rq_cq;
801 ud_chan_attrs->ud_pd = ud_chan->ch_qp.qp_pd_hdl;
803 ud_chan_attrs->ud_hca_port_num =
804 qp_attr.qp_info.qp_transport.ud.ud_port;
806 ud_chan_attrs->ud_state = qp_attr.qp_info.qp_state;
807 ud_chan_attrs->ud_pkey_ix = qp_attr.qp_info.qp_transport.ud.ud_pkey_ix;
808 ud_chan_attrs->ud_qkey = qp_attr.qp_info.qp_transport.ud.ud_qkey;
810 ud_chan_attrs->ud_chan_sizes.cs_sq = qp_attr.qp_info.qp_sq_sz;
811 ud_chan_attrs->ud_chan_sizes.cs_rq = qp_attr.qp_info.qp_rq_sz;
812 ud_chan_attrs->ud_chan_sizes.cs_sq_sgl = qp_attr.qp_sq_sgl;
813 ud_chan_attrs->ud_chan_sizes.cs_rq_sgl = qp_attr.qp_rq_sgl;
814 ud_chan_attrs->ud_srq = qp_attr.qp_srq;
816 ud_chan_attrs->ud_flags = ud_chan->ch_qp.qp_flags;
818 ud_chan_attrs->ud_query_fc = qp_attr.qp_query_fexch;
820 return (retval);
825 * Function:
826 * ibt_modify_ud_channel
827 * Input:
828 * ud_chan A previously allocated UD channel handle.
829 * flags Specifies which attributes in ibt_ud_chan_modify_attr_t
830 * are to be modified.
831 * attrs Attributes to be modified.
832 * Output:
833 * actual_sz On return contains the new send and receive queue sizes.
834 * Returns:
835 * IBT_SUCCESS
836 * Description:
837 * Modifies a UD channel's attributes, as specified by an
838 * ibt_cep_modify_flags_t parameter to those specified in the
839 * ibt_ud_chan_modify_attr_t structure.
841 ibt_status_t
842 ibt_modify_ud_channel(ibt_channel_hdl_t ud_chan, ibt_cep_modify_flags_t flags,
843 ibt_ud_chan_modify_attr_t *attrs, ibt_queue_sizes_t *actual_sz)
845 ibt_status_t retval;
846 ibt_qp_info_t qp_info;
847 ibt_cep_modify_flags_t good_flags;
848 int retries = 1;
850 IBTF_DPRINTF_L3(ibtl_chan, "ibt_modify_ud_channel(%p, %x, %p, %p)",
851 ud_chan, flags, attrs, actual_sz);
853 if (ud_chan->ch_qp.qp_type != IBT_UD_SRV) {
854 IBTF_DPRINTF_L2(ibtl_chan, "ibt_modify_ud_channel: "
855 "type of channel (%d) is not UD", ud_chan->ch_qp.qp_type);
856 return (IBT_CHAN_SRV_TYPE_INVALID);
859 good_flags = IBT_CEP_SET_SQ_SIZE | IBT_CEP_SET_RQ_SIZE |
860 IBT_CEP_SET_QKEY;
862 if (flags & ~good_flags) {
863 IBTF_DPRINTF_L2(ibtl_chan, "ibt_modify_ud_channel: "
864 "Invalid Modify Flags: %x", flags);
865 return (IBT_INVALID_PARAM);
868 retry:
869 bzero(&qp_info, sizeof (ibt_qp_info_t));
871 qp_info.qp_state = ud_chan->ch_current_state; /* No Change in State */
872 qp_info.qp_current_state = ud_chan->ch_current_state;
873 qp_info.qp_flags = IBT_CEP_NO_FLAGS;
875 qp_info.qp_sq_sz = attrs->ud_sq_sz;
876 qp_info.qp_rq_sz = attrs->ud_rq_sz;
877 qp_info.qp_trans = IBT_UD_SRV;
878 qp_info.qp_transport.ud.ud_qkey = attrs->ud_qkey;
880 flags |= IBT_CEP_SET_STATE;
882 retval = ibt_modify_qp(ud_chan, flags, &qp_info, actual_sz);
883 if (retval != IBT_SUCCESS) {
884 IBTF_DPRINTF_L2(ibtl_chan, "ibt_modify_ud_channel: "
885 "ibt_modify_qp failed on QP %p: %d", ud_chan, retval);
886 /* give it one more shot if the old current state was stale */
887 if (qp_info.qp_current_state != ud_chan->ch_current_state &&
888 --retries >= 0 &&
889 (qp_info.qp_current_state == IBT_STATE_RTS ||
890 qp_info.qp_current_state == IBT_STATE_SQD))
891 goto retry;
894 return (retval);
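
/*
 * Illustrative sketch (editor's example, not part of this file): change only
 * the Q_Key of an existing UD channel.  The queue sizes are left untouched,
 * so only IBT_CEP_SET_QKEY is passed; the names below are hypothetical.
 */
static ibt_status_t
example_set_ud_qkey(ibt_channel_hdl_t ud_chan, ib_qkey_t new_qkey)
{
	ibt_ud_chan_modify_attr_t attrs;
	ibt_queue_sizes_t actual;

	bzero(&attrs, sizeof (attrs));
	attrs.ud_qkey = new_qkey;

	return (ibt_modify_ud_channel(ud_chan, IBT_CEP_SET_QKEY, &attrs,
	    &actual));
}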
899 * Function:
900 * ibt_recover_ud_channel
901 * Input:
902 * ud_chan A UD channel handle which is in SQ Error state.
903 * Output:
904 * none.
905 * Returns:
906 * IBT_SUCCESS
907 * IBT_CHAN_HDL_INVALID
908 * IBT_CHAN_SRV_TYPE_INVALID
909 * IBT_CHAN_STATE_INVALID
910 * Description:
911 * Recover a UD Channel which has transitioned to SQ Error state. The
912 * ibt_recover_ud_channel() transitions the channel from SQ Error state
913 * to Ready-To-Send channel state.
915 * If a work request posted to a UD channel's send queue completes with
916 * an error (see ibt_wc_status_t), the channel gets transitioned to SQ
917 * Error state. In order to reuse this channel, ibt_recover_ud_channel()
918 * can be used to recover the channel to a usable (Ready-to-Send) state.
920 ibt_status_t
921 ibt_recover_ud_channel(ibt_channel_hdl_t ud_chan)
923 ibt_qp_info_t modify_attr;
924 ibt_status_t retval;
926 IBTF_DPRINTF_L3(ibtl_chan, "ibt_recover_ud_channel(%p)", ud_chan);
928 if (ud_chan->ch_qp.qp_type != IBT_UD_SRV) {
929 IBTF_DPRINTF_L2(ibtl_chan, "ibt_recover_ud_channel: "
930 "Called for non-UD channels<%d>", ud_chan->ch_qp.qp_type);
931 return (IBT_CHAN_SRV_TYPE_INVALID);
934 bzero(&modify_attr, sizeof (ibt_qp_info_t));
936 /* Set the channel state to RTS, to activate the send processing. */
937 modify_attr.qp_state = IBT_STATE_RTS;
938 modify_attr.qp_trans = ud_chan->ch_qp.qp_type;
939 modify_attr.qp_current_state = IBT_STATE_SQE;
941 retval = ibt_modify_qp(ud_chan, IBT_CEP_SET_STATE, &modify_attr, NULL);
943 if (retval != IBT_SUCCESS)
944 IBTF_DPRINTF_L2(ibtl_chan, "ibt_recover_ud_channel: "
945 "ibt_modify_qp failed on qp %p: status = %d",
946 ud_chan, retval);
948 return (retval);
953 * Function:
954 * ibt_flush_channel
955 * Input:
956 * chan The opaque channel handle returned in a previous call
957 * to ibt_alloc_ud_channel() or ibt_alloc_rc_channel().
958 * Output:
959 * none.
960 * Returns:
961 * IBT_SUCCESS
962 * Description:
963 * Flush the specified channel. Outstanding work requests are flushed
964 * so that the client can do the associated clean up. After that, the
965 * client will usually deregister the previously registered memory,
966 * then free the channel by calling ibt_free_channel(). This function
967 * applies to UD channels, or to RC channels that have not successfully
968 * been opened.
970 ibt_status_t
971 ibt_flush_channel(ibt_channel_hdl_t chan)
973 ibt_status_t retval;
975 IBTF_DPRINTF_L3(ibtl_chan, "ibt_flush_channel(%p)", chan);
977 retval = ibt_flush_qp(chan);
978 if (retval != IBT_SUCCESS) {
979 IBTF_DPRINTF_L2(ibtl_chan, "ibt_flush_channel: "
980 "ibt_flush_qp failed on QP %p: %d", chan, retval);
983 return (retval);
988 * Function:
989 * ibt_free_channel
990 * Input:
991 * chan The opaque channel handle returned in a previous
992 * call to ibt_alloc_{ud,rc}_channel().
993 * Output:
994 * none.
995 * Returns:
996 * IBT_SUCCESS
997 * Description:
998 * Releases the resources associated with the specified channel.
999 * It is assumed that the channel has been closed before this call.
1001 ibt_status_t
1002 ibt_free_channel(ibt_channel_hdl_t chan)
1004 return (ibt_free_qp(chan));
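
/*
 * Illustrative sketch (editor's example, not part of this file): typical
 * teardown of a channel.  Flushing completes all outstanding work requests
 * with a flush error so the client can reclaim its buffers (after draining
 * the CQs) before the channel itself is freed.  Names are hypothetical.
 */
static void
example_teardown_channel(ibt_channel_hdl_t chan)
{
	(void) ibt_flush_channel(chan);

	/* ... poll the send/recv CQs until all flushed WRs are reaped ... */

	(void) ibt_free_channel(chan);
}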
1009 * UD Destination.
1012 * Function:
1013 * ibt_alloc_ud_dest
1014 * Input:
1015 * hca_hdl HCA Handle.
1016 * pd Protection Domain
1017 * Output:
1018 * ud_dest_p Address to store the returned UD destination handle.
1019 * Returns:
1020 * IBT_SUCCESS
1021 * Description:
1022 * Allocate a UD destination handle. The returned UD destination handle
1023 * has no useful contents, but is usable after calling ibt_modify_ud_dest,
1024 * ibt_modify_reply_ud_dest, or ibt_open_ud_dest.
1026 ibt_status_t
1027 ibt_alloc_ud_dest(ibt_hca_hdl_t hca_hdl, ibt_ud_dest_flags_t flags,
1028 ibt_pd_hdl_t pd, ibt_ud_dest_hdl_t *ud_dest_p)
1030 ibt_status_t retval;
1031 ibt_ud_dest_t *ud_destp;
1032 ibt_ah_hdl_t ah;
1033 ibt_adds_vect_t adds_vect;
1035 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_ud_dest(%p, %x, %p)",
1036 hca_hdl, flags, pd);
1038 bzero(&adds_vect, sizeof (adds_vect));
1039 adds_vect.av_port_num = 1;
1040 adds_vect.av_srate = IBT_SRATE_1X; /* assume the minimum */
1041 retval = ibt_alloc_ah(hca_hdl, flags, pd, &adds_vect, &ah);
1042 if (retval != IBT_SUCCESS) {
1043 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ud_dest: "
1044 "Address Handle Allocation failed: %d", retval);
1045 *ud_dest_p = NULL;
1046 return (retval);
1048 ud_destp = kmem_alloc(sizeof (*ud_destp), KM_SLEEP);
1049 ud_destp->ud_ah = ah;
1050 ud_destp->ud_dest_hca = hca_hdl;
1051 ud_destp->ud_dst_qpn = 0;
1052 ud_destp->ud_qkey = 0;
1053 *ud_dest_p = ud_destp;
1054 return (IBT_SUCCESS);
1058 * Function:
1059 * ibt_query_ud_dest
1060 * Input:
1061 * ud_dest A previously allocated UD destination handle.
1062 * Output:
1063 * dest_attrs UD destination's current attributes.
1064 * Returns:
1065 * IBT_SUCCESS
1066 * Description:
1067 * Query a UD destination's attributes.
1069 ibt_status_t
1070 ibt_query_ud_dest(ibt_ud_dest_hdl_t ud_dest,
1071 ibt_ud_dest_query_attr_t *dest_attrs)
1073 ibt_status_t retval;
1075 ASSERT(dest_attrs != NULL);
1077 /* Query Address Handle */
1078 retval = ibt_query_ah(ud_dest->ud_dest_hca, ud_dest->ud_ah,
1079 &dest_attrs->ud_pd, &dest_attrs->ud_addr_vect);
1081 if (retval != IBT_SUCCESS) {
1082 IBTF_DPRINTF_L2(ibtl_chan, "ibt_query_ud_dest: "
1083 "Failed to Query Address Handle: %d", retval);
1084 return (retval);
1087 /* Update the return struct. */
1088 dest_attrs->ud_hca_hdl = ud_dest->ud_dest_hca;
1089 dest_attrs->ud_dst_qpn = ud_dest->ud_dst_qpn;
1090 dest_attrs->ud_qkey = ud_dest->ud_qkey;
1092 return (retval);
1096 * Function:
1097 * ibt_modify_ud_dest
1098 * Input:
1099 * ud_dest A previously allocated UD destination handle
1100 * as returned by ibt_alloc_ud_dest().
1101 * qkey QKey of the destination.
1102 * dest_qpn QPN of the destination.
1103 * adds_vect NULL or Address Vector for the destination.
1105 * Output:
1106 * none.
1107 * Returns:
1108 * IBT_SUCCESS
1109 * Description:
1110 * Modify a previously allocated UD destination handle from the
1111 * arguments supplied by the caller.
1113 ibt_status_t
1114 ibt_modify_ud_dest(ibt_ud_dest_hdl_t ud_dest, ib_qkey_t qkey,
1115 ib_qpn_t dest_qpn, ibt_adds_vect_t *adds_vect)
1117 ibt_status_t retval;
1119 IBTF_DPRINTF_L3(ibtl_chan, "ibt_modify_ud_dest(%p, %x, %x, %p) ",
1120 ud_dest, qkey, dest_qpn, adds_vect);
1122 if ((adds_vect != NULL) &&
1123 (retval = ibt_modify_ah(ud_dest->ud_dest_hca, ud_dest->ud_ah,
1124 adds_vect)) != IBT_SUCCESS) {
1125 IBTF_DPRINTF_L2(ibtl_chan, "ibt_modify_ud_dest: "
1126 "ibt_modify_ah() failed: status = %d", retval);
1127 return (retval);
1129 ud_dest->ud_dst_qpn = dest_qpn;
1130 ud_dest->ud_qkey = qkey;
1131 return (IBT_SUCCESS);
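
/*
 * Illustrative sketch (editor's example, not part of this file): allocate a
 * UD destination and point it at a known remote QP using a LID-routed
 * address vector.  The HCA handle and PD are assumed valid;
 * IBT_UD_DEST_NO_FLAGS is assumed from the ibtl headers, and the names
 * below are hypothetical.
 */
static ibt_status_t
example_make_ud_dest(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ib_lid_t remote_lid, uint8_t port, ib_qpn_t remote_qpn,
    ib_qkey_t remote_qkey, ibt_ud_dest_hdl_t *dest_p)
{
	ibt_adds_vect_t av;
	ibt_status_t status;

	status = ibt_alloc_ud_dest(hca_hdl, IBT_UD_DEST_NO_FLAGS, pd, dest_p);
	if (status != IBT_SUCCESS)
		return (status);

	bzero(&av, sizeof (av));
	av.av_send_grh = B_FALSE;	/* local (LID-routed) destination */
	av.av_dlid = remote_lid;
	av.av_port_num = port;
	av.av_srate = IBT_SRATE_1X;	/* assume the minimum, as above */

	/* fill in the remote QPN, Q_Key and path for the destination */
	status = ibt_modify_ud_dest(*dest_p, remote_qkey, remote_qpn, &av);
	if (status != IBT_SUCCESS) {
		(void) ibt_free_ud_dest(*dest_p);
		*dest_p = NULL;
	}
	return (status);
}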
1135 * Function:
1136 * ibt_free_ud_dest
1137 * Input:
1138 * ud_dest The opaque destination handle returned in a previous
1139 * call to ibt_alloc_ud_dest() or ibt_alloc_mcg_dest().
1140 * Output:
1141 * none.
1142 * Returns:
1143 * IBT_SUCCESS
1144 * Description:
1145 * Releases the resources associated with the specified destination
1146 * handle.
1148 ibt_status_t
1149 ibt_free_ud_dest(ibt_ud_dest_hdl_t ud_dest)
1151 ibt_status_t retval;
1153 retval = ibt_free_ah(ud_dest->ud_dest_hca, ud_dest->ud_ah);
1154 if (retval != IBT_SUCCESS) {
1155 IBTF_DPRINTF_L2(ibtl_chan, "ibt_free_ud_dest: "
1156 "Address Handle free failed: %d", retval);
1157 return (retval);
1159 kmem_free(ud_dest, sizeof (*ud_dest));
1160 return (IBT_SUCCESS);
1163 static ibt_status_t
1164 ibtl_find_sgid_ix(ib_gid_t *sgid, ibt_channel_hdl_t ud_chan, uint8_t port,
1165 uint_t *sgid_ix_p)
1167 ibtl_hca_devinfo_t *hca_devp = ud_chan->ch_qp.qp_hca->ha_hca_devp;
1168 ib_gid_t *sgidp;
1169 uint_t i;
1170 uint_t sgid_tbl_sz;
1172 if (port == 0 || port > hca_devp->hd_hca_attr->hca_nports ||
1173 sgid->gid_prefix == 0 || sgid->gid_guid == 0) {
1174 *sgid_ix_p = 0;
1175 return (IBT_INVALID_PARAM);
1177 mutex_enter(&ibtl_clnt_list_mutex);
1178 sgidp = &hca_devp->hd_portinfop[port - 1].p_sgid_tbl[0];
1179 sgid_tbl_sz = hca_devp->hd_portinfop[port - 1].p_sgid_tbl_sz;
1180 for (i = 0; i < sgid_tbl_sz; i++, sgidp++) {
1181 if ((sgid->gid_guid != sgidp->gid_guid) ||
1182 (sgid->gid_prefix != sgidp->gid_prefix))
1183 continue;
1184 mutex_exit(&ibtl_clnt_list_mutex);
1185 *sgid_ix_p = i;
1186 return (IBT_SUCCESS);
1188 mutex_exit(&ibtl_clnt_list_mutex);
1189 *sgid_ix_p = 0;
1190 return (IBT_INVALID_PARAM);
1194 * Function:
1195 * ibt_modify_reply_ud_dest
1196 * Input:
1197 * ud_dest A previously allocated UD reply destination handle
1198 * as returned by ibt_alloc_ud_dest().
1199 * qkey Qkey. 0 means "not specified", so use the Q_Key
1200 * in the QP context.
1201 * recv_buf Pointer to the first data buffer associated with the
1202 * receive work request.
1203 * Output:
1204 * Returns:
1205 * IBT_SUCCESS
1206 * Description:
1207 * Modify a previously allocated UD destination handle, so that it
1208 * can be used to reply to the sender of the datagram contained in the
1209 * specified work request completion. If the qkey is not supplied (0),
1210 * then use the qkey in the QP (we just set qkey to a privileged QKEY).
1212 ibt_status_t
1213 ibt_modify_reply_ud_dest(ibt_channel_hdl_t ud_chan, ibt_ud_dest_hdl_t ud_dest,
1214 ib_qkey_t qkey, ibt_wc_t *wc, ib_vaddr_t recv_buf)
1216 ibt_status_t retval;
1217 ibt_adds_vect_t adds_vect;
1218 ib_grh_t *grh;
1219 uint8_t port;
1220 uint32_t ver_tc_flow;
1222 IBTF_DPRINTF_L3(ibtl_chan, "ibt_modify_reply_ud_dest(%p, %p, %x, %p, "
1223 "%llx)", ud_chan, ud_dest, qkey, wc, recv_buf);
1225 if (ud_chan->ch_qp.qp_type != IBT_UD_SRV) {
1226 IBTF_DPRINTF_L2(ibtl_chan, "ibt_modify_reply_ud_dest: "
1227 "type of channel (%d) is not UD",
1228 ud_chan->ch_qp.qp_type);
1229 return (IBT_CHAN_SRV_TYPE_INVALID);
1231 if (qkey == 0)
1232 qkey = ud_chan->ch_transport.ud.ud_qkey;
1233 port = ud_chan->ch_transport.ud.ud_port_num;
1235 if (wc->wc_flags & IBT_WC_GRH_PRESENT) {
1236 grh = (ib_grh_t *)(uintptr_t)recv_buf;
1237 adds_vect.av_send_grh = B_TRUE;
1238 adds_vect.av_dgid.gid_prefix = b2h64(grh->SGID.gid_prefix);
1239 adds_vect.av_dgid.gid_guid = b2h64(grh->SGID.gid_guid);
1240 adds_vect.av_sgid.gid_prefix = b2h64(grh->DGID.gid_prefix);
1241 adds_vect.av_sgid.gid_guid = b2h64(grh->DGID.gid_guid);
1242 (void) ibtl_find_sgid_ix(&adds_vect.av_sgid, ud_chan,
1243 port, &adds_vect.av_sgid_ix);
1244 ver_tc_flow = b2h32(grh->IPVer_TC_Flow);
1245 adds_vect.av_flow = ver_tc_flow & IB_GRH_FLOW_LABEL_MASK;
1246 adds_vect.av_tclass = (ver_tc_flow & IB_GRH_TCLASS_MASK) >> 20;
1247 adds_vect.av_hop = grh->HopLmt;
1248 } else {
1249 adds_vect.av_send_grh = B_FALSE;
1250 adds_vect.av_dgid.gid_prefix = 0;
1251 adds_vect.av_sgid.gid_prefix = 0;
1252 adds_vect.av_dgid.gid_guid = 0;
1253 adds_vect.av_sgid.gid_guid = 0;
1254 adds_vect.av_sgid_ix = 0;
1255 adds_vect.av_flow = 0;
1256 adds_vect.av_tclass = 0;
1257 adds_vect.av_hop = 0;
1260 adds_vect.av_srate = IBT_SRATE_1X; /* assume the minimum */
1261 adds_vect.av_srvl = wc->wc_sl;
1262 adds_vect.av_dlid = wc->wc_slid;
1263 adds_vect.av_src_path = wc->wc_path_bits;
1264 adds_vect.av_port_num = port;
1266 if ((retval = ibt_modify_ah(ud_dest->ud_dest_hca, ud_dest->ud_ah,
1267 &adds_vect)) != IBT_SUCCESS) {
1268 IBTF_DPRINTF_L2(ibtl_chan, "ibt_modify_reply_ud_dest: "
1269 "ibt_alloc_ah() failed: status = %d", retval);
1270 return (retval);
1272 ud_dest->ud_dst_qpn = wc->wc_qpn & IB_QPN_MASK;
1273 ud_dest->ud_qkey = qkey;
1275 return (IBT_SUCCESS);
1280 * Function:
1281 * ibt_is_privileged_ud_dest
1282 * Input:
1283 * ud_dest A previously allocated destination handle.
1284 * Output:
1285 * none
1286 * Returns:
1287 * B_FALSE/B_TRUE
1288 * Description:
1289 * Determine if a UD destination Handle is a privileged handle.
1291 boolean_t
1292 ibt_is_privileged_ud_dest(ibt_ud_dest_hdl_t ud_dest)
1294 return ((ud_dest->ud_qkey & IB_PRIVILEGED_QKEY_BIT) ? B_TRUE : B_FALSE);
1299 * Function:
1300 * ibt_update_channel_qkey
1301 * Input:
1302 * ud_chan The UD channel handle, that is to be used to
1303 * communicate with the specified destination.
1305 * ud_dest A UD destination handle returned from
1306 * ibt_alloc_ud_dest(9F).
1307 * Output:
1308 * none
1309 * Returns:
1310 * IBT_SUCCESS
1311 * Description:
1312 * ibt_update_channel_qkey() sets the Q_Key in the specified channel context
1313 * to the Q_Key in the specified destination handle. This function can be used
1314 * to enable sends to a privileged destination. All posted send work requests
1315 * that contain a privileged destination handle now use the Q_Key in the
1316 * channel context.
1318 * ibt_update_channel_qkey() can also be used to enable the caller to receive
1319 * from the specified remote destination on the specified channel.
1321 ibt_status_t
1322 ibt_update_channel_qkey(ibt_channel_hdl_t ud_chan, ibt_ud_dest_hdl_t ud_dest)
1324 ibt_status_t retval;
1325 ibt_qp_info_t qp_info;
1327 IBTF_DPRINTF_L3(ibtl_chan, "ibt_update_channel_qkey(%p, %p)",
1328 ud_chan, ud_dest);
1330 if (ud_chan->ch_qp.qp_type != IBT_UD_SRV) {
1331 IBTF_DPRINTF_L2(ibtl_chan, "ibt_update_channel_qkey: "
1332 "type of channel (%d) is not UD",
1333 ud_chan->ch_qp.qp_type);
1334 return (IBT_CHAN_SRV_TYPE_INVALID);
1336 bzero(&qp_info, sizeof (ibt_qp_info_t));
1338 qp_info.qp_trans = IBT_UD_SRV;
1339 qp_info.qp_state = ud_chan->ch_current_state;
1340 qp_info.qp_current_state = ud_chan->ch_current_state;
1341 qp_info.qp_transport.ud.ud_qkey = ud_dest->ud_qkey;
1343 retval = ibt_modify_qp(ud_chan, IBT_CEP_SET_QKEY | IBT_CEP_SET_STATE,
1344 &qp_info, NULL);
1345 if (retval != IBT_SUCCESS) {
1346 IBTF_DPRINTF_L2(ibtl_chan, "ibt_update_channel_qkey: "
1347 "Failed to modify QP %p: status %d", ud_chan, retval);
1348 } else {
1349 ud_chan->ch_transport.ud.ud_qkey = ud_dest->ud_qkey;
1352 return (retval);
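
/*
 * Illustrative sketch (editor's example, not part of this file): before
 * sending to a destination that carries a privileged Q_Key, load that Q_Key
 * into the channel context so that subsequent sends through this channel
 * use it.  Names are hypothetical.
 */
static ibt_status_t
example_enable_privileged_dest(ibt_channel_hdl_t ud_chan,
    ibt_ud_dest_hdl_t dest)
{
	if (!ibt_is_privileged_ud_dest(dest))
		return (IBT_SUCCESS);	/* nothing special to do */

	/* copy the destination's (privileged) Q_Key into the channel */
	return (ibt_update_channel_qkey(ud_chan, dest));
}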
1357 * Function:
1358 * ibt_set_chan_private
1359 * Input:
1360 * chan A previously allocated channel handle.
1361 * clnt_private The client private data.
1362 * Output:
1363 * none.
1364 * Returns:
1365 * none.
1366 * Description:
1367 * Set the client private data.
1369 void
1370 ibt_set_chan_private(ibt_channel_hdl_t chan, void *clnt_private)
1372 chan->ch_clnt_private = clnt_private;
1377 * Function:
1378 * ibt_get_chan_private
1379 * Input:
1380 * chan A previously allocated channel handle.
1381 * Output:
1382 * A pointer to the client private data.
1383 * Returns:
1384 * none.
1385 * Description:
1386 * Get a pointer to client private data.
1388 void *
1389 ibt_get_chan_private(ibt_channel_hdl_t chan)
1391 return (chan->ch_clnt_private);
1395 * Function:
1396 * ibt_channel_to_hca_guid
1397 * Input:
1398 * chan Channel Handle.
1399 * Output:
1400 * none.
1401 * Returns:
1402 * hca_guid Returned HCA GUID on which the specified Channel is
1403 * allocated. Valid if it is non-zero on return.
1404 * Description:
1405 * A helper function to retrieve HCA GUID for the specified Channel.
1407 ib_guid_t
1408 ibt_channel_to_hca_guid(ibt_channel_hdl_t chan)
1410 IBTF_DPRINTF_L3(ibtl_chan, "ibt_channel_to_hca_guid(%p)", chan);
1412 return (IBTL_HCA2HCAGUID(IBTL_CHAN2HCA(chan)));
1416 * Protection Domain Verbs Functions.
1420 * Function:
1421 * ibt_alloc_pd
1422 * Input:
1423 * hca_hdl The IBT HCA handle, the device on which we need
1424 * to create the requested Protection Domain.
1425 * flags IBT_PD_NO_FLAGS, IBT_PD_USER_MAP or IBT_PD_DEFER_ALLOC
1426 * Output:
1427 * pd IBT Protection Domain Handle.
1428 * Returns:
1429 * IBT_SUCCESS
1430 * IBT_HCA_HDL_INVALID
1431 * Description:
1432 * Allocate a Protection Domain.
1434 ibt_status_t
1435 ibt_alloc_pd(ibt_hca_hdl_t hca_hdl, ibt_pd_flags_t flags, ibt_pd_hdl_t *pd)
1437 ibt_status_t retval;
1439 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_pd(%p, %x)", hca_hdl, flags);
1441 /* re-direct the call to CI's call */
1442 ibtl_qp_flow_control_enter();
1443 retval = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_pd(
1444 IBTL_HCA2CIHCA(hca_hdl), flags, pd);
1445 ibtl_qp_flow_control_exit();
1446 if (retval != IBT_SUCCESS) {
1447 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_pd: CI PD Alloc Err");
1448 return (retval);
1451 /* Update the PDs Resource Count per HCA Device. */
1452 atomic_inc_32(&hca_hdl->ha_pd_cnt);
1454 return (retval);
1458 * Function:
1459 * ibt_free_pd
1460 * Input:
1461 * hca_hdl The IBT HCA handle, the device on which we need
1462 * to free the requested Protection Domain.
1463 * pd IBT Protection Domain Handle.
1464 * Output:
1465 * none.
1466 * Returns:
1467 * IBT_SUCCESS
1468 * IBT_HCA_HDL_INVALID
1469 * IBT_MEM_PD_HDL_INVALID
1470 * IBT_MEM_PD_IN_USE
1471 * Description:
1472 * Release/de-allocate a Protection Domain.
1474 ibt_status_t
1475 ibt_free_pd(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd)
1477 ibt_status_t retval;
1479 IBTF_DPRINTF_L3(ibtl_chan, "ibt_free_pd(%p, %p)", hca_hdl, pd);
1481 /* re-direct the call to CI's call */
1482 retval = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_pd(
1483 IBTL_HCA2CIHCA(hca_hdl), pd);
1484 if (retval != IBT_SUCCESS) {
1485 IBTF_DPRINTF_L2(ibtl_chan, "ibt_free_pd: CI Free PD Failed");
1486 return (retval);
1489 /* Update the PDs Resource Count per HCA Device. */
1490 atomic_dec_32(&hca_hdl->ha_pd_cnt);
1492 return (retval);
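
/*
 * Illustrative sketch (editor's example, not part of this file): typical
 * pairing of ibt_alloc_pd()/ibt_free_pd() around some use of the PD.  The
 * HCA handle is assumed to have been opened elsewhere; the function name is
 * hypothetical.
 */
static ibt_status_t
example_with_pd(ibt_hca_hdl_t hca_hdl)
{
	ibt_pd_hdl_t pd;
	ibt_status_t status;

	status = ibt_alloc_pd(hca_hdl, IBT_PD_NO_FLAGS, &pd);
	if (status != IBT_SUCCESS)
		return (status);

	/* ... register memory, allocate channels, etc. against 'pd' ... */

	/* fails with IBT_MEM_PD_IN_USE while resources still reference it */
	return (ibt_free_pd(hca_hdl, pd));
}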
1497 * Address Handle Verbs Functions.
1501 * Function:
1502 * ibt_alloc_ah
1503 * Input:
1504 * hca_hdl The IBT HCA Handle.
1505 * pd The IBT Protection Domain to associate with this handle.
1506 * adds_vectp Points to an ibt_adds_vect_t struct.
1507 * Output:
1508 * ah IBT Address Handle.
1509 * Returns:
1510 * IBT_SUCCESS
1511 * IBT_HCA_HDL_INVALID
1512 * IBT_INSUFF_RESOURCE
1513 * IBT_MEM_PD_HDL_INVALID
1514 * Description:
1515 * Allocates and returns an Address Handle.
1517 ibt_status_t
1518 ibt_alloc_ah(ibt_hca_hdl_t hca_hdl, ibt_ah_flags_t flags, ibt_pd_hdl_t pd,
1519 ibt_adds_vect_t *adds_vectp, ibt_ah_hdl_t *ah)
1521 ibt_status_t retval;
1523 IBTF_DPRINTF_L3(ibtl_chan, "ibt_alloc_ah(%p, %x, %p, %p)",
1524 hca_hdl, flags, pd, adds_vectp);
1526 /* XXX - if av_send_grh, need to compute av_sgid_ix from av_sgid */
1528 /* re-direct the call to CI's call */
1529 retval = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_ah(
1530 IBTL_HCA2CIHCA(hca_hdl), flags, pd, adds_vectp, ah);
1532 if (retval != IBT_SUCCESS) {
1533 IBTF_DPRINTF_L2(ibtl_chan, "ibt_alloc_ah: "
1534 "ibc_alloc_ah failed: status = %d", retval);
1535 return (retval);
1538 /* Update the AHs Resource Count per HCA Device. */
1539 atomic_inc_32(&hca_hdl->ha_ah_cnt);
1541 return (retval);
1546 * Function:
1547 * ibt_free_ah
1548 * Input:
1549 * hca_hdl The IBT HCA Handle.
1550 * ah IBT Address Handle.
1551 * Output:
1552 * none.
1553 * Returns:
1554 * IBT_SUCCESS
1555 * IBT_HCA_HDL_INVALID
1556 * IBT_AH_HDL_INVALID
1557 * Description:
1558 * Release/de-allocate the specified Address Handle.
1560 ibt_status_t
1561 ibt_free_ah(ibt_hca_hdl_t hca_hdl, ibt_ah_hdl_t ah)
1563 ibt_status_t retval;
1565 IBTF_DPRINTF_L3(ibtl_chan, "ibt_free_ah(%p, %p)", hca_hdl, ah);
1567 /* re-direct the call to CI's call */
1568 retval = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_ah(
1569 IBTL_HCA2CIHCA(hca_hdl), ah);
1571 if (retval != IBT_SUCCESS) {
1572 IBTF_DPRINTF_L2(ibtl_chan, "ibt_free_ah: CI Free AH Failed");
1573 return (retval);
1576 /* Update the AHs Resource Count per HCA Device. */
1577 atomic_dec_32(&hca_hdl->ha_ah_cnt);
1579 return (retval);
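
/*
 * Illustrative sketch (editor's example, not part of this file): allocate an
 * Address Handle for a LID-routed (no GRH) destination and free it again.
 * The PD is assumed to be valid; IBT_AH_NO_FLAGS is assumed from the ibtl
 * headers, and the names below are hypothetical.
 */
static ibt_status_t
example_ah_roundtrip(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ib_lid_t dlid, uint8_t port, uint8_t sl)
{
	ibt_adds_vect_t av;
	ibt_ah_hdl_t ah;
	ibt_status_t status;

	bzero(&av, sizeof (av));
	av.av_send_grh = B_FALSE;	/* local (LID-routed) destination */
	av.av_dlid = dlid;
	av.av_port_num = port;
	av.av_srvl = sl;
	av.av_srate = IBT_SRATE_1X;	/* assume the minimum, as above */

	status = ibt_alloc_ah(hca_hdl, IBT_AH_NO_FLAGS, pd, &av, &ah);
	if (status != IBT_SUCCESS)
		return (status);

	return (ibt_free_ah(hca_hdl, ah));
}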
1584 * Function:
1585 * ibt_query_ah
1586 * Input:
1587 * hca_hdl The IBT HCA Handle.
1588 * ah IBT Address Handle.
1589 * Output:
1590 * pd The Protection Domain Handle with which this
1591 * Address Handle is associated.
1592 * adds_vectp Points to an ibt_adds_vect_t struct.
1593 * Returns:
1594 * IBT_SUCCESS/IBT_HCA_HDL_INVALID/IBT_AH_HDL_INVALID
1595 * Description:
1596 * Obtain the address vector information for the specified address handle.
1598 ibt_status_t
1599 ibt_query_ah(ibt_hca_hdl_t hca_hdl, ibt_ah_hdl_t ah, ibt_pd_hdl_t *pd,
1600 ibt_adds_vect_t *adds_vectp)
1602 ibt_status_t retval;
1604 IBTF_DPRINTF_L3(ibtl_chan, "ibt_query_ah(%p, %p)", hca_hdl, ah);
1606 /* re-direct the call to CI's call */
1607 retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_query_ah(
1608 IBTL_HCA2CIHCA(hca_hdl), ah, pd, adds_vectp));
1611 * We need to fill in av_sgid, as the CI only saves/restores
1612 * av_sgid_ix.
1614 if (retval == IBT_SUCCESS) {
1615 ibtl_hca_devinfo_t *hca_devp = hca_hdl->ha_hca_devp;
1616 uint8_t port = adds_vectp->av_port_num;
1618 mutex_enter(&ibtl_clnt_list_mutex);
1619 if (port > 0 && port <= hca_devp->hd_hca_attr->hca_nports &&
1620 adds_vectp->av_sgid_ix < IBTL_HDIP2SGIDTBLSZ(hca_devp)) {
1621 ib_gid_t *sgidp;
1623 sgidp = hca_devp->hd_portinfop[port-1].p_sgid_tbl;
1624 adds_vectp->av_sgid = sgidp[adds_vectp->av_sgid_ix];
1625 } else {
1626 adds_vectp->av_sgid.gid_prefix = 0;
1627 adds_vectp->av_sgid.gid_guid = 0;
1629 mutex_exit(&ibtl_clnt_list_mutex);
1631 return (retval);
1636 * Function:
1637 * ibt_modify_ah
1638 * Input:
1639 * hca_hdl The IBT HCA Handle.
1640 * ah IBT Address Handle.
1641 * Output:
1642 * adds_vectp Points to an ibt_adds_vect_t struct. The new address
1643 * vector information is specified in this
1644 * structure.
1645 * Returns:
1646 * IBT_SUCCESS/IBT_HCA_HDL_INVALID/IBT_AH_HDL_INVALID
1647 * Description:
1648 * Modify the address vector information for the specified Address Handle.
1650 ibt_status_t
1651 ibt_modify_ah(ibt_hca_hdl_t hca_hdl, ibt_ah_hdl_t ah,
1652 ibt_adds_vect_t *adds_vectp)
1654 IBTF_DPRINTF_L3(ibtl_chan, "ibt_modify_ah(%p, %p)", hca_hdl, ah);
1656 /* XXX - if av_send_grh, need to compute av_sgid_ix from av_sgid */
1658 /* re-direct the call to CI's call */
1659 return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_modify_ah(
1660 IBTL_HCA2CIHCA(hca_hdl), ah, adds_vectp));