/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * hermon_ci.c
 *    Hermon Channel Interface (CI) Routines
 *
 *    Implements all the routines necessary to interface with the IBTF.
 *    Pointers to all of these functions are passed to the IBTF at attach()
 *    time in the ibc_operations_t structure.  These functions include all
 *    of the necessary routines to implement the required InfiniBand "verbs"
 *    and additional IBTF-specific interfaces.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

#include <sys/ib/adapters/hermon/hermon.h>

extern uint32_t hermon_kernel_data_ro;
extern uint32_t hermon_user_data_ro;

/* HCA and port related operations */
static ibt_status_t hermon_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
    ibt_hca_portinfo_t *);
static ibt_status_t hermon_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
    ibt_port_modify_flags_t, uint8_t);
static ibt_status_t hermon_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);

/* Protection Domains */
static ibt_status_t hermon_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
    ibc_pd_hdl_t *);
static ibt_status_t hermon_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);

/* Reliable Datagram Domains */
static ibt_status_t hermon_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
    ibc_rdd_hdl_t *);
static ibt_status_t hermon_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);

/* Address Handles */
static ibt_status_t hermon_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
    ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
static ibt_status_t hermon_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
static ibt_status_t hermon_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibc_pd_hdl_t *, ibt_adds_vect_t *);
static ibt_status_t hermon_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibt_adds_vect_t *);

/* Queue Pairs */
static ibt_status_t hermon_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
    ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
    ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
    ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
    ibt_chan_sizes_t *, ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
    ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
    ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
static ibt_status_t hermon_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
static ibt_status_t hermon_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_qp_query_attr_t *);
static ibt_status_t hermon_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);

/* Completion Queues */
static ibt_status_t hermon_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
    ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
static ibt_status_t hermon_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
static ibt_status_t hermon_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t *, uint_t *, uint_t *, ibt_cq_handler_id_t *);
static ibt_status_t hermon_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t *);
static ibt_status_t hermon_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t, ibt_cq_handler_id_t);
static ibt_status_t hermon_ci_alloc_cq_sched(ibc_hca_hdl_t,
    ibt_cq_sched_attr_t *, ibc_sched_hdl_t *);
static ibt_status_t hermon_ci_free_cq_sched(ibc_hca_hdl_t, ibc_sched_hdl_t);
static ibt_status_t hermon_ci_query_cq_handler_id(ibc_hca_hdl_t,
    ibt_cq_handler_id_t, ibt_cq_handler_attr_t *);

/* EE Contexts */
static ibt_status_t hermon_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
    ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
static ibt_status_t hermon_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
static ibt_status_t hermon_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_eec_query_attr_t *);
static ibt_status_t hermon_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_cep_modify_flags_t, ibt_eec_info_t *);

/* Memory Registration */
static ibt_status_t hermon_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_register_shared_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
    ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
static ibt_status_t hermon_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibt_mr_query_attr_t *);
static ibt_status_t hermon_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t hermon_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t hermon_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);
static ibt_status_t hermon_ci_register_dma_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_dmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);

/* Memory Windows */
static ibt_status_t hermon_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
static ibt_status_t hermon_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
static ibt_status_t hermon_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
    ibt_mw_query_attr_t *);

/* Multicast Groups */
static ibt_status_t hermon_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);
static ibt_status_t hermon_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);

/* Work Request and Completion Processing */
static ibt_status_t hermon_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_send_wr_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_wc_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_cq_notify_flags_t);

/* CI Object Private Data */
static ibt_status_t hermon_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);

/* CI Object Private Data */
static ibt_status_t hermon_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);

/* Shared Receive Queues */
static ibt_status_t hermon_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
    ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
    ibt_srq_sizes_t *);
static ibt_status_t hermon_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
static ibt_status_t hermon_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
static ibt_status_t hermon_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
static ibt_status_t hermon_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);

/* Address translation */
static ibt_status_t hermon_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
    void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
static ibt_status_t hermon_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
static ibt_status_t hermon_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
    ibt_all_wr_t *, ibc_mi_hdl_t *);
static ibt_status_t hermon_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);

/* Allocate L_Key */
static ibt_status_t hermon_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);

/* Physical Register Memory Region */
static ibt_status_t hermon_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
static ibt_status_t hermon_ci_reregister_physical_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_pmr_desc_t *);

/* Mellanox FMR */
static ibt_status_t hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca,
    ibc_pd_hdl_t pd, ibt_fmr_pool_attr_t *fmr_params,
    ibc_fmr_pool_hdl_t *fmr_pool);
static ibt_status_t hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
    void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
static ibt_status_t hermon_ci_deregister_fmr(ibc_hca_hdl_t hca,
    ibc_mr_hdl_t mr);

/* Memory Allocation/Deallocation */
static ibt_status_t hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size,
    ibt_mr_flags_t mr_flag, caddr_t *kaddrp,
    ibc_mem_alloc_hdl_t *mem_alloc_hdl_p);
static ibt_status_t hermon_ci_free_io_mem(ibc_hca_hdl_t hca,
    ibc_mem_alloc_hdl_t mem_alloc_hdl);
static ibt_status_t hermon_ci_not_supported();

/*
 * This ibc_operations_t structure includes pointers to all the entry points
 * provided by the Hermon driver.  This structure is passed to the IBTF at
 * driver attach time, using the ibc_attach() call.
 */
ibc_operations_t hermon_ibc_ops = {
    /* HCA and port related operations */
    hermon_ci_query_hca_ports,
    hermon_ci_modify_ports,
    hermon_ci_modify_system_image,

    /* Protection Domains */
    hermon_ci_alloc_pd,
    hermon_ci_free_pd,

    /* Reliable Datagram Domains */
    hermon_ci_alloc_rdd,
    hermon_ci_free_rdd,

    /* Address Handles */
    hermon_ci_alloc_ah,
    hermon_ci_free_ah,
    hermon_ci_query_ah,
    hermon_ci_modify_ah,

    /* Queue Pairs */
    hermon_ci_alloc_qp,
    hermon_ci_alloc_special_qp,
    hermon_ci_alloc_qp_range,
    hermon_ci_free_qp,
    hermon_ci_release_qpn,
    hermon_ci_query_qp,
    hermon_ci_modify_qp,

    /* Completion Queues */
    hermon_ci_alloc_cq,
    hermon_ci_free_cq,
    hermon_ci_query_cq,
    hermon_ci_resize_cq,
    hermon_ci_modify_cq,
    hermon_ci_alloc_cq_sched,
    hermon_ci_free_cq_sched,
    hermon_ci_query_cq_handler_id,

    /* EE Contexts */
    hermon_ci_alloc_eec,
    hermon_ci_free_eec,
    hermon_ci_query_eec,
    hermon_ci_modify_eec,

    /* Memory Registration */
    hermon_ci_register_mr,
    hermon_ci_register_buf,
    hermon_ci_register_shared_mr,
    hermon_ci_deregister_mr,
    hermon_ci_query_mr,
    hermon_ci_reregister_mr,
    hermon_ci_reregister_buf,
    hermon_ci_sync_mr,

    /* Memory Windows */
    hermon_ci_alloc_mw,
    hermon_ci_free_mw,
    hermon_ci_query_mw,

    /* Multicast Groups */
    hermon_ci_attach_mcg,
    hermon_ci_detach_mcg,

    /* Work Request and Completion Processing */
    hermon_ci_post_send,
    hermon_ci_post_recv,
    hermon_ci_poll_cq,
    hermon_ci_notify_cq,

    /* CI Object Mapping Data */
    hermon_ci_ci_data_in,
    hermon_ci_ci_data_out,

    /* Shared Receive Queue */
    hermon_ci_alloc_srq,
    hermon_ci_free_srq,
    hermon_ci_query_srq,
    hermon_ci_modify_srq,
    hermon_ci_post_srq,

    /* Address translation */
    hermon_ci_map_mem_area,
    hermon_ci_unmap_mem_area,
    hermon_ci_map_mem_iov,
    hermon_ci_unmap_mem_iov,

    /* Allocate L_key */
    hermon_ci_alloc_lkey,

    /* Physical Register Memory Region */
    hermon_ci_register_physical_mr,
    hermon_ci_reregister_physical_mr,

    /* Mellanox FMR */
    hermon_ci_create_fmr_pool,
    hermon_ci_destroy_fmr_pool,
    hermon_ci_flush_fmr_pool,
    hermon_ci_register_physical_fmr,
    hermon_ci_deregister_fmr,

    /* Memory allocation */
    hermon_ci_alloc_io_mem,
    hermon_ci_free_io_mem,

    /* XRC not yet supported */
    hermon_ci_not_supported,	/* ibc_alloc_xrc_domain */
    hermon_ci_not_supported,	/* ibc_free_xrc_domain */
    hermon_ci_not_supported,	/* ibc_alloc_xrc_srq */
    hermon_ci_not_supported,	/* ibc_free_xrc_srq */
    hermon_ci_not_supported,	/* ibc_query_xrc_srq */
    hermon_ci_not_supported,	/* ibc_modify_xrc_srq */
    hermon_ci_not_supported,	/* ibc_alloc_xrc_tgt_qp */
    hermon_ci_not_supported,	/* ibc_free_xrc_tgt_qp */
    hermon_ci_not_supported,	/* ibc_query_xrc_tgt_qp */
    hermon_ci_not_supported,	/* ibc_modify_xrc_tgt_qp */

    /* Memory Region (physical) */
    hermon_ci_register_dma_mr,

    /* Next enhancements */
    hermon_ci_not_supported,	/* ibc_enhancement1 */
    hermon_ci_not_supported,	/* ibc_enhancement2 */
    hermon_ci_not_supported,	/* ibc_enhancement3 */
    hermon_ci_not_supported,	/* ibc_enhancement4 */
};
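
/*
 * Illustrative sketch (an assumption, not code from this file): the ops
 * table above is handed to the IBTF from the driver's attach path.  Based
 * on the hs_ibtfinfo usage visible in hermon_ci_modify_srq() below, the
 * registration looks roughly like:
 *
 *	state->hs_ibtfinfo.hca_ci_vers = IBCI_V4;
 *	state->hs_ibtfinfo.hca_handle = (ibc_hca_hdl_t)state;
 *	state->hs_ibtfinfo.hca_ops = &hermon_ibc_ops;
 *	state->hs_ibtfinfo.hca_attr = hca_attr;
 *	(void) ibc_attach(&state->hs_ibtfpriv, &state->hs_ibtfinfo);
 *
 * The field and variable names here (hca_ci_vers, hca_handle, hca_ops,
 * hca_attr, hs_ibtfpriv) are assumptions drawn from related hermon code,
 * not definitions made in this file.
 */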

/*
 * Not yet implemented OPS
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_not_supported()
{
    return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_query_hca_ports()
 *    Returns HCA port attributes for either one or all of the HCA's ports.
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
    ibt_hca_portinfo_t *info_p)
{
    hermon_state_t *state;
    uint_t start, end, port;
    int status, indx;

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;

    /*
     * If the specified port is zero, then we are supposed to query all
     * ports.  Otherwise, we query only the port number specified.
     * Set up the start and end port numbers as appropriate for the loop
     * below.  Note: the first Hermon port is port number one (1).
     */
    if (query_port == 0) {
        start = 1;
        end = start + (state->hs_cfg_profile->cp_num_ports - 1);
    } else {
        end = start = query_port;
    }

    /* Query the port(s) */
    for (port = start, indx = 0; port <= end; port++, indx++) {
        status = hermon_port_query(state, port, &info_p[indx]);
        if (status != DDI_SUCCESS) {
            return (status);
        }
    }
    return (IBT_SUCCESS);
}
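
/*
 * Illustrative caller sketch (hypothetical, not from this file): since a
 * query_port of zero means "query all ports", the caller must supply an
 * ibt_hca_portinfo_t array with at least cp_num_ports entries, e.g.:
 *
 *	ibt_hca_portinfo_t infos[2];	(two ports assumed for this HCA)
 *	status = hermon_ci_query_hca_ports(hca, 0, infos);
 *
 * With a nonzero query_port, only that single port is queried and only
 * infos[0] is written.
 */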

/*
 * hermon_ci_modify_ports()
 *    Modify HCA port attributes
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
    ibt_port_modify_flags_t flags, uint8_t init_type)
{
    hermon_state_t *state;
    int status;

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;

    /* Modify the port(s) */
    status = hermon_port_modify(state, port, flags, init_type);
    return (status);
}

/*
 * hermon_ci_modify_system_image()
 *    Modify the System Image GUID
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
{
    /*
     * This is an unsupported interface for the Hermon driver.  This
     * interface is necessary to support modification of the System
     * Image GUID.  Hermon is only capable of modifying this parameter
     * once (during driver initialization).
     */
    return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_alloc_pd()
 *    Allocate a Protection Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
{
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    int status;

    ASSERT(pd_p != NULL);

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;

    /* Allocate the PD */
    status = hermon_pd_alloc(state, &pdhdl, HERMON_NOSLEEP);
    if (status != DDI_SUCCESS) {
        return (status);
    }

    /* Return the Hermon PD handle */
    *pd_p = (ibc_pd_hdl_t)pdhdl;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_free_pd()
 *    Free a Protection Domain
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
{
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    int status;

    /* Grab the Hermon softstate pointer and PD handle */
    state = (hermon_state_t *)hca;
    pdhdl = (hermon_pdhdl_t)pd;

    /* Free the PD */
    status = hermon_pd_free(state, &pdhdl);
    return (status);
}

/*
 * hermon_ci_alloc_rdd()
 *    Allocate a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
    ibc_rdd_hdl_t *rdd_p)
{
    /*
     * This is an unsupported interface for the Hermon driver.  This
     * interface is necessary to support Reliable Datagram (RD)
     * operations.  Hermon does not support RD.
     */
    return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_free_rdd()
 *    Free a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
{
    /*
     * This is an unsupported interface for the Hermon driver.  This
     * interface is necessary to support Reliable Datagram (RD)
     * operations.  Hermon does not support RD.
     */
    return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_alloc_ah()
 *    Allocate an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
    ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
{
    hermon_state_t *state;
    hermon_ahhdl_t ahhdl;
    hermon_pdhdl_t pdhdl;
    int status;

    /* Grab the Hermon softstate pointer and PD handle */
    state = (hermon_state_t *)hca;
    pdhdl = (hermon_pdhdl_t)pd;

    /* Allocate the AH */
    status = hermon_ah_alloc(state, pdhdl, attr_p, &ahhdl, HERMON_NOSLEEP);
    if (status != DDI_SUCCESS) {
        return (status);
    }

    /* Return the Hermon AH handle */
    *ah_p = (ibc_ah_hdl_t)ahhdl;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_free_ah()
 *    Free an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
{
    hermon_state_t *state;
    hermon_ahhdl_t ahhdl;
    int status;

    /* Grab the Hermon softstate pointer and AH handle */
    state = (hermon_state_t *)hca;
    ahhdl = (hermon_ahhdl_t)ah;

    /* Free the AH */
    status = hermon_ah_free(state, &ahhdl, HERMON_NOSLEEP);

    return (status);
}

/*
 * hermon_ci_query_ah()
 *    Return the Address Vector information for a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
    ibt_adds_vect_t *attr_p)
{
    hermon_state_t *state;
    hermon_ahhdl_t ahhdl;
    hermon_pdhdl_t pdhdl;
    int status;

    /* Grab the Hermon softstate pointer and AH handle */
    state = (hermon_state_t *)hca;
    ahhdl = (hermon_ahhdl_t)ah;

    /* Query the AH */
    status = hermon_ah_query(state, ahhdl, &pdhdl, attr_p);
    if (status != DDI_SUCCESS) {
        return (status);
    }

    /* Return the Hermon PD handle */
    *pd_p = (ibc_pd_hdl_t)pdhdl;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_modify_ah()
 *    Modify the Address Vector information of a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
{
    hermon_state_t *state;
    hermon_ahhdl_t ahhdl;
    int status;

    /* Grab the Hermon softstate pointer and AH handle */
    state = (hermon_state_t *)hca;
    ahhdl = (hermon_ahhdl_t)ah;

    /* Modify the AH */
    status = hermon_ah_modify(state, ahhdl, attr_p);

    return (status);
}

/*
 * hermon_ci_alloc_qp()
 *    Allocate a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
    ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
    ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
{
    hermon_state_t *state;
    hermon_qp_info_t qpinfo;
    int status;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;

    /* Allocate the QP */
    qpinfo.qpi_attrp = attr_p;
    qpinfo.qpi_type = type;
    qpinfo.qpi_ibt_qphdl = ibt_qphdl;
    qpinfo.qpi_queueszp = queue_sizes_p;
    qpinfo.qpi_qpn = qpn;
    status = hermon_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
    if (status != DDI_SUCCESS) {
        return (status);
    }

    /* Return the Hermon QP handle */
    *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_alloc_special_qp()
 *    Allocate a Special Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
    ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_qp_hdl_t *qp_p)
{
    hermon_state_t *state;
    hermon_qp_info_t qpinfo;
    int status;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;

    /* Allocate the Special QP */
    qpinfo.qpi_attrp = attr_p;
    qpinfo.qpi_type = type;
    qpinfo.qpi_port = port;
    qpinfo.qpi_ibt_qphdl = ibt_qphdl;
    qpinfo.qpi_queueszp = queue_sizes_p;
    status = hermon_special_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
    if (status != DDI_SUCCESS) {
        return (status);
    }

    /* Return the Hermon QP handle */
    *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_alloc_qp_range()
 *    Allocate a range of Queue Pairs
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
    ibtl_qp_hdl_t *ibtl_qp, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_cq_hdl_t *send_cq, ibc_cq_hdl_t *recv_cq,
    ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
{
    hermon_state_t *state;
    hermon_qp_info_t qpinfo;
    int status;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;

    /* Allocate the QPs */
    qpinfo.qpi_attrp = attr_p;
    qpinfo.qpi_type = type;
    qpinfo.qpi_queueszp = queue_sizes_p;
    qpinfo.qpi_qpn = qpn;
    status = hermon_qp_alloc_range(state, log2, &qpinfo, ibtl_qp,
        send_cq, recv_cq, (hermon_qphdl_t *)qp_p, HERMON_NOSLEEP);
    return (status);
}

/*
 * hermon_ci_free_qp()
 *    Free a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
{
    hermon_state_t *state;
    hermon_qphdl_t qphdl;
    int status;

    /* Grab the Hermon softstate pointer and QP handle */
    state = (hermon_state_t *)hca;
    qphdl = (hermon_qphdl_t)qp;

    /* Free the QP */
    status = hermon_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
        HERMON_NOSLEEP);

    return (status);
}

/*
 * hermon_ci_release_qpn()
 *    Release a Queue Pair Number (QPN)
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
{
    hermon_state_t *state;
    hermon_qpn_entry_t *entry;

    /* Grab the Hermon softstate pointer and QPN handle */
    state = (hermon_state_t *)hca;
    entry = (hermon_qpn_entry_t *)qpnh;

    /* Release the QP number */
    hermon_qp_release_qpn(state, entry, HERMON_QPN_RELEASE);

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_query_qp()
 *    Query a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_qp_query_attr_t *attr_p)
{
    hermon_state_t *state;
    hermon_qphdl_t qphdl;
    int status;

    /* Grab the Hermon softstate pointer and QP handle */
    state = (hermon_state_t *)hca;
    qphdl = (hermon_qphdl_t)qp;

    /* Query the QP */
    status = hermon_qp_query(state, qphdl, attr_p);
    return (status);
}

/*
 * hermon_ci_modify_qp()
 *    Modify a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
    ibt_queue_sizes_t *actual_sz)
{
    hermon_state_t *state;
    hermon_qphdl_t qphdl;
    int status;

    /* Grab the Hermon softstate pointer and QP handle */
    state = (hermon_state_t *)hca;
    qphdl = (hermon_qphdl_t)qp;

    /* Modify the QP */
    status = hermon_qp_modify(state, qphdl, flags, info_p, actual_sz);
    return (status);
}

/*
 * hermon_ci_alloc_cq()
 *    Allocate a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
    ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
{
    hermon_state_t *state;
    hermon_cqhdl_t cqhdl;
    int status;

    state = (hermon_state_t *)hca;

    /* Allocate the CQ */
    status = hermon_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
        &cqhdl, HERMON_NOSLEEP);
    if (status != DDI_SUCCESS) {
        return (status);
    }

    /* Return the Hermon CQ handle */
    *cq_p = (ibc_cq_hdl_t)cqhdl;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_free_cq()
 *    Free a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
{
    hermon_state_t *state;
    hermon_cqhdl_t cqhdl;
    int status;

    /* Grab the Hermon softstate pointer and CQ handle */
    state = (hermon_state_t *)hca;
    cqhdl = (hermon_cqhdl_t)cq;

    /* Free the CQ */
    status = hermon_cq_free(state, &cqhdl, HERMON_NOSLEEP);
    return (status);
}

/*
 * hermon_ci_query_cq()
 *    Return the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
    uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
{
    hermon_state_t *state;
    hermon_cqhdl_t cqhdl;

    /* Grab the CQ handle */
    state = (hermon_state_t *)hca;
    cqhdl = (hermon_cqhdl_t)cq;

    /* Query the current CQ size */
    *entries_p = cqhdl->cq_bufsz;
    *count_p = cqhdl->cq_intmod_count;
    *usec_p = cqhdl->cq_intmod_usec;
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cqhdl))
    *hid_p = HERMON_EQNUM_TO_HID(state, cqhdl->cq_eqnum);

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_resize_cq()
 *    Change the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
    uint_t *actual_size)
{
    hermon_state_t *state;
    hermon_cqhdl_t cqhdl;
    int status;

    /* Grab the Hermon softstate pointer and CQ handle */
    state = (hermon_state_t *)hca;
    cqhdl = (hermon_cqhdl_t)cq;

    /* Resize the CQ */
    status = hermon_cq_resize(state, cqhdl, size, actual_size,
        HERMON_NOSLEEP);
    if (status != DDI_SUCCESS) {
        return (status);
    }
    return (IBT_SUCCESS);
}

/*
 * hermon_ci_modify_cq()
 *    Change the interrupt moderation values of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t count,
    uint_t usec, ibt_cq_handler_id_t hid)
{
    hermon_state_t *state;
    hermon_cqhdl_t cqhdl;
    int status;

    /* Grab the Hermon softstate pointer and CQ handle */
    state = (hermon_state_t *)hca;
    cqhdl = (hermon_cqhdl_t)cq;

    /* Modify the CQ */
    status = hermon_cq_modify(state, cqhdl, count, usec, hid,
        HERMON_NOSLEEP);
    return (status);
}

/*
 * hermon_ci_alloc_cq_sched()
 *    Reserve a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_attr_t *attr,
    ibc_sched_hdl_t *sched_hdl_p)
{
    int status;

    status = hermon_cq_sched_alloc((hermon_state_t *)hca, attr,
        (hermon_cq_sched_t **)sched_hdl_p);
    return (status);
}

/*
 * hermon_ci_free_cq_sched()
 *    Free a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl)
{
    int status;

    status = hermon_cq_sched_free((hermon_state_t *)hca,
        (hermon_cq_sched_t *)sched_hdl);
    return (status);
}

static ibt_status_t
hermon_ci_query_cq_handler_id(ibc_hca_hdl_t hca,
    ibt_cq_handler_id_t hid, ibt_cq_handler_attr_t *attrs)
{
    hermon_state_t *state;

    state = (hermon_state_t *)hca;
    if (!HERMON_HID_VALID(state, hid))
        return (IBT_CQ_HID_INVALID);
    if (attrs == NULL)
        return (IBT_INVALID_PARAM);
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attrs))
    attrs->cha_ih = state->hs_intrmsi_hdl[hid - 1];
    attrs->cha_dip = state->hs_dip;
    return (IBT_SUCCESS);
}
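
/*
 * Note (inferred from the code above, stated as an assumption): CQ handler
 * ids are 1-based, so a valid hid indexes hs_intrmsi_hdl[] at hid - 1.
 * With two MSI vectors, for example, hid 1 maps to hs_intrmsi_hdl[0] and
 * hid 2 to hs_intrmsi_hdl[1]; hid 0 fails the HERMON_HID_VALID() check.
 */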

/*
 * hermon_ci_alloc_eec()
 *    Allocate an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
    ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
{
    /*
     * This is an unsupported interface for the Hermon driver.  This
     * interface is necessary to support Reliable Datagram (RD)
     * operations.  Hermon does not support RD.
     */
    return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_free_eec()
 *    Free an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
{
    /*
     * This is an unsupported interface for the Hermon driver.  This
     * interface is necessary to support Reliable Datagram (RD)
     * operations.  Hermon does not support RD.
     */
    return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_query_eec()
 *    Query an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_eec_query_attr_t *attr_p)
{
    /*
     * This is an unsupported interface for the Hermon driver.  This
     * interface is necessary to support Reliable Datagram (RD)
     * operations.  Hermon does not support RD.
     */
    return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_modify_eec()
 *    Modify an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
{
    /*
     * This is an unsupported interface for the Hermon driver.  This
     * interface is necessary to support Reliable Datagram (RD)
     * operations.  Hermon does not support RD.
     */
    return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_register_mr()
 *    Prepare a virtually addressed Memory Region for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_mr_desc_t *mr_desc)
{
    hermon_mr_options_t op;
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    hermon_mrhdl_t mrhdl;
    int status;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

    ASSERT(mr_attr != NULL);
    ASSERT(mr_p != NULL);
    ASSERT(mr_desc != NULL);

    /*
     * Validate the access flags.  Both Remote Write and Remote Atomic
     * require the Local Write flag to be set.
     */
    if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
        !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
        return (IBT_MR_ACCESS_REQ_INVALID);
    }

    /* Grab the Hermon softstate pointer and PD handle */
    state = (hermon_state_t *)hca;
    pdhdl = (hermon_pdhdl_t)pd;

    /* Register the memory region */
    op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
    op.mro_bind_dmahdl = NULL;
    op.mro_bind_override_addr = 0;
    status = hermon_mr_register(state, pdhdl, mr_attr, &mrhdl,
        &op, HERMON_MPT_DMPT);
    if (status != DDI_SUCCESS) {
        return (status);
    }
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

    /* Fill in the mr_desc structure */
    mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
    mr_desc->md_lkey = mrhdl->mr_lkey;
    /* Only set RKey if remote access was requested */
    if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
        (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
        mr_desc->md_rkey = mrhdl->mr_rkey;
    }

    /*
     * If the region is mapped for streaming (i.e. noncoherent), then set
     * "sync is required".
     */
    mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
        IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

    /* Return the Hermon MR handle */
    *mr_p = (ibc_mr_hdl_t)mrhdl;

    return (IBT_SUCCESS);
}
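
/*
 * Illustrative flag combinations for the validation above (derived from
 * the code; the combinations shown are hypothetical caller choices):
 *
 *	IBT_MR_ENABLE_REMOTE_WRITE | IBT_MR_ENABLE_LOCAL_WRITE  -> accepted
 *	IBT_MR_ENABLE_REMOTE_READ                               -> accepted
 *	IBT_MR_ENABLE_REMOTE_WRITE alone        -> IBT_MR_ACCESS_REQ_INVALID
 *	IBT_MR_ENABLE_REMOTE_ATOMIC alone       -> IBT_MR_ACCESS_REQ_INVALID
 *
 * The same rule is applied by hermon_ci_register_buf(),
 * hermon_ci_register_shared_mr(), and hermon_ci_register_dma_mr() below.
 */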

/*
 * hermon_ci_register_buf()
 *    Prepare a Memory Region specified by buf structure for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
    hermon_mr_options_t op;
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    hermon_mrhdl_t mrhdl;
    int status;
    ibt_mr_flags_t flags = attrp->mr_flags;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

    ASSERT(mr_p != NULL);
    ASSERT(mr_desc != NULL);

    /*
     * Validate the access flags.  Both Remote Write and Remote Atomic
     * require the Local Write flag to be set.
     */
    if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
        !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
        return (IBT_MR_ACCESS_REQ_INVALID);
    }

    /* Grab the Hermon softstate pointer and PD handle */
    state = (hermon_state_t *)hca;
    pdhdl = (hermon_pdhdl_t)pd;

    /* Register the memory region */
    op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
    op.mro_bind_dmahdl = NULL;
    op.mro_bind_override_addr = 0;
    status = hermon_mr_register_buf(state, pdhdl, attrp, buf,
        &mrhdl, &op, HERMON_MPT_DMPT);
    if (status != DDI_SUCCESS) {
        return (status);
    }
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

    /* Fill in the mr_desc structure */
    mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
    mr_desc->md_lkey = mrhdl->mr_lkey;
    /* Only set RKey if remote access was requested */
    if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
        (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (flags & IBT_MR_ENABLE_REMOTE_READ)) {
        mr_desc->md_rkey = mrhdl->mr_rkey;
    }

    /*
     * If the region is mapped for streaming (i.e. noncoherent), then set
     * "sync is required".
     */
    mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
        IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

    /* Return the Hermon MR handle */
    *mr_p = (ibc_mr_hdl_t)mrhdl;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_deregister_mr()
 *    Deregister a Memory Region from an HCA translation table
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
{
    hermon_state_t *state;
    hermon_mrhdl_t mrhdl;
    int status;

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;
    mrhdl = (hermon_mrhdl_t)mr;

    /*
     * Deregister the memory region.
     */
    status = hermon_mr_deregister(state, &mrhdl, HERMON_MR_DEREG_ALL,
        HERMON_NOSLEEP);
    return (status);
}

/*
 * hermon_ci_query_mr()
 *    Retrieve information about a specified Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibt_mr_query_attr_t *mr_attr)
{
    hermon_state_t *state;
    hermon_mrhdl_t mrhdl;
    int status;

    ASSERT(mr_attr != NULL);

    /* Grab the Hermon softstate pointer and MR handle */
    state = (hermon_state_t *)hca;
    mrhdl = (hermon_mrhdl_t)mr;

    /* Query the memory region */
    status = hermon_mr_query(state, mrhdl, mr_attr);
    return (status);
}

/*
 * hermon_ci_register_shared_mr()
 *    Create a shared memory region matching an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    hermon_mrhdl_t mrhdl, mrhdl_new;
    int status;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

    ASSERT(mr_attr != NULL);
    ASSERT(mr_p != NULL);
    ASSERT(mr_desc != NULL);

    /*
     * Validate the access flags.  Both Remote Write and Remote Atomic
     * require the Local Write flag to be set.
     */
    if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
        !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
        return (IBT_MR_ACCESS_REQ_INVALID);
    }

    /* Grab the Hermon softstate pointer and handles */
    state = (hermon_state_t *)hca;
    pdhdl = (hermon_pdhdl_t)pd;
    mrhdl = (hermon_mrhdl_t)mr;

    /* Register the shared memory region */
    status = hermon_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
        &mrhdl_new);
    if (status != DDI_SUCCESS) {
        return (status);
    }
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

    /* Fill in the mr_desc structure */
    mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
    mr_desc->md_lkey = mrhdl_new->mr_lkey;
    /* Only set RKey if remote access was requested */
    if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
        (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
        mr_desc->md_rkey = mrhdl_new->mr_rkey;
    }

    /*
     * If the shared region is mapped for streaming (i.e. noncoherent),
     * then set "sync is required".
     */
    mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
        IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

    /* Return the Hermon MR handle */
    *mr_p = (ibc_mr_hdl_t)mrhdl_new;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_reregister_mr()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
    ibt_mr_desc_t *mr_desc)
{
    hermon_mr_options_t op;
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    hermon_mrhdl_t mrhdl, mrhdl_new;
    int status;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

    ASSERT(mr_attr != NULL);
    ASSERT(mr_new != NULL);
    ASSERT(mr_desc != NULL);

    /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
    state = (hermon_state_t *)hca;
    mrhdl = (hermon_mrhdl_t)mr;
    pdhdl = (hermon_pdhdl_t)pd;

    /* Reregister the memory region */
    op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
    status = hermon_mr_reregister(state, mrhdl, pdhdl, mr_attr,
        &mrhdl_new, &op);
    if (status != DDI_SUCCESS) {
        return (status);
    }
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

    /* Fill in the mr_desc structure */
    mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
    mr_desc->md_lkey = mrhdl_new->mr_lkey;
    /* Only set RKey if remote access was requested */
    if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
        (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
        mr_desc->md_rkey = mrhdl_new->mr_rkey;
    }

    /*
     * If the region is mapped for streaming (i.e. noncoherent), then set
     * "sync is required".
     */
    mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
        IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

    /* Return the Hermon MR handle */
    *mr_new = (ibc_mr_hdl_t)mrhdl_new;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_reregister_buf()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
{
    hermon_mr_options_t op;
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    hermon_mrhdl_t mrhdl, mrhdl_new;
    int status;
    ibt_mr_flags_t flags = attrp->mr_flags;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

    ASSERT(mr_new != NULL);
    ASSERT(mr_desc != NULL);

    /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
    state = (hermon_state_t *)hca;
    mrhdl = (hermon_mrhdl_t)mr;
    pdhdl = (hermon_pdhdl_t)pd;

    /* Reregister the memory region */
    op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
    status = hermon_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
        &mrhdl_new, &op);
    if (status != DDI_SUCCESS) {
        return (status);
    }
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

    /* Fill in the mr_desc structure */
    mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
    mr_desc->md_lkey = mrhdl_new->mr_lkey;
    /* Only set RKey if remote access was requested */
    if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
        (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (flags & IBT_MR_ENABLE_REMOTE_READ)) {
        mr_desc->md_rkey = mrhdl_new->mr_rkey;
    }

    /*
     * If the region is mapped for streaming (i.e. noncoherent), then set
     * "sync is required".
     */
    mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
        IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

    /* Return the Hermon MR handle */
    *mr_new = (ibc_mr_hdl_t)mrhdl_new;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_sync_mr()
 *    Synchronize access to a Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
{
    hermon_state_t *state;
    int status;

    ASSERT(mr_segs != NULL);

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;

    /* Sync the memory region */
    status = hermon_mr_sync(state, mr_segs, num_segs);
    return (status);
}

/*
 * hermon_ci_alloc_mw()
 *    Allocate a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
{
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    hermon_mwhdl_t mwhdl;
    int status;

    ASSERT(mw_p != NULL);
    ASSERT(rkey_p != NULL);

    /* Grab the Hermon softstate pointer and PD handle */
    state = (hermon_state_t *)hca;
    pdhdl = (hermon_pdhdl_t)pd;

    /* Allocate the memory window */
    status = hermon_mw_alloc(state, pdhdl, flags, &mwhdl);
    if (status != DDI_SUCCESS) {
        return (status);
    }
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))

    /* Return the MW handle and RKey */
    *mw_p = (ibc_mw_hdl_t)mwhdl;
    *rkey_p = mwhdl->mr_rkey;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_free_mw()
 *    Free a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
{
    hermon_state_t *state;
    hermon_mwhdl_t mwhdl;
    int status;

    /* Grab the Hermon softstate pointer and MW handle */
    state = (hermon_state_t *)hca;
    mwhdl = (hermon_mwhdl_t)mw;

    /* Free the memory window */
    status = hermon_mw_free(state, &mwhdl, HERMON_NOSLEEP);
    return (status);
}

/*
 * hermon_ci_query_mw()
 *    Return the attributes of the specified Memory Window
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
    ibt_mw_query_attr_t *mw_attr_p)
{
    hermon_mwhdl_t mwhdl;

    ASSERT(mw_attr_p != NULL);

    /* Query the memory window pointer and fill in the return values */
    mwhdl = (hermon_mwhdl_t)mw;
    mutex_enter(&mwhdl->mr_lock);
    mw_attr_p->mw_pd = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
    mw_attr_p->mw_rkey = mwhdl->mr_rkey;
    mutex_exit(&mwhdl->mr_lock);

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_register_dma_mr()
 *    Allocate a memory region that maps physical addresses.
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_mr_desc_t *mr_desc)
{
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    hermon_mrhdl_t mrhdl;
    int status;

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

    ASSERT(mr_attr != NULL);
    ASSERT(mr_p != NULL);
    ASSERT(mr_desc != NULL);

    /*
     * Validate the access flags.  Both Remote Write and Remote Atomic
     * require the Local Write flag to be set.
     */
    if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
        !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
        return (IBT_MR_ACCESS_REQ_INVALID);
    }

    /* Grab the Hermon softstate pointer and PD handle */
    state = (hermon_state_t *)hca;
    pdhdl = (hermon_pdhdl_t)pd;

    status = hermon_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
    if (status != DDI_SUCCESS) {
        return (status);
    }
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

    /* Fill in the mr_desc structure */
    mr_desc->md_vaddr = mr_attr->dmr_paddr;
    mr_desc->md_lkey = mrhdl->mr_lkey;
    /* Only set RKey if remote access was requested */
    if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
        (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
        (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
        mr_desc->md_rkey = mrhdl->mr_rkey;
    }

    /*
     * DMA registrations map physical addresses directly, so no sync is
     * ever required here.
     */
    mr_desc->md_sync_required = B_FALSE;

    /* Return the Hermon MR handle */
    *mr_p = (ibc_mr_hdl_t)mrhdl;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_attach_mcg()
 *    Attach a Queue Pair to a Multicast Group
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
    ib_lid_t lid)
{
    hermon_state_t *state;
    hermon_qphdl_t qphdl;
    int status;

    /* Grab the Hermon softstate pointer and QP handle */
    state = (hermon_state_t *)hca;
    qphdl = (hermon_qphdl_t)qp;

    /* Attach the QP to the multicast group */
    status = hermon_mcg_attach(state, qphdl, gid, lid);
    return (status);
}

/*
 * hermon_ci_detach_mcg()
 *    Detach a Queue Pair from a Multicast Group
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
    ib_lid_t lid)
{
    hermon_state_t *state;
    hermon_qphdl_t qphdl;
    int status;

    /* Grab the Hermon softstate pointer and QP handle */
    state = (hermon_state_t *)hca;
    qphdl = (hermon_qphdl_t)qp;

    /* Detach the QP from the multicast group */
    status = hermon_mcg_detach(state, qphdl, gid, lid);
    return (status);
}

/*
 * hermon_ci_post_send()
 *    Post send work requests to the send queue on the specified QP
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
    uint_t num_wr, uint_t *num_posted_p)
{
    hermon_state_t *state;
    hermon_qphdl_t qphdl;
    int status;

    ASSERT(wr_p != NULL);
    ASSERT(num_wr != 0);

    /* Grab the Hermon softstate pointer and QP handle */
    state = (hermon_state_t *)hca;
    qphdl = (hermon_qphdl_t)qp;

    /* Post the send WQEs */
    status = hermon_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
    return (status);
}

/*
 * hermon_ci_post_recv()
 *    Post receive work requests to the receive queue on the specified QP
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
    uint_t num_wr, uint_t *num_posted_p)
{
    hermon_state_t *state;
    hermon_qphdl_t qphdl;
    int status;

    ASSERT(wr_p != NULL);
    ASSERT(num_wr != 0);

    state = (hermon_state_t *)hca;
    qphdl = (hermon_qphdl_t)qp;

    /* Post the receive WQEs */
    status = hermon_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
    return (status);
}

/*
 * hermon_ci_poll_cq()
 *    Poll for a work request completion
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
    uint_t num_wc, uint_t *num_polled)
{
    hermon_state_t *state;
    hermon_cqhdl_t cqhdl;
    int status;

    ASSERT(wc_p != NULL);

    /* Check for valid num_wc field */
    if (num_wc == 0) {
        return (IBT_INVALID_PARAM);
    }

    /* Grab the Hermon softstate pointer and CQ handle */
    state = (hermon_state_t *)hca;
    cqhdl = (hermon_cqhdl_t)cq;

    /* Poll for work request completions */
    status = hermon_cq_poll(state, cqhdl, wc_p, num_wc, num_polled);
    return (status);
}

/*
 * hermon_ci_notify_cq()
 *    Enable notification events on the specified CQ
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
    ibt_cq_notify_flags_t flags)
{
    hermon_state_t *state;
    hermon_cqhdl_t cqhdl;
    int status;

    /* Grab the Hermon softstate pointer and CQ handle */
    state = (hermon_state_t *)hca;
    cqhdl = (hermon_cqhdl_t)cq_hdl;

    /* Enable the CQ notification */
    status = hermon_cq_notify(state, cqhdl, flags);
    return (status);
}

/*
 * hermon_ci_ci_data_in()
 *    Exchange CI-specific data.
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
    ibt_object_type_t object, void *ibc_object_handle, void *data_p,
    size_t data_sz)
{
    hermon_state_t *state;
    int status;

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;

    /* Get the Hermon userland mapping information */
    status = hermon_umap_ci_data_in(state, flags, object,
        ibc_object_handle, data_p, data_sz);
    return (status);
}

/*
 * hermon_ci_ci_data_out()
 *    Exchange CI-specific data.
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
    ibt_object_type_t object, void *ibc_object_handle, void *data_p,
    size_t data_sz)
{
    hermon_state_t *state;
    int status;

    /* Grab the Hermon softstate pointer */
    state = (hermon_state_t *)hca;

    /* Get the Hermon userland mapping information */
    status = hermon_umap_ci_data_out(state, flags, object,
        ibc_object_handle, data_p, data_sz);
    return (status);
}

/*
 * hermon_ci_alloc_srq()
 *    Allocate a Shared Receive Queue (SRQ)
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
    ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
    ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
{
    hermon_state_t *state;
    hermon_pdhdl_t pdhdl;
    hermon_srqhdl_t srqhdl;
    hermon_srq_info_t srqinfo;
    int status;

    state = (hermon_state_t *)hca;
    pdhdl = (hermon_pdhdl_t)pd;

    srqinfo.srqi_ibt_srqhdl = ibt_srq;
    srqinfo.srqi_pd = pdhdl;
    srqinfo.srqi_sizes = sizes;
    srqinfo.srqi_real_sizes = ret_sizes_p;
    srqinfo.srqi_srqhdl = &srqhdl;
    srqinfo.srqi_flags = flags;

    status = hermon_srq_alloc(state, &srqinfo, HERMON_NOSLEEP);
    if (status != DDI_SUCCESS) {
        return (status);
    }

    *ibc_srq_p = (ibc_srq_hdl_t)srqhdl;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_free_srq()
 *    Free a Shared Receive Queue (SRQ)
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
{
    hermon_state_t *state;
    hermon_srqhdl_t srqhdl;
    int status;

    state = (hermon_state_t *)hca;

    /* Check for valid SRQ handle pointer */
    if (srq == NULL) {
        return (IBT_SRQ_HDL_INVALID);
    }

    srqhdl = (hermon_srqhdl_t)srq;

    /* Free the SRQ */
    status = hermon_srq_free(state, &srqhdl, HERMON_NOSLEEP);
    return (status);
}

/*
 * hermon_ci_query_srq()
 *    Query properties of a Shared Receive Queue (SRQ)
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
    ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
{
    hermon_srqhdl_t srqhdl;

    srqhdl = (hermon_srqhdl_t)srq;

    mutex_enter(&srqhdl->srq_lock);
    if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
        mutex_exit(&srqhdl->srq_lock);
        return (IBT_SRQ_ERROR_STATE);
    }

    *pd_p = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
    sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz - 1;
    sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
    mutex_exit(&srqhdl->srq_lock);
    *limit_p = 0;

    return (IBT_SUCCESS);
}

/*
 * hermon_ci_modify_srq()
 *    Modify properties of a Shared Receive Queue (SRQ)
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
    ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
{
    hermon_state_t *state;
    hermon_srqhdl_t srqhdl;
    uint_t resize_supported, cur_srq_size;
    int status;

    state = (hermon_state_t *)hca;
    srqhdl = (hermon_srqhdl_t)srq;

    /*
     * Check the error state of the SRQ.  While we are holding the lock,
     * also save away the current SRQ size for later use.
     */
    mutex_enter(&srqhdl->srq_lock);
    cur_srq_size = srqhdl->srq_wq_bufsz;
    if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
        mutex_exit(&srqhdl->srq_lock);
        return (IBT_SRQ_ERROR_STATE);
    }
    mutex_exit(&srqhdl->srq_lock);

    /*
     * Setting the limit watermark is not currently supported.  This is a
     * Hermon hardware (firmware) limitation.  We return NOT_SUPPORTED here,
     * and have the limit code commented out for now.
     *
     * XXX If we enable the limit watermark support, we need to do checks
     * and set the 'srq->srq_wr_limit' here, instead of returning not
     * supported.  The 'hermon_srq_modify' operation below is for resizing
     * the SRQ only; the limit work should be done here.  If this is
     * changed to use the 'limit' field, the 'ARGSUSED' comment for this
     * function should also be removed at that time.
     */
    if (flags & IBT_SRQ_SET_LIMIT) {
        return (IBT_NOT_SUPPORTED);
    }

    /*
     * Check the SET_SIZE flag.  If not set, we simply return success here.
     * However, if it is set, we check whether resize is supported and only
     * then continue with the resize processing.
     */
    if (!(flags & IBT_SRQ_SET_SIZE)) {
        return (IBT_SUCCESS);
    }

    resize_supported = state->hs_ibtfinfo.hca_attr->hca_flags &
        IBT_HCA_RESIZE_SRQ;

    if ((flags & IBT_SRQ_SET_SIZE) && !resize_supported) {
        return (IBT_NOT_SUPPORTED);
    }

    /*
     * We do not support resizing an SRQ to be smaller than its current
     * size.  If a smaller (or equal) size is requested, then we simply
     * return success, and do nothing.
     */
    if (size <= cur_srq_size) {
        *ret_size_p = cur_srq_size;
        return (IBT_SUCCESS);
    }

    status = hermon_srq_modify(state, srqhdl, size, ret_size_p,
        HERMON_NOSLEEP);
    if (status != DDI_SUCCESS) {
        /* Set return value to current SRQ size */
        *ret_size_p = cur_srq_size;
        return (status);
    }

    return (IBT_SUCCESS);
}
/*
 * hermon_ci_post_srq()
 *    Post a Work Request to the specified Shared Receive Queue (SRQ)
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
    ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
{
	hermon_state_t	*state;
	hermon_srqhdl_t	srqhdl;
	int		status;

	state = (hermon_state_t *)hca;
	srqhdl = (hermon_srqhdl_t)srq;

	status = hermon_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
	return (status);
}

/* Address translation */

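/*
 * Per-mapping bookkeeping, handed back to the IBTF as an opaque
 * ibc_ma_hdl_t and consumed again by hermon_ci_unmap_mem_area():
 * h_ma_dmahdl is the bind handle for the data area itself;
 * h_ma_list_hdl, h_ma_list_acc_hdl and h_ma_list_cookie describe the
 * DMA-able physical buffer list used on the FRWR path; h_ma_kaddr and
 * h_ma_real_len locate the list buffer (kmem-allocated in the FMR case,
 * ddi_dma_mem_alloc'd in the FRWR case).
 */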
struct ibc_ma_s {
	int			h_ma_addr_list_len;
	void			*h_ma_addr_list;
	ddi_dma_handle_t	h_ma_dmahdl;
	ddi_dma_handle_t	h_ma_list_hdl;
	ddi_acc_handle_t	h_ma_list_acc_hdl;
	size_t			h_ma_real_len;
	caddr_t			h_ma_kaddr;
	ibt_phys_addr_t		h_ma_list_cookie;
};

static ibt_status_t
hermon_map_mem_area_fmr(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
    uint_t list_len, ibt_pmr_attr_t *pmr, ibc_ma_hdl_t *ma_hdl_p)
{
	int			status;
	ibt_status_t		ibt_status;
	ibc_ma_hdl_t		ma_hdl;
	ib_memlen_t		len;
	ddi_dma_attr_t		dma_attr;
	uint_t			cookie_cnt;
	ddi_dma_cookie_t	dmacookie;
	hermon_state_t		*state;
	uint64_t		*kaddr;
	uint64_t		addr, endaddr, pagesize;
	int			i, kmflag;
	int			(*callback)(caddr_t);

	if ((va_attrs->va_flags & IBT_VA_BUF) == 0) {
		return (IBT_NOT_SUPPORTED);	/* XXX - not yet implemented */
	}

	state = (hermon_state_t *)hca;
	hermon_dma_attr_init(state, &dma_attr);
	if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
		kmflag = KM_NOSLEEP;
		callback = DDI_DMA_DONTWAIT;
	} else {
		kmflag = KM_SLEEP;
		callback = DDI_DMA_SLEEP;
	}

	ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
	if (ma_hdl == NULL) {
		return (IBT_INSUFF_RESOURCE);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))

	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
	    callback, NULL, &ma_hdl->h_ma_dmahdl);
	if (status != DDI_SUCCESS) {
		kmem_free(ma_hdl, sizeof (*ma_hdl));
		return (IBT_INSUFF_RESOURCE);
	}
	status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
	    va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    callback, NULL, &dmacookie, &cookie_cnt);
	if (status != DDI_DMA_MAPPED) {
		/* Set ibt_status, since that is what the fail path returns */
		ibt_status = ibc_get_ci_failure(0);
		goto marea_fail3;
	}

	ma_hdl->h_ma_real_len = list_len * sizeof (ibt_phys_addr_t);
	ma_hdl->h_ma_kaddr = kmem_zalloc(ma_hdl->h_ma_real_len, kmflag);
	if (ma_hdl->h_ma_kaddr == NULL) {
		ibt_status = IBT_INSUFF_RESOURCE;
		goto marea_fail4;
	}

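	/*
	 * Walk the DMA cookies and build the MTT entry list: each cookie
	 * is truncated down to a page boundary and expanded into one
	 * 64-bit entry per page, tagged with HERMON_MTT_ENTRY_PRESENT
	 * and stored big-endian (htonll), which is the layout the
	 * adapter consumes.  A wrap of 'addr' to zero can only come from
	 * a corrupt cookie, hence the one-time warning below.
	 */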
	i = 0;
	len = 0;
	pagesize = PAGESIZE;
	kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
	while (cookie_cnt-- > 0) {
		addr = dmacookie.dmac_laddress;
		len += dmacookie.dmac_size;
		endaddr = addr + (dmacookie.dmac_size - 1);
		addr = addr & ~(pagesize - 1);
		while (addr <= endaddr) {
			if (i >= list_len) {
				ibt_status = IBT_PBL_TOO_SMALL;
				goto marea_fail5;
			}
			kaddr[i] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
			i++;
			addr += pagesize;
			if (addr == 0) {
				static int do_once = 1;
				_NOTE(SCHEME_PROTECTS_DATA("safe sharing",
				    do_once))
				if (do_once) {
					do_once = 0;
					cmn_err(CE_NOTE, "probable error in "
					    "dma_cookie address: map_mem_area");
				}
				break;
			}
		}
		if (cookie_cnt != 0)
			ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
	pmr->pmr_addr_list = (ibt_phys_addr_t *)(void *)ma_hdl->h_ma_kaddr;
	pmr->pmr_iova = va_attrs->va_vaddr;
	pmr->pmr_len = len;
	pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
	pmr->pmr_buf_sz = PAGESHIFT;	/* PRM says "Page Size", but... */
	pmr->pmr_num_buf = i;
	pmr->pmr_ma = ma_hdl;

	*ma_hdl_p = ma_hdl;
	return (IBT_SUCCESS);

marea_fail5:
	kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
marea_fail4:
	status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
marea_fail3:
	ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
	kmem_free(ma_hdl, sizeof (*ma_hdl));
	*ma_hdl_p = NULL;
	return (ibt_status);
}

/*
 * hermon_ci_map_mem_area()
 *    Context: Can be called from user or base context.
 *
 *    Creates the memory mapping suitable for a subsequent posting of an
 *    FRWR work request.  All the info about the memory area for the
 *    FRWR work request (wr member of "union ibt_reg_req_u") is filled
 *    such that the client only needs to point wr.rc.rcwr.reg_pmr to it,
 *    and then fill in the additional information only it knows.
 *
 *    Alternatively, creates the memory mapping for FMR.
 */
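/*
 * A minimal sketch of the intended FRWR client usage (the local
 * variable names here are hypothetical, not part of any interface):
 *
 *	ibt_reg_req_t	reg_req;
 *	ibc_ma_hdl_t	ma_hdl;
 *
 *	status = hermon_ci_map_mem_area(hca, &va_attrs, NULL,
 *	    list_len, &reg_req, &ma_hdl);
 *	wr.rc.rcwr.reg_pmr = &reg_req.wr;
 *
 * after which the client fills in the fields only it knows (keys,
 * access flags) before posting the fast-register work request, and
 * eventually calls hermon_ci_unmap_mem_area(hca, ma_hdl).
 */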
/* ARGSUSED */
static ibt_status_t
hermon_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
    void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
    ibc_ma_hdl_t *ma_hdl_p)
{
	ibt_status_t		ibt_status;
	int			status;
	ibc_ma_hdl_t		ma_hdl;
	ibt_wr_reg_pmr_t	*pmr;
	ib_memlen_t		len;
	ddi_dma_attr_t		dma_attr;
	ddi_dma_handle_t	khdl;
	uint_t			cookie_cnt;
	ddi_dma_cookie_t	dmacookie, kcookie;
	hermon_state_t		*state;
	uint64_t		*kaddr;
	uint64_t		addr, endaddr, pagesize, kcookie_paddr;
	int			i, j, kmflag;
	int			(*callback)(caddr_t);

	if (va_attrs->va_flags & (IBT_VA_FMR | IBT_VA_REG_FN)) {
		/* delegate FMR and Physical Register to other function */
		return (hermon_map_mem_area_fmr(hca, va_attrs, list_len,
		    &reg_req->fn_arg, ma_hdl_p));
	}

	/* FRWR */

	state = (hermon_state_t *)hca;
	if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
		return (IBT_NOT_SUPPORTED);
	hermon_dma_attr_init(state, &dma_attr);
	if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
		kmflag = KM_NOSLEEP;
		callback = DDI_DMA_DONTWAIT;
	} else {
		kmflag = KM_SLEEP;
		callback = DDI_DMA_SLEEP;
	}

	ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
	if (ma_hdl == NULL) {
		return (IBT_INSUFF_RESOURCE);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))

	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
	    callback, NULL, &ma_hdl->h_ma_dmahdl);
	if (status != DDI_SUCCESS) {
		ibt_status = IBT_INSUFF_RESOURCE;
		goto marea_fail0;
	}
	dma_attr.dma_attr_align = 64;	/* as per PRM */
	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
	    callback, NULL, &ma_hdl->h_ma_list_hdl);
	if (status != DDI_SUCCESS) {
		ibt_status = IBT_INSUFF_RESOURCE;
		goto marea_fail1;
	}
	/*
	 * Entries in the list in the last slot on each page cannot be used,
	 * so 1 extra ibt_phys_addr_t is allocated per page.  We add 1 more
	 * to deal with the possibility of a less than 1 page allocation
	 * across a page boundary.  For example, with a 4KB HERMON_PAGESIZE
	 * and 8-byte entries (512 per page), list_len == 512 allocates
	 * 512 + 1 + 1 = 514 entries.
	 */
	status = ddi_dma_mem_alloc(ma_hdl->h_ma_list_hdl, (list_len + 1 +
	    list_len / (HERMON_PAGESIZE / sizeof (ibt_phys_addr_t))) *
	    sizeof (ibt_phys_addr_t),
	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT, callback, NULL,
	    &ma_hdl->h_ma_kaddr, &ma_hdl->h_ma_real_len,
	    &ma_hdl->h_ma_list_acc_hdl);
	if (status != DDI_SUCCESS) {
		ibt_status = IBT_INSUFF_RESOURCE;
		goto marea_fail2;
	}
	status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_list_hdl, NULL,
	    ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len, DDI_DMA_RDWR |
	    DDI_DMA_CONSISTENT, callback, NULL,
	    &kcookie, &cookie_cnt);
	if (status != DDI_SUCCESS) {
		ibt_status = IBT_INSUFF_RESOURCE;
		goto marea_fail3;
	}
	if ((kcookie.dmac_laddress & 0x3f) != 0) {
		cmn_err(CE_NOTE, "64-byte alignment assumption wrong");
		ibt_status = ibc_get_ci_failure(0);
		goto marea_fail4;
	}
	ma_hdl->h_ma_list_cookie.p_laddr = kcookie.dmac_laddress;

	if (va_attrs->va_flags & IBT_VA_BUF) {
		status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
		    va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    callback, NULL, &dmacookie, &cookie_cnt);
	} else {
		status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_dmahdl,
		    va_attrs->va_as, (caddr_t)(uintptr_t)va_attrs->va_vaddr,
		    va_attrs->va_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    callback, NULL, &dmacookie, &cookie_cnt);
	}
	if (status != DDI_DMA_MAPPED) {
		ibt_status = ibc_get_ci_failure(0);
		goto marea_fail4;
	}

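	/*
	 * Build the physical buffer list.  The list buffer itself may
	 * span several physically discontiguous HERMON_PAGEs, so the
	 * last 64-bit slot of each page of the list is reserved to hold
	 * the physical address of the next page of the list; 'j' counts
	 * those link slots as the kcookie walk supplies their targets,
	 * while 'i' counts real PBL entries, built one per page of the
	 * data area exactly as in the FMR path above.
	 */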
	i = 0;	/* count the number of pbl entries */
	j = 0;	/* count the number of links to next HERMON_PAGE */
	len = 0;
	pagesize = PAGESIZE;
	kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
	kcookie.dmac_size += kcookie.dmac_laddress & HERMON_PAGEOFFSET;
	kcookie_paddr = kcookie.dmac_laddress & HERMON_PAGEMASK;
	khdl = ma_hdl->h_ma_list_hdl;
	while (cookie_cnt-- > 0) {
		addr = dmacookie.dmac_laddress;
		len += dmacookie.dmac_size;
		endaddr = addr + (dmacookie.dmac_size - 1);
		addr = addr & ~(pagesize - 1);
		while (addr <= endaddr) {
			if (i >= list_len) {
				ibt_status = IBT_PBL_TOO_SMALL;
				goto marea_fail5;
			}
			/* Deal with last entry on page. */
			if (!((uintptr_t)&kaddr[i+j+1] & HERMON_PAGEOFFSET)) {
				if (kcookie.dmac_size > HERMON_PAGESIZE) {
					kcookie_paddr += HERMON_PAGESIZE;
					kcookie.dmac_size -= HERMON_PAGESIZE;
				} else {
					ddi_dma_nextcookie(khdl, &kcookie);
					kcookie_paddr = kcookie.dmac_laddress;
				}
				kaddr[i+j] = htonll(kcookie_paddr);
				j++;
			}
			kaddr[i+j] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
			i++;
			addr += pagesize;
			if (addr == 0) {
				static int do_once = 1;
				_NOTE(SCHEME_PROTECTS_DATA("safe sharing",
				    do_once))
				if (do_once) {
					do_once = 0;
					cmn_err(CE_NOTE, "probable error in "
					    "dma_cookie address: map_mem_area");
				}
				break;
			}
		}
		if (cookie_cnt != 0)
			ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
	}

	pmr = &reg_req->wr;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
	pmr->pmr_len = len;
	pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
	pmr->pmr_buf_sz = PAGESHIFT;	/* PRM says "Page Size", but... */
	pmr->pmr_num_buf = i;
	pmr->pmr_addr_list = &ma_hdl->h_ma_list_cookie;

	*ma_hdl_p = ma_hdl;
	return (IBT_SUCCESS);

marea_fail5:
	status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
	if (status != DDI_SUCCESS)
		HERMON_WARNING(state, "failed to unbind DMA mapping");
marea_fail4:
	status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
	if (status != DDI_SUCCESS)
		HERMON_WARNING(state, "failed to unbind DMA mapping");
marea_fail3:
	ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
marea_fail2:
	ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
marea_fail1:
	ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
marea_fail0:
	kmem_free(ma_hdl, sizeof (*ma_hdl));
	*ma_hdl_p = NULL;
	return (ibt_status);
}

/*
 * hermon_ci_unmap_mem_area()
 *    Unmap the memory area
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
{
	int		status;
	hermon_state_t	*state;

	if (ma_hdl == NULL) {
		return (IBT_MA_HDL_INVALID);
	}
	state = (hermon_state_t *)hca;
	if (ma_hdl->h_ma_list_hdl != NULL) {
		status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
		if (status != DDI_SUCCESS)
			HERMON_WARNING(state, "failed to unbind DMA mapping");
		ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
		ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
	} else {
		kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
	}
	status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
	if (status != DDI_SUCCESS)
		HERMON_WARNING(state, "failed to unbind DMA mapping");
	ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
	kmem_free(ma_hdl, sizeof (*ma_hdl));
	return (IBT_SUCCESS);
}

struct ibc_mi_s {
	int			imh_len;
	ddi_dma_handle_t	imh_dmahandle[1];
};
_NOTE(SCHEME_PROTECTS_DATA("safe sharing",
    ibc_mi_s::imh_len
    ibc_mi_s::imh_dmahandle))

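/*
 * imh_dmahandle[1] above is the old pre-C99 variable-length trailing
 * array idiom: the structure is over-allocated with room for (n - 1)
 * extra ddi_dma_handle_t entries, and imh_len records how many entries
 * are actually valid.
 */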
/*
 * hermon_ci_map_mem_iov()
 *    Map the memory
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
    ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
{
	int			status;
	int			i, j, nds, max_nds;
	uint_t			len;
	ibt_status_t		ibt_status;
	ddi_dma_handle_t	dmahdl;
	ddi_dma_cookie_t	dmacookie;
	ddi_dma_attr_t		dma_attr;
	uint_t			cookie_cnt;
	ibc_mi_hdl_t		mi_hdl;
	ibt_lkey_t		rsvd_lkey;
	ibt_wr_ds_t		*sgl;
	hermon_state_t		*state;
	int			kmflag;
	int			(*callback)(caddr_t);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))

	state = (hermon_state_t *)hca;
	hermon_dma_attr_init(state, &dma_attr);

	nds = 0;
	max_nds = iov_attr->iov_wr_nds;
	if (iov_attr->iov_lso_hdr_sz)
		max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
		    0xf) >> 4;	/* 0xf is for rounding up to a multiple of 16 */
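	/*
	 * On the adjustment above: the LSO header occupies space in the
	 * WQE ahead of the gather list in 16-byte units, so
	 * ceil((hdr_sz + 4) / 16) SGL slots are reserved for it.  For
	 * example, a 54-byte header costs (54 + 4 + 15) >> 4 = 4 slots.
	 * (That the extra uint32_t covers the inline-header control
	 * word is an assumption, inferred from the WQE layout.)
	 */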
	rsvd_lkey = (iov_attr->iov_flags & IBT_IOV_ALT_LKEY) ?
	    iov_attr->iov_alt_lkey : state->hs_devlim.rsv_lkey;
	if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
		kmflag = KM_SLEEP;
		callback = DDI_DMA_SLEEP;
	} else {
		kmflag = KM_NOSLEEP;
		callback = DDI_DMA_DONTWAIT;
	}

	if (iov_attr->iov_flags & IBT_IOV_BUF) {
		mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
		if (mi_hdl == NULL)
			return (IBT_INSUFF_RESOURCE);
		sgl = wr->send.wr_sgl;
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))

		status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
		    callback, NULL, &dmahdl);
		if (status != DDI_SUCCESS) {
			kmem_free(mi_hdl, sizeof (*mi_hdl));
			return (IBT_INSUFF_RESOURCE);
		}
		status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
		    &dmacookie, &cookie_cnt);
		if (status != DDI_DMA_MAPPED) {
			ddi_dma_free_handle(&dmahdl);
			kmem_free(mi_hdl, sizeof (*mi_hdl));
			return (ibc_get_ci_failure(0));
		}
		while (cookie_cnt-- > 0) {
			/* sgl[] holds only max_nds usable entries */
			if (nds >= max_nds) {
				status = ddi_dma_unbind_handle(dmahdl);
				if (status != DDI_SUCCESS)
					HERMON_WARNING(state, "failed to "
					    "unbind DMA mapping");
				ddi_dma_free_handle(&dmahdl);
				kmem_free(mi_hdl, sizeof (*mi_hdl));
				return (IBT_SGL_TOO_SMALL);
			}
			sgl[nds].ds_va = dmacookie.dmac_laddress;
			sgl[nds].ds_key = rsvd_lkey;
			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
			nds++;
			if (cookie_cnt != 0)
				ddi_dma_nextcookie(dmahdl, &dmacookie);
		}
		wr->send.wr_nds = nds;
		mi_hdl->imh_len = 1;
		mi_hdl->imh_dmahandle[0] = dmahdl;
		*mi_hdl_p = mi_hdl;
		return (IBT_SUCCESS);
	}

	if (iov_attr->iov_flags & IBT_IOV_RECV)
		sgl = wr->recv.wr_sgl;
	else
		sgl = wr->send.wr_sgl;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))

	/* Count the non-empty iov entries; only they get DMA handles */
	len = iov_attr->iov_list_len;
	for (i = 0, j = 0; j < len; j++) {
		if (iov_attr->iov[j].iov_len == 0)
			continue;
		i++;
	}
	mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
	    (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
	if (mi_hdl == NULL)
		return (IBT_INSUFF_RESOURCE);
	mi_hdl->imh_len = i;
	for (i = 0, j = 0; j < len; j++) {
		if (iov_attr->iov[j].iov_len == 0)
			continue;
		status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
		    callback, NULL, &dmahdl);
		if (status != DDI_SUCCESS) {
			ibt_status = IBT_INSUFF_RESOURCE;
			goto fail2;
		}
		status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
		    iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
		    &dmacookie, &cookie_cnt);
		if (status != DDI_DMA_MAPPED) {
			ibt_status = ibc_get_ci_failure(0);
			goto fail1;
		}
		if (nds + cookie_cnt > max_nds) {
			ibt_status = IBT_SGL_TOO_SMALL;
			/* unbind the current handle before freeing it */
			(void) ddi_dma_unbind_handle(dmahdl);
			goto fail1;
		}
		while (cookie_cnt-- > 0) {
			sgl[nds].ds_va = dmacookie.dmac_laddress;
			sgl[nds].ds_key = rsvd_lkey;
			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
			nds++;
			if (cookie_cnt != 0)
				ddi_dma_nextcookie(dmahdl, &dmacookie);
		}
		mi_hdl->imh_dmahandle[i] = dmahdl;
		i++;
	}

	if (iov_attr->iov_flags & IBT_IOV_RECV)
		wr->recv.wr_nds = nds;
	else
		wr->send.wr_nds = nds;
	*mi_hdl_p = mi_hdl;
	return (IBT_SUCCESS);

fail1:
	ddi_dma_free_handle(&dmahdl);
fail2:
	while (--i >= 0) {
		status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
		if (status != DDI_SUCCESS)
			HERMON_WARNING(state, "failed to unbind DMA mapping");
		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
	}
	/* free with the allocated size, not the raw iov_list_len */
	kmem_free(mi_hdl, sizeof (*mi_hdl) +
	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
	*mi_hdl_p = NULL;
	return (ibt_status);
}

/*
 * hermon_ci_unmap_mem_iov()
 *    Unmap the memory
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
{
	int		status, i;
	hermon_state_t	*state;

	state = (hermon_state_t *)hca;

	for (i = mi_hdl->imh_len; --i >= 0; ) {
		status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
		if (status != DDI_SUCCESS)
			HERMON_WARNING(state, "failed to unbind DMA mapping");
		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
	}
	kmem_free(mi_hdl, sizeof (*mi_hdl) +
	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
	return (IBT_SUCCESS);
}

/*
 * hermon_ci_alloc_lkey()
 *    Allocate an empty memory region for use with FRWR.
 *    Context: Can be called from user or base context.
 */
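/*
 * For orientation (a sketch, not the only valid ordering): a client
 * typically allocates the empty lkey here, calls
 * hermon_ci_map_mem_area() to build the physical buffer list for the
 * region, and then posts a fast-register work request that binds the
 * list to this lkey.
 */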
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_lkey_flags_t flags, uint_t list_sz, ibc_mr_hdl_t *mr_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	hermon_state_t	*state;
	hermon_pdhdl_t	pdhdl;
	hermon_mrhdl_t	mrhdl;
	int		status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))

	ASSERT(mr_p != NULL);
	ASSERT(mem_desc_p != NULL);

	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
		return (IBT_NOT_SUPPORTED);

	status = hermon_mr_alloc_lkey(state, pdhdl, flags, list_sz, &mrhdl);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

	/* Fill in the mem_desc_p structure */
	mem_desc_p->pmd_iova = 0;
	mem_desc_p->pmd_phys_buf_list_sz = list_sz;
	mem_desc_p->pmd_lkey = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if (flags & IBT_KEY_REMOTE) {
		mem_desc_p->pmd_rkey = mrhdl->mr_rkey;
	}
	mem_desc_p->pmd_sync_required = B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;
	return (IBT_SUCCESS);
}

/* Physical Register Memory Region */

/*
 * hermon_ci_register_physical_mr()
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_reregister_physical_mr()
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
{
	return (IBT_NOT_SUPPORTED);
}

/* Mellanox FMR Support */

/*
 * hermon_ci_create_fmr_pool()
 *    Creates a pool of memory regions suitable for FMR registration
 *    Context: Can be called from base context only
 */
static ibt_status_t
hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
{
	hermon_state_t	*state;
	hermon_pdhdl_t	pdhdl;
	hermon_fmrhdl_t	fmrpoolhdl;
	int		status;

	state = (hermon_state_t *)hca;

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	pdhdl = (hermon_pdhdl_t)pd;

	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set.
	 */
	if (((params->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (params->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(params->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	status = hermon_create_fmr_pool(state, pdhdl, params, &fmrpoolhdl);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Set fmr_pool from hermon handle */
	*fmr_pool_p = (ibc_fmr_pool_hdl_t)fmrpoolhdl;

	return (IBT_SUCCESS);
}

/*
 * hermon_ci_destroy_fmr_pool()
 *    Free all resources associated with an FMR pool.
 *    Context: Can be called from base context only.
 */
static ibt_status_t
hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
{
	hermon_state_t	*state;
	hermon_fmrhdl_t	fmrpoolhdl;
	int		status;

	state = (hermon_state_t *)hca;
	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;

	status = hermon_destroy_fmr_pool(state, fmrpoolhdl);
	return (status);
}

/*
 * hermon_ci_flush_fmr_pool()
 *    Force a flush of the memory tables, cleaning up used FMR resources.
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
{
	hermon_state_t	*state;
	hermon_fmrhdl_t	fmrpoolhdl;
	int		status;

	state = (hermon_state_t *)hca;

	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
	status = hermon_flush_fmr_pool(state, fmrpoolhdl);
	return (status);
}

/*
 * hermon_ci_register_physical_fmr()
 *    From the 'pool' of FMR regions passed in, performs the register
 *    physical operation.
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
    void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
{
	hermon_state_t	*state;
	hermon_mrhdl_t	mrhdl;
	hermon_fmrhdl_t	fmrpoolhdl;
	int		status;

	ASSERT(mem_pattr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mem_desc_p != NULL);

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;

	status = hermon_register_physical_fmr(state, fmrpoolhdl, mem_pattr,
	    &mrhdl, mem_desc_p);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * a sync is required.
	 */
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
	mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
	if (mem_desc_p->pmd_sync_required == B_TRUE) {
		/* Fill in DMA handle for future sync operations */
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(mrhdl->mr_bindinfo))
		mrhdl->mr_bindinfo.bi_dmahdl =
		    (ddi_dma_handle_t)mem_pattr->pmr_ma;
	}

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	return (IBT_SUCCESS);
}

/*
 * hermon_ci_deregister_fmr()
 *    Moves an FMR (specified by 'mr') to the deregistered state.
 *    Context: Can be called from base context only.
 */
static ibt_status_t
hermon_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
{
	hermon_state_t	*state;
	hermon_mrhdl_t	mrhdl;
	int		status;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;

	/*
	 * Deregister the memory region, either "unmap" the FMR or deregister
	 * the normal memory region.
	 */
	status = hermon_deregister_fmr(state, mrhdl);
	return (status);
}

static int
hermon_mem_alloc(hermon_state_t *state, size_t size, ibt_mr_flags_t flags,
    caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_hdl)
{
	ddi_dma_handle_t	dma_hdl;
	ddi_dma_attr_t		dma_attr;
	ddi_acc_handle_t	acc_hdl;
	size_t			real_len;
	int			status;
	int			(*ddi_cb)(caddr_t);
	ibc_mem_alloc_hdl_t	mem_alloc_hdl;

	hermon_dma_attr_init(state, &dma_attr);

	ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* Allocate a DMA handle */
	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, ddi_cb,
	    NULL, &dma_hdl);
	if (status != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Allocate DMA memory */
	status = ddi_dma_mem_alloc(dma_hdl, size,
	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
	    NULL, kaddrp, &real_len, &acc_hdl);
	if (status != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_hdl);
		return (DDI_FAILURE);
	}

	/* Package the two handles up for the caller and return */
	mem_alloc_hdl = kmem_alloc(sizeof (**mem_hdl),
	    (flags & IBT_MR_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
	if (mem_alloc_hdl == NULL) {
		ddi_dma_mem_free(&acc_hdl);
		ddi_dma_free_handle(&dma_hdl);
		return (DDI_FAILURE);
	}
	mem_alloc_hdl->ibc_dma_hdl = dma_hdl;
	mem_alloc_hdl->ibc_acc_hdl = acc_hdl;

	*mem_hdl = mem_alloc_hdl;

	return (DDI_SUCCESS);
}

/*
 * hermon_ci_alloc_io_mem()
 *    Allocate DMA-able memory
 */
static ibt_status_t
hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size, ibt_mr_flags_t mr_flag,
    caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_alloc_hdl_p)
{
	hermon_state_t	*state;
	int		status;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the memory and handles */
	status = hermon_mem_alloc(state, size, mr_flag, kaddrp,
	    mem_alloc_hdl_p);

	if (status != DDI_SUCCESS) {
		*mem_alloc_hdl_p = NULL;
		*kaddrp = NULL;
		return (status);
	}

	return (IBT_SUCCESS);
}

/*
 * hermon_ci_free_io_mem()
 *    Unbind the handle and free the memory
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
{
	/* Unbind the handles and free the memory */
	(void) ddi_dma_unbind_handle(mem_alloc_hdl->ibc_dma_hdl);
	ddi_dma_mem_free(&mem_alloc_hdl->ibc_acc_hdl);
	ddi_dma_free_handle(&mem_alloc_hdl->ibc_dma_hdl);
	kmem_free(mem_alloc_hdl, sizeof (*mem_alloc_hdl));

	return (IBT_SUCCESS);
}