/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/ib/ibtl/impl/ibtl.h>

/*
 * These routines implement all of the Memory Region verbs and the alloc/
 * query/free Memory Window verbs at the TI interface.
 */

static char ibtl_mem[] = "ibtl_mem";

/*
 * Function:
 *	ibt_register_mr()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	pd        - Protection Domain Handle.
 *	mem_attr  - Requested memory region attributes.
 * Output:
 *	mr_hdl_p  - The returned IBT memory region handle.
 *	mem_desc  - Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 * Description:
 *	Prepares a virtually addressed memory region for use by an HCA.  A
 *	description of the registered memory suitable for use in Work Requests
 *	(WRs) is returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_register_mr(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_mr_attr_t *mem_attr,
    ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
	ib_vaddr_t	vaddr;
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_mr(%p, %p, %p)",
	    hca_hdl, pd, mem_attr);

	vaddr = mem_attr->mr_vaddr;

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_mr(
	    IBTL_HCA2CIHCA(hca_hdl), pd, mem_attr, IBTL_HCA2CLNT(hca_hdl),
	    mr_hdl_p, mem_desc);
	if (status == IBT_SUCCESS) {
		mem_desc->md_vaddr = vaddr;
		atomic_inc_32(&hca_hdl->ha_mr_cnt);
	}
	return (status);
}

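/*
 * Example (illustrative sketch only, not part of this file): a client that
 * has already opened the HCA and allocated a PD might register a kernel
 * virtual buffer as sketched below.  The local variable names and the
 * mr_len/mr_as/mr_flags field usage are assumptions for illustration.
 *
 *	ibt_mr_attr_t	mem_attr;
 *	ibt_mr_hdl_t	mr_hdl;
 *	ibt_mr_desc_t	mem_desc;
 *
 *	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)buf;
 *	mem_attr.mr_len   = buflen;
 *	mem_attr.mr_as    = NULL;
 *	mem_attr.mr_flags = IBT_MR_SLEEP;
 *	if (ibt_register_mr(hca_hdl, pd, &mem_attr, &mr_hdl, &mem_desc) ==
 *	    IBT_SUCCESS) {
 *		... use mem_desc in WRs; later ibt_deregister_mr(hca_hdl, mr_hdl) ...
 *	}
 */
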
/*
 * Function:
 *	ibt_register_buf()
 * Input:
 *	hca_hdl		HCA Handle.
 *	pd		Protection Domain Handle.
 *	mem_bpattr	Memory Registration attributes (IOVA and flags).
 *	bp		A pointer to a buf(9S) struct.
 * Output:
 *	mr_hdl_p	The returned IBT memory region handle.
 *	mem_desc	Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_INSUFF_RESOURCE
 * Description:
 *	Prepares a memory region described by a buf(9S) struct for use by an
 *	HCA.  A description of the registered memory suitable for use in
 *	Work Requests (WRs) is returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_register_buf(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_smr_attr_t *mem_bpattr, struct buf *bp, ibt_mr_hdl_t *mr_hdl_p,
    ibt_mr_desc_t *mem_desc)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_buf(%p, %p, %p, %p)",
	    hca_hdl, pd, mem_bpattr, bp);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_buf(
	    IBTL_HCA2CIHCA(hca_hdl), pd, mem_bpattr, bp, IBTL_HCA2CLNT(hca_hdl),
	    mr_hdl_p, mem_desc);
	if (status == IBT_SUCCESS) {
		atomic_inc_32(&hca_hdl->ha_mr_cnt);
	}
	return (status);
}

/*
 * Function:
 *	ibt_query_mr()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mr_hdl    - The IBT Memory Region handle.
 * Output:
 *	attr      - A pointer to a Memory Region attributes structure.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 * Description:
 *	Retrieves information about a specified memory region.
 */
ibt_status_t
ibt_query_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_mr_query_attr_t *attr)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_query_mr(%p, %p)", hca_hdl, mr_hdl);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_query_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, attr));
}

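/*
 * Example (illustrative sketch only): querying a region after registration.
 * The attribute fields a caller inspects depend on ibt_mr_query_attr_t and
 * are not shown here.
 *
 *	ibt_mr_query_attr_t	attr;
 *
 *	if (ibt_query_mr(hca_hdl, mr_hdl, &attr) == IBT_SUCCESS) {
 *		... inspect the returned attributes ...
 *	}
 */
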
/*
 * Function:
 *	ibt_deregister_mr()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mr_hdl    - The IBT Memory Region handle.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 * Description:
 *	De-registers the registered memory region.  Removes the memory region
 *	from the HCA translation table and frees all resources associated
 *	with the memory region.
 */
ibt_status_t
ibt_deregister_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_deregister_mr(%p, %p)", hca_hdl, mr_hdl);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_deregister_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl);
	if (status == IBT_SUCCESS) {
		atomic_dec_32(&hca_hdl->ha_mr_cnt);
	}
	return (status);
}

/*
 * Function:
 *	ibt_reregister_mr()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mr_hdl    - The IBT Memory Region handle.
 *	pd        - Optional Protection Domain Handle.
 *	mem_attr  - Requested memory region attributes.
 * Output:
 *	mr_hdl_p  - The reregistered IBT memory region handle.
 *	mem_desc  - Returned memory descriptor for the new memory region.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_INSUFF_RESOURCE
 * Description:
 *	Modifies the attributes of an existing memory region.
 */
ibt_status_t
ibt_reregister_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl, ibt_pd_hdl_t pd,
    ibt_mr_attr_t *mem_attr, ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
	ibt_status_t	status;
	ib_vaddr_t	vaddr = mem_attr->mr_vaddr;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_mr(%p, %p, %p, %p)",
	    hca_hdl, mr_hdl, pd, mem_attr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_attr,
	    IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);

	if (status == IBT_SUCCESS)
		mem_desc->md_vaddr = vaddr;
	else if (!(status == IBT_MR_IN_USE || status == IBT_HCA_HDL_INVALID ||
	    status == IBT_MR_HDL_INVALID)) {

		IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_mr: "
		    "Re-registration Failed: %d", status);

		/* we lost one memory region resource */
		atomic_dec_32(&hca_hdl->ha_mr_cnt);
	}
	return (status);
}

/*
 * Function:
 *	ibt_reregister_buf()
 * Input:
 *	hca_hdl		HCA Handle.
 *	mr_hdl		The IBT Memory Region handle.
 *	pd		Optional Protection Domain Handle.
 *	mem_bpattr	Memory Registration attributes (IOVA and flags).
 *	bp		A pointer to a buf(9S) struct.
 * Output:
 *	mr_hdl_p	The reregistered IBT memory region handle.
 *	mem_desc	Returned memory descriptor for the new memory region.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_INSUFF_RESOURCE
 * Description:
 *	Modifies the attributes of an existing memory region as described by
 *	a buf(9S) struct for use by an HCA.  A description of the registered
 *	memory suitable for use in Work Requests (WRs) is returned in the
 *	ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_reregister_buf(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_smr_attr_t *mem_bpattr, struct buf *bp,
    ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_buf(%p, %p, %p, %p, %p)",
	    hca_hdl, mr_hdl, pd, mem_bpattr, bp);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_buf(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_bpattr, bp,
	    IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);

	if (!(status == IBT_SUCCESS || status == IBT_MR_IN_USE ||
	    status == IBT_HCA_HDL_INVALID || status == IBT_MR_HDL_INVALID)) {

		IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_buf: "
		    "Re-registration Mem Failed: %d", status);

		/* we lost one memory region resource */
		atomic_dec_32(&hca_hdl->ha_mr_cnt);
	}
	return (status);
}

/*
 * Function:
 *	ibt_register_shared_mr()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mr_hdl    - The IBT Memory Region handle.
 *	pd        - Protection Domain Handle.
 *	mem_sattr - Requested memory region shared attributes.
 * Output:
 *	mr_hdl_p  - The returned IBT memory region handle.
 *	mem_desc  - Returned memory descriptor for the new memory region.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_INSUFF_RESOURCE
 *	IBT_CHAN_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 * Description:
 *	Given an existing memory region, a new memory region associated with
 *	the same physical locations is created.
 */
ibt_status_t
ibt_register_shared_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_smr_attr_t *mem_sattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_mr_desc_t *mem_desc)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_shared_mr(%p, %p, %p, %p)",
	    hca_hdl, mr_hdl, pd, mem_sattr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_shared_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_sattr,
	    IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);
	if (status == IBT_SUCCESS) {
		atomic_inc_32(&hca_hdl->ha_mr_cnt);
	}
	return (status);
}

/*
 * Function:
 *	ibt_sync_mr()
 * Input:
 *	hca_hdl      - HCA Handle.
 *	mr_segments  - A pointer to an array of ibt_mr_sync_t that describes
 *	               the memory regions to sync.
 *	num_segments - The length of the mr_segments array.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 * Description:
 *	Makes memory changes visible to incoming RDMA reads, or makes the
 *	effects of incoming RDMA writes visible to the consumer.
 */
ibt_status_t
ibt_sync_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_sync_t *mr_segments,
    size_t num_segments)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_sync_mr(%p, %p, %d)", hca_hdl,
	    mr_segments, num_segments);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_sync_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_segments, num_segments));
}

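/*
 * Example (illustrative sketch only): making a region consistent before an
 * incoming RDMA read.  The ms_* field names and the IBT_SYNC_READ flag shown
 * here are assumptions for illustration.
 *
 *	ibt_mr_sync_t	seg;
 *
 *	seg.ms_handle = mr_hdl;
 *	seg.ms_vaddr  = vaddr;
 *	seg.ms_len    = len;
 *	seg.ms_flags  = IBT_SYNC_READ;
 *	(void) ibt_sync_mr(hca_hdl, &seg, 1);
 */
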
/*
 * Function:
 *	ibt_alloc_mw()
 * Input:
 *	hca_hdl  - HCA Handle.
 *	pd       - Protection Domain Handle.
 *	flags    - Memory Window alloc flags.
 * Output:
 *	mw_hdl_p - The returned IBT Memory Window handle.
 *	rkey     - The IBT R_Key handle.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_INSUFF_RESOURCE
 *	IBT_CHAN_HDL_INVALID
 * Description:
 *	Allocates a memory window from the HCA.
 */
ibt_status_t
ibt_alloc_mw(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibt_mw_hdl_t *mw_hdl_p, ibt_rkey_t *rkey)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_alloc_mw(%p, %p, 0x%x)",
	    hca_hdl, pd, flags);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_mw(
	    IBTL_HCA2CIHCA(hca_hdl), pd, flags, mw_hdl_p, rkey);

	/*
	 * XXX - We should be able to allocate state and have an IBTF Memory
	 * Window Handle.  Memory Windows are meant to be rebound on the fly
	 * (using a post) to make them fast.  It is expected that memory
	 * window allocation will be done in a relatively static manner, but
	 * we don't have a good reason to keep local MW state at this point,
	 * so we won't.
	 */
	if (status == IBT_SUCCESS) {
		atomic_inc_32(&hca_hdl->ha_mw_cnt);
	}
	return (status);
}

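/*
 * Example (illustrative sketch only): allocating and later freeing a memory
 * window.  The IBT_MW_SLEEP flag shown here is an assumption; the window is
 * bound to a registered region with a bind WR before its R_Key is handed to
 * a remote peer.
 *
 *	ibt_mw_hdl_t	mw_hdl;
 *	ibt_rkey_t	rkey;
 *
 *	if (ibt_alloc_mw(hca_hdl, pd, IBT_MW_SLEEP, &mw_hdl, &rkey) ==
 *	    IBT_SUCCESS) {
 *		... bind and use the window, then ibt_free_mw(hca_hdl, mw_hdl) ...
 *	}
 */
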
/*
 * Function:
 *	ibt_query_mw()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mw_hdl    - The IBT Memory Window handle.
 * Output:
 *	pd        - Protection Domain Handle.
 *	rkey      - The IBT R_Key handle.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 * Description:
 *	Retrieves information about a specified memory window.
 */
ibt_status_t
ibt_query_mw(ibt_hca_hdl_t hca_hdl, ibt_mw_hdl_t mw_hdl,
    ibt_mw_query_attr_t *mw_attr_p)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_query_mw(%p, %p)", hca_hdl, mw_hdl);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_query_mw(
	    IBTL_HCA2CIHCA(hca_hdl), mw_hdl, mw_attr_p));
}

/*
 * Function:
 *	ibt_free_mw()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mw_hdl    - The IBT Memory Window handle.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 * Description:
 *	De-allocates the Memory Window.
 */
ibt_status_t
ibt_free_mw(ibt_hca_hdl_t hca_hdl, ibt_mw_hdl_t mw_hdl)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_free_mw(%p, %p)", hca_hdl, mw_hdl);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_mw(
	    IBTL_HCA2CIHCA(hca_hdl), mw_hdl);

	if (status == IBT_SUCCESS) {
		atomic_dec_32(&hca_hdl->ha_mw_cnt);
	}
	return (status);
}

/*
 * Function:
 *	ibt_map_mem_area()
 * Input:
 *	hca_hdl		HCA Handle.
 *	va_attrs	A pointer to an ibt_va_attr_t that describes the
 *			VA to be translated.
 *	paddr_list_len	The number of entries in the 'paddr_list_p' array.
 * Output:
 *	paddr_list_p	Array of ibt_phys_buf_t (allocated by the caller),
 *			in which the physical buffers that map the virtual
 *			buffer are returned.
 *	num_paddr_p	The actual number of ibt_phys_buf_t that were
 *			returned in the 'paddr_list_p' array.
 *	ma_hdl_p	Memory Area Handle.
 * Description:
 *	Translates a kernel virtual address range into HCA physical addresses.
 *	A set of physical addresses that can be used with "Reserved L_Key",
 *	register physical, and "Fast Registration Work Request" operations is
 *	returned.
 */
ibt_status_t
ibt_map_mem_area(ibt_hca_hdl_t hca_hdl, ibt_va_attr_t *va_attrs,
    uint_t paddr_list_len, ibt_reg_req_t *reg_req, ibt_ma_hdl_t *ma_hdl_p)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_map_mem_area(%p, %p, %d)",
	    hca_hdl, va_attrs, paddr_list_len);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_map_mem_area(
	    IBTL_HCA2CIHCA(hca_hdl), va_attrs,
	    NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
	    paddr_list_len, reg_req, ma_hdl_p);
	/* Not doing reference counting, which adversely affects performance */
	return (status);
}

/*
 * Function:
 *	ibt_unmap_mem_area()
 * Input:
 *	hca_hdl		HCA Handle.
 *	ma_hdl		Memory Area Handle.
 * Output:
 *	none.
 * Description:
 *	Unpins the physical pages that were pinned by an ibt_map_mem_area()
 *	call.
 */
ibt_status_t
ibt_unmap_mem_area(ibt_hca_hdl_t hca_hdl, ibt_ma_hdl_t ma_hdl)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_unmap_mem_area(%p, %p)",
	    hca_hdl, ma_hdl);

	status = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_unmap_mem_area(
	    IBTL_HCA2CIHCA(hca_hdl), ma_hdl));
	/* Not doing reference counting, which adversely affects performance */
	return (status);
}

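/*
 * Example (illustrative sketch only): the usual map/unmap pairing.  The
 * ibt_va_attr_t field setup is omitted because its layout is not shown in
 * this file; the variable names are assumptions.
 *
 *	ibt_va_attr_t	va_attrs;	set up to describe the VA range
 *	ibt_reg_req_t	reg_req;
 *	ibt_ma_hdl_t	ma_hdl;
 *
 *	if (ibt_map_mem_area(hca_hdl, &va_attrs, paddr_list_len, &reg_req,
 *	    &ma_hdl) == IBT_SUCCESS) {
 *		... use the returned physical buffer descriptors ...
 *		(void) ibt_unmap_mem_area(hca_hdl, ma_hdl);
 *	}
 */
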
/*
 * Function:
 *	ibt_map_mem_iov()
 * Input:
 *	hca_hdl		HCA Handle.
 *	iov_attr	A pointer to an ibt_iov_attr_t that describes the
 *			virtual ranges to be translated.
 * Output:
 *	wr		A pointer to the work request where the output
 *			sgl (reserved_lkey, size, paddr) will be written.
 *	mi_hdl_p	Memory IOV Handle.
 * Description:
 *	Translates an array of virtual address ranges into HCA physical
 *	addresses, sizes, and reserved_lkey.
 */
ibt_status_t
ibt_map_mem_iov(ibt_hca_hdl_t hca_hdl, ibt_iov_attr_t *iov_attr,
    ibt_all_wr_t *wr, ibt_mi_hdl_t *mi_hdl_p)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_map_mem_iov(%p, %p, %p)",
	    hca_hdl, iov_attr, wr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_map_mem_iov(
	    IBTL_HCA2CIHCA(hca_hdl), iov_attr, wr, mi_hdl_p);
	/* Not doing reference counting, which adversely affects performance */
	return (status);
}

/*
 * Function:
 *	ibt_unmap_mem_iov()
 * Input:
 *	hca_hdl		HCA Handle.
 *	mi_hdl		Memory IOV Handle.
 * Output:
 *	none.
 * Description:
 *	Unpins the physical pages that were pinned by an ibt_map_mem_iov()
 *	call.
 */
ibt_status_t
ibt_unmap_mem_iov(ibt_hca_hdl_t hca_hdl, ibt_mi_hdl_t mi_hdl)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_unmap_mem_iov(%p, %p)",
	    hca_hdl, mi_hdl);

	status = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_unmap_mem_iov(
	    IBTL_HCA2CIHCA(hca_hdl), mi_hdl));
	/* Not doing reference counting, which adversely affects performance */
	return (status);
}

/*
 * Function:
 *	ibt_alloc_io_mem()
 * Input:
 *	hca_hdl		HCA Handle.
 *	size		Number of bytes to allocate.
 *	mr_flag		Possible values: IBT_MR_SLEEP, IBT_MR_NONCOHERENT.
 * Output:
 *	kaddrp		Contains a pointer to the virtual address of the
 *			memory allocated by this call.  (Set to NULL if
 *			memory allocation fails.)
 *	mem_alloc_hdl	Memory access handle returned by ibt_alloc_io_mem().
 * Returns:
 *	IBT_SUCCESS
 *	IBT_INSUFF_RESOURCE
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 * Description:
 *	Wrapper for ddi_dma_mem_alloc().
 */
ibt_status_t
ibt_alloc_io_mem(ibt_hca_hdl_t hca_hdl, size_t size, ibt_mr_flags_t mr_flag,
    caddr_t *kaddrp, ibt_mem_alloc_hdl_t *mem_alloc_hdl)
{
	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_io_mem(
	    IBTL_HCA2CIHCA(hca_hdl), size, mr_flag, kaddrp,
	    (ibc_mem_alloc_hdl_t *)mem_alloc_hdl));
}

/*
 * Function:
 *	ibt_free_io_mem()
 * Input:
 *	hca_hdl		HCA Handle.
 *	mem_alloc_hdl	Memory access handle returned by ibt_alloc_io_mem().
 * Output:
 *	none.
 * Description:
 *	Wrapper for ddi_dma_mem_free().
 */
ibt_status_t
ibt_free_io_mem(ibt_hca_hdl_t hca_hdl, ibt_mem_alloc_hdl_t mem_alloc_hdl)
{
	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_io_mem(
	    IBTL_HCA2CIHCA(hca_hdl), (ibc_mem_alloc_hdl_t)mem_alloc_hdl));
}

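/*
 * Example (illustrative sketch only): allocating DMA-able memory, using it,
 * and freeing it.  Variable names are assumptions for illustration.
 *
 *	caddr_t			kaddr;
 *	ibt_mem_alloc_hdl_t	mem_hdl;
 *
 *	if (ibt_alloc_io_mem(hca_hdl, size, IBT_MR_SLEEP, &kaddr,
 *	    &mem_hdl) == IBT_SUCCESS) {
 *		... register and use the buffer at kaddr ...
 *		(void) ibt_free_io_mem(hca_hdl, mem_hdl);
 *	}
 */
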
/*
 * Function:
 *	ibt_alloc_lkey()
 * Input:
 *	hca_hdl			HCA Handle.
 *	pd			A protection domain handle.
 *	flags			Access control.
 *	phys_buf_list_sz	Requested size of the Physical Buffer List
 *				(PBL) resources to be allocated.
 * Output:
 *	mr_hdl_p		The returned IBT memory region handle.
 *	mem_desc_p		Returned memory descriptor.
 * Description:
 *	Allocates physical buffer list resources for use in memory
 *	registrations.
 */
ibt_status_t
ibt_alloc_lkey(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_lkey_flags_t flags,
    uint_t phys_buf_list_sz, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_alloc_lkey(%p, %p, 0x%X, %d)",
	    hca_hdl, pd, flags, phys_buf_list_sz);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_lkey(
	    IBTL_HCA2CIHCA(hca_hdl), pd, flags, phys_buf_list_sz, mr_hdl_p,
	    mem_desc_p);
	if (status == IBT_SUCCESS) {
		atomic_inc_32(&hca_hdl->ha_mr_cnt);
	}
	return (status);
}

/*
 * Function:
 *	ibt_register_phys_mr()
 * Input:
 *	hca_hdl		HCA Handle.
 *	pd		A protection domain handle.
 *	mem_pattr	Requested memory region physical attributes.
 * Output:
 *	mr_hdl_p	The returned IBT memory region handle.
 *	mem_desc_p	Returned memory descriptor.
 * Description:
 *	Prepares a physically addressed memory region for use by an HCA.
 */
ibt_status_t
ibt_register_phys_mr(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_phys_mr(%p, %p, %p)",
	    hca_hdl, pd, mem_pattr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_physical_mr(
	    IBTL_HCA2CIHCA(hca_hdl), pd, mem_pattr,
	    NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
	    mr_hdl_p, mem_desc_p);
	if (status == IBT_SUCCESS) {
		atomic_inc_32(&hca_hdl->ha_mr_cnt);
	}
	return (status);
}

/*
 * Function:
 *	ibt_reregister_phys_mr()
 * Input:
 *	hca_hdl		HCA Handle.
 *	mr_hdl		The IBT memory region handle.
 *	pd		A protection domain handle.
 *	mem_pattr	Requested memory region physical attributes.
 * Output:
 *	mr_hdl_p	The returned IBT memory region handle.
 *	mem_desc_p	Returned memory descriptor.
 * Description:
 *	Prepares a physically addressed memory region for use by an HCA.
 */
ibt_status_t
ibt_reregister_phys_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_phys_mr(%p, %p, %p, %p)",
	    hca_hdl, mr_hdl, pd, mem_pattr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_physical_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_pattr,
	    NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
	    mr_hdl_p, mem_desc_p);

	if (!(status == IBT_SUCCESS || status == IBT_MR_IN_USE ||
	    status == IBT_HCA_HDL_INVALID || status == IBT_MR_HDL_INVALID)) {
		IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_phys_mr: "
		    "Re-registration Mem Failed: %d", status);

		/* we lost one memory region resource */
		atomic_dec_32(&hca_hdl->ha_mr_cnt);
	}
	return (status);
}

/*
 * Fast Memory Registration (FMR).
 *
 * ibt_create_fmr_pool
 *	ibt_create_fmr_pool() verifies that the HCA supports FMR and allocates
 *	and initializes an "FMR pool".  This pool contains state specific to
 *	this registration, including the watermark setting that determines
 *	when to sync, and the total number of FMR regions available within
 *	this pool.
 */
ibt_status_t
ibt_create_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_fmr_pool_attr_t *fmr_params, ibt_fmr_pool_hdl_t *fmr_pool_p)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_create_fmr_pool(%p, %p, %p)",
	    hca_hdl, pd, fmr_params);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_create_fmr_pool(
	    IBTL_HCA2CIHCA(hca_hdl), pd, fmr_params, fmr_pool_p);
	if (status != IBT_SUCCESS) {
		return (status);
	}

	/* Update the FMR resource count */
	atomic_inc_32(&hca_hdl->ha_fmr_pool_cnt);

	return (status);
}

/*
 * ibt_destroy_fmr_pool
 *	ibt_destroy_fmr_pool() deallocates all of the FMR regions in a
 *	specific pool.  All state and information regarding the pool are
 *	destroyed and returned as free space once again.  No more use of FMR
 *	regions in this pool is possible without a subsequent call to
 *	ibt_create_fmr_pool().
 */
ibt_status_t
ibt_destroy_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_destroy_fmr_pool(%p, %p)",
	    hca_hdl, fmr_pool);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_destroy_fmr_pool(
	    IBTL_HCA2CIHCA(hca_hdl), fmr_pool);
	if (status != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtl_mem, "ibt_destroy_fmr_pool: "
		    "CI FMR Pool destroy failed (%d)", status);
		return (status);
	}

	atomic_dec_32(&hca_hdl->ha_fmr_pool_cnt);

	return (status);
}

/*
 * ibt_flush_fmr_pool
 *	ibt_flush_fmr_pool() forces a flush to occur.  At the client's
 *	request, any unmapped FMR regions (see ibt_deregister_fmr()) are
 *	returned to a free state.  This function allows for an asynchronous
 *	cleanup of formerly used FMR regions.  A sync operation is also
 *	performed internally by the HCA driver when the 'watermark' setting
 *	for the number of free FMR regions left in the pool is reached.
 */
ibt_status_t
ibt_flush_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_flush_fmr_pool(%p, %p)",
	    hca_hdl, fmr_pool);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_flush_fmr_pool(
	    IBTL_HCA2CIHCA(hca_hdl), fmr_pool));
}

/*
 * ibt_register_physical_fmr
 *	ibt_register_physical_fmr() assigns a "free" entry from the FMR Pool.
 *	It first consults the "FMR cache" to see if this is a duplicate memory
 *	registration to something already in use.  If not, a free entry in the
 *	pool is marked used.
 */
ibt_status_t
ibt_register_physical_fmr(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool,
    ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_physical_fmr(%p, %p, %p, %p)",
	    hca_hdl, fmr_pool, mem_pattr, mem_desc_p);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_physical_fmr(
	    IBTL_HCA2CIHCA(hca_hdl), fmr_pool, mem_pattr,
	    NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
	    mr_hdl_p, mem_desc_p));
}

/*
 * ibt_deregister_fmr
 *	ibt_deregister_fmr() unmaps the resources reserved from the FMR pool
 *	by ibt_register_physical_fmr() and marks the region as free in the
 *	FMR Pool.
 */
ibt_status_t
ibt_deregister_fmr(ibt_hca_hdl_t hca, ibt_mr_hdl_t mr_hdl)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_deregister_fmr(%p, %p)", hca, mr_hdl);

	return (IBTL_HCA2CIHCAOPS_P(hca)->ibc_deregister_fmr(
	    IBTL_HCA2CIHCA(hca), mr_hdl));
}

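/*
 * Example (illustrative sketch only): the FMR pool lifecycle built from the
 * routines above.  The ibt_fmr_pool_attr_t and ibt_pmr_attr_t setup is
 * omitted; variable names are assumptions for illustration.
 *
 *	ibt_fmr_pool_hdl_t	fmr_pool;
 *	ibt_mr_hdl_t		mr_hdl;
 *	ibt_pmr_desc_t		mem_desc;
 *
 *	(void) ibt_create_fmr_pool(hca_hdl, pd, &fmr_params, &fmr_pool);
 *	(void) ibt_register_physical_fmr(hca_hdl, fmr_pool, &mem_pattr,
 *	    &mr_hdl, &mem_desc);
 *	... post WRs that reference mem_desc ...
 *	(void) ibt_deregister_fmr(hca_hdl, mr_hdl);
 *	(void) ibt_flush_fmr_pool(hca_hdl, fmr_pool);
 *	(void) ibt_destroy_fmr_pool(hca_hdl, fmr_pool);
 */
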
/*
 * ibt_register_dma_mr
 */
ibt_status_t
ibt_register_dma_mr(ibt_hca_hdl_t hca, ibt_pd_hdl_t pd,
    ibt_dmr_attr_t *mem_attr, ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_dma_mr(%p, %p, %p)",
	    hca, pd, mem_attr);

	status = IBTL_HCA2CIHCAOPS_P(hca)->ibc_register_dma_mr(
	    IBTL_HCA2CIHCA(hca), pd, mem_attr, NULL, mr_hdl_p, mem_desc);
	if (status == IBT_SUCCESS) {
		atomic_inc_32(&hca->ha_mr_cnt);
	}
	return (status);
}