/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

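/*
 * Illustrative sketch (not part of the original file): the two message
 * helpers above are typically used when logging completions or async
 * events.  "my_cq" and "my_handle_event" are hypothetical names.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(my_cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("WR %llu failed: %s\n",
 *			       (unsigned long long)wc.wr_id,
 *			       ib_wc_status_msg(wc.status));
 *	}
 *
 *	static void my_handle_event(struct ib_event *ev, void *ctx)
 *	{
 *		pr_info("async event: %s\n", ib_event_msg(ev->event));
 *	}
 */
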
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

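/*
 * Illustrative sketch (not part of the original file): converting between
 * the IB rate encoding, the 2.5 Gb/s multiplier, and Mb/s.  The values
 * shown follow the tables above.
 *
 *	enum ib_rate rate = mult_to_ib_rate(16);	// IB_RATE_40_GBPS
 *	int mult	  = ib_rate_to_mult(rate);	// 16
 *	int mbps	  = ib_rate_to_mbps(rate);	// 40000
 */
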
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

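/*
 * Illustrative sketch (not part of the original file): a ULP deciding per
 * port whether it is dealing with an InfiniBand link or an Ethernet link
 * (RoCE/iWARP/usNIC).  "device" is assumed to be a valid struct ib_device
 * whose ports are numbered from 1.
 *
 *	u8 port;
 *
 *	for (port = 1; port <= device->phys_port_cnt; ++port) {
 *		if (rdma_port_get_link_layer(device, port) ==
 *		    IB_LINK_LAYER_ETHERNET)
 *			pr_info("port %d runs over Ethernet\n", port);
 *	}
 */
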
/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

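/*
 * Illustrative sketch (not part of the original file): typical PD lifetime
 * in a ULP.  The PD must be unused (usecnt == 0) before it can be freed.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	... create QPs, MRs and AHs against pd ...
 *	ib_dealloc_pd(pd);	// fails with -EBUSY while objects remain
 */
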
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (wc->wc_flags & IB_WC_WITH_SMAC &&
		    wc->wc_flags & IB_WC_WITH_VLAN) {
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
			ah_attr->vlan_id = wc->vlan_id;
		} else {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
					ah_attr->dmac, &ah_attr->vlan_id);
			if (ret)
				return ret;
		}
	} else {
		ah_attr->vlan_id = 0xffff;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

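/*
 * Illustrative sketch (not part of the original file): a UD service
 * replying to a sender using an address handle built from the received
 * completion and GRH.  "recv_buf" and the receive layout are hypothetical;
 * on UD QPs the GRH occupies the first 40 bytes of the receive buffer when
 * IB_WC_GRH is set in the completion flags.
 *
 *	struct ib_grh *grh = (wc->wc_flags & IB_WC_GRH) ? recv_buf : NULL;
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply send WR with wr.ud.ah = ah, then ...
 *	ib_destroy_ah(ah);
 */
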
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

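/*
 * Illustrative sketch (not part of the original file): creating a basic
 * (non-XRC) SRQ.  The limit-reached event is delivered through the handler
 * installed here; "my_srq_event" and "my_ctx" are hypothetical, and the
 * capacity numbers are arbitrary.
 *
 *	struct ib_srq_init_attr init = {
 *		.event_handler = my_srq_event,
 *		.srq_context   = my_ctx,
 *		.attr = {
 *			.max_wr	   = 256,
 *			.max_sge   = 1,
 *			.srq_limit = 16,
 *		},
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init);
 */
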
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device     = device;
		qp->real_qp    = qp;
		qp->uobject    = NULL;
		qp->qp_type    = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

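/*
 * Illustrative sketch (not part of the original file): creating an RC QP
 * on a previously allocated PD with separate send and receive CQs.
 * "my_qp_event", "send_cq", "recv_cq" and the capacity numbers are
 * hypothetical.
 *
 *	struct ib_qp_init_attr init = {
 *		.event_handler = my_qp_event,
 *		.send_cq       = send_cq,
 *		.recv_cq       = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 128,
 *			.max_recv_wr  = 128,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init);
 */
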
static const struct {
	enum ib_qp_attr_mask req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask req_param_add_eth[IB_QPT_MAX];
	enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
	enum ib_qp_attr_mask opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
		[IB_QPT_RAW_PACKET] = IB_QP_PORT,
		[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
		[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
		[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
		[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
		[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
		[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_UC]  = (IB_QP_AV |
		[IB_QPT_RC]  = (IB_QP_AV |
				IB_QP_MAX_DEST_RD_ATOMIC |
				IB_QP_MIN_RNR_TIMER),
		[IB_QPT_XRC_INI] = (IB_QP_AV |
		[IB_QPT_XRC_TGT] = (IB_QP_AV |
				IB_QP_MAX_DEST_RD_ATOMIC |
				IB_QP_MIN_RNR_TIMER),
		.req_param_add_eth = {
			[IB_QPT_RC]  = (IB_QP_SMAC),
			[IB_QPT_UC]  = (IB_QP_SMAC),
			[IB_QPT_XRC_INI] = (IB_QP_SMAC),
			[IB_QPT_XRC_TGT] = (IB_QP_SMAC)
		[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
		[IB_QPT_UC]  = (IB_QP_ALT_PATH |
		[IB_QPT_RC]  = (IB_QP_ALT_PATH |
		[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
		[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
		.opt_param_add_eth = {
			[IB_QPT_RC]  = (IB_QP_ALT_SMAC |
			[IB_QPT_UC]  = (IB_QP_ALT_SMAC |
			[IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC |
			[IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPT_UD]  = IB_QP_SQ_PSN,
		[IB_QPT_UC]  = IB_QP_SQ_PSN,
		[IB_QPT_RC]  = (IB_QP_TIMEOUT |
				IB_QP_MAX_QP_RD_ATOMIC),
		[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
				IB_QP_MAX_QP_RD_ATOMIC),
		[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
		[IB_QPT_SMI] = IB_QP_SQ_PSN,
		[IB_QPT_GSI] = IB_QP_SQ_PSN,
		[IB_QPT_UD]  = (IB_QP_CUR_STATE |
		[IB_QPT_UC]  = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_RC]  = (IB_QP_CUR_STATE |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_SMI] = (IB_QP_CUR_STATE |
		[IB_QPT_GSI] = (IB_QP_CUR_STATE |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPT_UD]  = (IB_QP_CUR_STATE |
		[IB_QPT_UC]  = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_RC]  = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE |
				IB_QP_MIN_RNR_TIMER),
		[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE |
				IB_QP_MIN_RNR_TIMER),
		[IB_QPT_SMI] = (IB_QP_CUR_STATE |
		[IB_QPT_GSI] = (IB_QP_CUR_STATE |
		[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
		[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPT_UD]  = (IB_QP_CUR_STATE |
		[IB_QPT_UC]  = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_RC]  = (IB_QP_CUR_STATE |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_SMI] = (IB_QP_CUR_STATE |
		[IB_QPT_GSI] = (IB_QP_CUR_STATE |
		[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
		[IB_QPT_UC]  = (IB_QP_AV |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_RC]  = (IB_QP_PORT |
				IB_QP_MAX_QP_RD_ATOMIC |
				IB_QP_MAX_DEST_RD_ATOMIC |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_INI] = (IB_QP_PORT |
				IB_QP_MAX_QP_RD_ATOMIC |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_TGT] = (IB_QP_PORT |
				IB_QP_MAX_DEST_RD_ATOMIC |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPT_UD]  = (IB_QP_CUR_STATE |
		[IB_QPT_UC]  = (IB_QP_CUR_STATE |
		[IB_QPT_SMI] = (IB_QP_CUR_STATE |
		[IB_QPT_GSI] = (IB_QP_CUR_STATE |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] =   { .valid = 1 }
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if (ll == IB_LINK_LAYER_ETHERNET) {
		req_param |= qp_state_table[cur_state][next_state].
			req_param_add_eth[type];
		opt_param |= qp_state_table[cur_state][next_state].
			opt_param_add_eth[type];
	}

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

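/*
 * Illustrative sketch (not part of the original file): a driver can use
 * ib_modify_qp_is_ok() to validate a user-supplied attribute mask before
 * touching hardware, e.g. for an INIT -> RTR transition on an RC QP over
 * an InfiniBand link.  "attr_mask" is a hypothetical caller-provided mask.
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *				attr_mask, IB_LINK_LAYER_INFINIBAND))
 *		return -EINVAL;
 */
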
int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
			    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int           ret = 0;
	union ib_gid  sgid;

	if ((*qp_attr_mask & IB_QP_AV)  &&
	    (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
				   qp_attr->ah_attr.grh.sgid_index, &sgid);
		if (ret)
			goto out;
		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
			rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
			if (!(*qp_attr_mask & IB_QP_VID))
				qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
		} else {
			ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
					qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
			if (ret)
				goto out;
			ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
			if (ret)
				goto out;
		}
		*qp_attr_mask |= IB_QP_SMAC;
		if (qp_attr->vlan_id < 0xFFFF)
			*qp_attr_mask |= IB_QP_VID;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

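/*
 * Illustrative sketch (not part of the original file): moving a freshly
 * created QP from RESET to INIT.  The P_Key index and access flags shown
 * are arbitrary.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = port_num,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */
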
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

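/*
 * Illustrative sketch (not part of the original file): allocating a CQ and
 * arming it for completion notifications.  "my_cq_done" and "my_ctx" are
 * hypothetical; the CQ depth is arbitrary.
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_cq_done, NULL, my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */
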
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

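/*
 * Illustrative sketch (not part of the original file): most kernel ULPs
 * obtain one DMA MR per PD and use its lkey for local buffers mapped with
 * the ib_dma_* helpers.
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	struct ib_sge sge;
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	sge.lkey = mr->lkey;
 *	...
 *	ib_dereg_mr(mr);
 */
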
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	ret = ib_check_mr_access(mr_access_flags);
	if (ret)
		return ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

struct ib_mr *ib_create_mr(struct ib_pd *pd,
			   struct ib_mr_init_attr *mr_init_attr)
{
	struct ib_mr *mr;

	if (!pd->device->create_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->create_mr(pd, mr_init_attr);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_create_mr);

int ib_destroy_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->destroy_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

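/*
 * Illustrative sketch (not part of the original file): pairing a fast
 * registration MR with a page list of the same maximum length.  Posting
 * the IB_WR_FAST_REG_MR work request itself is omitted.
 *
 *	struct ib_mr *mr;
 *	struct ib_fast_reg_page_list *frpl;
 *
 *	mr = ib_alloc_fast_reg_mr(pd, 32);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	frpl = ib_alloc_fast_reg_page_list(pd->device, 32);
 *	if (IS_ERR(frpl)) {
 *		ib_dereg_mr(mr);
 *		return PTR_ERR(frpl);
 *	}
 *	...
 *	ib_free_fast_reg_page_list(frpl);
 *	ib_dereg_mr(mr);
 */
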
/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

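/*
 * Illustrative sketch (not part of the original file): joining and later
 * leaving a multicast group on a UD QP.  The MGID/MLID pair is assumed to
 * have been obtained from the SA beforehand (e.g. via
 * ib_sa_join_multicast()).
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */
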
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;
	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);