/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in6.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

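/*
 * Usage sketch (not part of this file): a consumer polling a CQ can log
 * failed completions with ib_wc_status_msg().  The CQ ("cq") is an
 * assumption supplied by the caller.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("failed completion: %s (%d)\n",
 *			       ib_wc_status_msg(wc.status), wc.status);
 *	}
 */
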
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

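/*
 * Usage sketch (not part of this file): the two conversion helpers side by
 * side, with an illustrative rate value chosen here purely as an example.
 *
 *	enum ib_rate rate = IB_RATE_40_GBPS;
 *
 *	pr_info("%d Mb/s, %dx the 2.5 Gb/s base rate\n",
 *		ib_rate_to_mbps(rate), ib_rate_to_mult(rate));
 */
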
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

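/*
 * Usage sketch (not part of this file): callers commonly branch on the link
 * layer before doing Ethernet-only (RoCE) address handling.  "device",
 * "port_num" and setup_roce_address() are assumptions of the caller.
 *
 *	if (rdma_port_get_link_layer(device, port_num) ==
 *	    IB_LINK_LAYER_ETHERNET)
 *		setup_roce_address(device, port_num);
 */
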
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;
	struct ib_device_attr devattr;
	int rc;

	rc = ib_query_device(device, &devattr);
	if (rc)
		return ERR_PTR(rc);

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else {
		struct ib_mr *mr;

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return (struct ib_pd *)mr;
		}

		pd->local_mr = mr;
		pd->local_dma_lkey = pd->local_mr->lkey;
	}
	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

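/*
 * Usage sketch (not part of this file): a kernel ULP typically allocates one
 * PD per device and uses pd->local_dma_lkey for its local SGEs.  "device"
 * and the struct ib_sge "sge" are assumptions of the caller.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	sge.lkey = pd->local_dma_lkey;
 *	...
 *	ib_dealloc_pd(pd);
 */
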
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible for synchronously destroying them and
 * guaranteeing that no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->local_mr) {
		ret = ib_dereg_mr(pd->local_mr);
		WARN_ON(ret);
		pd->local_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

struct find_gid_index_context {
	u16 vlan_id;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!(wc->wc_flags & IB_WC_WITH_SMAC) ||
		    !(wc->wc_flags & IB_WC_WITH_VLAN)) {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
							 ah_attr->dmac,
							 wc->wc_flags & IB_WC_WITH_VLAN ?
							 NULL : &vlan_id,
							 0);
			if (ret)
				return ret;
		}

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &grh->dgid, &gid_index);
		if (ret)
			return ret;

		if (wc->wc_flags & IB_WC_WITH_SMAC)
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			ret = ib_find_cached_gid_by_port(device, &grh->dgid,
							 port_num, NULL,
							 &gid_index);
			if (ret)
				return ret;
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

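/*
 * Usage sketch (not part of this file): a UD service answering a request can
 * build the reply address handle straight from the received completion.
 * "pd", "wc", "recv_buf" and "port_num" are assumptions of the caller; when
 * a GRH is present it sits at the start of the receive buffer.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *)recv_buf, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...
 *	ib_destroy_ah(ah);
 */
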
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

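/*
 * Usage sketch (not part of this file): a basic (non-XRC) SRQ only needs the
 * queue depth and SGE limits filled in.  "pd" and "srq_event" (an optional
 * event handler) are assumptions of the caller.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.event_handler	= srq_event,
 *		.srq_type	= IB_SRQT_BASIC,
 *		.attr = {
 *			.max_wr		= 256,
 *			.max_sge	= 1,
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */
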
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device  = device;
		qp->real_qp = qp;
		qp->uobject = NULL;
		qp->qp_type = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;

			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

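/*
 * Usage sketch (not part of this file): filling a minimal ib_qp_init_attr for
 * an RC QP.  "pd", "cq" (used for both send and receive here) and the queue
 * sizes are assumptions of the caller.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	= cq,
 *		.recv_cq	= cq,
 *		.qp_type	= IB_QPT_RC,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.cap = {
 *			.max_send_wr	= 128,
 *			.max_recv_wr	= 128,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */
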
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
		[IB_QPT_RAW_PACKET] = IB_QP_PORT,
		[IB_QPT_UC] = (IB_QP_PKEY_INDEX |
		[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
		[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
		[IB_QPT_UC] = (IB_QP_PKEY_INDEX |
		[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
		[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_UC] = (IB_QP_AV |
		[IB_QPT_RC] = (IB_QP_AV |
				IB_QP_MAX_DEST_RD_ATOMIC |
				IB_QP_MIN_RNR_TIMER),
		[IB_QPT_XRC_INI] = (IB_QP_AV |
		[IB_QPT_XRC_TGT] = (IB_QP_AV |
				IB_QP_MAX_DEST_RD_ATOMIC |
				IB_QP_MIN_RNR_TIMER),
		[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
		[IB_QPT_UC] = (IB_QP_ALT_PATH |
		[IB_QPT_RC] = (IB_QP_ALT_PATH |
		[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
		[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPT_UD] = IB_QP_SQ_PSN,
		[IB_QPT_UC] = IB_QP_SQ_PSN,
		[IB_QPT_RC] = (IB_QP_TIMEOUT |
				IB_QP_MAX_QP_RD_ATOMIC),
		[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
				IB_QP_MAX_QP_RD_ATOMIC),
		[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
		[IB_QPT_SMI] = IB_QP_SQ_PSN,
		[IB_QPT_GSI] = IB_QP_SQ_PSN,
		[IB_QPT_UD] = (IB_QP_CUR_STATE |
		[IB_QPT_UC] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_RC] = (IB_QP_CUR_STATE |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_SMI] = (IB_QP_CUR_STATE |
		[IB_QPT_GSI] = (IB_QP_CUR_STATE |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPT_UD] = (IB_QP_CUR_STATE |
		[IB_QPT_UC] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_RC] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE |
				IB_QP_MIN_RNR_TIMER),
		[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE |
				IB_QP_MIN_RNR_TIMER),
		[IB_QPT_SMI] = (IB_QP_CUR_STATE |
		[IB_QPT_GSI] = (IB_QP_CUR_STATE |
		[IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
		[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
		[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPT_UD] = (IB_QP_CUR_STATE |
		[IB_QPT_UC] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_RC] = (IB_QP_CUR_STATE |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_SMI] = (IB_QP_CUR_STATE |
		[IB_QPT_GSI] = (IB_QP_CUR_STATE |
		[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
		[IB_QPT_UC] = (IB_QP_AV |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_RC] = (IB_QP_PORT |
				IB_QP_MAX_QP_RD_ATOMIC |
				IB_QP_MAX_DEST_RD_ATOMIC |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_INI] = (IB_QP_PORT |
				IB_QP_MAX_QP_RD_ATOMIC |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_XRC_TGT] = (IB_QP_PORT |
				IB_QP_MAX_DEST_RD_ATOMIC |
				IB_QP_MIN_RNR_TIMER |
				IB_QP_PATH_MIG_STATE),
		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPT_UD] = (IB_QP_CUR_STATE |
		[IB_QPT_UC] = (IB_QP_CUR_STATE |
		[IB_QPT_SMI] = (IB_QP_CUR_STATE |
		[IB_QPT_GSI] = (IB_QP_CUR_STATE |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 }
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

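/*
 * Usage sketch (not part of this file): a driver's modify_qp handler can use
 * this helper to reject illegal transitions before touching hardware.  The
 * states, QP type, attribute mask and link layer here are illustrative
 * values supplied by the caller.
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *				attr_mask, IB_LINK_LAYER_INFINIBAND))
 *		return -EINVAL;
 */
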
int ib_resolve_eth_dmac(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int ret = 0;

	if (*qp_attr_mask & IB_QP_AV) {
		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
			return -EINVAL;

		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
			return 0;

		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
					qp_attr->ah_attr.dmac);
		} else {
			union ib_gid		sgid;
			struct ib_gid_attr	sgid_attr;
			int			ifindex;

			ret = ib_query_gid(qp->device,
					   qp_attr->ah_attr.port_num,
					   qp_attr->ah_attr.grh.sgid_index,
					   &sgid, &sgid_attr);

			if (ret || !sgid_attr.ndev) {
				if (!ret)
					ret = -ENXIO;
				goto out;
			}

			ifindex = sgid_attr.ndev->ifindex;

			ret = rdma_addr_find_dmac_by_grh(&sgid,
							 &qp_attr->ah_attr.grh.dgid,
							 qp_attr->ah_attr.dmac,
							 NULL, ifindex);

			dev_put(sgid_attr.ndev);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

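/*
 * Usage sketch (not part of this file): creating a completion queue with a
 * completion callback.  "device", "my_comp_handler" and "priv" are
 * assumptions of the caller.
 *
 *	struct ib_cq_init_attr cq_attr = {
 *		.cqe		= 256,
 *		.comp_vector	= 0,
 *	};
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, priv, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */
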
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);

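/*
 * Usage sketch (not part of this file): allocating a fast-registration MR
 * that can cover up to 16 pages; such an MR is later populated with
 * ib_map_mr_sg() and registered by posting an IB_WR_REG_MR work request.
 * "pd" is an assumption of the caller.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */
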
/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it on the memory region.
 * @mr:         memory region
 * @sg:         dma mapped scatterlist
 * @sg_nents:   number of entries in sg
 * @page_size:  page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must be aligned to page_size (or physically
 *   contiguous to the previous element). In case an sg element has a
 *   non contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
 *   then only max_num_sg entries will be mapped.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr,
		 struct scatterlist *sg,
		 int sg_nents,
		 unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents);
}
EXPORT_SYMBOL(ib_map_mr_sg);

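/*
 * Usage sketch (not part of this file): mapping a DMA-mapped scatterlist onto
 * a previously allocated MR before posting a registration work request.
 * "mr", "sg" and "sg_nents" are assumptions of the caller; PAGE_SIZE is used
 * as the HW page size for simplicity.
 *
 *	int n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
 *
 *	if (n < sg_nents)
 *		return n < 0 ? n : -EINVAL;
 */
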
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sgl
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr,
		   struct scatterlist *sgl,
		   int sg_nents,
		   int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0, last_page_addr = 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	mr->iova = sg_dma_address(&sgl[0]);
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg);
		unsigned int dma_len = sg_dma_len(sg);
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0))
				return i ? : ret;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_addr = end_dma_addr & page_mask;
		last_page_off = end_dma_addr & ~page_mask;
	}

	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
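
/*
 * Usage sketch (not part of this file): a typical driver map_mr_sg callback
 * collects page addresses into a driver-private array via ib_sg_to_pages().
 * "struct my_mr" and its fields are hypothetical.
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = container_of(ibmr, struct my_mr, ibmr);
 *
 *		if (unlikely(mr->npages == mr->max_pages))
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 *				int sg_nents)
 *	{
 *		struct my_mr *mr = container_of(ibmr, struct my_mr, ibmr);
 *
 *		mr->npages = 0;
 *		return ib_sg_to_pages(ibmr, sg, sg_nents, my_set_page);
 *	}
 */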