/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static DEFINE_IDR(ib_ps);
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	struct list_head	id_list;
};

struct rdma_bind_list {
	struct hlist_head	owners;
};

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	enum rdma_cm_state	state;
	struct mutex		qp_mutex;
	struct completion	comp;
	struct mutex		handler_mutex;
	struct ib_sa_query	*query;
};
struct cma_multicast {
	struct rdma_id_private	*id_priv;
	struct ib_sa_multicast	*ib;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

struct iboe_mcast_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;
};

struct cma_hdr {
	u8 ip_version;	/* IP version: 7:4 */
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
{
	enum rdma_cm_state old;
	unsigned long flags;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
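/*
 * Illustrative note (not part of the original file): callers use the helpers
 * above to implement guarded state transitions, e.g.
 *
 *	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
 *		return -EINVAL;
 *
 * which only moves the id into ROUTE_QUERY if it is currently ADDR_RESOLVED,
 * under the id's spinlock.
 */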
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	/* ... */

	if (qkey && id_priv->qkey != qkey)
		/* ... */

	id_priv->qkey = qkey;
	/* ... */

	switch (id_priv->id.ps) {
	/* ... */
		id_priv->qkey = RDMA_UDP_QKEY;
		/* ... */
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     /* ... */);
		/* ... */
		id_priv->qkey = be32_to_cpu(rec.qkey);
		/* ... */
	}
	/* ... */
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	/* ... */
	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr, NULL);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
	}
	/* ... */
}
static int cma_acquire_dev(struct rdma_id_private *id_priv,
			   struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid;
	/* ... */
	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;

	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		/* ... */

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    /* ... */);
	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);
	if (listen_id_priv &&
	    rdma_port_get_link_layer(listen_id_priv->id.device,
				     listen_id_priv->id.port_num) == dev_ll) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;
		if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
		    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
			ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
						 /* ... */);
		else
			ret = ib_find_cached_gid(cma_dev->device, &gid,
						 /* ... */);

		if (!ret && (port == found_port)) {
			id_priv->id.port_num = found_port;
			/* ... */
		}
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;
			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
					ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
				else
					ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);

				if (!ret && (port == found_port)) {
					id_priv->id.port_num = found_port;
					/* ... */
				}
			}
		}
	}
	/* ... */
	cma_attach_to_dev(id_priv, cma_dev);
	/* ... */
}
/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	/* ... */

	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
			continue;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					/* ... */
					id_priv->id.port_num = p;
					/* ... */
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix)) {
					/* ... */
					id_priv->id.port_num = p;
				}
			}
		}
	}
	/* ... */

	cma_attach_to_dev(id_priv, cma_dev);
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	/* ... */
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum rdma_cm_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	/* ... */
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
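/*
 * Illustrative sketch (not part of the original file): a kernel consumer
 * would typically allocate a CM ID with its own event handler and context,
 * here shown for a TCP-style reliable connection.  The handler and context
 * names below are hypothetical.
 */
#if 0
static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	/* Returning non-zero from the handler asks the CM to destroy the id. */
	return 0;
}

static struct rdma_cm_id *my_cm_open(void *my_ctx)
{
	return rdma_create_id(my_cm_handler, my_ctx, RDMA_PS_TCP, IB_QPT_RC);
}
#endif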
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	/* ... */
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	/* ... */

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	/* ... */

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	/* ... */

	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	/* ... */
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	/* ... */

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		/* ... */
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   qp_attr.ah_attr.grh.sgid_index, &sgid);
	if (ret)
		goto out;

	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
	    == RDMA_TRANSPORT_IB &&
	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
	    == IB_LINK_LAYER_ETHERNET) {
		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
		/* ... */
	}
	/* ... */
	qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		/* ... */
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	/* ... */
	qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	/* ... */

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		/* ... */
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
	/* ... */
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	/* ... */

	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	/* ... */

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return ret;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	/* ... */

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	/* ... */
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	case AF_INET6:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

static __be16 cma_port(struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->sgid, 16);
	ib->sib_sid = listen_ib->sib_sid;
	ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	ib->sib_scope_id = listen_ib->sib_scope_id;

	ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->dgid, 16);
}

static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in *listen4, *ip4;

	listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
	ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
	ip4->sin_port = listen4->sin_port;

	ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
	ip4->sin_port = hdr->port;
}

static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in6 *listen6, *ip6;

	listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
	ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = hdr->dst_addr.ip6;
	ip6->sin6_port = listen6->sin6_port;

	ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = hdr->src_addr.ip6;
	ip6->sin6_port = hdr->port;
}
static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event)
{
	/* ... */

	if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
	    (ib_event->event == IB_CM_REQ_RECEIVED)) {
		cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
		return 0;
	}

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info(id, listen_id, hdr);
		break;
	case 6:
		cma_save_ip6_info(id, listen_id, hdr);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	/* ... */
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		/* ... */
		rdma_destroy_id(&dev_id_priv->id);
		/* ... */
	}
	/* ... */
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	/* ... */
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	/* ... */

	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		/* ... */
	}
	/* ... */
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ib_sa_free_multicast(mc->multicast.ib);
			/* ... */
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       /* ... */);
	return ret;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	/* ... */

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	/* ... */

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info(id, listen_id, ib_event))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr(cma_src_addr(id_priv))) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	} else {
		ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
		if (ret)
			goto err;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	/* ... */

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info(id, listen_id, ib_event))
		goto err;

	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
		if (ret)
			goto err;
	}

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		/* ... */);
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	/* ... */
	u8 alt_smac[ETH_ALEN];
	u8 *palt_smac = alt_smac;
	int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
			RDMA_TRANSPORT_IB) &&
		       (rdma_port_get_link_layer(cm_id->device,
			ib_event->param.req_rcvd.port) ==
			IB_LINK_LAYER_ETHERNET));

	listen_id = cm_id->context;
	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
		return -EINVAL;

	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	/* ... */

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id, listen_id);
	/* ... */

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err;

	if (is_iboe) {
		if (ib_event->param.req_rcvd.primary_path != NULL)
			rdma_addr_find_smac_by_sgid(
				&ib_event->param.req_rcvd.primary_path->sgid,
				/* ... */);
		/* ... */
		if (ib_event->param.req_rcvd.alternate_path != NULL)
			rdma_addr_find_smac_by_sgid(
				&ib_event->param.req_rcvd.alternate_path->sgid,
				/* ... */);
		/* ... */
	}

	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (is_iboe)
		ib_update_cm_av(cm_id, psmac, palt_smac);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	return 0;

err:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
	/* ... */
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
	/* ... */
	mutex_unlock(&listen_id->handler_mutex);
	/* ... */
	rdma_destroy_id(&conn_id->id);
	return ret;
}
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);
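/*
 * Worked example of the mapping above (illustrative, not derived from this
 * file): for an AF_INET/AF_INET6 id using port space RDMA_PS_TCP and TCP
 * port 0x12B7 (4791), the returned service ID is
 * cpu_to_be64(((u64) RDMA_PS_TCP << 16) + 0x12B7) - the port space lands in
 * bits 31:16 and the port number in bits 15:0 of the low 32 bits.  AF_IB
 * addresses carry the service ID directly in sib_sid, so it is returned
 * unchanged.
 */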
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	/* ... */
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		cma_set_ip_ver(cma_data, 4);
		cma_set_ip_ver(cma_mask, 0xF);
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		cma_set_ip_ver(cma_data, 6);
		cma_set_ip_ver(cma_mask, 0xF);
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	/* ... */
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		/* ... */
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		/* ... */
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	/* ... */
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	/* ... */
	struct ib_device_attr attr;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, RDMA_CM_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		cma_deref_id(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);
	cma_deref_id(conn_id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	struct ib_cm_id	*id;
	/* ... */

	id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.ib = id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	if (cma_any_addr(addr) && !id_priv->afonly)
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct iw_cm_id	*id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.iw = id;

	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	/* ... */

	if (cma_family(id_priv) == AF_IB &&
	    rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
		return;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
			    id_priv->id.qp_type);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	/* ... */
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		kfree(work);
		return -ENOMEM;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret) {
		kfree(route->path_rec);
		route->path_rec = NULL;
		kfree(work);
	}
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	/* ... */
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	int prio;
	struct net_device *dev;

	prio = rt_tos2priority(tos);
	dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
		vlan_dev_real_dev(ndev) : ndev;

	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (ndev->priv_flags & IFF_802_1Q_VLAN)
		return (vlan_dev_get_egress_qos_mask(ndev, prio) &
			VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
	return 0;
}
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev = NULL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	/* ... */
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if)
		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
	memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_resolve_ib_route(id_priv, timeout_ms);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ret = cma_resolve_iboe_route(id_priv);
			break;
		default:
			ret = -ENOSYS;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
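/*
 * Illustrative sketch (not part of the original file): the usual active-side
 * sequence is rdma_resolve_addr(), then rdma_resolve_route() from the
 * RDMA_CM_EVENT_ADDR_RESOLVED callback, then QP creation and rdma_connect()
 * once RDMA_CM_EVENT_ROUTE_RESOLVED is delivered.  The handler name and the
 * 2000 ms timeout below are arbitrary.
 */
#if 0
static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		/* kick off route resolution; result arrives as another event */
		return rdma_resolve_route(id, 2000);
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		/* create the QP and call rdma_connect() here */
		return 0;
	default:
		return 0;
	}
}
#endif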
static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct ib_port_attr port_attr;
	/* ... */

	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
			continue;
		/* ... */

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_query_port(cur_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE) {
				/* ... */
			}
		}
	}
	/* ... */

	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv, NULL);

	if (status) {
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	/* ... */

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	/* ... */

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
		if (dst_addr->sa_family == AF_INET6) {
			((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
				((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
		} else if (dst_addr->sa_family == AF_IB) {
			((struct sockaddr_ib *) src_addr)->sib_pkey =
				((struct sockaddr_ib *) dst_addr)->sib_pkey;
		}
	}
	return rdma_bind_addr(id, src_addr);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (cma_family(id_priv) != dst_addr->sa_family)
		return -EINVAL;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
					      dst_addr, &id->route.addr.dev_addr,
					      timeout_ms, addr_handler, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (reuse || id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);
2279 int rdma_set_afonly(struct rdma_cm_id
*id
, int afonly
)
2281 struct rdma_id_private
*id_priv
;
2282 unsigned long flags
;
2285 id_priv
= container_of(id
, struct rdma_id_private
, id
);
2286 spin_lock_irqsave(&id_priv
->lock
, flags
);
2287 if (id_priv
->state
== RDMA_CM_IDLE
|| id_priv
->state
== RDMA_CM_ADDR_BOUND
) {
2288 id_priv
->options
|= (1 << CMA_OPTION_AFONLY
);
2289 id_priv
->afonly
= afonly
;
2294 spin_unlock_irqrestore(&id_priv
->lock
, flags
);
2297 EXPORT_SYMBOL(rdma_set_afonly
);
2299 static void cma_bind_port(struct rdma_bind_list
*bind_list
,
2300 struct rdma_id_private
*id_priv
)
2302 struct sockaddr
*addr
;
2303 struct sockaddr_ib
*sib
;
2307 addr
= cma_src_addr(id_priv
);
2308 port
= htons(bind_list
->port
);
2310 switch (addr
->sa_family
) {
2312 ((struct sockaddr_in
*) addr
)->sin_port
= port
;
2315 ((struct sockaddr_in6
*) addr
)->sin6_port
= port
;
2318 sib
= (struct sockaddr_ib
*) addr
;
2319 sid
= be64_to_cpu(sib
->sib_sid
);
2320 mask
= be64_to_cpu(sib
->sib_sid_mask
);
2321 sib
->sib_sid
= cpu_to_be64((sid
& mask
) | (u64
) ntohs(port
));
2322 sib
->sib_sid_mask
= cpu_to_be64(~0ULL);
2325 id_priv
->bind_list
= bind_list
;
2326 hlist_add_head(&id_priv
->node
, &bind_list
->owners
);
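
/*
 * Illustrative sketch of the AF_IB service-ID arithmetic used by
 * cma_bind_port() above: the caller-supplied sid is kept wherever its mask
 * bits are set, the freshly bound port is OR'd into the low 16 bits, and the
 * mask is then widened to cover the whole sid.  The helper name is made up
 * for the example.
 */
static u64 example_fold_port_into_sid(u64 sid, u64 mask, u16 port_host_order)
{
	/* e.g. sid  = 0x0000000001230000, mask = 0xffffffffffff0000,
	 *      port = 0x1234          ->  result 0x0000000001231234 */
	return (sid & mask) | (u64) port_host_order;
}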
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
	if (ret < 0)
		goto err;

	bind_list->port = (unsigned short)ret;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}

static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;

	inet_get_local_port_range(&init_net, &low, &high);
	remaining = (high - low) + 1;
	rover = prandom_u32() % remaining + low;
retry:
	if (last_used_port != rover &&
	    !idr_find(ps, (unsigned short) rover)) {
		int ret = cma_alloc_port(ps, id_priv, rover);
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}
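
/*
 * Illustrative sketch of the rover walk in cma_alloc_any_port() above.  The
 * is_free() callback and the function name are made up for the example; the
 * real code tests the candidate against the port-space idr and remembers
 * last_used_port so a just-closed port is not handed out again immediately.
 */
static int example_pick_any_port(unsigned int low, unsigned int high,
				 unsigned int last_used,
				 bool (*is_free)(unsigned int port))
{
	unsigned int remaining = (high - low) + 1;
	unsigned int rover = prandom_u32() % remaining + low;

	while (remaining--) {
		if (rover != last_used && is_free(rover))
			return rover;
		if (++rover > high)		/* wrap back to the bottom */
			rover = low;
	}
	return -EADDRNOTAVAIL;
}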
/*
 * Check that the requested port is available.  This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port.  In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}

static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}

static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}

static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
		return &tcp_ps;
	case RDMA_PS_UDP:
		return &udp_ps;
	case RDMA_PS_IPOIB:
		return &ipoib_ps;
	case RDMA_PS_IB:
		return &ib_ps;
	default:
		return NULL;
	}
}

static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	struct idr *ps = NULL;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = &ib_ps;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = &tcp_ps;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = &udp_ps;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
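
/*
 * Illustrative sketch (not part of this file): the passive-side counterpart
 * of rdma_listen() above.  my_server_handler and the port number are
 * hypothetical; incoming connections are reported as
 * RDMA_CM_EVENT_CONNECT_REQUEST on a new rdma_cm_id passed to the handler.
 */
static int my_start_server(rdma_cm_event_handler my_server_handler)
{
	struct rdma_cm_id *listen_id;
	struct sockaddr_in sin = {
		.sin_family	 = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port	 = htons(7174),		/* example port */
	};
	int ret;

	listen_id = rdma_create_id(my_server_handler, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	ret = rdma_bind_addr(listen_id, (struct sockaddr *) &sin);
	if (!ret)
		ret = rdma_listen(listen_id, 16);	/* backlog of 16 */
	if (ret)
		rdma_destroy_id(listen_id);
	return ret;
}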
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv, NULL);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6)
			id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);

static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}
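
/*
 * Illustrative sketch of the private-data sizing done by the connect paths
 * below: cma_format_hdr() above prepends the CMA's own addressing header
 * (whose size cma_user_data_offset() reports) to the caller's private data,
 * and a wrapped sum is treated as -EINVAL.  The u8 widths and the helper
 * name are assumptions made for the example.
 */
static int example_total_private_data_len(u8 offset, u8 user_len, u8 *total)
{
	*total = offset + user_len;	/* u8 arithmetic can wrap */
	if (*total < user_len)		/* mirrors the overflow check below */
		return -EINVAL;
	return 0;
}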
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id *id;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id *id;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
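
/*
 * Illustrative sketch (not part of this file): calling rdma_connect() above
 * from the active side once RDMA_CM_EVENT_ROUTE_RESOLVED has arrived.  The
 * QP is assumed to have been created already (e.g. with rdma_create_qp()),
 * and my_private_data is a hypothetical name.
 */
static int my_on_route_resolved(struct rdma_cm_id *cm_id,
				const void *my_private_data, u8 len)
{
	struct rdma_conn_param param = {
		.private_data		= my_private_data,
		.private_data_len	= len,
		.responder_resources	= 1,
		.initiator_depth	= 1,
		.retry_count		= 7,	/* clamped to 7 by cma_connect_ib() */
		.rnr_retry_count	= 7,
	};

	return rdma_connect(cm_id, &param);
}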
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else {
		iw_param.qpn = conn_param->qp_num;
	}
	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
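
/*
 * Illustrative sketch (not part of this file): accepting from the listen-side
 * event handler.  The child cm_id delivered with
 * RDMA_CM_EVENT_CONNECT_REQUEST is the one to accept on.  my_setup_qp() is a
 * hypothetical helper that allocates a PD/CQ and calls rdma_create_qp() for
 * the new id; my_server_handler is likewise made up.
 */
static int my_setup_qp(struct rdma_cm_id *cm_id);	/* hypothetical helper */

static int my_server_handler(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event)
{
	struct rdma_conn_param param;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		if (my_setup_qp(cm_id))
			return -1;	/* non-zero return destroys the new id */

		memset(&param, 0, sizeof param);
		param.responder_resources = event->param.conn.responder_resources;
		param.initiator_depth	  = event->param.conn.initiator_depth;
		return rdma_accept(cm_id, &param);
	case RDMA_CM_EVENT_ESTABLISHED:
		return 0;
	default:
		return 0;
	}
}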
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
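
/*
 * Illustrative sketch (not part of this file): a typical teardown using
 * rdma_disconnect() above.  Either side may initiate; the peer then sees
 * RDMA_CM_EVENT_DISCONNECTED and usually mirrors the call before tearing
 * down its QP and id.  my_teardown is a hypothetical name.
 */
static void my_teardown(struct rdma_cm_id *cm_id)
{
	rdma_disconnect(cm_id);		/* DREQ/DREP on IB, close on iWARP */
	rdma_destroy_qp(cm_id);		/* only if the QP came from rdma_create_qp() */
	rdma_destroy_id(cm_id);		/* never from within this id's own event
					 * handler; return non-zero there instead */
}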
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
		return 0;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}

static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_join_ib_multicast(id_priv, mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_init(&mc->mcref);
			ret = cma_iboe_join_multicast(id_priv, mc);
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
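
/*
 * Illustrative sketch (not part of this file): joining a UD multicast group
 * with rdma_join_multicast() above.  The id must already be bound or
 * address-resolved; the join result is delivered as
 * RDMA_CM_EVENT_MULTICAST_JOIN (or _ERROR), and event->param.ud then carries
 * the ah_attr/qp_num/qkey needed to address the group.  my_join_group and
 * my_group_ctx are hypothetical names.
 */
static int my_join_group(struct rdma_cm_id *cm_id, struct sockaddr *mcast_addr,
			 void *my_group_ctx)
{
	int ret;

	/* the id was created with RDMA_PS_UDP / IB_QPT_UD and already bound */
	ret = rdma_join_multicast(cm_id, mcast_addr, my_group_ctx);
	if (ret)
		pr_err("multicast join failed: %d\n", ret);
	return ret;
}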
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));
			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
				case IB_LINK_LAYER_INFINIBAND:
					ib_sa_free_multicast(mc->multicast.ib);
					break;
				case IB_LINK_LAYER_ETHERNET:
					kref_put(&mc->mcref, release_mc);
					break;
				default:
					break;
				}
			}
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}
out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid		= id_priv->owner;
			id_stats->port_space	= id->ps;
			id_stats->cm_state	= id_priv->state;
			id_stats->qp_num	= id_priv->qp_num;
			id_stats->qp_type	= id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};

static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
	idr_destroy(&ib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);