2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/completion.h>
38 #include <linux/in6.h>
39 #include <linux/mutex.h>
40 #include <linux/random.h>
41 #include <linux/idr.h>
42 #include <linux/inetdevice.h>
43 #include <linux/slab.h>
48 #include <rdma/rdma_cm.h>
49 #include <rdma/rdma_cm_ib.h>
50 #include <rdma/rdma_netlink.h>
51 #include <rdma/ib_cache.h>
52 #include <rdma/ib_cm.h>
53 #include <rdma/ib_sa.h>
54 #include <rdma/iw_cm.h>
56 MODULE_AUTHOR("Sean Hefty");
57 MODULE_DESCRIPTION("Generic RDMA CM Agent");
58 MODULE_LICENSE("Dual BSD/GPL");
60 #define CMA_CM_RESPONSE_TIMEOUT 20
61 #define CMA_MAX_CM_RETRIES 15
62 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
63 #define CMA_IBOE_PACKET_LIFETIME 18
65 static void cma_add_one(struct ib_device
*device
);
66 static void cma_remove_one(struct ib_device
*device
);
68 static struct ib_client cma_client
= {
71 .remove
= cma_remove_one
74 static struct ib_sa_client sa_client
;
75 static struct rdma_addr_client addr_client
;
76 static LIST_HEAD(dev_list
);
77 static LIST_HEAD(listen_any_list
);
78 static DEFINE_MUTEX(lock
);
79 static struct workqueue_struct
*cma_wq
;
80 static DEFINE_IDR(sdp_ps
);
81 static DEFINE_IDR(tcp_ps
);
82 static DEFINE_IDR(udp_ps
);
83 static DEFINE_IDR(ipoib_ps
);
86 struct list_head list
;
87 struct ib_device
*device
;
88 struct completion comp
;
90 struct list_head id_list
;
93 struct rdma_bind_list
{
95 struct hlist_head owners
;
100 * Device removal can occur at anytime, so we need extra handling to
101 * serialize notifying the user of device removal with other callbacks.
102 * We do this by disabling removal notification while a callback is in process,
103 * and reporting it after the callback completes.
105 struct rdma_id_private
{
106 struct rdma_cm_id id
;
108 struct rdma_bind_list
*bind_list
;
109 struct hlist_node node
;
110 struct list_head list
; /* listen_any_list or cma_device.list */
111 struct list_head listen_list
; /* per device listens */
112 struct cma_device
*cma_dev
;
113 struct list_head mc_list
;
116 enum rdma_cm_state state
;
118 struct mutex qp_mutex
;
120 struct completion comp
;
122 struct mutex handler_mutex
;
126 struct ib_sa_query
*query
;
142 struct cma_multicast
{
143 struct rdma_id_private
*id_priv
;
145 struct ib_sa_multicast
*ib
;
147 struct list_head list
;
149 struct sockaddr_storage addr
;
154 struct work_struct work
;
155 struct rdma_id_private
*id
;
156 enum rdma_cm_state old_state
;
157 enum rdma_cm_state new_state
;
158 struct rdma_cm_event event
;
161 struct cma_ndev_work
{
162 struct work_struct work
;
163 struct rdma_id_private
*id
;
164 struct rdma_cm_event event
;
167 struct iboe_mcast_work
{
168 struct work_struct work
;
169 struct rdma_id_private
*id
;
170 struct cma_multicast
*mc
;
183 u8 ip_version
; /* IP version: 7:4 */
185 union cma_ip_addr src_addr
;
186 union cma_ip_addr dst_addr
;
191 u8 sdp_version
; /* Major version: 7:4 */
192 u8 ip_version
; /* IP version: 7:4 */
193 u8 sdp_specific1
[10];
195 __be16 sdp_specific2
;
196 union cma_ip_addr src_addr
;
197 union cma_ip_addr dst_addr
;
205 #define CMA_VERSION 0x00
206 #define SDP_MAJ_VERSION 0x2
208 static int cma_comp(struct rdma_id_private
*id_priv
, enum rdma_cm_state comp
)
213 spin_lock_irqsave(&id_priv
->lock
, flags
);
214 ret
= (id_priv
->state
== comp
);
215 spin_unlock_irqrestore(&id_priv
->lock
, flags
);
219 static int cma_comp_exch(struct rdma_id_private
*id_priv
,
220 enum rdma_cm_state comp
, enum rdma_cm_state exch
)
225 spin_lock_irqsave(&id_priv
->lock
, flags
);
226 if ((ret
= (id_priv
->state
== comp
)))
227 id_priv
->state
= exch
;
228 spin_unlock_irqrestore(&id_priv
->lock
, flags
);
232 static enum rdma_cm_state
cma_exch(struct rdma_id_private
*id_priv
,
233 enum rdma_cm_state exch
)
236 enum rdma_cm_state old
;
238 spin_lock_irqsave(&id_priv
->lock
, flags
);
239 old
= id_priv
->state
;
240 id_priv
->state
= exch
;
241 spin_unlock_irqrestore(&id_priv
->lock
, flags
);
245 static inline u8
cma_get_ip_ver(struct cma_hdr
*hdr
)
247 return hdr
->ip_version
>> 4;
250 static inline void cma_set_ip_ver(struct cma_hdr
*hdr
, u8 ip_ver
)
252 hdr
->ip_version
= (ip_ver
<< 4) | (hdr
->ip_version
& 0xF);
255 static inline u8
sdp_get_majv(u8 sdp_version
)
257 return sdp_version
>> 4;
260 static inline u8
sdp_get_ip_ver(struct sdp_hh
*hh
)
262 return hh
->ip_version
>> 4;
265 static inline void sdp_set_ip_ver(struct sdp_hh
*hh
, u8 ip_ver
)
267 hh
->ip_version
= (ip_ver
<< 4) | (hh
->ip_version
& 0xF);
270 static void cma_attach_to_dev(struct rdma_id_private
*id_priv
,
271 struct cma_device
*cma_dev
)
273 atomic_inc(&cma_dev
->refcount
);
274 id_priv
->cma_dev
= cma_dev
;
275 id_priv
->id
.device
= cma_dev
->device
;
276 id_priv
->id
.route
.addr
.dev_addr
.transport
=
277 rdma_node_get_transport(cma_dev
->device
->node_type
);
278 list_add_tail(&id_priv
->list
, &cma_dev
->id_list
);
281 static inline void cma_deref_dev(struct cma_device
*cma_dev
)
283 if (atomic_dec_and_test(&cma_dev
->refcount
))
284 complete(&cma_dev
->comp
);
287 static inline void release_mc(struct kref
*kref
)
289 struct cma_multicast
*mc
= container_of(kref
, struct cma_multicast
, mcref
);
291 kfree(mc
->multicast
.ib
);
295 static void cma_release_dev(struct rdma_id_private
*id_priv
)
298 list_del(&id_priv
->list
);
299 cma_deref_dev(id_priv
->cma_dev
);
300 id_priv
->cma_dev
= NULL
;
304 static int cma_set_qkey(struct rdma_id_private
*id_priv
)
306 struct ib_sa_mcmember_rec rec
;
312 switch (id_priv
->id
.ps
) {
314 id_priv
->qkey
= RDMA_UDP_QKEY
;
317 ib_addr_get_mgid(&id_priv
->id
.route
.addr
.dev_addr
, &rec
.mgid
);
318 ret
= ib_sa_get_mcmember_rec(id_priv
->id
.device
,
319 id_priv
->id
.port_num
, &rec
.mgid
,
322 id_priv
->qkey
= be32_to_cpu(rec
.qkey
);
330 static int find_gid_port(struct ib_device
*device
, union ib_gid
*gid
, u8 port_num
)
334 struct ib_port_attr props
;
337 err
= ib_query_port(device
, port_num
, &props
);
341 for (i
= 0; i
< props
.gid_tbl_len
; ++i
) {
342 err
= ib_query_gid(device
, port_num
, i
, &tmp
);
345 if (!memcmp(&tmp
, gid
, sizeof tmp
))
352 static int cma_acquire_dev(struct rdma_id_private
*id_priv
)
354 struct rdma_dev_addr
*dev_addr
= &id_priv
->id
.route
.addr
.dev_addr
;
355 struct cma_device
*cma_dev
;
356 union ib_gid gid
, iboe_gid
;
359 enum rdma_link_layer dev_ll
= dev_addr
->dev_type
== ARPHRD_INFINIBAND
?
360 IB_LINK_LAYER_INFINIBAND
: IB_LINK_LAYER_ETHERNET
;
363 iboe_addr_get_sgid(dev_addr
, &iboe_gid
);
364 memcpy(&gid
, dev_addr
->src_dev_addr
+
365 rdma_addr_gid_offset(dev_addr
), sizeof gid
);
366 list_for_each_entry(cma_dev
, &dev_list
, list
) {
367 for (port
= 1; port
<= cma_dev
->device
->phys_port_cnt
; ++port
) {
368 if (rdma_port_get_link_layer(cma_dev
->device
, port
) == dev_ll
) {
369 if (rdma_node_get_transport(cma_dev
->device
->node_type
) == RDMA_TRANSPORT_IB
&&
370 rdma_port_get_link_layer(cma_dev
->device
, port
) == IB_LINK_LAYER_ETHERNET
)
371 ret
= find_gid_port(cma_dev
->device
, &iboe_gid
, port
);
373 ret
= find_gid_port(cma_dev
->device
, &gid
, port
);
376 id_priv
->id
.port_num
= port
;
386 cma_attach_to_dev(id_priv
, cma_dev
);
392 static void cma_deref_id(struct rdma_id_private
*id_priv
)
394 if (atomic_dec_and_test(&id_priv
->refcount
))
395 complete(&id_priv
->comp
);
398 static int cma_disable_callback(struct rdma_id_private
*id_priv
,
399 enum rdma_cm_state state
)
401 mutex_lock(&id_priv
->handler_mutex
);
402 if (id_priv
->state
!= state
) {
403 mutex_unlock(&id_priv
->handler_mutex
);
409 static int cma_has_cm_dev(struct rdma_id_private
*id_priv
)
411 return (id_priv
->id
.device
&& id_priv
->cm_id
.ib
);
414 struct rdma_cm_id
*rdma_create_id(rdma_cm_event_handler event_handler
,
415 void *context
, enum rdma_port_space ps
,
416 enum ib_qp_type qp_type
)
418 struct rdma_id_private
*id_priv
;
420 id_priv
= kzalloc(sizeof *id_priv
, GFP_KERNEL
);
422 return ERR_PTR(-ENOMEM
);
424 id_priv
->owner
= task_pid_nr(current
);
425 id_priv
->state
= RDMA_CM_IDLE
;
426 id_priv
->id
.context
= context
;
427 id_priv
->id
.event_handler
= event_handler
;
429 id_priv
->id
.qp_type
= qp_type
;
430 spin_lock_init(&id_priv
->lock
);
431 mutex_init(&id_priv
->qp_mutex
);
432 init_completion(&id_priv
->comp
);
433 atomic_set(&id_priv
->refcount
, 1);
434 mutex_init(&id_priv
->handler_mutex
);
435 INIT_LIST_HEAD(&id_priv
->listen_list
);
436 INIT_LIST_HEAD(&id_priv
->mc_list
);
437 get_random_bytes(&id_priv
->seq_num
, sizeof id_priv
->seq_num
);
441 EXPORT_SYMBOL(rdma_create_id
);
443 static int cma_init_ud_qp(struct rdma_id_private
*id_priv
, struct ib_qp
*qp
)
445 struct ib_qp_attr qp_attr
;
446 int qp_attr_mask
, ret
;
448 qp_attr
.qp_state
= IB_QPS_INIT
;
449 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
453 ret
= ib_modify_qp(qp
, &qp_attr
, qp_attr_mask
);
457 qp_attr
.qp_state
= IB_QPS_RTR
;
458 ret
= ib_modify_qp(qp
, &qp_attr
, IB_QP_STATE
);
462 qp_attr
.qp_state
= IB_QPS_RTS
;
464 ret
= ib_modify_qp(qp
, &qp_attr
, IB_QP_STATE
| IB_QP_SQ_PSN
);
469 static int cma_init_conn_qp(struct rdma_id_private
*id_priv
, struct ib_qp
*qp
)
471 struct ib_qp_attr qp_attr
;
472 int qp_attr_mask
, ret
;
474 qp_attr
.qp_state
= IB_QPS_INIT
;
475 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
479 return ib_modify_qp(qp
, &qp_attr
, qp_attr_mask
);
482 int rdma_create_qp(struct rdma_cm_id
*id
, struct ib_pd
*pd
,
483 struct ib_qp_init_attr
*qp_init_attr
)
485 struct rdma_id_private
*id_priv
;
489 id_priv
= container_of(id
, struct rdma_id_private
, id
);
490 if (id
->device
!= pd
->device
)
493 qp
= ib_create_qp(pd
, qp_init_attr
);
497 if (id
->qp_type
== IB_QPT_UD
)
498 ret
= cma_init_ud_qp(id_priv
, qp
);
500 ret
= cma_init_conn_qp(id_priv
, qp
);
505 id_priv
->qp_num
= qp
->qp_num
;
506 id_priv
->srq
= (qp
->srq
!= NULL
);
512 EXPORT_SYMBOL(rdma_create_qp
);
514 void rdma_destroy_qp(struct rdma_cm_id
*id
)
516 struct rdma_id_private
*id_priv
;
518 id_priv
= container_of(id
, struct rdma_id_private
, id
);
519 mutex_lock(&id_priv
->qp_mutex
);
520 ib_destroy_qp(id_priv
->id
.qp
);
521 id_priv
->id
.qp
= NULL
;
522 mutex_unlock(&id_priv
->qp_mutex
);
524 EXPORT_SYMBOL(rdma_destroy_qp
);
526 static int cma_modify_qp_rtr(struct rdma_id_private
*id_priv
,
527 struct rdma_conn_param
*conn_param
)
529 struct ib_qp_attr qp_attr
;
530 int qp_attr_mask
, ret
;
532 mutex_lock(&id_priv
->qp_mutex
);
533 if (!id_priv
->id
.qp
) {
538 /* Need to update QP attributes from default values. */
539 qp_attr
.qp_state
= IB_QPS_INIT
;
540 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
544 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, qp_attr_mask
);
548 qp_attr
.qp_state
= IB_QPS_RTR
;
549 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
554 qp_attr
.max_dest_rd_atomic
= conn_param
->responder_resources
;
555 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, qp_attr_mask
);
557 mutex_unlock(&id_priv
->qp_mutex
);
561 static int cma_modify_qp_rts(struct rdma_id_private
*id_priv
,
562 struct rdma_conn_param
*conn_param
)
564 struct ib_qp_attr qp_attr
;
565 int qp_attr_mask
, ret
;
567 mutex_lock(&id_priv
->qp_mutex
);
568 if (!id_priv
->id
.qp
) {
573 qp_attr
.qp_state
= IB_QPS_RTS
;
574 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
579 qp_attr
.max_rd_atomic
= conn_param
->initiator_depth
;
580 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, qp_attr_mask
);
582 mutex_unlock(&id_priv
->qp_mutex
);
586 static int cma_modify_qp_err(struct rdma_id_private
*id_priv
)
588 struct ib_qp_attr qp_attr
;
591 mutex_lock(&id_priv
->qp_mutex
);
592 if (!id_priv
->id
.qp
) {
597 qp_attr
.qp_state
= IB_QPS_ERR
;
598 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, IB_QP_STATE
);
600 mutex_unlock(&id_priv
->qp_mutex
);
604 static int cma_ib_init_qp_attr(struct rdma_id_private
*id_priv
,
605 struct ib_qp_attr
*qp_attr
, int *qp_attr_mask
)
607 struct rdma_dev_addr
*dev_addr
= &id_priv
->id
.route
.addr
.dev_addr
;
611 if (rdma_port_get_link_layer(id_priv
->id
.device
, id_priv
->id
.port_num
) ==
612 IB_LINK_LAYER_INFINIBAND
)
613 pkey
= ib_addr_get_pkey(dev_addr
);
617 ret
= ib_find_cached_pkey(id_priv
->id
.device
, id_priv
->id
.port_num
,
618 pkey
, &qp_attr
->pkey_index
);
622 qp_attr
->port_num
= id_priv
->id
.port_num
;
623 *qp_attr_mask
= IB_QP_STATE
| IB_QP_PKEY_INDEX
| IB_QP_PORT
;
625 if (id_priv
->id
.qp_type
== IB_QPT_UD
) {
626 ret
= cma_set_qkey(id_priv
);
630 qp_attr
->qkey
= id_priv
->qkey
;
631 *qp_attr_mask
|= IB_QP_QKEY
;
633 qp_attr
->qp_access_flags
= 0;
634 *qp_attr_mask
|= IB_QP_ACCESS_FLAGS
;
639 int rdma_init_qp_attr(struct rdma_cm_id
*id
, struct ib_qp_attr
*qp_attr
,
642 struct rdma_id_private
*id_priv
;
645 id_priv
= container_of(id
, struct rdma_id_private
, id
);
646 switch (rdma_node_get_transport(id_priv
->id
.device
->node_type
)) {
647 case RDMA_TRANSPORT_IB
:
648 if (!id_priv
->cm_id
.ib
|| (id_priv
->id
.qp_type
== IB_QPT_UD
))
649 ret
= cma_ib_init_qp_attr(id_priv
, qp_attr
, qp_attr_mask
);
651 ret
= ib_cm_init_qp_attr(id_priv
->cm_id
.ib
, qp_attr
,
653 if (qp_attr
->qp_state
== IB_QPS_RTR
)
654 qp_attr
->rq_psn
= id_priv
->seq_num
;
656 case RDMA_TRANSPORT_IWARP
:
657 if (!id_priv
->cm_id
.iw
) {
658 qp_attr
->qp_access_flags
= 0;
659 *qp_attr_mask
= IB_QP_STATE
| IB_QP_ACCESS_FLAGS
;
661 ret
= iw_cm_init_qp_attr(id_priv
->cm_id
.iw
, qp_attr
,
671 EXPORT_SYMBOL(rdma_init_qp_attr
);
673 static inline int cma_zero_addr(struct sockaddr
*addr
)
675 struct in6_addr
*ip6
;
677 if (addr
->sa_family
== AF_INET
)
678 return ipv4_is_zeronet(
679 ((struct sockaddr_in
*)addr
)->sin_addr
.s_addr
);
681 ip6
= &((struct sockaddr_in6
*) addr
)->sin6_addr
;
682 return (ip6
->s6_addr32
[0] | ip6
->s6_addr32
[1] |
683 ip6
->s6_addr32
[2] | ip6
->s6_addr32
[3]) == 0;
687 static inline int cma_loopback_addr(struct sockaddr
*addr
)
689 if (addr
->sa_family
== AF_INET
)
690 return ipv4_is_loopback(
691 ((struct sockaddr_in
*) addr
)->sin_addr
.s_addr
);
693 return ipv6_addr_loopback(
694 &((struct sockaddr_in6
*) addr
)->sin6_addr
);
697 static inline int cma_any_addr(struct sockaddr
*addr
)
699 return cma_zero_addr(addr
) || cma_loopback_addr(addr
);
702 static int cma_addr_cmp(struct sockaddr
*src
, struct sockaddr
*dst
)
704 if (src
->sa_family
!= dst
->sa_family
)
707 switch (src
->sa_family
) {
709 return ((struct sockaddr_in
*) src
)->sin_addr
.s_addr
!=
710 ((struct sockaddr_in
*) dst
)->sin_addr
.s_addr
;
712 return ipv6_addr_cmp(&((struct sockaddr_in6
*) src
)->sin6_addr
,
713 &((struct sockaddr_in6
*) dst
)->sin6_addr
);
717 static inline __be16
cma_port(struct sockaddr
*addr
)
719 if (addr
->sa_family
== AF_INET
)
720 return ((struct sockaddr_in
*) addr
)->sin_port
;
722 return ((struct sockaddr_in6
*) addr
)->sin6_port
;
725 static inline int cma_any_port(struct sockaddr
*addr
)
727 return !cma_port(addr
);
730 static int cma_get_net_info(void *hdr
, enum rdma_port_space ps
,
731 u8
*ip_ver
, __be16
*port
,
732 union cma_ip_addr
**src
, union cma_ip_addr
**dst
)
736 if (sdp_get_majv(((struct sdp_hh
*) hdr
)->sdp_version
) !=
740 *ip_ver
= sdp_get_ip_ver(hdr
);
741 *port
= ((struct sdp_hh
*) hdr
)->port
;
742 *src
= &((struct sdp_hh
*) hdr
)->src_addr
;
743 *dst
= &((struct sdp_hh
*) hdr
)->dst_addr
;
746 if (((struct cma_hdr
*) hdr
)->cma_version
!= CMA_VERSION
)
749 *ip_ver
= cma_get_ip_ver(hdr
);
750 *port
= ((struct cma_hdr
*) hdr
)->port
;
751 *src
= &((struct cma_hdr
*) hdr
)->src_addr
;
752 *dst
= &((struct cma_hdr
*) hdr
)->dst_addr
;
756 if (*ip_ver
!= 4 && *ip_ver
!= 6)
761 static void cma_save_net_info(struct rdma_addr
*addr
,
762 struct rdma_addr
*listen_addr
,
763 u8 ip_ver
, __be16 port
,
764 union cma_ip_addr
*src
, union cma_ip_addr
*dst
)
766 struct sockaddr_in
*listen4
, *ip4
;
767 struct sockaddr_in6
*listen6
, *ip6
;
771 listen4
= (struct sockaddr_in
*) &listen_addr
->src_addr
;
772 ip4
= (struct sockaddr_in
*) &addr
->src_addr
;
773 ip4
->sin_family
= listen4
->sin_family
;
774 ip4
->sin_addr
.s_addr
= dst
->ip4
.addr
;
775 ip4
->sin_port
= listen4
->sin_port
;
777 ip4
= (struct sockaddr_in
*) &addr
->dst_addr
;
778 ip4
->sin_family
= listen4
->sin_family
;
779 ip4
->sin_addr
.s_addr
= src
->ip4
.addr
;
780 ip4
->sin_port
= port
;
783 listen6
= (struct sockaddr_in6
*) &listen_addr
->src_addr
;
784 ip6
= (struct sockaddr_in6
*) &addr
->src_addr
;
785 ip6
->sin6_family
= listen6
->sin6_family
;
786 ip6
->sin6_addr
= dst
->ip6
;
787 ip6
->sin6_port
= listen6
->sin6_port
;
789 ip6
= (struct sockaddr_in6
*) &addr
->dst_addr
;
790 ip6
->sin6_family
= listen6
->sin6_family
;
791 ip6
->sin6_addr
= src
->ip6
;
792 ip6
->sin6_port
= port
;
799 static inline int cma_user_data_offset(enum rdma_port_space ps
)
805 return sizeof(struct cma_hdr
);
809 static void cma_cancel_route(struct rdma_id_private
*id_priv
)
811 switch (rdma_port_get_link_layer(id_priv
->id
.device
, id_priv
->id
.port_num
)) {
812 case IB_LINK_LAYER_INFINIBAND
:
814 ib_sa_cancel_query(id_priv
->query_id
, id_priv
->query
);
821 static void cma_cancel_listens(struct rdma_id_private
*id_priv
)
823 struct rdma_id_private
*dev_id_priv
;
826 * Remove from listen_any_list to prevent added devices from spawning
827 * additional listen requests.
830 list_del(&id_priv
->list
);
832 while (!list_empty(&id_priv
->listen_list
)) {
833 dev_id_priv
= list_entry(id_priv
->listen_list
.next
,
834 struct rdma_id_private
, listen_list
);
835 /* sync with device removal to avoid duplicate destruction */
836 list_del_init(&dev_id_priv
->list
);
837 list_del(&dev_id_priv
->listen_list
);
840 rdma_destroy_id(&dev_id_priv
->id
);
846 static void cma_cancel_operation(struct rdma_id_private
*id_priv
,
847 enum rdma_cm_state state
)
850 case RDMA_CM_ADDR_QUERY
:
851 rdma_addr_cancel(&id_priv
->id
.route
.addr
.dev_addr
);
853 case RDMA_CM_ROUTE_QUERY
:
854 cma_cancel_route(id_priv
);
857 if (cma_any_addr((struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
)
858 && !id_priv
->cma_dev
)
859 cma_cancel_listens(id_priv
);
866 static void cma_release_port(struct rdma_id_private
*id_priv
)
868 struct rdma_bind_list
*bind_list
= id_priv
->bind_list
;
874 hlist_del(&id_priv
->node
);
875 if (hlist_empty(&bind_list
->owners
)) {
876 idr_remove(bind_list
->ps
, bind_list
->port
);
882 static void cma_leave_mc_groups(struct rdma_id_private
*id_priv
)
884 struct cma_multicast
*mc
;
886 while (!list_empty(&id_priv
->mc_list
)) {
887 mc
= container_of(id_priv
->mc_list
.next
,
888 struct cma_multicast
, list
);
890 switch (rdma_port_get_link_layer(id_priv
->cma_dev
->device
, id_priv
->id
.port_num
)) {
891 case IB_LINK_LAYER_INFINIBAND
:
892 ib_sa_free_multicast(mc
->multicast
.ib
);
895 case IB_LINK_LAYER_ETHERNET
:
896 kref_put(&mc
->mcref
, release_mc
);
904 void rdma_destroy_id(struct rdma_cm_id
*id
)
906 struct rdma_id_private
*id_priv
;
907 enum rdma_cm_state state
;
909 id_priv
= container_of(id
, struct rdma_id_private
, id
);
910 state
= cma_exch(id_priv
, RDMA_CM_DESTROYING
);
911 cma_cancel_operation(id_priv
, state
);
914 * Wait for any active callback to finish. New callbacks will find
915 * the id_priv state set to destroying and abort.
917 mutex_lock(&id_priv
->handler_mutex
);
918 mutex_unlock(&id_priv
->handler_mutex
);
920 if (id_priv
->cma_dev
) {
921 switch (rdma_node_get_transport(id_priv
->id
.device
->node_type
)) {
922 case RDMA_TRANSPORT_IB
:
923 if (id_priv
->cm_id
.ib
&& !IS_ERR(id_priv
->cm_id
.ib
))
924 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
926 case RDMA_TRANSPORT_IWARP
:
927 if (id_priv
->cm_id
.iw
&& !IS_ERR(id_priv
->cm_id
.iw
))
928 iw_destroy_cm_id(id_priv
->cm_id
.iw
);
933 cma_leave_mc_groups(id_priv
);
934 cma_release_dev(id_priv
);
937 cma_release_port(id_priv
);
938 cma_deref_id(id_priv
);
939 wait_for_completion(&id_priv
->comp
);
941 if (id_priv
->internal_id
)
942 cma_deref_id(id_priv
->id
.context
);
944 kfree(id_priv
->id
.route
.path_rec
);
947 EXPORT_SYMBOL(rdma_destroy_id
);
949 static int cma_rep_recv(struct rdma_id_private
*id_priv
)
953 ret
= cma_modify_qp_rtr(id_priv
, NULL
);
957 ret
= cma_modify_qp_rts(id_priv
, NULL
);
961 ret
= ib_send_cm_rtu(id_priv
->cm_id
.ib
, NULL
, 0);
967 cma_modify_qp_err(id_priv
);
968 ib_send_cm_rej(id_priv
->cm_id
.ib
, IB_CM_REJ_CONSUMER_DEFINED
,
973 static int cma_verify_rep(struct rdma_id_private
*id_priv
, void *data
)
975 if (id_priv
->id
.ps
== RDMA_PS_SDP
&&
976 sdp_get_majv(((struct sdp_hah
*) data
)->sdp_version
) !=
983 static void cma_set_rep_event_data(struct rdma_cm_event
*event
,
984 struct ib_cm_rep_event_param
*rep_data
,
987 event
->param
.conn
.private_data
= private_data
;
988 event
->param
.conn
.private_data_len
= IB_CM_REP_PRIVATE_DATA_SIZE
;
989 event
->param
.conn
.responder_resources
= rep_data
->responder_resources
;
990 event
->param
.conn
.initiator_depth
= rep_data
->initiator_depth
;
991 event
->param
.conn
.flow_control
= rep_data
->flow_control
;
992 event
->param
.conn
.rnr_retry_count
= rep_data
->rnr_retry_count
;
993 event
->param
.conn
.srq
= rep_data
->srq
;
994 event
->param
.conn
.qp_num
= rep_data
->remote_qpn
;
997 static int cma_ib_handler(struct ib_cm_id
*cm_id
, struct ib_cm_event
*ib_event
)
999 struct rdma_id_private
*id_priv
= cm_id
->context
;
1000 struct rdma_cm_event event
;
1003 if ((ib_event
->event
!= IB_CM_TIMEWAIT_EXIT
&&
1004 cma_disable_callback(id_priv
, RDMA_CM_CONNECT
)) ||
1005 (ib_event
->event
== IB_CM_TIMEWAIT_EXIT
&&
1006 cma_disable_callback(id_priv
, RDMA_CM_DISCONNECT
)))
1009 memset(&event
, 0, sizeof event
);
1010 switch (ib_event
->event
) {
1011 case IB_CM_REQ_ERROR
:
1012 case IB_CM_REP_ERROR
:
1013 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
1014 event
.status
= -ETIMEDOUT
;
1016 case IB_CM_REP_RECEIVED
:
1017 event
.status
= cma_verify_rep(id_priv
, ib_event
->private_data
);
1019 event
.event
= RDMA_CM_EVENT_CONNECT_ERROR
;
1020 else if (id_priv
->id
.qp
&& id_priv
->id
.ps
!= RDMA_PS_SDP
) {
1021 event
.status
= cma_rep_recv(id_priv
);
1022 event
.event
= event
.status
? RDMA_CM_EVENT_CONNECT_ERROR
:
1023 RDMA_CM_EVENT_ESTABLISHED
;
1025 event
.event
= RDMA_CM_EVENT_CONNECT_RESPONSE
;
1026 cma_set_rep_event_data(&event
, &ib_event
->param
.rep_rcvd
,
1027 ib_event
->private_data
);
1029 case IB_CM_RTU_RECEIVED
:
1030 case IB_CM_USER_ESTABLISHED
:
1031 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1033 case IB_CM_DREQ_ERROR
:
1034 event
.status
= -ETIMEDOUT
; /* fall through */
1035 case IB_CM_DREQ_RECEIVED
:
1036 case IB_CM_DREP_RECEIVED
:
1037 if (!cma_comp_exch(id_priv
, RDMA_CM_CONNECT
,
1038 RDMA_CM_DISCONNECT
))
1040 event
.event
= RDMA_CM_EVENT_DISCONNECTED
;
1042 case IB_CM_TIMEWAIT_EXIT
:
1043 event
.event
= RDMA_CM_EVENT_TIMEWAIT_EXIT
;
1045 case IB_CM_MRA_RECEIVED
:
1048 case IB_CM_REJ_RECEIVED
:
1049 cma_modify_qp_err(id_priv
);
1050 event
.status
= ib_event
->param
.rej_rcvd
.reason
;
1051 event
.event
= RDMA_CM_EVENT_REJECTED
;
1052 event
.param
.conn
.private_data
= ib_event
->private_data
;
1053 event
.param
.conn
.private_data_len
= IB_CM_REJ_PRIVATE_DATA_SIZE
;
1056 printk(KERN_ERR
"RDMA CMA: unexpected IB CM event: %d\n",
1061 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
1063 /* Destroy the CM ID by returning a non-zero value. */
1064 id_priv
->cm_id
.ib
= NULL
;
1065 cma_exch(id_priv
, RDMA_CM_DESTROYING
);
1066 mutex_unlock(&id_priv
->handler_mutex
);
1067 rdma_destroy_id(&id_priv
->id
);
1071 mutex_unlock(&id_priv
->handler_mutex
);
1075 static struct rdma_id_private
*cma_new_conn_id(struct rdma_cm_id
*listen_id
,
1076 struct ib_cm_event
*ib_event
)
1078 struct rdma_id_private
*id_priv
;
1079 struct rdma_cm_id
*id
;
1080 struct rdma_route
*rt
;
1081 union cma_ip_addr
*src
, *dst
;
1086 if (cma_get_net_info(ib_event
->private_data
, listen_id
->ps
,
1087 &ip_ver
, &port
, &src
, &dst
))
1090 id
= rdma_create_id(listen_id
->event_handler
, listen_id
->context
,
1091 listen_id
->ps
, ib_event
->param
.req_rcvd
.qp_type
);
1095 cma_save_net_info(&id
->route
.addr
, &listen_id
->route
.addr
,
1096 ip_ver
, port
, src
, dst
);
1099 rt
->num_paths
= ib_event
->param
.req_rcvd
.alternate_path
? 2 : 1;
1100 rt
->path_rec
= kmalloc(sizeof *rt
->path_rec
* rt
->num_paths
,
1105 rt
->path_rec
[0] = *ib_event
->param
.req_rcvd
.primary_path
;
1106 if (rt
->num_paths
== 2)
1107 rt
->path_rec
[1] = *ib_event
->param
.req_rcvd
.alternate_path
;
1109 if (cma_any_addr((struct sockaddr
*) &rt
->addr
.src_addr
)) {
1110 rt
->addr
.dev_addr
.dev_type
= ARPHRD_INFINIBAND
;
1111 rdma_addr_set_sgid(&rt
->addr
.dev_addr
, &rt
->path_rec
[0].sgid
);
1112 ib_addr_set_pkey(&rt
->addr
.dev_addr
, rt
->path_rec
[0].pkey
);
1114 ret
= rdma_translate_ip((struct sockaddr
*) &rt
->addr
.src_addr
,
1115 &rt
->addr
.dev_addr
);
1119 rdma_addr_set_dgid(&rt
->addr
.dev_addr
, &rt
->path_rec
[0].dgid
);
1121 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1122 id_priv
->state
= RDMA_CM_CONNECT
;
1126 rdma_destroy_id(id
);
1131 static struct rdma_id_private
*cma_new_udp_id(struct rdma_cm_id
*listen_id
,
1132 struct ib_cm_event
*ib_event
)
1134 struct rdma_id_private
*id_priv
;
1135 struct rdma_cm_id
*id
;
1136 union cma_ip_addr
*src
, *dst
;
1141 id
= rdma_create_id(listen_id
->event_handler
, listen_id
->context
,
1142 listen_id
->ps
, IB_QPT_UD
);
1147 if (cma_get_net_info(ib_event
->private_data
, listen_id
->ps
,
1148 &ip_ver
, &port
, &src
, &dst
))
1151 cma_save_net_info(&id
->route
.addr
, &listen_id
->route
.addr
,
1152 ip_ver
, port
, src
, dst
);
1154 if (!cma_any_addr((struct sockaddr
*) &id
->route
.addr
.src_addr
)) {
1155 ret
= rdma_translate_ip((struct sockaddr
*) &id
->route
.addr
.src_addr
,
1156 &id
->route
.addr
.dev_addr
);
1161 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1162 id_priv
->state
= RDMA_CM_CONNECT
;
1165 rdma_destroy_id(id
);
1169 static void cma_set_req_event_data(struct rdma_cm_event
*event
,
1170 struct ib_cm_req_event_param
*req_data
,
1171 void *private_data
, int offset
)
1173 event
->param
.conn
.private_data
= private_data
+ offset
;
1174 event
->param
.conn
.private_data_len
= IB_CM_REQ_PRIVATE_DATA_SIZE
- offset
;
1175 event
->param
.conn
.responder_resources
= req_data
->responder_resources
;
1176 event
->param
.conn
.initiator_depth
= req_data
->initiator_depth
;
1177 event
->param
.conn
.flow_control
= req_data
->flow_control
;
1178 event
->param
.conn
.retry_count
= req_data
->retry_count
;
1179 event
->param
.conn
.rnr_retry_count
= req_data
->rnr_retry_count
;
1180 event
->param
.conn
.srq
= req_data
->srq
;
1181 event
->param
.conn
.qp_num
= req_data
->remote_qpn
;
1184 static int cma_req_handler(struct ib_cm_id
*cm_id
, struct ib_cm_event
*ib_event
)
1186 struct rdma_id_private
*listen_id
, *conn_id
;
1187 struct rdma_cm_event event
;
1190 listen_id
= cm_id
->context
;
1191 if (cma_disable_callback(listen_id
, RDMA_CM_LISTEN
))
1192 return -ECONNABORTED
;
1194 memset(&event
, 0, sizeof event
);
1195 offset
= cma_user_data_offset(listen_id
->id
.ps
);
1196 event
.event
= RDMA_CM_EVENT_CONNECT_REQUEST
;
1197 if (listen_id
->id
.qp_type
== IB_QPT_UD
) {
1198 conn_id
= cma_new_udp_id(&listen_id
->id
, ib_event
);
1199 event
.param
.ud
.private_data
= ib_event
->private_data
+ offset
;
1200 event
.param
.ud
.private_data_len
=
1201 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE
- offset
;
1203 conn_id
= cma_new_conn_id(&listen_id
->id
, ib_event
);
1204 cma_set_req_event_data(&event
, &ib_event
->param
.req_rcvd
,
1205 ib_event
->private_data
, offset
);
1212 mutex_lock_nested(&conn_id
->handler_mutex
, SINGLE_DEPTH_NESTING
);
1213 ret
= cma_acquire_dev(conn_id
);
1215 goto release_conn_id
;
1217 conn_id
->cm_id
.ib
= cm_id
;
1218 cm_id
->context
= conn_id
;
1219 cm_id
->cm_handler
= cma_ib_handler
;
1222 * Protect against the user destroying conn_id from another thread
1223 * until we're done accessing it.
1225 atomic_inc(&conn_id
->refcount
);
1226 ret
= conn_id
->id
.event_handler(&conn_id
->id
, &event
);
1229 * Acquire mutex to prevent user executing rdma_destroy_id()
1230 * while we're accessing the cm_id.
1233 if (cma_comp(conn_id
, RDMA_CM_CONNECT
) && (conn_id
->id
.qp_type
!= IB_QPT_UD
))
1234 ib_send_cm_mra(cm_id
, CMA_CM_MRA_SETTING
, NULL
, 0);
1235 mutex_unlock(&lock
);
1236 mutex_unlock(&conn_id
->handler_mutex
);
1237 cma_deref_id(conn_id
);
1240 cma_deref_id(conn_id
);
1242 /* Destroy the CM ID by returning a non-zero value. */
1243 conn_id
->cm_id
.ib
= NULL
;
1246 cma_exch(conn_id
, RDMA_CM_DESTROYING
);
1247 mutex_unlock(&conn_id
->handler_mutex
);
1248 rdma_destroy_id(&conn_id
->id
);
1251 mutex_unlock(&listen_id
->handler_mutex
);
1255 static __be64
cma_get_service_id(enum rdma_port_space ps
, struct sockaddr
*addr
)
1257 return cpu_to_be64(((u64
)ps
<< 16) + be16_to_cpu(cma_port(addr
)));
1260 static void cma_set_compare_data(enum rdma_port_space ps
, struct sockaddr
*addr
,
1261 struct ib_cm_compare_data
*compare
)
1263 struct cma_hdr
*cma_data
, *cma_mask
;
1264 struct sdp_hh
*sdp_data
, *sdp_mask
;
1266 struct in6_addr ip6_addr
;
1268 memset(compare
, 0, sizeof *compare
);
1269 cma_data
= (void *) compare
->data
;
1270 cma_mask
= (void *) compare
->mask
;
1271 sdp_data
= (void *) compare
->data
;
1272 sdp_mask
= (void *) compare
->mask
;
1274 switch (addr
->sa_family
) {
1276 ip4_addr
= ((struct sockaddr_in
*) addr
)->sin_addr
.s_addr
;
1277 if (ps
== RDMA_PS_SDP
) {
1278 sdp_set_ip_ver(sdp_data
, 4);
1279 sdp_set_ip_ver(sdp_mask
, 0xF);
1280 sdp_data
->dst_addr
.ip4
.addr
= ip4_addr
;
1281 sdp_mask
->dst_addr
.ip4
.addr
= htonl(~0);
1283 cma_set_ip_ver(cma_data
, 4);
1284 cma_set_ip_ver(cma_mask
, 0xF);
1285 cma_data
->dst_addr
.ip4
.addr
= ip4_addr
;
1286 cma_mask
->dst_addr
.ip4
.addr
= htonl(~0);
1290 ip6_addr
= ((struct sockaddr_in6
*) addr
)->sin6_addr
;
1291 if (ps
== RDMA_PS_SDP
) {
1292 sdp_set_ip_ver(sdp_data
, 6);
1293 sdp_set_ip_ver(sdp_mask
, 0xF);
1294 sdp_data
->dst_addr
.ip6
= ip6_addr
;
1295 memset(&sdp_mask
->dst_addr
.ip6
, 0xFF,
1296 sizeof sdp_mask
->dst_addr
.ip6
);
1298 cma_set_ip_ver(cma_data
, 6);
1299 cma_set_ip_ver(cma_mask
, 0xF);
1300 cma_data
->dst_addr
.ip6
= ip6_addr
;
1301 memset(&cma_mask
->dst_addr
.ip6
, 0xFF,
1302 sizeof cma_mask
->dst_addr
.ip6
);
1310 static int cma_iw_handler(struct iw_cm_id
*iw_id
, struct iw_cm_event
*iw_event
)
1312 struct rdma_id_private
*id_priv
= iw_id
->context
;
1313 struct rdma_cm_event event
;
1314 struct sockaddr_in
*sin
;
1317 if (cma_disable_callback(id_priv
, RDMA_CM_CONNECT
))
1320 memset(&event
, 0, sizeof event
);
1321 switch (iw_event
->event
) {
1322 case IW_CM_EVENT_CLOSE
:
1323 event
.event
= RDMA_CM_EVENT_DISCONNECTED
;
1325 case IW_CM_EVENT_CONNECT_REPLY
:
1326 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1327 *sin
= iw_event
->local_addr
;
1328 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.dst_addr
;
1329 *sin
= iw_event
->remote_addr
;
1330 switch (iw_event
->status
) {
1332 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1336 event
.event
= RDMA_CM_EVENT_REJECTED
;
1339 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
1342 event
.event
= RDMA_CM_EVENT_CONNECT_ERROR
;
1346 case IW_CM_EVENT_ESTABLISHED
:
1347 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1353 event
.status
= iw_event
->status
;
1354 event
.param
.conn
.private_data
= iw_event
->private_data
;
1355 event
.param
.conn
.private_data_len
= iw_event
->private_data_len
;
1356 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
1358 /* Destroy the CM ID by returning a non-zero value. */
1359 id_priv
->cm_id
.iw
= NULL
;
1360 cma_exch(id_priv
, RDMA_CM_DESTROYING
);
1361 mutex_unlock(&id_priv
->handler_mutex
);
1362 rdma_destroy_id(&id_priv
->id
);
1366 mutex_unlock(&id_priv
->handler_mutex
);
1370 static int iw_conn_req_handler(struct iw_cm_id
*cm_id
,
1371 struct iw_cm_event
*iw_event
)
1373 struct rdma_cm_id
*new_cm_id
;
1374 struct rdma_id_private
*listen_id
, *conn_id
;
1375 struct sockaddr_in
*sin
;
1376 struct net_device
*dev
= NULL
;
1377 struct rdma_cm_event event
;
1379 struct ib_device_attr attr
;
1381 listen_id
= cm_id
->context
;
1382 if (cma_disable_callback(listen_id
, RDMA_CM_LISTEN
))
1383 return -ECONNABORTED
;
1385 /* Create a new RDMA id for the new IW CM ID */
1386 new_cm_id
= rdma_create_id(listen_id
->id
.event_handler
,
1387 listen_id
->id
.context
,
1388 RDMA_PS_TCP
, IB_QPT_RC
);
1389 if (IS_ERR(new_cm_id
)) {
1393 conn_id
= container_of(new_cm_id
, struct rdma_id_private
, id
);
1394 mutex_lock_nested(&conn_id
->handler_mutex
, SINGLE_DEPTH_NESTING
);
1395 conn_id
->state
= RDMA_CM_CONNECT
;
1397 dev
= ip_dev_find(&init_net
, iw_event
->local_addr
.sin_addr
.s_addr
);
1399 ret
= -EADDRNOTAVAIL
;
1400 mutex_unlock(&conn_id
->handler_mutex
);
1401 rdma_destroy_id(new_cm_id
);
1404 ret
= rdma_copy_addr(&conn_id
->id
.route
.addr
.dev_addr
, dev
, NULL
);
1406 mutex_unlock(&conn_id
->handler_mutex
);
1407 rdma_destroy_id(new_cm_id
);
1411 ret
= cma_acquire_dev(conn_id
);
1413 mutex_unlock(&conn_id
->handler_mutex
);
1414 rdma_destroy_id(new_cm_id
);
1418 conn_id
->cm_id
.iw
= cm_id
;
1419 cm_id
->context
= conn_id
;
1420 cm_id
->cm_handler
= cma_iw_handler
;
1422 sin
= (struct sockaddr_in
*) &new_cm_id
->route
.addr
.src_addr
;
1423 *sin
= iw_event
->local_addr
;
1424 sin
= (struct sockaddr_in
*) &new_cm_id
->route
.addr
.dst_addr
;
1425 *sin
= iw_event
->remote_addr
;
1427 ret
= ib_query_device(conn_id
->id
.device
, &attr
);
1429 mutex_unlock(&conn_id
->handler_mutex
);
1430 rdma_destroy_id(new_cm_id
);
1434 memset(&event
, 0, sizeof event
);
1435 event
.event
= RDMA_CM_EVENT_CONNECT_REQUEST
;
1436 event
.param
.conn
.private_data
= iw_event
->private_data
;
1437 event
.param
.conn
.private_data_len
= iw_event
->private_data_len
;
1438 event
.param
.conn
.initiator_depth
= attr
.max_qp_init_rd_atom
;
1439 event
.param
.conn
.responder_resources
= attr
.max_qp_rd_atom
;
1442 * Protect against the user destroying conn_id from another thread
1443 * until we're done accessing it.
1445 atomic_inc(&conn_id
->refcount
);
1446 ret
= conn_id
->id
.event_handler(&conn_id
->id
, &event
);
1448 /* User wants to destroy the CM ID */
1449 conn_id
->cm_id
.iw
= NULL
;
1450 cma_exch(conn_id
, RDMA_CM_DESTROYING
);
1451 mutex_unlock(&conn_id
->handler_mutex
);
1452 cma_deref_id(conn_id
);
1453 rdma_destroy_id(&conn_id
->id
);
1457 mutex_unlock(&conn_id
->handler_mutex
);
1458 cma_deref_id(conn_id
);
1463 mutex_unlock(&listen_id
->handler_mutex
);
1467 static int cma_ib_listen(struct rdma_id_private
*id_priv
)
1469 struct ib_cm_compare_data compare_data
;
1470 struct sockaddr
*addr
;
1474 id_priv
->cm_id
.ib
= ib_create_cm_id(id_priv
->id
.device
, cma_req_handler
,
1476 if (IS_ERR(id_priv
->cm_id
.ib
))
1477 return PTR_ERR(id_priv
->cm_id
.ib
);
1479 addr
= (struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
;
1480 svc_id
= cma_get_service_id(id_priv
->id
.ps
, addr
);
1481 if (cma_any_addr(addr
))
1482 ret
= ib_cm_listen(id_priv
->cm_id
.ib
, svc_id
, 0, NULL
);
1484 cma_set_compare_data(id_priv
->id
.ps
, addr
, &compare_data
);
1485 ret
= ib_cm_listen(id_priv
->cm_id
.ib
, svc_id
, 0, &compare_data
);
1489 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
1490 id_priv
->cm_id
.ib
= NULL
;
1496 static int cma_iw_listen(struct rdma_id_private
*id_priv
, int backlog
)
1499 struct sockaddr_in
*sin
;
1501 id_priv
->cm_id
.iw
= iw_create_cm_id(id_priv
->id
.device
,
1502 iw_conn_req_handler
,
1504 if (IS_ERR(id_priv
->cm_id
.iw
))
1505 return PTR_ERR(id_priv
->cm_id
.iw
);
1507 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1508 id_priv
->cm_id
.iw
->local_addr
= *sin
;
1510 ret
= iw_cm_listen(id_priv
->cm_id
.iw
, backlog
);
1513 iw_destroy_cm_id(id_priv
->cm_id
.iw
);
1514 id_priv
->cm_id
.iw
= NULL
;
1520 static int cma_listen_handler(struct rdma_cm_id
*id
,
1521 struct rdma_cm_event
*event
)
1523 struct rdma_id_private
*id_priv
= id
->context
;
1525 id
->context
= id_priv
->id
.context
;
1526 id
->event_handler
= id_priv
->id
.event_handler
;
1527 return id_priv
->id
.event_handler(id
, event
);
1530 static void cma_listen_on_dev(struct rdma_id_private
*id_priv
,
1531 struct cma_device
*cma_dev
)
1533 struct rdma_id_private
*dev_id_priv
;
1534 struct rdma_cm_id
*id
;
1537 id
= rdma_create_id(cma_listen_handler
, id_priv
, id_priv
->id
.ps
,
1538 id_priv
->id
.qp_type
);
1542 dev_id_priv
= container_of(id
, struct rdma_id_private
, id
);
1544 dev_id_priv
->state
= RDMA_CM_ADDR_BOUND
;
1545 memcpy(&id
->route
.addr
.src_addr
, &id_priv
->id
.route
.addr
.src_addr
,
1546 ip_addr_size((struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
));
1548 cma_attach_to_dev(dev_id_priv
, cma_dev
);
1549 list_add_tail(&dev_id_priv
->listen_list
, &id_priv
->listen_list
);
1550 atomic_inc(&id_priv
->refcount
);
1551 dev_id_priv
->internal_id
= 1;
1553 ret
= rdma_listen(id
, id_priv
->backlog
);
1555 printk(KERN_WARNING
"RDMA CMA: cma_listen_on_dev, error %d, "
1556 "listening on device %s\n", ret
, cma_dev
->device
->name
);
1559 static void cma_listen_on_all(struct rdma_id_private
*id_priv
)
1561 struct cma_device
*cma_dev
;
1564 list_add_tail(&id_priv
->list
, &listen_any_list
);
1565 list_for_each_entry(cma_dev
, &dev_list
, list
)
1566 cma_listen_on_dev(id_priv
, cma_dev
);
1567 mutex_unlock(&lock
);
1570 void rdma_set_service_type(struct rdma_cm_id
*id
, int tos
)
1572 struct rdma_id_private
*id_priv
;
1574 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1575 id_priv
->tos
= (u8
) tos
;
1577 EXPORT_SYMBOL(rdma_set_service_type
);
1579 static void cma_query_handler(int status
, struct ib_sa_path_rec
*path_rec
,
1582 struct cma_work
*work
= context
;
1583 struct rdma_route
*route
;
1585 route
= &work
->id
->id
.route
;
1588 route
->num_paths
= 1;
1589 *route
->path_rec
= *path_rec
;
1591 work
->old_state
= RDMA_CM_ROUTE_QUERY
;
1592 work
->new_state
= RDMA_CM_ADDR_RESOLVED
;
1593 work
->event
.event
= RDMA_CM_EVENT_ROUTE_ERROR
;
1594 work
->event
.status
= status
;
1597 queue_work(cma_wq
, &work
->work
);
1600 static int cma_query_ib_route(struct rdma_id_private
*id_priv
, int timeout_ms
,
1601 struct cma_work
*work
)
1603 struct rdma_addr
*addr
= &id_priv
->id
.route
.addr
;
1604 struct ib_sa_path_rec path_rec
;
1605 ib_sa_comp_mask comp_mask
;
1606 struct sockaddr_in6
*sin6
;
1608 memset(&path_rec
, 0, sizeof path_rec
);
1609 rdma_addr_get_sgid(&addr
->dev_addr
, &path_rec
.sgid
);
1610 rdma_addr_get_dgid(&addr
->dev_addr
, &path_rec
.dgid
);
1611 path_rec
.pkey
= cpu_to_be16(ib_addr_get_pkey(&addr
->dev_addr
));
1612 path_rec
.numb_path
= 1;
1613 path_rec
.reversible
= 1;
1614 path_rec
.service_id
= cma_get_service_id(id_priv
->id
.ps
,
1615 (struct sockaddr
*) &addr
->dst_addr
);
1617 comp_mask
= IB_SA_PATH_REC_DGID
| IB_SA_PATH_REC_SGID
|
1618 IB_SA_PATH_REC_PKEY
| IB_SA_PATH_REC_NUMB_PATH
|
1619 IB_SA_PATH_REC_REVERSIBLE
| IB_SA_PATH_REC_SERVICE_ID
;
1621 if (addr
->src_addr
.ss_family
== AF_INET
) {
1622 path_rec
.qos_class
= cpu_to_be16((u16
) id_priv
->tos
);
1623 comp_mask
|= IB_SA_PATH_REC_QOS_CLASS
;
1625 sin6
= (struct sockaddr_in6
*) &addr
->src_addr
;
1626 path_rec
.traffic_class
= (u8
) (be32_to_cpu(sin6
->sin6_flowinfo
) >> 20);
1627 comp_mask
|= IB_SA_PATH_REC_TRAFFIC_CLASS
;
1630 id_priv
->query_id
= ib_sa_path_rec_get(&sa_client
, id_priv
->id
.device
,
1631 id_priv
->id
.port_num
, &path_rec
,
1632 comp_mask
, timeout_ms
,
1633 GFP_KERNEL
, cma_query_handler
,
1634 work
, &id_priv
->query
);
1636 return (id_priv
->query_id
< 0) ? id_priv
->query_id
: 0;
1639 static void cma_work_handler(struct work_struct
*_work
)
1641 struct cma_work
*work
= container_of(_work
, struct cma_work
, work
);
1642 struct rdma_id_private
*id_priv
= work
->id
;
1645 mutex_lock(&id_priv
->handler_mutex
);
1646 if (!cma_comp_exch(id_priv
, work
->old_state
, work
->new_state
))
1649 if (id_priv
->id
.event_handler(&id_priv
->id
, &work
->event
)) {
1650 cma_exch(id_priv
, RDMA_CM_DESTROYING
);
1654 mutex_unlock(&id_priv
->handler_mutex
);
1655 cma_deref_id(id_priv
);
1657 rdma_destroy_id(&id_priv
->id
);
1661 static void cma_ndev_work_handler(struct work_struct
*_work
)
1663 struct cma_ndev_work
*work
= container_of(_work
, struct cma_ndev_work
, work
);
1664 struct rdma_id_private
*id_priv
= work
->id
;
1667 mutex_lock(&id_priv
->handler_mutex
);
1668 if (id_priv
->state
== RDMA_CM_DESTROYING
||
1669 id_priv
->state
== RDMA_CM_DEVICE_REMOVAL
)
1672 if (id_priv
->id
.event_handler(&id_priv
->id
, &work
->event
)) {
1673 cma_exch(id_priv
, RDMA_CM_DESTROYING
);
1678 mutex_unlock(&id_priv
->handler_mutex
);
1679 cma_deref_id(id_priv
);
1681 rdma_destroy_id(&id_priv
->id
);
1685 static int cma_resolve_ib_route(struct rdma_id_private
*id_priv
, int timeout_ms
)
1687 struct rdma_route
*route
= &id_priv
->id
.route
;
1688 struct cma_work
*work
;
1691 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1696 INIT_WORK(&work
->work
, cma_work_handler
);
1697 work
->old_state
= RDMA_CM_ROUTE_QUERY
;
1698 work
->new_state
= RDMA_CM_ROUTE_RESOLVED
;
1699 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
1701 route
->path_rec
= kmalloc(sizeof *route
->path_rec
, GFP_KERNEL
);
1702 if (!route
->path_rec
) {
1707 ret
= cma_query_ib_route(id_priv
, timeout_ms
, work
);
1713 kfree(route
->path_rec
);
1714 route
->path_rec
= NULL
;
1720 int rdma_set_ib_paths(struct rdma_cm_id
*id
,
1721 struct ib_sa_path_rec
*path_rec
, int num_paths
)
1723 struct rdma_id_private
*id_priv
;
1726 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1727 if (!cma_comp_exch(id_priv
, RDMA_CM_ADDR_RESOLVED
,
1728 RDMA_CM_ROUTE_RESOLVED
))
1731 id
->route
.path_rec
= kmemdup(path_rec
, sizeof *path_rec
* num_paths
,
1733 if (!id
->route
.path_rec
) {
1738 id
->route
.num_paths
= num_paths
;
1741 cma_comp_exch(id_priv
, RDMA_CM_ROUTE_RESOLVED
, RDMA_CM_ADDR_RESOLVED
);
1744 EXPORT_SYMBOL(rdma_set_ib_paths
);
1746 static int cma_resolve_iw_route(struct rdma_id_private
*id_priv
, int timeout_ms
)
1748 struct cma_work
*work
;
1750 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1755 INIT_WORK(&work
->work
, cma_work_handler
);
1756 work
->old_state
= RDMA_CM_ROUTE_QUERY
;
1757 work
->new_state
= RDMA_CM_ROUTE_RESOLVED
;
1758 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
1759 queue_work(cma_wq
, &work
->work
);
1763 static int cma_resolve_iboe_route(struct rdma_id_private
*id_priv
)
1765 struct rdma_route
*route
= &id_priv
->id
.route
;
1766 struct rdma_addr
*addr
= &route
->addr
;
1767 struct cma_work
*work
;
1769 struct sockaddr_in
*src_addr
= (struct sockaddr_in
*)&route
->addr
.src_addr
;
1770 struct sockaddr_in
*dst_addr
= (struct sockaddr_in
*)&route
->addr
.dst_addr
;
1771 struct net_device
*ndev
= NULL
;
1774 if (src_addr
->sin_family
!= dst_addr
->sin_family
)
1777 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1782 INIT_WORK(&work
->work
, cma_work_handler
);
1784 route
->path_rec
= kzalloc(sizeof *route
->path_rec
, GFP_KERNEL
);
1785 if (!route
->path_rec
) {
1790 route
->num_paths
= 1;
1792 if (addr
->dev_addr
.bound_dev_if
)
1793 ndev
= dev_get_by_index(&init_net
, addr
->dev_addr
.bound_dev_if
);
1799 vid
= rdma_vlan_dev_vlan_id(ndev
);
1801 iboe_mac_vlan_to_ll(&route
->path_rec
->sgid
, addr
->dev_addr
.src_dev_addr
, vid
);
1802 iboe_mac_vlan_to_ll(&route
->path_rec
->dgid
, addr
->dev_addr
.dst_dev_addr
, vid
);
1804 route
->path_rec
->hop_limit
= 1;
1805 route
->path_rec
->reversible
= 1;
1806 route
->path_rec
->pkey
= cpu_to_be16(0xffff);
1807 route
->path_rec
->mtu_selector
= IB_SA_EQ
;
1808 route
->path_rec
->sl
= id_priv
->tos
>> 5;
1810 route
->path_rec
->mtu
= iboe_get_mtu(ndev
->mtu
);
1811 route
->path_rec
->rate_selector
= IB_SA_EQ
;
1812 route
->path_rec
->rate
= iboe_get_rate(ndev
);
1814 route
->path_rec
->packet_life_time_selector
= IB_SA_EQ
;
1815 route
->path_rec
->packet_life_time
= CMA_IBOE_PACKET_LIFETIME
;
1816 if (!route
->path_rec
->mtu
) {
1821 work
->old_state
= RDMA_CM_ROUTE_QUERY
;
1822 work
->new_state
= RDMA_CM_ROUTE_RESOLVED
;
1823 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
1824 work
->event
.status
= 0;
1826 queue_work(cma_wq
, &work
->work
);
1831 kfree(route
->path_rec
);
1832 route
->path_rec
= NULL
;
1838 int rdma_resolve_route(struct rdma_cm_id
*id
, int timeout_ms
)
1840 struct rdma_id_private
*id_priv
;
1843 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1844 if (!cma_comp_exch(id_priv
, RDMA_CM_ADDR_RESOLVED
, RDMA_CM_ROUTE_QUERY
))
1847 atomic_inc(&id_priv
->refcount
);
1848 switch (rdma_node_get_transport(id
->device
->node_type
)) {
1849 case RDMA_TRANSPORT_IB
:
1850 switch (rdma_port_get_link_layer(id
->device
, id
->port_num
)) {
1851 case IB_LINK_LAYER_INFINIBAND
:
1852 ret
= cma_resolve_ib_route(id_priv
, timeout_ms
);
1854 case IB_LINK_LAYER_ETHERNET
:
1855 ret
= cma_resolve_iboe_route(id_priv
);
1861 case RDMA_TRANSPORT_IWARP
:
1862 ret
= cma_resolve_iw_route(id_priv
, timeout_ms
);
1873 cma_comp_exch(id_priv
, RDMA_CM_ROUTE_QUERY
, RDMA_CM_ADDR_RESOLVED
);
1874 cma_deref_id(id_priv
);
1877 EXPORT_SYMBOL(rdma_resolve_route
);
1879 static int cma_bind_loopback(struct rdma_id_private
*id_priv
)
1881 struct cma_device
*cma_dev
;
1882 struct ib_port_attr port_attr
;
1889 if (list_empty(&dev_list
)) {
1893 list_for_each_entry(cma_dev
, &dev_list
, list
)
1894 for (p
= 1; p
<= cma_dev
->device
->phys_port_cnt
; ++p
)
1895 if (!ib_query_port(cma_dev
->device
, p
, &port_attr
) &&
1896 port_attr
.state
== IB_PORT_ACTIVE
)
1900 cma_dev
= list_entry(dev_list
.next
, struct cma_device
, list
);
1903 ret
= ib_get_cached_gid(cma_dev
->device
, p
, 0, &gid
);
1907 ret
= ib_get_cached_pkey(cma_dev
->device
, p
, 0, &pkey
);
1911 id_priv
->id
.route
.addr
.dev_addr
.dev_type
=
1912 (rdma_port_get_link_layer(cma_dev
->device
, p
) == IB_LINK_LAYER_INFINIBAND
) ?
1913 ARPHRD_INFINIBAND
: ARPHRD_ETHER
;
1915 rdma_addr_set_sgid(&id_priv
->id
.route
.addr
.dev_addr
, &gid
);
1916 ib_addr_set_pkey(&id_priv
->id
.route
.addr
.dev_addr
, pkey
);
1917 id_priv
->id
.port_num
= p
;
1918 cma_attach_to_dev(id_priv
, cma_dev
);
1920 mutex_unlock(&lock
);
1924 static void addr_handler(int status
, struct sockaddr
*src_addr
,
1925 struct rdma_dev_addr
*dev_addr
, void *context
)
1927 struct rdma_id_private
*id_priv
= context
;
1928 struct rdma_cm_event event
;
1930 memset(&event
, 0, sizeof event
);
1931 mutex_lock(&id_priv
->handler_mutex
);
1932 if (!cma_comp_exch(id_priv
, RDMA_CM_ADDR_QUERY
,
1933 RDMA_CM_ADDR_RESOLVED
))
1936 if (!status
&& !id_priv
->cma_dev
)
1937 status
= cma_acquire_dev(id_priv
);
1940 if (!cma_comp_exch(id_priv
, RDMA_CM_ADDR_RESOLVED
,
1941 RDMA_CM_ADDR_BOUND
))
1943 event
.event
= RDMA_CM_EVENT_ADDR_ERROR
;
1944 event
.status
= status
;
1946 memcpy(&id_priv
->id
.route
.addr
.src_addr
, src_addr
,
1947 ip_addr_size(src_addr
));
1948 event
.event
= RDMA_CM_EVENT_ADDR_RESOLVED
;
1951 if (id_priv
->id
.event_handler(&id_priv
->id
, &event
)) {
1952 cma_exch(id_priv
, RDMA_CM_DESTROYING
);
1953 mutex_unlock(&id_priv
->handler_mutex
);
1954 cma_deref_id(id_priv
);
1955 rdma_destroy_id(&id_priv
->id
);
1959 mutex_unlock(&id_priv
->handler_mutex
);
1960 cma_deref_id(id_priv
);
1963 static int cma_resolve_loopback(struct rdma_id_private
*id_priv
)
1965 struct cma_work
*work
;
1966 struct sockaddr
*src
, *dst
;
1970 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1974 if (!id_priv
->cma_dev
) {
1975 ret
= cma_bind_loopback(id_priv
);
1980 rdma_addr_get_sgid(&id_priv
->id
.route
.addr
.dev_addr
, &gid
);
1981 rdma_addr_set_dgid(&id_priv
->id
.route
.addr
.dev_addr
, &gid
);
1983 src
= (struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
;
1984 if (cma_zero_addr(src
)) {
1985 dst
= (struct sockaddr
*) &id_priv
->id
.route
.addr
.dst_addr
;
1986 if ((src
->sa_family
= dst
->sa_family
) == AF_INET
) {
1987 ((struct sockaddr_in
*) src
)->sin_addr
.s_addr
=
1988 ((struct sockaddr_in
*) dst
)->sin_addr
.s_addr
;
1990 ipv6_addr_copy(&((struct sockaddr_in6
*) src
)->sin6_addr
,
1991 &((struct sockaddr_in6
*) dst
)->sin6_addr
);
1996 INIT_WORK(&work
->work
, cma_work_handler
);
1997 work
->old_state
= RDMA_CM_ADDR_QUERY
;
1998 work
->new_state
= RDMA_CM_ADDR_RESOLVED
;
1999 work
->event
.event
= RDMA_CM_EVENT_ADDR_RESOLVED
;
2000 queue_work(cma_wq
, &work
->work
);
2007 static int cma_bind_addr(struct rdma_cm_id
*id
, struct sockaddr
*src_addr
,
2008 struct sockaddr
*dst_addr
)
2010 if (!src_addr
|| !src_addr
->sa_family
) {
2011 src_addr
= (struct sockaddr
*) &id
->route
.addr
.src_addr
;
2012 if ((src_addr
->sa_family
= dst_addr
->sa_family
) == AF_INET6
) {
2013 ((struct sockaddr_in6
*) src_addr
)->sin6_scope_id
=
2014 ((struct sockaddr_in6
*) dst_addr
)->sin6_scope_id
;
2017 return rdma_bind_addr(id
, src_addr
);
2020 int rdma_resolve_addr(struct rdma_cm_id
*id
, struct sockaddr
*src_addr
,
2021 struct sockaddr
*dst_addr
, int timeout_ms
)
2023 struct rdma_id_private
*id_priv
;
2026 id_priv
= container_of(id
, struct rdma_id_private
, id
);
2027 if (id_priv
->state
== RDMA_CM_IDLE
) {
2028 ret
= cma_bind_addr(id
, src_addr
, dst_addr
);
2033 if (!cma_comp_exch(id_priv
, RDMA_CM_ADDR_BOUND
, RDMA_CM_ADDR_QUERY
))
2036 atomic_inc(&id_priv
->refcount
);
2037 memcpy(&id
->route
.addr
.dst_addr
, dst_addr
, ip_addr_size(dst_addr
));
2038 if (cma_any_addr(dst_addr
))
2039 ret
= cma_resolve_loopback(id_priv
);
2041 ret
= rdma_resolve_ip(&addr_client
, (struct sockaddr
*) &id
->route
.addr
.src_addr
,
2042 dst_addr
, &id
->route
.addr
.dev_addr
,
2043 timeout_ms
, addr_handler
, id_priv
);
2049 cma_comp_exch(id_priv
, RDMA_CM_ADDR_QUERY
, RDMA_CM_ADDR_BOUND
);
2050 cma_deref_id(id_priv
);
2053 EXPORT_SYMBOL(rdma_resolve_addr
);
2055 int rdma_set_reuseaddr(struct rdma_cm_id
*id
, int reuse
)
2057 struct rdma_id_private
*id_priv
;
2058 unsigned long flags
;
2061 id_priv
= container_of(id
, struct rdma_id_private
, id
);
2062 spin_lock_irqsave(&id_priv
->lock
, flags
);
2063 if (id_priv
->state
== RDMA_CM_IDLE
) {
2064 id_priv
->reuseaddr
= reuse
;
2069 spin_unlock_irqrestore(&id_priv
->lock
, flags
);
2072 EXPORT_SYMBOL(rdma_set_reuseaddr
);
2074 static void cma_bind_port(struct rdma_bind_list
*bind_list
,
2075 struct rdma_id_private
*id_priv
)
2077 struct sockaddr_in
*sin
;
2079 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
2080 sin
->sin_port
= htons(bind_list
->port
);
2081 id_priv
->bind_list
= bind_list
;
2082 hlist_add_head(&id_priv
->node
, &bind_list
->owners
);
2085 static int cma_alloc_port(struct idr
*ps
, struct rdma_id_private
*id_priv
,
2086 unsigned short snum
)
2088 struct rdma_bind_list
*bind_list
;
2091 bind_list
= kzalloc(sizeof *bind_list
, GFP_KERNEL
);
2096 ret
= idr_get_new_above(ps
, bind_list
, snum
, &port
);
2097 } while ((ret
== -EAGAIN
) && idr_pre_get(ps
, GFP_KERNEL
));
2103 ret
= -EADDRNOTAVAIL
;
2108 bind_list
->port
= (unsigned short) port
;
2109 cma_bind_port(bind_list
, id_priv
);
2112 idr_remove(ps
, port
);
2118 static int cma_alloc_any_port(struct idr
*ps
, struct rdma_id_private
*id_priv
)
2120 static unsigned int last_used_port
;
2121 int low
, high
, remaining
;
2124 inet_get_local_port_range(&low
, &high
);
2125 remaining
= (high
- low
) + 1;
2126 rover
= net_random() % remaining
+ low
;
2128 if (last_used_port
!= rover
&&
2129 !idr_find(ps
, (unsigned short) rover
)) {
2130 int ret
= cma_alloc_port(ps
, id_priv
, rover
);
2132 * Remember previously used port number in order to avoid
2133 * re-using same port immediately after it is closed.
2136 last_used_port
= rover
;
2137 if (ret
!= -EADDRNOTAVAIL
)
2142 if ((rover
< low
) || (rover
> high
))
2146 return -EADDRNOTAVAIL
;
2150 * Check that the requested port is available. This is called when trying to
2151 * bind to a specific port, or when trying to listen on a bound port. In
2152 * the latter case, the provided id_priv may already be on the bind_list, but
2153 * we still need to check that it's okay to start listening.
2155 static int cma_check_port(struct rdma_bind_list
*bind_list
,
2156 struct rdma_id_private
*id_priv
, uint8_t reuseaddr
)
2158 struct rdma_id_private
*cur_id
;
2159 struct sockaddr
*addr
, *cur_addr
;
2160 struct hlist_node
*node
;
2162 addr
= (struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
;
2163 if (cma_any_addr(addr
) && !reuseaddr
)
2164 return -EADDRNOTAVAIL
;
2166 hlist_for_each_entry(cur_id
, node
, &bind_list
->owners
, node
) {
2167 if (id_priv
== cur_id
)
2170 if ((cur_id
->state
== RDMA_CM_LISTEN
) ||
2171 !reuseaddr
|| !cur_id
->reuseaddr
) {
2172 cur_addr
= (struct sockaddr
*) &cur_id
->id
.route
.addr
.src_addr
;
2173 if (cma_any_addr(cur_addr
))
2174 return -EADDRNOTAVAIL
;
2176 if (!cma_addr_cmp(addr
, cur_addr
))
2183 static int cma_use_port(struct idr
*ps
, struct rdma_id_private
*id_priv
)
2185 struct rdma_bind_list
*bind_list
;
2186 unsigned short snum
;
2189 snum
= ntohs(cma_port((struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
));
2190 if (snum
< PROT_SOCK
&& !capable(CAP_NET_BIND_SERVICE
))
2193 bind_list
= idr_find(ps
, snum
);
2195 ret
= cma_alloc_port(ps
, id_priv
, snum
);
2197 ret
= cma_check_port(bind_list
, id_priv
, id_priv
->reuseaddr
);
2199 cma_bind_port(bind_list
, id_priv
);
2204 static int cma_bind_listen(struct rdma_id_private
*id_priv
)
2206 struct rdma_bind_list
*bind_list
= id_priv
->bind_list
;
2210 if (bind_list
->owners
.first
->next
)
2211 ret
= cma_check_port(bind_list
, id_priv
, 0);
2212 mutex_unlock(&lock
);
2216 static int cma_get_port(struct rdma_id_private
*id_priv
)
2221 switch (id_priv
->id
.ps
) {
2235 return -EPROTONOSUPPORT
;
2239 if (cma_any_port((struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
))
2240 ret
= cma_alloc_any_port(ps
, id_priv
);
2242 ret
= cma_use_port(ps
, id_priv
);
2243 mutex_unlock(&lock
);
2248 static int cma_check_linklocal(struct rdma_dev_addr
*dev_addr
,
2249 struct sockaddr
*addr
)
2251 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2252 struct sockaddr_in6
*sin6
;
2254 if (addr
->sa_family
!= AF_INET6
)
2257 sin6
= (struct sockaddr_in6
*) addr
;
2258 if ((ipv6_addr_type(&sin6
->sin6_addr
) & IPV6_ADDR_LINKLOCAL
) &&
2259 !sin6
->sin6_scope_id
)
2262 dev_addr
->bound_dev_if
= sin6
->sin6_scope_id
;
2267 int rdma_listen(struct rdma_cm_id
*id
, int backlog
)
2269 struct rdma_id_private
*id_priv
;
2272 id_priv
= container_of(id
, struct rdma_id_private
, id
);
2273 if (id_priv
->state
== RDMA_CM_IDLE
) {
2274 ((struct sockaddr
*) &id
->route
.addr
.src_addr
)->sa_family
= AF_INET
;
2275 ret
= rdma_bind_addr(id
, (struct sockaddr
*) &id
->route
.addr
.src_addr
);
2280 if (!cma_comp_exch(id_priv
, RDMA_CM_ADDR_BOUND
, RDMA_CM_LISTEN
))
2283 if (id_priv
->reuseaddr
) {
2284 ret
= cma_bind_listen(id_priv
);
2289 id_priv
->backlog
= backlog
;
2291 switch (rdma_node_get_transport(id
->device
->node_type
)) {
2292 case RDMA_TRANSPORT_IB
:
2293 ret
= cma_ib_listen(id_priv
);
2297 case RDMA_TRANSPORT_IWARP
:
2298 ret
= cma_iw_listen(id_priv
, backlog
);
2307 cma_listen_on_all(id_priv
);
2311 id_priv
->backlog
= 0;
2312 cma_comp_exch(id_priv
, RDMA_CM_LISTEN
, RDMA_CM_ADDR_BOUND
);
2315 EXPORT_SYMBOL(rdma_listen
);
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);

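/*
 * Build the private data header carried in the connection request: an
 * SDP hello header for RDMA_PS_SDP, otherwise the generic CMA header
 * holding the IP version, source/destination addresses, and port.
 */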
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	if (route->addr.src_addr.ss_family == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) &route->addr.src_addr;
		dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 4);
			sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			sdp_hdr->port = src4->sin_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 4);
			cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			cma_hdr->port = src4->sin_port;
			break;
		}
	} else {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
		dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 6);
			sdp_hdr->src_addr.ip6 = src6->sin6_addr;
			sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
			sdp_hdr->port = src6->sin6_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 6);
			cma_hdr->src_addr.ip6 = src6->sin6_addr;
			cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
			cma_hdr->port = src6->sin6_port;
			break;
		}
	}
	return 0;
}

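/* IB CM callback handling SIDR (UD service resolution) replies. */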
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = -EINVAL;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}

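/*
 * Active side of an RC connection: send an IB CM REQ carrying the CMA
 * header followed by the user's private data.
 */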
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

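/*
 * Initiate an outgoing connection.  UD ids resolve the remote QP via a
 * SIDR request, other IB ids send a CM REQ, and iWARP ids connect
 * through the iw_cm layer.
 */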
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);

static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

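/*
 * Accept a connection request.  UD ids answer with a SIDR reply, other
 * IB ids complete the CM exchange, and iWARP ids accept through the
 * iw_cm layer.  On failure the request is rejected.
 */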
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);

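/*
 * Disconnect: move the QP to the error state, then exchange DREQ/DREP
 * for IB or call iw_cm_disconnect() for iWARP.
 */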
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

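/* Derive the multicast GID to join from the user-supplied IP address. */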
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6)) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}

static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}

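/*
 * RoCE (IBoE) multicast join: there is no SA to query, so the member
 * record is filled in locally and the join is reported from a work item.
 */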
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_join_ib_multicast(id_priv, mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_init(&mc->mcref);
			ret = cma_iboe_join_multicast(id_priv, mc);
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
				case IB_LINK_LAYER_INFINIBAND:
					ib_sa_free_multicast(mc->multicast.ib);
					kfree(mc);
					break;
				case IB_LINK_LAYER_ETHERNET:
					kref_put(&mc->mcref, release_mc);
					break;
				default:
					break;
				}
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *)ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

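/*
 * Netlink dump callback: emit one RDMA_NL_RDMA_CM_ID_STATS message per
 * cm_id, resuming from the device/id indices saved in cb->args.
 */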
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (id->route.addr.src_addr.ss_family == AF_INET) {
				if (ibnl_put_attr(skb, nlh,
						  sizeof(struct sockaddr_in),
						  &id->route.addr.src_addr,
						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
					goto out;
				}
				if (ibnl_put_attr(skb, nlh,
						  sizeof(struct sockaddr_in),
						  &id->route.addr.dst_addr,
						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
					goto out;
				}
			} else if (id->route.addr.src_addr.ss_family == AF_INET6) {
				if (ibnl_put_attr(skb, nlh,
						  sizeof(struct sockaddr_in6),
						  &id->route.addr.src_addr,
						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
					goto out;
				}
				if (ibnl_put_attr(skb, nlh,
						  sizeof(struct sockaddr_in6),
						  &id->route.addr.dst_addr,
						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
					goto out;
				}
			}

			id_stats->pid		= id_priv->owner;
			id_stats->port_space	= id->ps;
			id_stats->cm_state	= id_priv->state;
			id_stats->qp_num	= id_priv->qp_num;
			id_stats->qp_type	= id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
};

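/*
 * Module init: create the workqueue, register with the SA, address
 * resolution, and IB device frameworks, and add the netlink callback.
 */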
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);