/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 */
#include <linux/completion.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
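/*
 * Background on the constants above: the IB CM expresses timeouts as an
 * exponent, approximately 4.096 us * 2^timeout, so a response timeout of
 * 20 allows roughly four seconds per exchange, retried up to
 * CMA_MAX_CM_RETRIES times.  CMA_CM_MRA_SETTING combines
 * IB_CM_MRA_FLAG_DELAY with a service timeout exponent (24) used when
 * sending an MRA to ask the peer to wait for a delayed response.
 */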
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};
/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in
 * process, and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list;
	struct list_head	listen_list;
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	enum cma_state		state;
	spinlock_t		lock;
	struct completion	comp;
	atomic_t		refcount;
	wait_queue_head_t	wait_remove;
	atomic_t		dev_remove;

	int			backlog;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};
struct cma_multicast {
	struct rdma_id_private	*id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};
struct cma_hdr {
	u8 cma_version;
	u8 ip_version;		/* IP version: 7:4 */
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 sdp_version;		/* Major version: 7:4 */
	u8 ip_version;		/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
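/*
 * cma_comp()/cma_comp_exch() implement the id state machine: a transition
 * only happens if the current state matches the expected one, under
 * id_priv->lock, so racing callbacks (CM events, address and route
 * resolution, device removal) cannot move an id through an invalid
 * sequence of states.
 */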
static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
	return hdr->ip_version >> 4;

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
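/*
 * Example of the nibble packing used above (illustrative values): for
 * IPv4, cma_set_ip_ver(hdr, 4) leaves hdr->ip_version as 0x40, and
 * cma_get_ip_ver() returns 0x40 >> 4 == 4.  The low nibble is left
 * untouched.
 */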
static inline u8 sdp_get_majv(u8 sdp_version)
	return sdp_version >> 4;

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
	return hh->ip_version >> 4;

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);

static inline int cma_is_ud_ps(enum rdma_port_space ps)
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
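/*
 * Port spaces RDMA_PS_UDP and RDMA_PS_IPOIB are the unreliable datagram
 * spaces: connection setup for them goes through the IB SIDR
 * request/reply exchange rather than the full REQ/REP/RTU handshake used
 * for RDMA_PS_TCP and RDMA_PS_SDP, and the resulting QPs are UD QPs.
 */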
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);

static inline void cma_deref_dev(struct cma_device *cma_dev)
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;

static int cma_set_qkey(struct ib_device *device, u8 port_num,
			enum rdma_port_space ps,
			struct rdma_dev_addr *dev_addr, u32 *qkey)
	struct ib_sa_mcmember_rec rec;

	*qkey = RDMA_UDP_QKEY;
	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
	*qkey = be32_to_cpu(rec.qkey);
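/*
 * cma_set_qkey(): plain RDMA_PS_UDP ids use the well-known RDMA_UDP_QKEY,
 * while RDMA_PS_IPOIB ids must use the qkey of the IPoIB broadcast group,
 * so that value is looked up from the SA's cached mcmember record for the
 * port's broadcast MGID.
 */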
static int cma_acquire_dev(struct rdma_id_private *id_priv)
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;

	switch (rdma_node_get_transport(dev_addr->dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(dev_addr, &gid);
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(dev_addr, &gid);

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		ret = cma_set_qkey(cma_dev->device,
				   id_priv->id.port_num,
				   id_priv->id.ps, dev_addr,
				   &id_priv->qkey);
		cma_attach_to_dev(id_priv, cma_dev);
static void cma_deref_id(struct rdma_id_private *id_priv)
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);

static int cma_disable_remove(struct rdma_id_private *id_priv,
			      enum cma_state state)
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == state) {
		atomic_inc(&id_priv->dev_remove);
	spin_unlock_irqrestore(&id_priv->lock, flags);

static void cma_enable_remove(struct rdma_id_private *id_priv)
	if (atomic_dec_and_test(&id_priv->dev_remove))
		wake_up(&id_priv->wait_remove);

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
	return (id_priv->id.device && id_priv->cm_id.ib);
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	spin_lock_init(&id_priv->lock);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	init_waitqueue_head(&id_priv->wait_remove);
	atomic_set(&id_priv->dev_remove, 0);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

EXPORT_SYMBOL(rdma_create_id);
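/*
 * Typical consumer flow (sketch built from the interfaces exported by
 * this file; error handling omitted):
 *
 *	id = rdma_create_id(my_handler, ctx, RDMA_PS_TCP);
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	  -> RDMA_CM_EVENT_ADDR_RESOLVED
 *	rdma_resolve_route(id, 2000);
 *	  -> RDMA_CM_EVENT_ROUTE_RESOLVED
 *	rdma_create_qp(id, pd, &qp_init_attr);
 *	rdma_connect(id, &conn_param);
 *	  -> RDMA_CM_EVENT_ESTABLISHED
 *
 * my_handler, ctx, dst_addr, pd, conn_param and the 2000 ms timeouts are
 * illustrative placeholders, not values taken from this file.
 */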
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
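/*
 * UD QPs have no per-connection handshake, so cma_init_ud_qp() walks the
 * QP all the way INIT -> RTR -> RTS as soon as it is created.  Connected
 * QPs only go to INIT here; cma_modify_qp_rtr()/cma_modify_qp_rts()
 * finish the transitions as the CM REQ/REP/RTU exchange progresses.
 */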
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)

	qp = ib_create_qp(pd, qp_init_attr);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	ret = cma_init_conn_qp(id_priv, qp);

	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);

EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
	ib_destroy_qp(id->qp);

EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_cm_id *id)
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);

static int cma_modify_qp_rts(struct rdma_cm_id *id)
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);

static int cma_modify_qp_err(struct rdma_cm_id *id)
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);

EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);

	ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
	return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
		ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;

static inline int cma_loopback_addr(struct sockaddr *addr)
	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);

static inline int cma_any_addr(struct sockaddr *addr)
	return cma_zero_addr(addr) || cma_loopback_addr(addr);

static inline __be16 cma_port(struct sockaddr *addr)
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	return ((struct sockaddr_in6 *) addr)->sin6_port;

static inline int cma_any_port(struct sockaddr *addr)
	return !cma_port(addr);
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __u16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
	if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
	    SDP_MAJ_VERSION)

	*ip_ver = sdp_get_ip_ver(hdr);
	*port	= ((struct sdp_hh *) hdr)->port;
	*src	= &((struct sdp_hh *) hdr)->src_addr;
	*dst	= &((struct sdp_hh *) hdr)->dst_addr;

	if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)

	*ip_ver = cma_get_ip_ver(hdr);
	*port	= ((struct cma_hdr *) hdr)->port;
	*src	= &((struct cma_hdr *) hdr)->src_addr;
	*dst	= &((struct cma_hdr *) hdr)->dst_addr;

	if (*ip_ver != 4 && *ip_ver != 6)
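/*
 * cma_get_net_info() decodes the private data carried in an incoming
 * connection request: for RDMA_PS_SDP the addresses come from the SDP
 * hello header (struct sdp_hh), for the other port spaces from the CMA
 * header (struct cma_hdr) prepended by cma_format_hdr(); either way the
 * result must claim IP version 4 or 6 or the request is rejected.
 */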
static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __u16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
	ip4 = (struct sockaddr_in *) &addr->src_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = dst->ip4.addr;
	ip4->sin_port = listen4->sin_port;

	ip4 = (struct sockaddr_in *) &addr->dst_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = src->ip4.addr;
	ip4->sin_port = port;

	listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
	ip6 = (struct sockaddr_in6 *) &addr->src_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = dst->ip6;
	ip6->sin6_port = listen6->sin6_port;

	ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = src->ip6;
	ip6->sin6_port = port;
static inline int cma_user_data_offset(enum rdma_port_space ps)
	return sizeof(struct cma_hdr);
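/*
 * For RDMA_PS_SDP the consumer's private data starts at offset 0 (the SDP
 * hello header has its own fixed layout), while every other port space
 * reserves sizeof(struct cma_hdr) bytes at the front of the CM private
 * data for the address header built by cma_format_hdr().
 */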
708 static void cma_cancel_route(struct rdma_id_private
*id_priv
)
710 switch (rdma_node_get_transport(id_priv
->id
.device
->node_type
)) {
711 case RDMA_TRANSPORT_IB
:
713 ib_sa_cancel_query(id_priv
->query_id
, id_priv
->query
);
720 static inline int cma_internal_listen(struct rdma_id_private
*id_priv
)
722 return (id_priv
->state
== CMA_LISTEN
) && id_priv
->cma_dev
&&
723 cma_any_addr(&id_priv
->id
.route
.addr
.src_addr
);
726 static void cma_destroy_listen(struct rdma_id_private
*id_priv
)
728 cma_exch(id_priv
, CMA_DESTROYING
);
730 if (id_priv
->cma_dev
) {
731 switch (rdma_node_get_transport(id_priv
->id
.device
->node_type
)) {
732 case RDMA_TRANSPORT_IB
:
733 if (id_priv
->cm_id
.ib
&& !IS_ERR(id_priv
->cm_id
.ib
))
734 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
736 case RDMA_TRANSPORT_IWARP
:
737 if (id_priv
->cm_id
.iw
&& !IS_ERR(id_priv
->cm_id
.iw
))
738 iw_destroy_cm_id(id_priv
->cm_id
.iw
);
743 cma_detach_from_dev(id_priv
);
745 list_del(&id_priv
->listen_list
);
747 cma_deref_id(id_priv
);
748 wait_for_completion(&id_priv
->comp
);
753 static void cma_cancel_listens(struct rdma_id_private
*id_priv
)
755 struct rdma_id_private
*dev_id_priv
;
758 list_del(&id_priv
->list
);
760 while (!list_empty(&id_priv
->listen_list
)) {
761 dev_id_priv
= list_entry(id_priv
->listen_list
.next
,
762 struct rdma_id_private
, listen_list
);
763 cma_destroy_listen(dev_id_priv
);
768 static void cma_cancel_operation(struct rdma_id_private
*id_priv
,
769 enum cma_state state
)
773 rdma_addr_cancel(&id_priv
->id
.route
.addr
.dev_addr
);
775 case CMA_ROUTE_QUERY
:
776 cma_cancel_route(id_priv
);
779 if (cma_any_addr(&id_priv
->id
.route
.addr
.src_addr
) &&
781 cma_cancel_listens(id_priv
);
788 static void cma_release_port(struct rdma_id_private
*id_priv
)
790 struct rdma_bind_list
*bind_list
= id_priv
->bind_list
;
796 hlist_del(&id_priv
->node
);
797 if (hlist_empty(&bind_list
->owners
)) {
798 idr_remove(bind_list
->ps
, bind_list
->port
);
804 static void cma_leave_mc_groups(struct rdma_id_private
*id_priv
)
806 struct cma_multicast
*mc
;
808 while (!list_empty(&id_priv
->mc_list
)) {
809 mc
= container_of(id_priv
->mc_list
.next
,
810 struct cma_multicast
, list
);
812 ib_sa_free_multicast(mc
->multicast
.ib
);
void rdma_destroy_id(struct rdma_cm_id *id)
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
		cma_leave_mc_groups(id_priv);
		cma_detach_from_dev(id_priv);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv->id.route.path_rec);

EXPORT_SYMBOL(rdma_destroy_id);
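/*
 * Teardown order in rdma_destroy_id(): the id is first marked
 * CMA_DESTROYING and any outstanding address or route query is cancelled,
 * then the underlying IB or iWARP CM id is destroyed, multicast groups
 * are left and the device reference is dropped, the port reservation is
 * released, and finally the last reference is waited for before freeing.
 */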
856 static int cma_rep_recv(struct rdma_id_private
*id_priv
)
860 ret
= cma_modify_qp_rtr(&id_priv
->id
);
864 ret
= cma_modify_qp_rts(&id_priv
->id
);
868 ret
= ib_send_cm_rtu(id_priv
->cm_id
.ib
, NULL
, 0);
874 cma_modify_qp_err(&id_priv
->id
);
875 ib_send_cm_rej(id_priv
->cm_id
.ib
, IB_CM_REJ_CONSUMER_DEFINED
,
880 static int cma_verify_rep(struct rdma_id_private
*id_priv
, void *data
)
882 if (id_priv
->id
.ps
== RDMA_PS_SDP
&&
883 sdp_get_majv(((struct sdp_hah
*) data
)->sdp_version
) !=
890 static void cma_set_rep_event_data(struct rdma_cm_event
*event
,
891 struct ib_cm_rep_event_param
*rep_data
,
894 event
->param
.conn
.private_data
= private_data
;
895 event
->param
.conn
.private_data_len
= IB_CM_REP_PRIVATE_DATA_SIZE
;
896 event
->param
.conn
.responder_resources
= rep_data
->responder_resources
;
897 event
->param
.conn
.initiator_depth
= rep_data
->initiator_depth
;
898 event
->param
.conn
.flow_control
= rep_data
->flow_control
;
899 event
->param
.conn
.rnr_retry_count
= rep_data
->rnr_retry_count
;
900 event
->param
.conn
.srq
= rep_data
->srq
;
901 event
->param
.conn
.qp_num
= rep_data
->remote_qpn
;
904 static int cma_ib_handler(struct ib_cm_id
*cm_id
, struct ib_cm_event
*ib_event
)
906 struct rdma_id_private
*id_priv
= cm_id
->context
;
907 struct rdma_cm_event event
;
910 if (cma_disable_remove(id_priv
, CMA_CONNECT
))
913 memset(&event
, 0, sizeof event
);
914 switch (ib_event
->event
) {
915 case IB_CM_REQ_ERROR
:
916 case IB_CM_REP_ERROR
:
917 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
918 event
.status
= -ETIMEDOUT
;
920 case IB_CM_REP_RECEIVED
:
921 event
.status
= cma_verify_rep(id_priv
, ib_event
->private_data
);
923 event
.event
= RDMA_CM_EVENT_CONNECT_ERROR
;
924 else if (id_priv
->id
.qp
&& id_priv
->id
.ps
!= RDMA_PS_SDP
) {
925 event
.status
= cma_rep_recv(id_priv
);
926 event
.event
= event
.status
? RDMA_CM_EVENT_CONNECT_ERROR
:
927 RDMA_CM_EVENT_ESTABLISHED
;
929 event
.event
= RDMA_CM_EVENT_CONNECT_RESPONSE
;
930 cma_set_rep_event_data(&event
, &ib_event
->param
.rep_rcvd
,
931 ib_event
->private_data
);
933 case IB_CM_RTU_RECEIVED
:
934 case IB_CM_USER_ESTABLISHED
:
935 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
937 case IB_CM_DREQ_ERROR
:
938 event
.status
= -ETIMEDOUT
; /* fall through */
939 case IB_CM_DREQ_RECEIVED
:
940 case IB_CM_DREP_RECEIVED
:
941 if (!cma_comp_exch(id_priv
, CMA_CONNECT
, CMA_DISCONNECT
))
943 event
.event
= RDMA_CM_EVENT_DISCONNECTED
;
945 case IB_CM_TIMEWAIT_EXIT
:
946 case IB_CM_MRA_RECEIVED
:
949 case IB_CM_REJ_RECEIVED
:
950 cma_modify_qp_err(&id_priv
->id
);
951 event
.status
= ib_event
->param
.rej_rcvd
.reason
;
952 event
.event
= RDMA_CM_EVENT_REJECTED
;
953 event
.param
.conn
.private_data
= ib_event
->private_data
;
954 event
.param
.conn
.private_data_len
= IB_CM_REJ_PRIVATE_DATA_SIZE
;
957 printk(KERN_ERR
"RDMA CMA: unexpected IB CM event: %d",
962 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
964 /* Destroy the CM ID by returning a non-zero value. */
965 id_priv
->cm_id
.ib
= NULL
;
966 cma_exch(id_priv
, CMA_DESTROYING
);
967 cma_enable_remove(id_priv
);
968 rdma_destroy_id(&id_priv
->id
);
972 cma_enable_remove(id_priv
);
976 static struct rdma_id_private
*cma_new_conn_id(struct rdma_cm_id
*listen_id
,
977 struct ib_cm_event
*ib_event
)
979 struct rdma_id_private
*id_priv
;
980 struct rdma_cm_id
*id
;
981 struct rdma_route
*rt
;
982 union cma_ip_addr
*src
, *dst
;
986 if (cma_get_net_info(ib_event
->private_data
, listen_id
->ps
,
987 &ip_ver
, &port
, &src
, &dst
))
990 id
= rdma_create_id(listen_id
->event_handler
, listen_id
->context
,
995 cma_save_net_info(&id
->route
.addr
, &listen_id
->route
.addr
,
996 ip_ver
, port
, src
, dst
);
999 rt
->num_paths
= ib_event
->param
.req_rcvd
.alternate_path
? 2 : 1;
1000 rt
->path_rec
= kmalloc(sizeof *rt
->path_rec
* rt
->num_paths
,
1005 rt
->path_rec
[0] = *ib_event
->param
.req_rcvd
.primary_path
;
1006 if (rt
->num_paths
== 2)
1007 rt
->path_rec
[1] = *ib_event
->param
.req_rcvd
.alternate_path
;
1009 ib_addr_set_sgid(&rt
->addr
.dev_addr
, &rt
->path_rec
[0].sgid
);
1010 ib_addr_set_dgid(&rt
->addr
.dev_addr
, &rt
->path_rec
[0].dgid
);
1011 ib_addr_set_pkey(&rt
->addr
.dev_addr
, be16_to_cpu(rt
->path_rec
[0].pkey
));
1012 rt
->addr
.dev_addr
.dev_type
= RDMA_NODE_IB_CA
;
1014 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1015 id_priv
->state
= CMA_CONNECT
;
1019 rdma_destroy_id(id
);
1024 static struct rdma_id_private
*cma_new_udp_id(struct rdma_cm_id
*listen_id
,
1025 struct ib_cm_event
*ib_event
)
1027 struct rdma_id_private
*id_priv
;
1028 struct rdma_cm_id
*id
;
1029 union cma_ip_addr
*src
, *dst
;
1034 id
= rdma_create_id(listen_id
->event_handler
, listen_id
->context
,
1040 if (cma_get_net_info(ib_event
->private_data
, listen_id
->ps
,
1041 &ip_ver
, &port
, &src
, &dst
))
1044 cma_save_net_info(&id
->route
.addr
, &listen_id
->route
.addr
,
1045 ip_ver
, port
, src
, dst
);
1047 ret
= rdma_translate_ip(&id
->route
.addr
.src_addr
,
1048 &id
->route
.addr
.dev_addr
);
1052 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1053 id_priv
->state
= CMA_CONNECT
;
1056 rdma_destroy_id(id
);
1060 static void cma_set_req_event_data(struct rdma_cm_event
*event
,
1061 struct ib_cm_req_event_param
*req_data
,
1062 void *private_data
, int offset
)
1064 event
->param
.conn
.private_data
= private_data
+ offset
;
1065 event
->param
.conn
.private_data_len
= IB_CM_REQ_PRIVATE_DATA_SIZE
- offset
;
1066 event
->param
.conn
.responder_resources
= req_data
->responder_resources
;
1067 event
->param
.conn
.initiator_depth
= req_data
->initiator_depth
;
1068 event
->param
.conn
.flow_control
= req_data
->flow_control
;
1069 event
->param
.conn
.retry_count
= req_data
->retry_count
;
1070 event
->param
.conn
.rnr_retry_count
= req_data
->rnr_retry_count
;
1071 event
->param
.conn
.srq
= req_data
->srq
;
1072 event
->param
.conn
.qp_num
= req_data
->remote_qpn
;
1075 static int cma_req_handler(struct ib_cm_id
*cm_id
, struct ib_cm_event
*ib_event
)
1077 struct rdma_id_private
*listen_id
, *conn_id
;
1078 struct rdma_cm_event event
;
1081 listen_id
= cm_id
->context
;
1082 if (cma_disable_remove(listen_id
, CMA_LISTEN
))
1083 return -ECONNABORTED
;
1085 memset(&event
, 0, sizeof event
);
1086 offset
= cma_user_data_offset(listen_id
->id
.ps
);
1087 event
.event
= RDMA_CM_EVENT_CONNECT_REQUEST
;
1088 if (cma_is_ud_ps(listen_id
->id
.ps
)) {
1089 conn_id
= cma_new_udp_id(&listen_id
->id
, ib_event
);
1090 event
.param
.ud
.private_data
= ib_event
->private_data
+ offset
;
1091 event
.param
.ud
.private_data_len
=
1092 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE
- offset
;
1094 ib_send_cm_mra(cm_id
, CMA_CM_MRA_SETTING
, NULL
, 0);
1095 conn_id
= cma_new_conn_id(&listen_id
->id
, ib_event
);
1096 cma_set_req_event_data(&event
, &ib_event
->param
.req_rcvd
,
1097 ib_event
->private_data
, offset
);
1104 atomic_inc(&conn_id
->dev_remove
);
1106 ret
= cma_acquire_dev(conn_id
);
1107 mutex_unlock(&lock
);
1109 goto release_conn_id
;
1111 conn_id
->cm_id
.ib
= cm_id
;
1112 cm_id
->context
= conn_id
;
1113 cm_id
->cm_handler
= cma_ib_handler
;
1115 ret
= conn_id
->id
.event_handler(&conn_id
->id
, &event
);
1119 /* Destroy the CM ID by returning a non-zero value. */
1120 conn_id
->cm_id
.ib
= NULL
;
1123 cma_exch(conn_id
, CMA_DESTROYING
);
1124 cma_enable_remove(conn_id
);
1125 rdma_destroy_id(&conn_id
->id
);
1128 cma_enable_remove(listen_id
);
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
	return cpu_to_be64(((u64) ps << 16) + be16_to_cpu(cma_port(addr)));
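/*
 * Worked example (illustrative numbers, not taken from this file): with
 * RDMA_PS_TCP (0x0106 in rdma_cm.h) and an id bound to TCP port 5000
 * (0x1388), the service ID is cpu_to_be64((0x0106ULL << 16) + 0x1388),
 * i.e. 0x0000000001061388 in network byte order: the port space selects
 * the upper bits and the IP port number fills the low 16 bits.
 */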
1137 static void cma_set_compare_data(enum rdma_port_space ps
, struct sockaddr
*addr
,
1138 struct ib_cm_compare_data
*compare
)
1140 struct cma_hdr
*cma_data
, *cma_mask
;
1141 struct sdp_hh
*sdp_data
, *sdp_mask
;
1143 struct in6_addr ip6_addr
;
1145 memset(compare
, 0, sizeof *compare
);
1146 cma_data
= (void *) compare
->data
;
1147 cma_mask
= (void *) compare
->mask
;
1148 sdp_data
= (void *) compare
->data
;
1149 sdp_mask
= (void *) compare
->mask
;
1151 switch (addr
->sa_family
) {
1153 ip4_addr
= ((struct sockaddr_in
*) addr
)->sin_addr
.s_addr
;
1154 if (ps
== RDMA_PS_SDP
) {
1155 sdp_set_ip_ver(sdp_data
, 4);
1156 sdp_set_ip_ver(sdp_mask
, 0xF);
1157 sdp_data
->dst_addr
.ip4
.addr
= ip4_addr
;
1158 sdp_mask
->dst_addr
.ip4
.addr
= ~0;
1160 cma_set_ip_ver(cma_data
, 4);
1161 cma_set_ip_ver(cma_mask
, 0xF);
1162 cma_data
->dst_addr
.ip4
.addr
= ip4_addr
;
1163 cma_mask
->dst_addr
.ip4
.addr
= ~0;
1167 ip6_addr
= ((struct sockaddr_in6
*) addr
)->sin6_addr
;
1168 if (ps
== RDMA_PS_SDP
) {
1169 sdp_set_ip_ver(sdp_data
, 6);
1170 sdp_set_ip_ver(sdp_mask
, 0xF);
1171 sdp_data
->dst_addr
.ip6
= ip6_addr
;
1172 memset(&sdp_mask
->dst_addr
.ip6
, 0xFF,
1173 sizeof sdp_mask
->dst_addr
.ip6
);
1175 cma_set_ip_ver(cma_data
, 6);
1176 cma_set_ip_ver(cma_mask
, 0xF);
1177 cma_data
->dst_addr
.ip6
= ip6_addr
;
1178 memset(&cma_mask
->dst_addr
.ip6
, 0xFF,
1179 sizeof cma_mask
->dst_addr
.ip6
);
1187 static int cma_iw_handler(struct iw_cm_id
*iw_id
, struct iw_cm_event
*iw_event
)
1189 struct rdma_id_private
*id_priv
= iw_id
->context
;
1190 struct rdma_cm_event event
;
1191 struct sockaddr_in
*sin
;
1194 if (cma_disable_remove(id_priv
, CMA_CONNECT
))
1197 memset(&event
, 0, sizeof event
);
1198 switch (iw_event
->event
) {
1199 case IW_CM_EVENT_CLOSE
:
1200 event
.event
= RDMA_CM_EVENT_DISCONNECTED
;
1202 case IW_CM_EVENT_CONNECT_REPLY
:
1203 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1204 *sin
= iw_event
->local_addr
;
1205 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.dst_addr
;
1206 *sin
= iw_event
->remote_addr
;
1207 switch (iw_event
->status
) {
1209 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1213 event
.event
= RDMA_CM_EVENT_REJECTED
;
1216 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
1219 event
.event
= RDMA_CM_EVENT_CONNECT_ERROR
;
1223 case IW_CM_EVENT_ESTABLISHED
:
1224 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1230 event
.status
= iw_event
->status
;
1231 event
.param
.conn
.private_data
= iw_event
->private_data
;
1232 event
.param
.conn
.private_data_len
= iw_event
->private_data_len
;
1233 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
1235 /* Destroy the CM ID by returning a non-zero value. */
1236 id_priv
->cm_id
.iw
= NULL
;
1237 cma_exch(id_priv
, CMA_DESTROYING
);
1238 cma_enable_remove(id_priv
);
1239 rdma_destroy_id(&id_priv
->id
);
1243 cma_enable_remove(id_priv
);
1247 static int iw_conn_req_handler(struct iw_cm_id
*cm_id
,
1248 struct iw_cm_event
*iw_event
)
1250 struct rdma_cm_id
*new_cm_id
;
1251 struct rdma_id_private
*listen_id
, *conn_id
;
1252 struct sockaddr_in
*sin
;
1253 struct net_device
*dev
= NULL
;
1254 struct rdma_cm_event event
;
1257 listen_id
= cm_id
->context
;
1258 if (cma_disable_remove(listen_id
, CMA_LISTEN
))
1259 return -ECONNABORTED
;
1261 /* Create a new RDMA id for the new IW CM ID */
1262 new_cm_id
= rdma_create_id(listen_id
->id
.event_handler
,
1263 listen_id
->id
.context
,
1269 conn_id
= container_of(new_cm_id
, struct rdma_id_private
, id
);
1270 atomic_inc(&conn_id
->dev_remove
);
1271 conn_id
->state
= CMA_CONNECT
;
1273 dev
= ip_dev_find(iw_event
->local_addr
.sin_addr
.s_addr
);
1275 ret
= -EADDRNOTAVAIL
;
1276 cma_enable_remove(conn_id
);
1277 rdma_destroy_id(new_cm_id
);
1280 ret
= rdma_copy_addr(&conn_id
->id
.route
.addr
.dev_addr
, dev
, NULL
);
1282 cma_enable_remove(conn_id
);
1283 rdma_destroy_id(new_cm_id
);
1288 ret
= cma_acquire_dev(conn_id
);
1289 mutex_unlock(&lock
);
1291 cma_enable_remove(conn_id
);
1292 rdma_destroy_id(new_cm_id
);
1296 conn_id
->cm_id
.iw
= cm_id
;
1297 cm_id
->context
= conn_id
;
1298 cm_id
->cm_handler
= cma_iw_handler
;
1300 sin
= (struct sockaddr_in
*) &new_cm_id
->route
.addr
.src_addr
;
1301 *sin
= iw_event
->local_addr
;
1302 sin
= (struct sockaddr_in
*) &new_cm_id
->route
.addr
.dst_addr
;
1303 *sin
= iw_event
->remote_addr
;
1305 memset(&event
, 0, sizeof event
);
1306 event
.event
= RDMA_CM_EVENT_CONNECT_REQUEST
;
1307 event
.param
.conn
.private_data
= iw_event
->private_data
;
1308 event
.param
.conn
.private_data_len
= iw_event
->private_data_len
;
1309 ret
= conn_id
->id
.event_handler(&conn_id
->id
, &event
);
1311 /* User wants to destroy the CM ID */
1312 conn_id
->cm_id
.iw
= NULL
;
1313 cma_exch(conn_id
, CMA_DESTROYING
);
1314 cma_enable_remove(conn_id
);
1315 rdma_destroy_id(&conn_id
->id
);
1321 cma_enable_remove(listen_id
);
1325 static int cma_ib_listen(struct rdma_id_private
*id_priv
)
1327 struct ib_cm_compare_data compare_data
;
1328 struct sockaddr
*addr
;
1332 id_priv
->cm_id
.ib
= ib_create_cm_id(id_priv
->id
.device
, cma_req_handler
,
1334 if (IS_ERR(id_priv
->cm_id
.ib
))
1335 return PTR_ERR(id_priv
->cm_id
.ib
);
1337 addr
= &id_priv
->id
.route
.addr
.src_addr
;
1338 svc_id
= cma_get_service_id(id_priv
->id
.ps
, addr
);
1339 if (cma_any_addr(addr
))
1340 ret
= ib_cm_listen(id_priv
->cm_id
.ib
, svc_id
, 0, NULL
);
1342 cma_set_compare_data(id_priv
->id
.ps
, addr
, &compare_data
);
1343 ret
= ib_cm_listen(id_priv
->cm_id
.ib
, svc_id
, 0, &compare_data
);
1347 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
1348 id_priv
->cm_id
.ib
= NULL
;
1354 static int cma_iw_listen(struct rdma_id_private
*id_priv
, int backlog
)
1357 struct sockaddr_in
*sin
;
1359 id_priv
->cm_id
.iw
= iw_create_cm_id(id_priv
->id
.device
,
1360 iw_conn_req_handler
,
1362 if (IS_ERR(id_priv
->cm_id
.iw
))
1363 return PTR_ERR(id_priv
->cm_id
.iw
);
1365 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1366 id_priv
->cm_id
.iw
->local_addr
= *sin
;
1368 ret
= iw_cm_listen(id_priv
->cm_id
.iw
, backlog
);
1371 iw_destroy_cm_id(id_priv
->cm_id
.iw
);
1372 id_priv
->cm_id
.iw
= NULL
;
1378 static int cma_listen_handler(struct rdma_cm_id
*id
,
1379 struct rdma_cm_event
*event
)
1381 struct rdma_id_private
*id_priv
= id
->context
;
1383 id
->context
= id_priv
->id
.context
;
1384 id
->event_handler
= id_priv
->id
.event_handler
;
1385 return id_priv
->id
.event_handler(id
, event
);
1388 static void cma_listen_on_dev(struct rdma_id_private
*id_priv
,
1389 struct cma_device
*cma_dev
)
1391 struct rdma_id_private
*dev_id_priv
;
1392 struct rdma_cm_id
*id
;
1395 id
= rdma_create_id(cma_listen_handler
, id_priv
, id_priv
->id
.ps
);
1399 dev_id_priv
= container_of(id
, struct rdma_id_private
, id
);
1401 dev_id_priv
->state
= CMA_ADDR_BOUND
;
1402 memcpy(&id
->route
.addr
.src_addr
, &id_priv
->id
.route
.addr
.src_addr
,
1403 ip_addr_size(&id_priv
->id
.route
.addr
.src_addr
));
1405 cma_attach_to_dev(dev_id_priv
, cma_dev
);
1406 list_add_tail(&dev_id_priv
->listen_list
, &id_priv
->listen_list
);
1408 ret
= rdma_listen(id
, id_priv
->backlog
);
1414 cma_destroy_listen(dev_id_priv
);
1417 static void cma_listen_on_all(struct rdma_id_private
*id_priv
)
1419 struct cma_device
*cma_dev
;
1422 list_add_tail(&id_priv
->list
, &listen_any_list
);
1423 list_for_each_entry(cma_dev
, &dev_list
, list
)
1424 cma_listen_on_dev(id_priv
, cma_dev
);
1425 mutex_unlock(&lock
);
1428 static int cma_bind_any(struct rdma_cm_id
*id
, sa_family_t af
)
1430 struct sockaddr_in addr_in
;
1432 memset(&addr_in
, 0, sizeof addr_in
);
1433 addr_in
.sin_family
= af
;
1434 return rdma_bind_addr(id
, (struct sockaddr
*) &addr_in
);
1437 int rdma_listen(struct rdma_cm_id
*id
, int backlog
)
1439 struct rdma_id_private
*id_priv
;
1442 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1443 if (id_priv
->state
== CMA_IDLE
) {
1444 ret
= cma_bind_any(id
, AF_INET
);
1449 if (!cma_comp_exch(id_priv
, CMA_ADDR_BOUND
, CMA_LISTEN
))
1452 id_priv
->backlog
= backlog
;
1454 switch (rdma_node_get_transport(id
->device
->node_type
)) {
1455 case RDMA_TRANSPORT_IB
:
1456 ret
= cma_ib_listen(id_priv
);
1460 case RDMA_TRANSPORT_IWARP
:
1461 ret
= cma_iw_listen(id_priv
, backlog
);
1470 cma_listen_on_all(id_priv
);
1474 id_priv
->backlog
= 0;
1475 cma_comp_exch(id_priv
, CMA_LISTEN
, CMA_ADDR_BOUND
);
1478 EXPORT_SYMBOL(rdma_listen
);
1480 void rdma_set_service_type(struct rdma_cm_id
*id
, int tos
)
1482 struct rdma_id_private
*id_priv
;
1484 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1485 id_priv
->tos
= (u8
) tos
;
1487 EXPORT_SYMBOL(rdma_set_service_type
);
1489 static void cma_query_handler(int status
, struct ib_sa_path_rec
*path_rec
,
1492 struct cma_work
*work
= context
;
1493 struct rdma_route
*route
;
1495 route
= &work
->id
->id
.route
;
1498 route
->num_paths
= 1;
1499 *route
->path_rec
= *path_rec
;
1501 work
->old_state
= CMA_ROUTE_QUERY
;
1502 work
->new_state
= CMA_ADDR_RESOLVED
;
1503 work
->event
.event
= RDMA_CM_EVENT_ROUTE_ERROR
;
1504 work
->event
.status
= status
;
1507 queue_work(cma_wq
, &work
->work
);
1510 static int cma_query_ib_route(struct rdma_id_private
*id_priv
, int timeout_ms
,
1511 struct cma_work
*work
)
1513 struct rdma_addr
*addr
= &id_priv
->id
.route
.addr
;
1514 struct ib_sa_path_rec path_rec
;
1515 ib_sa_comp_mask comp_mask
;
1516 struct sockaddr_in6
*sin6
;
1518 memset(&path_rec
, 0, sizeof path_rec
);
1519 ib_addr_get_sgid(&addr
->dev_addr
, &path_rec
.sgid
);
1520 ib_addr_get_dgid(&addr
->dev_addr
, &path_rec
.dgid
);
1521 path_rec
.pkey
= cpu_to_be16(ib_addr_get_pkey(&addr
->dev_addr
));
1522 path_rec
.numb_path
= 1;
1523 path_rec
.reversible
= 1;
1524 path_rec
.service_id
= cma_get_service_id(id_priv
->id
.ps
, &addr
->dst_addr
);
1526 comp_mask
= IB_SA_PATH_REC_DGID
| IB_SA_PATH_REC_SGID
|
1527 IB_SA_PATH_REC_PKEY
| IB_SA_PATH_REC_NUMB_PATH
|
1528 IB_SA_PATH_REC_REVERSIBLE
| IB_SA_PATH_REC_SERVICE_ID
;
1530 if (addr
->src_addr
.sa_family
== AF_INET
) {
1531 path_rec
.qos_class
= cpu_to_be16((u16
) id_priv
->tos
);
1532 comp_mask
|= IB_SA_PATH_REC_QOS_CLASS
;
1534 sin6
= (struct sockaddr_in6
*) &addr
->src_addr
;
1535 path_rec
.traffic_class
= (u8
) (be32_to_cpu(sin6
->sin6_flowinfo
) >> 20);
1536 comp_mask
|= IB_SA_PATH_REC_TRAFFIC_CLASS
;
1539 id_priv
->query_id
= ib_sa_path_rec_get(&sa_client
, id_priv
->id
.device
,
1540 id_priv
->id
.port_num
, &path_rec
,
1541 comp_mask
, timeout_ms
,
1542 GFP_KERNEL
, cma_query_handler
,
1543 work
, &id_priv
->query
);
1545 return (id_priv
->query_id
< 0) ? id_priv
->query_id
: 0;
1548 static void cma_work_handler(struct work_struct
*_work
)
1550 struct cma_work
*work
= container_of(_work
, struct cma_work
, work
);
1551 struct rdma_id_private
*id_priv
= work
->id
;
1554 atomic_inc(&id_priv
->dev_remove
);
1555 if (!cma_comp_exch(id_priv
, work
->old_state
, work
->new_state
))
1558 if (id_priv
->id
.event_handler(&id_priv
->id
, &work
->event
)) {
1559 cma_exch(id_priv
, CMA_DESTROYING
);
1563 cma_enable_remove(id_priv
);
1564 cma_deref_id(id_priv
);
1566 rdma_destroy_id(&id_priv
->id
);
1570 static int cma_resolve_ib_route(struct rdma_id_private
*id_priv
, int timeout_ms
)
1572 struct rdma_route
*route
= &id_priv
->id
.route
;
1573 struct cma_work
*work
;
1576 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1581 INIT_WORK(&work
->work
, cma_work_handler
);
1582 work
->old_state
= CMA_ROUTE_QUERY
;
1583 work
->new_state
= CMA_ROUTE_RESOLVED
;
1584 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
1586 route
->path_rec
= kmalloc(sizeof *route
->path_rec
, GFP_KERNEL
);
1587 if (!route
->path_rec
) {
1592 ret
= cma_query_ib_route(id_priv
, timeout_ms
, work
);
1598 kfree(route
->path_rec
);
1599 route
->path_rec
= NULL
;
1605 int rdma_set_ib_paths(struct rdma_cm_id
*id
,
1606 struct ib_sa_path_rec
*path_rec
, int num_paths
)
1608 struct rdma_id_private
*id_priv
;
1611 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1612 if (!cma_comp_exch(id_priv
, CMA_ADDR_RESOLVED
, CMA_ROUTE_RESOLVED
))
1615 id
->route
.path_rec
= kmalloc(sizeof *path_rec
* num_paths
, GFP_KERNEL
);
1616 if (!id
->route
.path_rec
) {
1621 memcpy(id
->route
.path_rec
, path_rec
, sizeof *path_rec
* num_paths
);
1624 cma_comp_exch(id_priv
, CMA_ROUTE_RESOLVED
, CMA_ADDR_RESOLVED
);
1627 EXPORT_SYMBOL(rdma_set_ib_paths
);
1629 static int cma_resolve_iw_route(struct rdma_id_private
*id_priv
, int timeout_ms
)
1631 struct cma_work
*work
;
1633 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1638 INIT_WORK(&work
->work
, cma_work_handler
);
1639 work
->old_state
= CMA_ROUTE_QUERY
;
1640 work
->new_state
= CMA_ROUTE_RESOLVED
;
1641 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
1642 queue_work(cma_wq
, &work
->work
);
1646 int rdma_resolve_route(struct rdma_cm_id
*id
, int timeout_ms
)
1648 struct rdma_id_private
*id_priv
;
1651 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1652 if (!cma_comp_exch(id_priv
, CMA_ADDR_RESOLVED
, CMA_ROUTE_QUERY
))
1655 atomic_inc(&id_priv
->refcount
);
1656 switch (rdma_node_get_transport(id
->device
->node_type
)) {
1657 case RDMA_TRANSPORT_IB
:
1658 ret
= cma_resolve_ib_route(id_priv
, timeout_ms
);
1660 case RDMA_TRANSPORT_IWARP
:
1661 ret
= cma_resolve_iw_route(id_priv
, timeout_ms
);
1672 cma_comp_exch(id_priv
, CMA_ROUTE_QUERY
, CMA_ADDR_RESOLVED
);
1673 cma_deref_id(id_priv
);
1676 EXPORT_SYMBOL(rdma_resolve_route
);
1678 static int cma_bind_loopback(struct rdma_id_private
*id_priv
)
1680 struct cma_device
*cma_dev
;
1681 struct ib_port_attr port_attr
;
1688 if (list_empty(&dev_list
)) {
1692 list_for_each_entry(cma_dev
, &dev_list
, list
)
1693 for (p
= 1; p
<= cma_dev
->device
->phys_port_cnt
; ++p
)
1694 if (!ib_query_port(cma_dev
->device
, p
, &port_attr
) &&
1695 port_attr
.state
== IB_PORT_ACTIVE
)
1699 cma_dev
= list_entry(dev_list
.next
, struct cma_device
, list
);
1702 ret
= ib_get_cached_gid(cma_dev
->device
, p
, 0, &gid
);
1706 ret
= ib_get_cached_pkey(cma_dev
->device
, p
, 0, &pkey
);
1710 ib_addr_set_sgid(&id_priv
->id
.route
.addr
.dev_addr
, &gid
);
1711 ib_addr_set_pkey(&id_priv
->id
.route
.addr
.dev_addr
, pkey
);
1712 id_priv
->id
.port_num
= p
;
1713 cma_attach_to_dev(id_priv
, cma_dev
);
1715 mutex_unlock(&lock
);
1719 static void addr_handler(int status
, struct sockaddr
*src_addr
,
1720 struct rdma_dev_addr
*dev_addr
, void *context
)
1722 struct rdma_id_private
*id_priv
= context
;
1723 struct rdma_cm_event event
;
1725 memset(&event
, 0, sizeof event
);
1726 atomic_inc(&id_priv
->dev_remove
);
1729 * Grab mutex to block rdma_destroy_id() from removing the device while
1730 * we're trying to acquire it.
1733 if (!cma_comp_exch(id_priv
, CMA_ADDR_QUERY
, CMA_ADDR_RESOLVED
)) {
1734 mutex_unlock(&lock
);
1738 if (!status
&& !id_priv
->cma_dev
)
1739 status
= cma_acquire_dev(id_priv
);
1740 mutex_unlock(&lock
);
1743 if (!cma_comp_exch(id_priv
, CMA_ADDR_RESOLVED
, CMA_ADDR_BOUND
))
1745 event
.event
= RDMA_CM_EVENT_ADDR_ERROR
;
1746 event
.status
= status
;
1748 memcpy(&id_priv
->id
.route
.addr
.src_addr
, src_addr
,
1749 ip_addr_size(src_addr
));
1750 event
.event
= RDMA_CM_EVENT_ADDR_RESOLVED
;
1753 if (id_priv
->id
.event_handler(&id_priv
->id
, &event
)) {
1754 cma_exch(id_priv
, CMA_DESTROYING
);
1755 cma_enable_remove(id_priv
);
1756 cma_deref_id(id_priv
);
1757 rdma_destroy_id(&id_priv
->id
);
1761 cma_enable_remove(id_priv
);
1762 cma_deref_id(id_priv
);
1765 static int cma_resolve_loopback(struct rdma_id_private
*id_priv
)
1767 struct cma_work
*work
;
1768 struct sockaddr_in
*src_in
, *dst_in
;
1772 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1776 if (!id_priv
->cma_dev
) {
1777 ret
= cma_bind_loopback(id_priv
);
1782 ib_addr_get_sgid(&id_priv
->id
.route
.addr
.dev_addr
, &gid
);
1783 ib_addr_set_dgid(&id_priv
->id
.route
.addr
.dev_addr
, &gid
);
1785 if (cma_zero_addr(&id_priv
->id
.route
.addr
.src_addr
)) {
1786 src_in
= (struct sockaddr_in
*)&id_priv
->id
.route
.addr
.src_addr
;
1787 dst_in
= (struct sockaddr_in
*)&id_priv
->id
.route
.addr
.dst_addr
;
1788 src_in
->sin_family
= dst_in
->sin_family
;
1789 src_in
->sin_addr
.s_addr
= dst_in
->sin_addr
.s_addr
;
1793 INIT_WORK(&work
->work
, cma_work_handler
);
1794 work
->old_state
= CMA_ADDR_QUERY
;
1795 work
->new_state
= CMA_ADDR_RESOLVED
;
1796 work
->event
.event
= RDMA_CM_EVENT_ADDR_RESOLVED
;
1797 queue_work(cma_wq
, &work
->work
);
1804 static int cma_bind_addr(struct rdma_cm_id
*id
, struct sockaddr
*src_addr
,
1805 struct sockaddr
*dst_addr
)
1807 if (src_addr
&& src_addr
->sa_family
)
1808 return rdma_bind_addr(id
, src_addr
);
1810 return cma_bind_any(id
, dst_addr
->sa_family
);
1813 int rdma_resolve_addr(struct rdma_cm_id
*id
, struct sockaddr
*src_addr
,
1814 struct sockaddr
*dst_addr
, int timeout_ms
)
1816 struct rdma_id_private
*id_priv
;
1819 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1820 if (id_priv
->state
== CMA_IDLE
) {
1821 ret
= cma_bind_addr(id
, src_addr
, dst_addr
);
1826 if (!cma_comp_exch(id_priv
, CMA_ADDR_BOUND
, CMA_ADDR_QUERY
))
1829 atomic_inc(&id_priv
->refcount
);
1830 memcpy(&id
->route
.addr
.dst_addr
, dst_addr
, ip_addr_size(dst_addr
));
1831 if (cma_any_addr(dst_addr
))
1832 ret
= cma_resolve_loopback(id_priv
);
1834 ret
= rdma_resolve_ip(&addr_client
, &id
->route
.addr
.src_addr
,
1835 dst_addr
, &id
->route
.addr
.dev_addr
,
1836 timeout_ms
, addr_handler
, id_priv
);
1842 cma_comp_exch(id_priv
, CMA_ADDR_QUERY
, CMA_ADDR_BOUND
);
1843 cma_deref_id(id_priv
);
1846 EXPORT_SYMBOL(rdma_resolve_addr
);
1848 static void cma_bind_port(struct rdma_bind_list
*bind_list
,
1849 struct rdma_id_private
*id_priv
)
1851 struct sockaddr_in
*sin
;
1853 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1854 sin
->sin_port
= htons(bind_list
->port
);
1855 id_priv
->bind_list
= bind_list
;
1856 hlist_add_head(&id_priv
->node
, &bind_list
->owners
);
1859 static int cma_alloc_port(struct idr
*ps
, struct rdma_id_private
*id_priv
,
1860 unsigned short snum
)
1862 struct rdma_bind_list
*bind_list
;
1865 bind_list
= kzalloc(sizeof *bind_list
, GFP_KERNEL
);
1870 ret
= idr_get_new_above(ps
, bind_list
, snum
, &port
);
1871 } while ((ret
== -EAGAIN
) && idr_pre_get(ps
, GFP_KERNEL
));
1877 ret
= -EADDRNOTAVAIL
;
1882 bind_list
->port
= (unsigned short) port
;
1883 cma_bind_port(bind_list
, id_priv
);
1886 idr_remove(ps
, port
);
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
	struct rdma_bind_list *bind_list;
	int port, ret, low, high;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);

	/* FIXME: add proper port randomization, as in inet_csk_get_port() */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	inet_get_local_port_range(&low, &high);
	if (next_port != low) {
		idr_remove(ps, port);
	ret = -EADDRNOTAVAIL;

	next_port = port + 1;

	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);

	idr_remove(ps, port);
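/*
 * cma_alloc_any_port() hands out ephemeral ports from the same range the
 * IP stack uses (inet_get_local_port_range()), advancing a module-global
 * next_port cursor and wrapping back to the low end of the range once, so
 * a second pass over the full range ends in -EADDRNOTAVAIL rather than
 * looping forever.  As the FIXME notes, allocation is sequential rather
 * than properly randomized.
 */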
1937 static int cma_use_port(struct idr
*ps
, struct rdma_id_private
*id_priv
)
1939 struct rdma_id_private
*cur_id
;
1940 struct sockaddr_in
*sin
, *cur_sin
;
1941 struct rdma_bind_list
*bind_list
;
1942 struct hlist_node
*node
;
1943 unsigned short snum
;
1945 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1946 snum
= ntohs(sin
->sin_port
);
1947 if (snum
< PROT_SOCK
&& !capable(CAP_NET_BIND_SERVICE
))
1950 bind_list
= idr_find(ps
, snum
);
1952 return cma_alloc_port(ps
, id_priv
, snum
);
1955 * We don't support binding to any address if anyone is bound to
1956 * a specific address on the same port.
1958 if (cma_any_addr(&id_priv
->id
.route
.addr
.src_addr
))
1959 return -EADDRNOTAVAIL
;
1961 hlist_for_each_entry(cur_id
, node
, &bind_list
->owners
, node
) {
1962 if (cma_any_addr(&cur_id
->id
.route
.addr
.src_addr
))
1963 return -EADDRNOTAVAIL
;
1965 cur_sin
= (struct sockaddr_in
*) &cur_id
->id
.route
.addr
.src_addr
;
1966 if (sin
->sin_addr
.s_addr
== cur_sin
->sin_addr
.s_addr
)
1970 cma_bind_port(bind_list
, id_priv
);
1974 static int cma_get_port(struct rdma_id_private
*id_priv
)
1979 switch (id_priv
->id
.ps
) {
1993 return -EPROTONOSUPPORT
;
1997 if (cma_any_port(&id_priv
->id
.route
.addr
.src_addr
))
1998 ret
= cma_alloc_any_port(ps
, id_priv
);
2000 ret
= cma_use_port(ps
, id_priv
);
2001 mutex_unlock(&lock
);
2006 int rdma_bind_addr(struct rdma_cm_id
*id
, struct sockaddr
*addr
)
2008 struct rdma_id_private
*id_priv
;
2011 if (addr
->sa_family
!= AF_INET
)
2012 return -EAFNOSUPPORT
;
2014 id_priv
= container_of(id
, struct rdma_id_private
, id
);
2015 if (!cma_comp_exch(id_priv
, CMA_IDLE
, CMA_ADDR_BOUND
))
2018 if (!cma_any_addr(addr
)) {
2019 ret
= rdma_translate_ip(addr
, &id
->route
.addr
.dev_addr
);
2024 ret
= cma_acquire_dev(id_priv
);
2025 mutex_unlock(&lock
);
2030 memcpy(&id
->route
.addr
.src_addr
, addr
, ip_addr_size(addr
));
2031 ret
= cma_get_port(id_priv
);
2037 if (!cma_any_addr(addr
)) {
2039 cma_detach_from_dev(id_priv
);
2040 mutex_unlock(&lock
);
2043 cma_comp_exch(id_priv
, CMA_ADDR_BOUND
, CMA_IDLE
);
2046 EXPORT_SYMBOL(rdma_bind_addr
);
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
	struct sockaddr_in *src4, *dst4;
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	src4 = (struct sockaddr_in *) &route->addr.src_addr;
	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

	if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
	sdp_set_ip_ver(sdp_hdr, 4);
	sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
	sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
	sdp_hdr->port = src4->sin_port;

	cma_hdr->cma_version = CMA_VERSION;
	cma_set_ip_ver(cma_hdr, 4);
	cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
	cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
	cma_hdr->port = src4->sin_port;
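/*
 * cma_format_hdr() builds the header that cma_get_net_info() parses on
 * the passive side: for RDMA_PS_SDP it writes the address fields of the
 * SDP hello header in place, for the other port spaces it emits a
 * struct cma_hdr carrying CMA_VERSION, the IP version nibble, the source
 * port and both IPv4 addresses at the start of the CM private data.
 */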
2080 static int cma_sidr_rep_handler(struct ib_cm_id
*cm_id
,
2081 struct ib_cm_event
*ib_event
)
2083 struct rdma_id_private
*id_priv
= cm_id
->context
;
2084 struct rdma_cm_event event
;
2085 struct ib_cm_sidr_rep_event_param
*rep
= &ib_event
->param
.sidr_rep_rcvd
;
2088 if (cma_disable_remove(id_priv
, CMA_CONNECT
))
2091 memset(&event
, 0, sizeof event
);
2092 switch (ib_event
->event
) {
2093 case IB_CM_SIDR_REQ_ERROR
:
2094 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2095 event
.status
= -ETIMEDOUT
;
2097 case IB_CM_SIDR_REP_RECEIVED
:
2098 event
.param
.ud
.private_data
= ib_event
->private_data
;
2099 event
.param
.ud
.private_data_len
= IB_CM_SIDR_REP_PRIVATE_DATA_SIZE
;
2100 if (rep
->status
!= IB_SIDR_SUCCESS
) {
2101 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2102 event
.status
= ib_event
->param
.sidr_rep_rcvd
.status
;
2105 if (id_priv
->qkey
!= rep
->qkey
) {
2106 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2107 event
.status
= -EINVAL
;
2110 ib_init_ah_from_path(id_priv
->id
.device
, id_priv
->id
.port_num
,
2111 id_priv
->id
.route
.path_rec
,
2112 &event
.param
.ud
.ah_attr
);
2113 event
.param
.ud
.qp_num
= rep
->qpn
;
2114 event
.param
.ud
.qkey
= rep
->qkey
;
2115 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
2119 printk(KERN_ERR
"RDMA CMA: unexpected IB CM event: %d",
2124 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
2126 /* Destroy the CM ID by returning a non-zero value. */
2127 id_priv
->cm_id
.ib
= NULL
;
2128 cma_exch(id_priv
, CMA_DESTROYING
);
2129 cma_enable_remove(id_priv
);
2130 rdma_destroy_id(&id_priv
->id
);
2134 cma_enable_remove(id_priv
);
2138 static int cma_resolve_ib_udp(struct rdma_id_private
*id_priv
,
2139 struct rdma_conn_param
*conn_param
)
2141 struct ib_cm_sidr_req_param req
;
2142 struct rdma_route
*route
;
2145 req
.private_data_len
= sizeof(struct cma_hdr
) +
2146 conn_param
->private_data_len
;
2147 req
.private_data
= kzalloc(req
.private_data_len
, GFP_ATOMIC
);
2148 if (!req
.private_data
)
2151 if (conn_param
->private_data
&& conn_param
->private_data_len
)
2152 memcpy((void *) req
.private_data
+ sizeof(struct cma_hdr
),
2153 conn_param
->private_data
, conn_param
->private_data_len
);
2155 route
= &id_priv
->id
.route
;
2156 ret
= cma_format_hdr((void *) req
.private_data
, id_priv
->id
.ps
, route
);
2160 id_priv
->cm_id
.ib
= ib_create_cm_id(id_priv
->id
.device
,
2161 cma_sidr_rep_handler
, id_priv
);
2162 if (IS_ERR(id_priv
->cm_id
.ib
)) {
2163 ret
= PTR_ERR(id_priv
->cm_id
.ib
);
2167 req
.path
= route
->path_rec
;
2168 req
.service_id
= cma_get_service_id(id_priv
->id
.ps
,
2169 &route
->addr
.dst_addr
);
2170 req
.timeout_ms
= 1 << (CMA_CM_RESPONSE_TIMEOUT
- 8);
2171 req
.max_cm_retries
= CMA_MAX_CM_RETRIES
;
2173 ret
= ib_send_cm_sidr_req(id_priv
->cm_id
.ib
, &req
);
2175 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
2176 id_priv
->cm_id
.ib
= NULL
;
2179 kfree(req
.private_data
);
2183 static int cma_connect_ib(struct rdma_id_private
*id_priv
,
2184 struct rdma_conn_param
*conn_param
)
2186 struct ib_cm_req_param req
;
2187 struct rdma_route
*route
;
2191 memset(&req
, 0, sizeof req
);
2192 offset
= cma_user_data_offset(id_priv
->id
.ps
);
2193 req
.private_data_len
= offset
+ conn_param
->private_data_len
;
2194 private_data
= kzalloc(req
.private_data_len
, GFP_ATOMIC
);
2198 if (conn_param
->private_data
&& conn_param
->private_data_len
)
2199 memcpy(private_data
+ offset
, conn_param
->private_data
,
2200 conn_param
->private_data_len
);
2202 id_priv
->cm_id
.ib
= ib_create_cm_id(id_priv
->id
.device
, cma_ib_handler
,
2204 if (IS_ERR(id_priv
->cm_id
.ib
)) {
2205 ret
= PTR_ERR(id_priv
->cm_id
.ib
);
2209 route
= &id_priv
->id
.route
;
2210 ret
= cma_format_hdr(private_data
, id_priv
->id
.ps
, route
);
2213 req
.private_data
= private_data
;
2215 req
.primary_path
= &route
->path_rec
[0];
2216 if (route
->num_paths
== 2)
2217 req
.alternate_path
= &route
->path_rec
[1];
2219 req
.service_id
= cma_get_service_id(id_priv
->id
.ps
,
2220 &route
->addr
.dst_addr
);
2221 req
.qp_num
= id_priv
->qp_num
;
2222 req
.qp_type
= IB_QPT_RC
;
2223 req
.starting_psn
= id_priv
->seq_num
;
2224 req
.responder_resources
= conn_param
->responder_resources
;
2225 req
.initiator_depth
= conn_param
->initiator_depth
;
2226 req
.flow_control
= conn_param
->flow_control
;
2227 req
.retry_count
= conn_param
->retry_count
;
2228 req
.rnr_retry_count
= conn_param
->rnr_retry_count
;
2229 req
.remote_cm_response_timeout
= CMA_CM_RESPONSE_TIMEOUT
;
2230 req
.local_cm_response_timeout
= CMA_CM_RESPONSE_TIMEOUT
;
2231 req
.max_cm_retries
= CMA_MAX_CM_RETRIES
;
2232 req
.srq
= id_priv
->srq
? 1 : 0;
2234 ret
= ib_send_cm_req(id_priv
->cm_id
.ib
, &req
);
2236 if (ret
&& !IS_ERR(id_priv
->cm_id
.ib
)) {
2237 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
2238 id_priv
->cm_id
.ib
= NULL
;
2241 kfree(private_data
);
2245 static int cma_connect_iw(struct rdma_id_private
*id_priv
,
2246 struct rdma_conn_param
*conn_param
)
2248 struct iw_cm_id
*cm_id
;
2249 struct sockaddr_in
* sin
;
2251 struct iw_cm_conn_param iw_param
;
2253 cm_id
= iw_create_cm_id(id_priv
->id
.device
, cma_iw_handler
, id_priv
);
2254 if (IS_ERR(cm_id
)) {
2255 ret
= PTR_ERR(cm_id
);
2259 id_priv
->cm_id
.iw
= cm_id
;
2261 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
2262 cm_id
->local_addr
= *sin
;
2264 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.dst_addr
;
2265 cm_id
->remote_addr
= *sin
;
2267 ret
= cma_modify_qp_rtr(&id_priv
->id
);
2271 iw_param
.ord
= conn_param
->initiator_depth
;
2272 iw_param
.ird
= conn_param
->responder_resources
;
2273 iw_param
.private_data
= conn_param
->private_data
;
2274 iw_param
.private_data_len
= conn_param
->private_data_len
;
2276 iw_param
.qpn
= id_priv
->qp_num
;
2278 iw_param
.qpn
= conn_param
->qp_num
;
2279 ret
= iw_cm_connect(cm_id
, &iw_param
);
2281 if (ret
&& !IS_ERR(cm_id
)) {
2282 iw_destroy_cm_id(cm_id
);
2283 id_priv
->cm_id
.iw
= NULL
;
2288 int rdma_connect(struct rdma_cm_id
*id
, struct rdma_conn_param
*conn_param
)
2290 struct rdma_id_private
*id_priv
;
2293 id_priv
= container_of(id
, struct rdma_id_private
, id
);
2294 if (!cma_comp_exch(id_priv
, CMA_ROUTE_RESOLVED
, CMA_CONNECT
))
2298 id_priv
->qp_num
= conn_param
->qp_num
;
2299 id_priv
->srq
= conn_param
->srq
;
2302 switch (rdma_node_get_transport(id
->device
->node_type
)) {
2303 case RDMA_TRANSPORT_IB
:
2304 if (cma_is_ud_ps(id
->ps
))
2305 ret
= cma_resolve_ib_udp(id_priv
, conn_param
);
2307 ret
= cma_connect_ib(id_priv
, conn_param
);
2309 case RDMA_TRANSPORT_IWARP
:
2310 ret
= cma_connect_iw(id_priv
, conn_param
);
2321 cma_comp_exch(id_priv
, CMA_CONNECT
, CMA_ROUTE_RESOLVED
);
2324 EXPORT_SYMBOL(rdma_connect
);
2326 static int cma_accept_ib(struct rdma_id_private
*id_priv
,
2327 struct rdma_conn_param
*conn_param
)
2329 struct ib_cm_rep_param rep
;
2330 struct ib_qp_attr qp_attr
;
2331 int qp_attr_mask
, ret
;
2333 if (id_priv
->id
.qp
) {
2334 ret
= cma_modify_qp_rtr(&id_priv
->id
);
2338 qp_attr
.qp_state
= IB_QPS_RTS
;
2339 ret
= ib_cm_init_qp_attr(id_priv
->cm_id
.ib
, &qp_attr
,
2344 qp_attr
.max_rd_atomic
= conn_param
->initiator_depth
;
2345 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, qp_attr_mask
);
2350 memset(&rep
, 0, sizeof rep
);
2351 rep
.qp_num
= id_priv
->qp_num
;
2352 rep
.starting_psn
= id_priv
->seq_num
;
2353 rep
.private_data
= conn_param
->private_data
;
2354 rep
.private_data_len
= conn_param
->private_data_len
;
2355 rep
.responder_resources
= conn_param
->responder_resources
;
2356 rep
.initiator_depth
= conn_param
->initiator_depth
;
2357 rep
.failover_accepted
= 0;
2358 rep
.flow_control
= conn_param
->flow_control
;
2359 rep
.rnr_retry_count
= conn_param
->rnr_retry_count
;
2360 rep
.srq
= id_priv
->srq
? 1 : 0;
2362 ret
= ib_send_cm_rep(id_priv
->cm_id
.ib
, &rep
);
2367 static int cma_accept_iw(struct rdma_id_private
*id_priv
,
2368 struct rdma_conn_param
*conn_param
)
2370 struct iw_cm_conn_param iw_param
;
2373 ret
= cma_modify_qp_rtr(&id_priv
->id
);
2377 iw_param
.ord
= conn_param
->initiator_depth
;
2378 iw_param
.ird
= conn_param
->responder_resources
;
2379 iw_param
.private_data
= conn_param
->private_data
;
2380 iw_param
.private_data_len
= conn_param
->private_data_len
;
2381 if (id_priv
->id
.qp
) {
2382 iw_param
.qpn
= id_priv
->qp_num
;
2384 iw_param
.qpn
= conn_param
->qp_num
;
2386 return iw_cm_accept(id_priv
->cm_id
.iw
, &iw_param
);
2389 static int cma_send_sidr_rep(struct rdma_id_private
*id_priv
,
2390 enum ib_cm_sidr_status status
,
2391 const void *private_data
, int private_data_len
)
2393 struct ib_cm_sidr_rep_param rep
;
2395 memset(&rep
, 0, sizeof rep
);
2396 rep
.status
= status
;
2397 if (status
== IB_SIDR_SUCCESS
) {
2398 rep
.qp_num
= id_priv
->qp_num
;
2399 rep
.qkey
= id_priv
->qkey
;
2401 rep
.private_data
= private_data
;
2402 rep
.private_data_len
= private_data_len
;
2404 return ib_send_cm_sidr_rep(id_priv
->cm_id
.ib
, &rep
);
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
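
/*
 * rdma_notify() lets the ULP forward asynchronous QP events (for example a
 * communication-established notification) to the CM so that connection
 * state stays in sync with the hardware.
 */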
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
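
/*
 * Completion handler for ib_sa_join_multicast().  On success the group is
 * attached to the id's QP and a MULTICAST_JOIN event (carrying the AH
 * attributes, Q_Key and a wildcard QPN) is reported; otherwise a
 * MULTICAST_ERROR event is generated.  A non-zero return from the ULP's
 * event handler destroys the id.
 */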
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	cma_enable_remove(id_priv);
	return 0;
}
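
/*
 * Derive the multicast GID used for the join: a zero MGID for wildcard
 * addresses, the IPv6 address itself when it already encodes an SA-assigned
 * MGID, and an IPoIB-style mapping of the IPv4 group otherwise (with the
 * RDMA CM signature byte for RDMA_PS_UDP and the partition key embedded in
 * the mapped address).
 */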
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
		mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
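
/*
 * Build an MCMemberRecord from the bound device address and the requested
 * group address, then ask the SA to join on the id's behalf.  The join is
 * asynchronous; cma_ib_mc_handler() reports the result.
 */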
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
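
/*
 * rdma_join_multicast() may be called once an address is bound or resolved.
 * The multicast entry is linked on the id's mc_list under id_priv->lock and
 * unlinked again if the transport-specific join fails.
 */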
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
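
/*
 * ib_client add callback: allocate per-device tracking state, register it
 * as client data, and replay any wildcard listens on the new device.
 */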
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
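
/*
 * Move an id into CMA_DEVICE_REMOVAL, cancel whatever operation it had in
 * flight, and wait for dev_remove to drop to zero before reporting
 * RDMA_CM_EVENT_DEVICE_REMOVAL.  A non-zero return tells the caller to
 * destroy the id.
 */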
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		return 0;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	return id_priv->id.event_handler(&id_priv->id, &event);
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		if (cma_internal_listen(id_priv)) {
			cma_destroy_listen(id_priv);
			continue;
		}

		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
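
/*
 * Module initialization: seed next_port with a random value inside the
 * local port range, create the rdma_cm workqueue, and register with the
 * SA, address-resolution and IB client frameworks.  A registration failure
 * unwinds in reverse order.
 */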
static int cma_init(void)
{
	int ret, low, high;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	next_port = ((unsigned int) next_port % (high - low)) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);