/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
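/*
 * The response timeout and MRA delay above are IB CM time codes: a value
 * of t corresponds to roughly 4.096us * 2^t.  A response timeout of 20 is
 * therefore about 4.3 seconds, and the MRA service timeout of 24 is on
 * the order of a minute, with up to CMA_MAX_CM_RETRIES request resends.
 */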
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum cma_state		state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};
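/*
 * Locking overview: ->lock protects ->state and is taken in cma_comp(),
 * cma_comp_exch() and cma_exch(); handler_mutex serializes event callbacks
 * delivered to the ULP (see cma_disable_callback()); refcount together
 * with comp lets rdma_destroy_id() wait until all outstanding callbacks
 * and queries have dropped their references before the id is freed.
 */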
struct cma_multicast {
	struct rdma_id_private	*id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};
struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};
struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__be16 port;
	__be16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION	0x00
#define SDP_MAJ_VERSION 0x2
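/*
 * The ip_version fields above pack the IP version into the high nibble:
 * cma_set_ip_ver(hdr, 4) below stores 4 in bits 7:4, so cma_get_ip_ver()
 * returns 4.  These headers sit at the start of the CM REQ/SIDR_REQ
 * private data, ahead of any user-supplied private data (see
 * cma_format_hdr() and cma_user_data_offset()).
 */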
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}
static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}
static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}
static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}
static int cma_set_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey)
		return 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	rdma_addr_get_sgid(dev_addr, &gid);
	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}
static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum cma_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}
static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
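/*
 * Illustrative sketch of typical ULP usage (hypothetical handler, not part
 * of this file): a consumer drives the id from its event handler, e.g.
 *
 *	static int my_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
 *	{
 *		switch (ev->event) {
 *		case RDMA_CM_EVENT_ADDR_RESOLVED:
 *			return rdma_resolve_route(id, 2000);
 *		case RDMA_CM_EVENT_ROUTE_RESOLVED:
 *			return 0;	// create a QP, then rdma_connect()
 *		default:
 *			return 0;	// non-zero would destroy the id
 *		}
 *	}
 *
 *	id = rdma_create_id(my_handler, ctx, RDMA_PS_TCP);
 *
 * Returning non-zero from the handler asks the CMA to destroy the id, as
 * the callers of event_handler() below show.
 */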
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = id_priv->seq_num;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}
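/*
 * Note the asymmetry with cma_init_conn_qp() below: datagram (UD) QPs are
 * not driven through the CM handshake, so the CMA walks them through
 * INIT -> RTR -> RTS here by itself, while connected QPs are only moved
 * to INIT and make their remaining transitions during connection
 * establishment (see cma_modify_qp_rtr() and cma_modify_qp_rts()).
 */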
static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}
static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}
static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}
static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}
static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}
static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
		    && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
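/*
 * Teardown ordering above: the state is flipped to CMA_DESTROYING first so
 * that new callbacks bail out in cma_disable_callback(), the transport CM
 * id is destroyed to stop further events, and only after cma_deref_id()
 * plus wait_for_completion() have drained every outstanding reference is
 * the id_priv actually freed.
 */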
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}
static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;
	return 0;
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
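/*
 * As in the other event handlers in this file, a non-zero return from the
 * ULP's event_handler() is treated as a request to destroy the id: the CM
 * id pointer is cleared so rdma_destroy_id() does not destroy it a second
 * time, and handler_mutex is dropped before calling rdma_destroy_id() to
 * avoid deadlocking against the callback serialization.
 */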
992 static struct rdma_id_private
*cma_new_conn_id(struct rdma_cm_id
*listen_id
,
993 struct ib_cm_event
*ib_event
)
995 struct rdma_id_private
*id_priv
;
996 struct rdma_cm_id
*id
;
997 struct rdma_route
*rt
;
998 union cma_ip_addr
*src
, *dst
;
1003 if (cma_get_net_info(ib_event
->private_data
, listen_id
->ps
,
1004 &ip_ver
, &port
, &src
, &dst
))
1007 id
= rdma_create_id(listen_id
->event_handler
, listen_id
->context
,
1012 cma_save_net_info(&id
->route
.addr
, &listen_id
->route
.addr
,
1013 ip_ver
, port
, src
, dst
);
1016 rt
->num_paths
= ib_event
->param
.req_rcvd
.alternate_path
? 2 : 1;
1017 rt
->path_rec
= kmalloc(sizeof *rt
->path_rec
* rt
->num_paths
,
1022 rt
->path_rec
[0] = *ib_event
->param
.req_rcvd
.primary_path
;
1023 if (rt
->num_paths
== 2)
1024 rt
->path_rec
[1] = *ib_event
->param
.req_rcvd
.alternate_path
;
1026 if (cma_any_addr((struct sockaddr
*) &rt
->addr
.src_addr
)) {
1027 rt
->addr
.dev_addr
.dev_type
= ARPHRD_INFINIBAND
;
1028 rdma_addr_set_sgid(&rt
->addr
.dev_addr
, &rt
->path_rec
[0].sgid
);
1029 ib_addr_set_pkey(&rt
->addr
.dev_addr
, rt
->path_rec
[0].pkey
);
1031 ret
= rdma_translate_ip((struct sockaddr
*) &rt
->addr
.src_addr
,
1032 &rt
->addr
.dev_addr
);
1036 rdma_addr_set_dgid(&rt
->addr
.dev_addr
, &rt
->path_rec
[0].dgid
);
1038 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1039 id_priv
->state
= CMA_CONNECT
;
1043 rdma_destroy_id(id
);
1048 static struct rdma_id_private
*cma_new_udp_id(struct rdma_cm_id
*listen_id
,
1049 struct ib_cm_event
*ib_event
)
1051 struct rdma_id_private
*id_priv
;
1052 struct rdma_cm_id
*id
;
1053 union cma_ip_addr
*src
, *dst
;
1058 id
= rdma_create_id(listen_id
->event_handler
, listen_id
->context
,
1064 if (cma_get_net_info(ib_event
->private_data
, listen_id
->ps
,
1065 &ip_ver
, &port
, &src
, &dst
))
1068 cma_save_net_info(&id
->route
.addr
, &listen_id
->route
.addr
,
1069 ip_ver
, port
, src
, dst
);
1071 if (!cma_any_addr((struct sockaddr
*) &id
->route
.addr
.src_addr
)) {
1072 ret
= rdma_translate_ip((struct sockaddr
*) &id
->route
.addr
.src_addr
,
1073 &id
->route
.addr
.dev_addr
);
1078 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1079 id_priv
->state
= CMA_CONNECT
;
1082 rdma_destroy_id(id
);
1086 static void cma_set_req_event_data(struct rdma_cm_event
*event
,
1087 struct ib_cm_req_event_param
*req_data
,
1088 void *private_data
, int offset
)
1090 event
->param
.conn
.private_data
= private_data
+ offset
;
1091 event
->param
.conn
.private_data_len
= IB_CM_REQ_PRIVATE_DATA_SIZE
- offset
;
1092 event
->param
.conn
.responder_resources
= req_data
->responder_resources
;
1093 event
->param
.conn
.initiator_depth
= req_data
->initiator_depth
;
1094 event
->param
.conn
.flow_control
= req_data
->flow_control
;
1095 event
->param
.conn
.retry_count
= req_data
->retry_count
;
1096 event
->param
.conn
.rnr_retry_count
= req_data
->rnr_retry_count
;
1097 event
->param
.conn
.srq
= req_data
->srq
;
1098 event
->param
.conn
.qp_num
= req_data
->remote_qpn
;
1101 static int cma_req_handler(struct ib_cm_id
*cm_id
, struct ib_cm_event
*ib_event
)
1103 struct rdma_id_private
*listen_id
, *conn_id
;
1104 struct rdma_cm_event event
;
1107 listen_id
= cm_id
->context
;
1108 if (cma_disable_callback(listen_id
, CMA_LISTEN
))
1109 return -ECONNABORTED
;
1111 memset(&event
, 0, sizeof event
);
1112 offset
= cma_user_data_offset(listen_id
->id
.ps
);
1113 event
.event
= RDMA_CM_EVENT_CONNECT_REQUEST
;
1114 if (cma_is_ud_ps(listen_id
->id
.ps
)) {
1115 conn_id
= cma_new_udp_id(&listen_id
->id
, ib_event
);
1116 event
.param
.ud
.private_data
= ib_event
->private_data
+ offset
;
1117 event
.param
.ud
.private_data_len
=
1118 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE
- offset
;
1120 conn_id
= cma_new_conn_id(&listen_id
->id
, ib_event
);
1121 cma_set_req_event_data(&event
, &ib_event
->param
.req_rcvd
,
1122 ib_event
->private_data
, offset
);
1129 mutex_lock_nested(&conn_id
->handler_mutex
, SINGLE_DEPTH_NESTING
);
1131 ret
= cma_acquire_dev(conn_id
);
1132 mutex_unlock(&lock
);
1134 goto release_conn_id
;
1136 conn_id
->cm_id
.ib
= cm_id
;
1137 cm_id
->context
= conn_id
;
1138 cm_id
->cm_handler
= cma_ib_handler
;
1140 ret
= conn_id
->id
.event_handler(&conn_id
->id
, &event
);
1143 * Acquire mutex to prevent user executing rdma_destroy_id()
1144 * while we're accessing the cm_id.
1147 if (cma_comp(conn_id
, CMA_CONNECT
) &&
1148 !cma_is_ud_ps(conn_id
->id
.ps
))
1149 ib_send_cm_mra(cm_id
, CMA_CM_MRA_SETTING
, NULL
, 0);
1150 mutex_unlock(&lock
);
1151 mutex_unlock(&conn_id
->handler_mutex
);
1155 /* Destroy the CM ID by returning a non-zero value. */
1156 conn_id
->cm_id
.ib
= NULL
;
1159 cma_exch(conn_id
, CMA_DESTROYING
);
1160 mutex_unlock(&conn_id
->handler_mutex
);
1161 rdma_destroy_id(&conn_id
->id
);
1164 mutex_unlock(&listen_id
->handler_mutex
);
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}
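/*
 * Worked example: the service ID is simply the port space in the upper
 * bits with the port number in the low 16 bits.  Assuming RDMA_PS_TCP is
 * 0x0106 (as defined in rdma_cm.h), a listener on TCP port 5000 (0x1388)
 * gets service ID 0x0000000001061388, stored in network byte order.
 */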
1173 static void cma_set_compare_data(enum rdma_port_space ps
, struct sockaddr
*addr
,
1174 struct ib_cm_compare_data
*compare
)
1176 struct cma_hdr
*cma_data
, *cma_mask
;
1177 struct sdp_hh
*sdp_data
, *sdp_mask
;
1179 struct in6_addr ip6_addr
;
1181 memset(compare
, 0, sizeof *compare
);
1182 cma_data
= (void *) compare
->data
;
1183 cma_mask
= (void *) compare
->mask
;
1184 sdp_data
= (void *) compare
->data
;
1185 sdp_mask
= (void *) compare
->mask
;
1187 switch (addr
->sa_family
) {
1189 ip4_addr
= ((struct sockaddr_in
*) addr
)->sin_addr
.s_addr
;
1190 if (ps
== RDMA_PS_SDP
) {
1191 sdp_set_ip_ver(sdp_data
, 4);
1192 sdp_set_ip_ver(sdp_mask
, 0xF);
1193 sdp_data
->dst_addr
.ip4
.addr
= ip4_addr
;
1194 sdp_mask
->dst_addr
.ip4
.addr
= htonl(~0);
1196 cma_set_ip_ver(cma_data
, 4);
1197 cma_set_ip_ver(cma_mask
, 0xF);
1198 cma_data
->dst_addr
.ip4
.addr
= ip4_addr
;
1199 cma_mask
->dst_addr
.ip4
.addr
= htonl(~0);
1203 ip6_addr
= ((struct sockaddr_in6
*) addr
)->sin6_addr
;
1204 if (ps
== RDMA_PS_SDP
) {
1205 sdp_set_ip_ver(sdp_data
, 6);
1206 sdp_set_ip_ver(sdp_mask
, 0xF);
1207 sdp_data
->dst_addr
.ip6
= ip6_addr
;
1208 memset(&sdp_mask
->dst_addr
.ip6
, 0xFF,
1209 sizeof sdp_mask
->dst_addr
.ip6
);
1211 cma_set_ip_ver(cma_data
, 6);
1212 cma_set_ip_ver(cma_mask
, 0xF);
1213 cma_data
->dst_addr
.ip6
= ip6_addr
;
1214 memset(&cma_mask
->dst_addr
.ip6
, 0xFF,
1215 sizeof cma_mask
->dst_addr
.ip6
);
1223 static int cma_iw_handler(struct iw_cm_id
*iw_id
, struct iw_cm_event
*iw_event
)
1225 struct rdma_id_private
*id_priv
= iw_id
->context
;
1226 struct rdma_cm_event event
;
1227 struct sockaddr_in
*sin
;
1230 if (cma_disable_callback(id_priv
, CMA_CONNECT
))
1233 memset(&event
, 0, sizeof event
);
1234 switch (iw_event
->event
) {
1235 case IW_CM_EVENT_CLOSE
:
1236 event
.event
= RDMA_CM_EVENT_DISCONNECTED
;
1238 case IW_CM_EVENT_CONNECT_REPLY
:
1239 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1240 *sin
= iw_event
->local_addr
;
1241 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.dst_addr
;
1242 *sin
= iw_event
->remote_addr
;
1243 switch (iw_event
->status
) {
1245 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1249 event
.event
= RDMA_CM_EVENT_REJECTED
;
1252 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
1255 event
.event
= RDMA_CM_EVENT_CONNECT_ERROR
;
1259 case IW_CM_EVENT_ESTABLISHED
:
1260 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1266 event
.status
= iw_event
->status
;
1267 event
.param
.conn
.private_data
= iw_event
->private_data
;
1268 event
.param
.conn
.private_data_len
= iw_event
->private_data_len
;
1269 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
1271 /* Destroy the CM ID by returning a non-zero value. */
1272 id_priv
->cm_id
.iw
= NULL
;
1273 cma_exch(id_priv
, CMA_DESTROYING
);
1274 mutex_unlock(&id_priv
->handler_mutex
);
1275 rdma_destroy_id(&id_priv
->id
);
1279 mutex_unlock(&id_priv
->handler_mutex
);
1283 static int iw_conn_req_handler(struct iw_cm_id
*cm_id
,
1284 struct iw_cm_event
*iw_event
)
1286 struct rdma_cm_id
*new_cm_id
;
1287 struct rdma_id_private
*listen_id
, *conn_id
;
1288 struct sockaddr_in
*sin
;
1289 struct net_device
*dev
= NULL
;
1290 struct rdma_cm_event event
;
1292 struct ib_device_attr attr
;
1294 listen_id
= cm_id
->context
;
1295 if (cma_disable_callback(listen_id
, CMA_LISTEN
))
1296 return -ECONNABORTED
;
1298 /* Create a new RDMA id for the new IW CM ID */
1299 new_cm_id
= rdma_create_id(listen_id
->id
.event_handler
,
1300 listen_id
->id
.context
,
1302 if (IS_ERR(new_cm_id
)) {
1306 conn_id
= container_of(new_cm_id
, struct rdma_id_private
, id
);
1307 mutex_lock_nested(&conn_id
->handler_mutex
, SINGLE_DEPTH_NESTING
);
1308 conn_id
->state
= CMA_CONNECT
;
1310 dev
= ip_dev_find(&init_net
, iw_event
->local_addr
.sin_addr
.s_addr
);
1312 ret
= -EADDRNOTAVAIL
;
1313 mutex_unlock(&conn_id
->handler_mutex
);
1314 rdma_destroy_id(new_cm_id
);
1317 ret
= rdma_copy_addr(&conn_id
->id
.route
.addr
.dev_addr
, dev
, NULL
);
1319 mutex_unlock(&conn_id
->handler_mutex
);
1320 rdma_destroy_id(new_cm_id
);
1325 ret
= cma_acquire_dev(conn_id
);
1326 mutex_unlock(&lock
);
1328 mutex_unlock(&conn_id
->handler_mutex
);
1329 rdma_destroy_id(new_cm_id
);
1333 conn_id
->cm_id
.iw
= cm_id
;
1334 cm_id
->context
= conn_id
;
1335 cm_id
->cm_handler
= cma_iw_handler
;
1337 sin
= (struct sockaddr_in
*) &new_cm_id
->route
.addr
.src_addr
;
1338 *sin
= iw_event
->local_addr
;
1339 sin
= (struct sockaddr_in
*) &new_cm_id
->route
.addr
.dst_addr
;
1340 *sin
= iw_event
->remote_addr
;
1342 ret
= ib_query_device(conn_id
->id
.device
, &attr
);
1344 mutex_unlock(&conn_id
->handler_mutex
);
1345 rdma_destroy_id(new_cm_id
);
1349 memset(&event
, 0, sizeof event
);
1350 event
.event
= RDMA_CM_EVENT_CONNECT_REQUEST
;
1351 event
.param
.conn
.private_data
= iw_event
->private_data
;
1352 event
.param
.conn
.private_data_len
= iw_event
->private_data_len
;
1353 event
.param
.conn
.initiator_depth
= attr
.max_qp_init_rd_atom
;
1354 event
.param
.conn
.responder_resources
= attr
.max_qp_rd_atom
;
1355 ret
= conn_id
->id
.event_handler(&conn_id
->id
, &event
);
1357 /* User wants to destroy the CM ID */
1358 conn_id
->cm_id
.iw
= NULL
;
1359 cma_exch(conn_id
, CMA_DESTROYING
);
1360 mutex_unlock(&conn_id
->handler_mutex
);
1361 rdma_destroy_id(&conn_id
->id
);
1365 mutex_unlock(&conn_id
->handler_mutex
);
1370 mutex_unlock(&listen_id
->handler_mutex
);
1374 static int cma_ib_listen(struct rdma_id_private
*id_priv
)
1376 struct ib_cm_compare_data compare_data
;
1377 struct sockaddr
*addr
;
1381 id_priv
->cm_id
.ib
= ib_create_cm_id(id_priv
->id
.device
, cma_req_handler
,
1383 if (IS_ERR(id_priv
->cm_id
.ib
))
1384 return PTR_ERR(id_priv
->cm_id
.ib
);
1386 addr
= (struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
;
1387 svc_id
= cma_get_service_id(id_priv
->id
.ps
, addr
);
1388 if (cma_any_addr(addr
))
1389 ret
= ib_cm_listen(id_priv
->cm_id
.ib
, svc_id
, 0, NULL
);
1391 cma_set_compare_data(id_priv
->id
.ps
, addr
, &compare_data
);
1392 ret
= ib_cm_listen(id_priv
->cm_id
.ib
, svc_id
, 0, &compare_data
);
1396 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
1397 id_priv
->cm_id
.ib
= NULL
;
1403 static int cma_iw_listen(struct rdma_id_private
*id_priv
, int backlog
)
1406 struct sockaddr_in
*sin
;
1408 id_priv
->cm_id
.iw
= iw_create_cm_id(id_priv
->id
.device
,
1409 iw_conn_req_handler
,
1411 if (IS_ERR(id_priv
->cm_id
.iw
))
1412 return PTR_ERR(id_priv
->cm_id
.iw
);
1414 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1415 id_priv
->cm_id
.iw
->local_addr
= *sin
;
1417 ret
= iw_cm_listen(id_priv
->cm_id
.iw
, backlog
);
1420 iw_destroy_cm_id(id_priv
->cm_id
.iw
);
1421 id_priv
->cm_id
.iw
= NULL
;
1427 static int cma_listen_handler(struct rdma_cm_id
*id
,
1428 struct rdma_cm_event
*event
)
1430 struct rdma_id_private
*id_priv
= id
->context
;
1432 id
->context
= id_priv
->id
.context
;
1433 id
->event_handler
= id_priv
->id
.event_handler
;
1434 return id_priv
->id
.event_handler(id
, event
);
1437 static void cma_listen_on_dev(struct rdma_id_private
*id_priv
,
1438 struct cma_device
*cma_dev
)
1440 struct rdma_id_private
*dev_id_priv
;
1441 struct rdma_cm_id
*id
;
1444 id
= rdma_create_id(cma_listen_handler
, id_priv
, id_priv
->id
.ps
);
1448 dev_id_priv
= container_of(id
, struct rdma_id_private
, id
);
1450 dev_id_priv
->state
= CMA_ADDR_BOUND
;
1451 memcpy(&id
->route
.addr
.src_addr
, &id_priv
->id
.route
.addr
.src_addr
,
1452 ip_addr_size((struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
));
1454 cma_attach_to_dev(dev_id_priv
, cma_dev
);
1455 list_add_tail(&dev_id_priv
->listen_list
, &id_priv
->listen_list
);
1456 atomic_inc(&id_priv
->refcount
);
1457 dev_id_priv
->internal_id
= 1;
1459 ret
= rdma_listen(id
, id_priv
->backlog
);
1461 printk(KERN_WARNING
"RDMA CMA: cma_listen_on_dev, error %d, "
1462 "listening on device %s\n", ret
, cma_dev
->device
->name
);
1465 static void cma_listen_on_all(struct rdma_id_private
*id_priv
)
1467 struct cma_device
*cma_dev
;
1470 list_add_tail(&id_priv
->list
, &listen_any_list
);
1471 list_for_each_entry(cma_dev
, &dev_list
, list
)
1472 cma_listen_on_dev(id_priv
, cma_dev
);
1473 mutex_unlock(&lock
);
1476 int rdma_listen(struct rdma_cm_id
*id
, int backlog
)
1478 struct rdma_id_private
*id_priv
;
1481 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1482 if (id_priv
->state
== CMA_IDLE
) {
1483 ((struct sockaddr
*) &id
->route
.addr
.src_addr
)->sa_family
= AF_INET
;
1484 ret
= rdma_bind_addr(id
, (struct sockaddr
*) &id
->route
.addr
.src_addr
);
1489 if (!cma_comp_exch(id_priv
, CMA_ADDR_BOUND
, CMA_LISTEN
))
1492 id_priv
->backlog
= backlog
;
1494 switch (rdma_node_get_transport(id
->device
->node_type
)) {
1495 case RDMA_TRANSPORT_IB
:
1496 ret
= cma_ib_listen(id_priv
);
1500 case RDMA_TRANSPORT_IWARP
:
1501 ret
= cma_iw_listen(id_priv
, backlog
);
1510 cma_listen_on_all(id_priv
);
1514 id_priv
->backlog
= 0;
1515 cma_comp_exch(id_priv
, CMA_LISTEN
, CMA_ADDR_BOUND
);
1518 EXPORT_SYMBOL(rdma_listen
);
1520 void rdma_set_service_type(struct rdma_cm_id
*id
, int tos
)
1522 struct rdma_id_private
*id_priv
;
1524 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1525 id_priv
->tos
= (u8
) tos
;
1527 EXPORT_SYMBOL(rdma_set_service_type
);
1529 static void cma_query_handler(int status
, struct ib_sa_path_rec
*path_rec
,
1532 struct cma_work
*work
= context
;
1533 struct rdma_route
*route
;
1535 route
= &work
->id
->id
.route
;
1538 route
->num_paths
= 1;
1539 *route
->path_rec
= *path_rec
;
1541 work
->old_state
= CMA_ROUTE_QUERY
;
1542 work
->new_state
= CMA_ADDR_RESOLVED
;
1543 work
->event
.event
= RDMA_CM_EVENT_ROUTE_ERROR
;
1544 work
->event
.status
= status
;
1547 queue_work(cma_wq
, &work
->work
);
1550 static int cma_query_ib_route(struct rdma_id_private
*id_priv
, int timeout_ms
,
1551 struct cma_work
*work
)
1553 struct rdma_addr
*addr
= &id_priv
->id
.route
.addr
;
1554 struct ib_sa_path_rec path_rec
;
1555 ib_sa_comp_mask comp_mask
;
1556 struct sockaddr_in6
*sin6
;
1558 memset(&path_rec
, 0, sizeof path_rec
);
1559 rdma_addr_get_sgid(&addr
->dev_addr
, &path_rec
.sgid
);
1560 rdma_addr_get_dgid(&addr
->dev_addr
, &path_rec
.dgid
);
1561 path_rec
.pkey
= cpu_to_be16(ib_addr_get_pkey(&addr
->dev_addr
));
1562 path_rec
.numb_path
= 1;
1563 path_rec
.reversible
= 1;
1564 path_rec
.service_id
= cma_get_service_id(id_priv
->id
.ps
,
1565 (struct sockaddr
*) &addr
->dst_addr
);
1567 comp_mask
= IB_SA_PATH_REC_DGID
| IB_SA_PATH_REC_SGID
|
1568 IB_SA_PATH_REC_PKEY
| IB_SA_PATH_REC_NUMB_PATH
|
1569 IB_SA_PATH_REC_REVERSIBLE
| IB_SA_PATH_REC_SERVICE_ID
;
1571 if (addr
->src_addr
.ss_family
== AF_INET
) {
1572 path_rec
.qos_class
= cpu_to_be16((u16
) id_priv
->tos
);
1573 comp_mask
|= IB_SA_PATH_REC_QOS_CLASS
;
1575 sin6
= (struct sockaddr_in6
*) &addr
->src_addr
;
1576 path_rec
.traffic_class
= (u8
) (be32_to_cpu(sin6
->sin6_flowinfo
) >> 20);
1577 comp_mask
|= IB_SA_PATH_REC_TRAFFIC_CLASS
;
1580 id_priv
->query_id
= ib_sa_path_rec_get(&sa_client
, id_priv
->id
.device
,
1581 id_priv
->id
.port_num
, &path_rec
,
1582 comp_mask
, timeout_ms
,
1583 GFP_KERNEL
, cma_query_handler
,
1584 work
, &id_priv
->query
);
1586 return (id_priv
->query_id
< 0) ? id_priv
->query_id
: 0;
1589 static void cma_work_handler(struct work_struct
*_work
)
1591 struct cma_work
*work
= container_of(_work
, struct cma_work
, work
);
1592 struct rdma_id_private
*id_priv
= work
->id
;
1595 mutex_lock(&id_priv
->handler_mutex
);
1596 if (!cma_comp_exch(id_priv
, work
->old_state
, work
->new_state
))
1599 if (id_priv
->id
.event_handler(&id_priv
->id
, &work
->event
)) {
1600 cma_exch(id_priv
, CMA_DESTROYING
);
1604 mutex_unlock(&id_priv
->handler_mutex
);
1605 cma_deref_id(id_priv
);
1607 rdma_destroy_id(&id_priv
->id
);
1611 static void cma_ndev_work_handler(struct work_struct
*_work
)
1613 struct cma_ndev_work
*work
= container_of(_work
, struct cma_ndev_work
, work
);
1614 struct rdma_id_private
*id_priv
= work
->id
;
1617 mutex_lock(&id_priv
->handler_mutex
);
1618 if (id_priv
->state
== CMA_DESTROYING
||
1619 id_priv
->state
== CMA_DEVICE_REMOVAL
)
1622 if (id_priv
->id
.event_handler(&id_priv
->id
, &work
->event
)) {
1623 cma_exch(id_priv
, CMA_DESTROYING
);
1628 mutex_unlock(&id_priv
->handler_mutex
);
1629 cma_deref_id(id_priv
);
1631 rdma_destroy_id(&id_priv
->id
);
1635 static int cma_resolve_ib_route(struct rdma_id_private
*id_priv
, int timeout_ms
)
1637 struct rdma_route
*route
= &id_priv
->id
.route
;
1638 struct cma_work
*work
;
1641 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1646 INIT_WORK(&work
->work
, cma_work_handler
);
1647 work
->old_state
= CMA_ROUTE_QUERY
;
1648 work
->new_state
= CMA_ROUTE_RESOLVED
;
1649 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
1651 route
->path_rec
= kmalloc(sizeof *route
->path_rec
, GFP_KERNEL
);
1652 if (!route
->path_rec
) {
1657 ret
= cma_query_ib_route(id_priv
, timeout_ms
, work
);
1663 kfree(route
->path_rec
);
1664 route
->path_rec
= NULL
;
1670 int rdma_set_ib_paths(struct rdma_cm_id
*id
,
1671 struct ib_sa_path_rec
*path_rec
, int num_paths
)
1673 struct rdma_id_private
*id_priv
;
1676 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1677 if (!cma_comp_exch(id_priv
, CMA_ADDR_RESOLVED
, CMA_ROUTE_RESOLVED
))
1680 id
->route
.path_rec
= kmalloc(sizeof *path_rec
* num_paths
, GFP_KERNEL
);
1681 if (!id
->route
.path_rec
) {
1686 memcpy(id
->route
.path_rec
, path_rec
, sizeof *path_rec
* num_paths
);
1687 id
->route
.num_paths
= num_paths
;
1690 cma_comp_exch(id_priv
, CMA_ROUTE_RESOLVED
, CMA_ADDR_RESOLVED
);
1693 EXPORT_SYMBOL(rdma_set_ib_paths
);
1695 static int cma_resolve_iw_route(struct rdma_id_private
*id_priv
, int timeout_ms
)
1697 struct cma_work
*work
;
1699 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1704 INIT_WORK(&work
->work
, cma_work_handler
);
1705 work
->old_state
= CMA_ROUTE_QUERY
;
1706 work
->new_state
= CMA_ROUTE_RESOLVED
;
1707 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
1708 queue_work(cma_wq
, &work
->work
);
1712 int rdma_resolve_route(struct rdma_cm_id
*id
, int timeout_ms
)
1714 struct rdma_id_private
*id_priv
;
1717 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1718 if (!cma_comp_exch(id_priv
, CMA_ADDR_RESOLVED
, CMA_ROUTE_QUERY
))
1721 atomic_inc(&id_priv
->refcount
);
1722 switch (rdma_node_get_transport(id
->device
->node_type
)) {
1723 case RDMA_TRANSPORT_IB
:
1724 ret
= cma_resolve_ib_route(id_priv
, timeout_ms
);
1726 case RDMA_TRANSPORT_IWARP
:
1727 ret
= cma_resolve_iw_route(id_priv
, timeout_ms
);
1738 cma_comp_exch(id_priv
, CMA_ROUTE_QUERY
, CMA_ADDR_RESOLVED
);
1739 cma_deref_id(id_priv
);
1742 EXPORT_SYMBOL(rdma_resolve_route
);
1744 static int cma_bind_loopback(struct rdma_id_private
*id_priv
)
1746 struct cma_device
*cma_dev
;
1747 struct ib_port_attr port_attr
;
1754 if (list_empty(&dev_list
)) {
1758 list_for_each_entry(cma_dev
, &dev_list
, list
)
1759 for (p
= 1; p
<= cma_dev
->device
->phys_port_cnt
; ++p
)
1760 if (!ib_query_port(cma_dev
->device
, p
, &port_attr
) &&
1761 port_attr
.state
== IB_PORT_ACTIVE
)
1765 cma_dev
= list_entry(dev_list
.next
, struct cma_device
, list
);
1768 ret
= ib_get_cached_gid(cma_dev
->device
, p
, 0, &gid
);
1772 ret
= ib_get_cached_pkey(cma_dev
->device
, p
, 0, &pkey
);
1776 id_priv
->id
.route
.addr
.dev_addr
.dev_type
=
1777 (rdma_node_get_transport(cma_dev
->device
->node_type
) == RDMA_TRANSPORT_IB
) ?
1778 ARPHRD_INFINIBAND
: ARPHRD_ETHER
;
1780 rdma_addr_set_sgid(&id_priv
->id
.route
.addr
.dev_addr
, &gid
);
1781 ib_addr_set_pkey(&id_priv
->id
.route
.addr
.dev_addr
, pkey
);
1782 id_priv
->id
.port_num
= p
;
1783 cma_attach_to_dev(id_priv
, cma_dev
);
1785 mutex_unlock(&lock
);
1789 static void addr_handler(int status
, struct sockaddr
*src_addr
,
1790 struct rdma_dev_addr
*dev_addr
, void *context
)
1792 struct rdma_id_private
*id_priv
= context
;
1793 struct rdma_cm_event event
;
1795 memset(&event
, 0, sizeof event
);
1796 mutex_lock(&id_priv
->handler_mutex
);
1799 * Grab mutex to block rdma_destroy_id() from removing the device while
1800 * we're trying to acquire it.
1803 if (!cma_comp_exch(id_priv
, CMA_ADDR_QUERY
, CMA_ADDR_RESOLVED
)) {
1804 mutex_unlock(&lock
);
1808 if (!status
&& !id_priv
->cma_dev
)
1809 status
= cma_acquire_dev(id_priv
);
1810 mutex_unlock(&lock
);
1813 if (!cma_comp_exch(id_priv
, CMA_ADDR_RESOLVED
, CMA_ADDR_BOUND
))
1815 event
.event
= RDMA_CM_EVENT_ADDR_ERROR
;
1816 event
.status
= status
;
1818 memcpy(&id_priv
->id
.route
.addr
.src_addr
, src_addr
,
1819 ip_addr_size(src_addr
));
1820 event
.event
= RDMA_CM_EVENT_ADDR_RESOLVED
;
1823 if (id_priv
->id
.event_handler(&id_priv
->id
, &event
)) {
1824 cma_exch(id_priv
, CMA_DESTROYING
);
1825 mutex_unlock(&id_priv
->handler_mutex
);
1826 cma_deref_id(id_priv
);
1827 rdma_destroy_id(&id_priv
->id
);
1831 mutex_unlock(&id_priv
->handler_mutex
);
1832 cma_deref_id(id_priv
);
1835 static int cma_resolve_loopback(struct rdma_id_private
*id_priv
)
1837 struct cma_work
*work
;
1838 struct sockaddr
*src
, *dst
;
1842 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
1846 if (!id_priv
->cma_dev
) {
1847 ret
= cma_bind_loopback(id_priv
);
1852 rdma_addr_get_sgid(&id_priv
->id
.route
.addr
.dev_addr
, &gid
);
1853 rdma_addr_set_dgid(&id_priv
->id
.route
.addr
.dev_addr
, &gid
);
1855 src
= (struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
;
1856 if (cma_zero_addr(src
)) {
1857 dst
= (struct sockaddr
*) &id_priv
->id
.route
.addr
.dst_addr
;
1858 if ((src
->sa_family
= dst
->sa_family
) == AF_INET
) {
1859 ((struct sockaddr_in
*) src
)->sin_addr
.s_addr
=
1860 ((struct sockaddr_in
*) dst
)->sin_addr
.s_addr
;
1862 ipv6_addr_copy(&((struct sockaddr_in6
*) src
)->sin6_addr
,
1863 &((struct sockaddr_in6
*) dst
)->sin6_addr
);
1868 INIT_WORK(&work
->work
, cma_work_handler
);
1869 work
->old_state
= CMA_ADDR_QUERY
;
1870 work
->new_state
= CMA_ADDR_RESOLVED
;
1871 work
->event
.event
= RDMA_CM_EVENT_ADDR_RESOLVED
;
1872 queue_work(cma_wq
, &work
->work
);
1879 static int cma_bind_addr(struct rdma_cm_id
*id
, struct sockaddr
*src_addr
,
1880 struct sockaddr
*dst_addr
)
1882 if (!src_addr
|| !src_addr
->sa_family
) {
1883 src_addr
= (struct sockaddr
*) &id
->route
.addr
.src_addr
;
1884 if ((src_addr
->sa_family
= dst_addr
->sa_family
) == AF_INET6
) {
1885 ((struct sockaddr_in6
*) src_addr
)->sin6_scope_id
=
1886 ((struct sockaddr_in6
*) dst_addr
)->sin6_scope_id
;
1889 return rdma_bind_addr(id
, src_addr
);
1892 int rdma_resolve_addr(struct rdma_cm_id
*id
, struct sockaddr
*src_addr
,
1893 struct sockaddr
*dst_addr
, int timeout_ms
)
1895 struct rdma_id_private
*id_priv
;
1898 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1899 if (id_priv
->state
== CMA_IDLE
) {
1900 ret
= cma_bind_addr(id
, src_addr
, dst_addr
);
1905 if (!cma_comp_exch(id_priv
, CMA_ADDR_BOUND
, CMA_ADDR_QUERY
))
1908 atomic_inc(&id_priv
->refcount
);
1909 memcpy(&id
->route
.addr
.dst_addr
, dst_addr
, ip_addr_size(dst_addr
));
1910 if (cma_any_addr(dst_addr
))
1911 ret
= cma_resolve_loopback(id_priv
);
1913 ret
= rdma_resolve_ip(&addr_client
, (struct sockaddr
*) &id
->route
.addr
.src_addr
,
1914 dst_addr
, &id
->route
.addr
.dev_addr
,
1915 timeout_ms
, addr_handler
, id_priv
);
1921 cma_comp_exch(id_priv
, CMA_ADDR_QUERY
, CMA_ADDR_BOUND
);
1922 cma_deref_id(id_priv
);
1925 EXPORT_SYMBOL(rdma_resolve_addr
);
1927 static void cma_bind_port(struct rdma_bind_list
*bind_list
,
1928 struct rdma_id_private
*id_priv
)
1930 struct sockaddr_in
*sin
;
1932 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1933 sin
->sin_port
= htons(bind_list
->port
);
1934 id_priv
->bind_list
= bind_list
;
1935 hlist_add_head(&id_priv
->node
, &bind_list
->owners
);
1938 static int cma_alloc_port(struct idr
*ps
, struct rdma_id_private
*id_priv
,
1939 unsigned short snum
)
1941 struct rdma_bind_list
*bind_list
;
1944 bind_list
= kzalloc(sizeof *bind_list
, GFP_KERNEL
);
1949 ret
= idr_get_new_above(ps
, bind_list
, snum
, &port
);
1950 } while ((ret
== -EAGAIN
) && idr_pre_get(ps
, GFP_KERNEL
));
1956 ret
= -EADDRNOTAVAIL
;
1961 bind_list
->port
= (unsigned short) port
;
1962 cma_bind_port(bind_list
, id_priv
);
1965 idr_remove(ps
, port
);
1971 static int cma_alloc_any_port(struct idr
*ps
, struct rdma_id_private
*id_priv
)
1973 struct rdma_bind_list
*bind_list
;
1974 int port
, ret
, low
, high
;
1976 bind_list
= kzalloc(sizeof *bind_list
, GFP_KERNEL
);
1981 /* FIXME: add proper port randomization per like inet_csk_get_port */
1983 ret
= idr_get_new_above(ps
, bind_list
, next_port
, &port
);
1984 } while ((ret
== -EAGAIN
) && idr_pre_get(ps
, GFP_KERNEL
));
1989 inet_get_local_port_range(&low
, &high
);
1991 if (next_port
!= low
) {
1992 idr_remove(ps
, port
);
1996 ret
= -EADDRNOTAVAIL
;
2003 next_port
= port
+ 1;
2006 bind_list
->port
= (unsigned short) port
;
2007 cma_bind_port(bind_list
, id_priv
);
2010 idr_remove(ps
, port
);
2016 static int cma_use_port(struct idr
*ps
, struct rdma_id_private
*id_priv
)
2018 struct rdma_id_private
*cur_id
;
2019 struct sockaddr_in
*sin
, *cur_sin
;
2020 struct rdma_bind_list
*bind_list
;
2021 struct hlist_node
*node
;
2022 unsigned short snum
;
2024 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
2025 snum
= ntohs(sin
->sin_port
);
2026 if (snum
< PROT_SOCK
&& !capable(CAP_NET_BIND_SERVICE
))
2029 bind_list
= idr_find(ps
, snum
);
2031 return cma_alloc_port(ps
, id_priv
, snum
);
2034 * We don't support binding to any address if anyone is bound to
2035 * a specific address on the same port.
2037 if (cma_any_addr((struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
))
2038 return -EADDRNOTAVAIL
;
2040 hlist_for_each_entry(cur_id
, node
, &bind_list
->owners
, node
) {
2041 if (cma_any_addr((struct sockaddr
*) &cur_id
->id
.route
.addr
.src_addr
))
2042 return -EADDRNOTAVAIL
;
2044 cur_sin
= (struct sockaddr_in
*) &cur_id
->id
.route
.addr
.src_addr
;
2045 if (sin
->sin_addr
.s_addr
== cur_sin
->sin_addr
.s_addr
)
2049 cma_bind_port(bind_list
, id_priv
);
2053 static int cma_get_port(struct rdma_id_private
*id_priv
)
2058 switch (id_priv
->id
.ps
) {
2072 return -EPROTONOSUPPORT
;
2076 if (cma_any_port((struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
))
2077 ret
= cma_alloc_any_port(ps
, id_priv
);
2079 ret
= cma_use_port(ps
, id_priv
);
2080 mutex_unlock(&lock
);
2085 static int cma_check_linklocal(struct rdma_dev_addr
*dev_addr
,
2086 struct sockaddr
*addr
)
2088 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2089 struct sockaddr_in6
*sin6
;
2091 if (addr
->sa_family
!= AF_INET6
)
2094 sin6
= (struct sockaddr_in6
*) addr
;
2095 if ((ipv6_addr_type(&sin6
->sin6_addr
) & IPV6_ADDR_LINKLOCAL
) &&
2096 !sin6
->sin6_scope_id
)
2099 dev_addr
->bound_dev_if
= sin6
->sin6_scope_id
;
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
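/*
 * rdma_bind_addr() plays the role of bind(2) for an rdma_cm_id: binding to
 * a specific address resolves it to an RDMA device immediately, while
 * binding to the wildcard address only reserves the port and defers device
 * selection until an address is resolved or a connection request arrives.
 */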
2150 static int cma_format_hdr(void *hdr
, enum rdma_port_space ps
,
2151 struct rdma_route
*route
)
2153 struct cma_hdr
*cma_hdr
;
2154 struct sdp_hh
*sdp_hdr
;
2156 if (route
->addr
.src_addr
.ss_family
== AF_INET
) {
2157 struct sockaddr_in
*src4
, *dst4
;
2159 src4
= (struct sockaddr_in
*) &route
->addr
.src_addr
;
2160 dst4
= (struct sockaddr_in
*) &route
->addr
.dst_addr
;
2165 if (sdp_get_majv(sdp_hdr
->sdp_version
) != SDP_MAJ_VERSION
)
2167 sdp_set_ip_ver(sdp_hdr
, 4);
2168 sdp_hdr
->src_addr
.ip4
.addr
= src4
->sin_addr
.s_addr
;
2169 sdp_hdr
->dst_addr
.ip4
.addr
= dst4
->sin_addr
.s_addr
;
2170 sdp_hdr
->port
= src4
->sin_port
;
2174 cma_hdr
->cma_version
= CMA_VERSION
;
2175 cma_set_ip_ver(cma_hdr
, 4);
2176 cma_hdr
->src_addr
.ip4
.addr
= src4
->sin_addr
.s_addr
;
2177 cma_hdr
->dst_addr
.ip4
.addr
= dst4
->sin_addr
.s_addr
;
2178 cma_hdr
->port
= src4
->sin_port
;
2182 struct sockaddr_in6
*src6
, *dst6
;
2184 src6
= (struct sockaddr_in6
*) &route
->addr
.src_addr
;
2185 dst6
= (struct sockaddr_in6
*) &route
->addr
.dst_addr
;
2190 if (sdp_get_majv(sdp_hdr
->sdp_version
) != SDP_MAJ_VERSION
)
2192 sdp_set_ip_ver(sdp_hdr
, 6);
2193 sdp_hdr
->src_addr
.ip6
= src6
->sin6_addr
;
2194 sdp_hdr
->dst_addr
.ip6
= dst6
->sin6_addr
;
2195 sdp_hdr
->port
= src6
->sin6_port
;
2199 cma_hdr
->cma_version
= CMA_VERSION
;
2200 cma_set_ip_ver(cma_hdr
, 6);
2201 cma_hdr
->src_addr
.ip6
= src6
->sin6_addr
;
2202 cma_hdr
->dst_addr
.ip6
= dst6
->sin6_addr
;
2203 cma_hdr
->port
= src6
->sin6_port
;
2210 static int cma_sidr_rep_handler(struct ib_cm_id
*cm_id
,
2211 struct ib_cm_event
*ib_event
)
2213 struct rdma_id_private
*id_priv
= cm_id
->context
;
2214 struct rdma_cm_event event
;
2215 struct ib_cm_sidr_rep_event_param
*rep
= &ib_event
->param
.sidr_rep_rcvd
;
2218 if (cma_disable_callback(id_priv
, CMA_CONNECT
))
2221 memset(&event
, 0, sizeof event
);
2222 switch (ib_event
->event
) {
2223 case IB_CM_SIDR_REQ_ERROR
:
2224 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2225 event
.status
= -ETIMEDOUT
;
2227 case IB_CM_SIDR_REP_RECEIVED
:
2228 event
.param
.ud
.private_data
= ib_event
->private_data
;
2229 event
.param
.ud
.private_data_len
= IB_CM_SIDR_REP_PRIVATE_DATA_SIZE
;
2230 if (rep
->status
!= IB_SIDR_SUCCESS
) {
2231 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2232 event
.status
= ib_event
->param
.sidr_rep_rcvd
.status
;
2235 ret
= cma_set_qkey(id_priv
);
2237 event
.event
= RDMA_CM_EVENT_ADDR_ERROR
;
2238 event
.status
= -EINVAL
;
2241 if (id_priv
->qkey
!= rep
->qkey
) {
2242 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2243 event
.status
= -EINVAL
;
2246 ib_init_ah_from_path(id_priv
->id
.device
, id_priv
->id
.port_num
,
2247 id_priv
->id
.route
.path_rec
,
2248 &event
.param
.ud
.ah_attr
);
2249 event
.param
.ud
.qp_num
= rep
->qpn
;
2250 event
.param
.ud
.qkey
= rep
->qkey
;
2251 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
2255 printk(KERN_ERR
"RDMA CMA: unexpected IB CM event: %d\n",
2260 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
2262 /* Destroy the CM ID by returning a non-zero value. */
2263 id_priv
->cm_id
.ib
= NULL
;
2264 cma_exch(id_priv
, CMA_DESTROYING
);
2265 mutex_unlock(&id_priv
->handler_mutex
);
2266 rdma_destroy_id(&id_priv
->id
);
2270 mutex_unlock(&id_priv
->handler_mutex
);
2274 static int cma_resolve_ib_udp(struct rdma_id_private
*id_priv
,
2275 struct rdma_conn_param
*conn_param
)
2277 struct ib_cm_sidr_req_param req
;
2278 struct rdma_route
*route
;
2281 req
.private_data_len
= sizeof(struct cma_hdr
) +
2282 conn_param
->private_data_len
;
2283 req
.private_data
= kzalloc(req
.private_data_len
, GFP_ATOMIC
);
2284 if (!req
.private_data
)
2287 if (conn_param
->private_data
&& conn_param
->private_data_len
)
2288 memcpy((void *) req
.private_data
+ sizeof(struct cma_hdr
),
2289 conn_param
->private_data
, conn_param
->private_data_len
);
2291 route
= &id_priv
->id
.route
;
2292 ret
= cma_format_hdr((void *) req
.private_data
, id_priv
->id
.ps
, route
);
2296 id_priv
->cm_id
.ib
= ib_create_cm_id(id_priv
->id
.device
,
2297 cma_sidr_rep_handler
, id_priv
);
2298 if (IS_ERR(id_priv
->cm_id
.ib
)) {
2299 ret
= PTR_ERR(id_priv
->cm_id
.ib
);
2303 req
.path
= route
->path_rec
;
2304 req
.service_id
= cma_get_service_id(id_priv
->id
.ps
,
2305 (struct sockaddr
*) &route
->addr
.dst_addr
);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
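	/*
	 * The shift above converts the CM time code to milliseconds: since
	 * 4.096us is roughly 2^2 us, a code of t is about 2^(t-8) ms, so
	 * CMA_CM_RESPONSE_TIMEOUT = 20 yields 1 << 12 = 4096 ms.
	 */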
2309 ret
= ib_send_cm_sidr_req(id_priv
->cm_id
.ib
, &req
);
2311 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
2312 id_priv
->cm_id
.ib
= NULL
;
2315 kfree(req
.private_data
);

static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
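
/*
 * Illustrative sketch, not code from this file: the REQ above is populated
 * entirely from the caller-supplied rdma_conn_param, so an initiating ULP
 * would typically fill one in along these lines before calling
 * rdma_connect().  The particular values are example choices, not
 * requirements:
 *
 *	struct rdma_conn_param param = {
 *		.responder_resources	= 1,
 *		.initiator_depth	= 1,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *	};
 */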

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
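
/*
 * Typical active-side call sequence (a sketch of assumed consumer usage, not
 * code from this file; all functions named are part of the rdma_cm API
 * defined elsewhere in this file):
 *
 *	rdma_resolve_addr(id, NULL, dst_addr, timeout_ms);
 *	... wait for RDMA_CM_EVENT_ADDR_RESOLVED ...
 *	rdma_resolve_route(id, timeout_ms);
 *	... wait for RDMA_CM_EVENT_ROUTE_RESOLVED ...
 *	rdma_create_qp(id, pd, &qp_init_attr);
 *	rdma_connect(id, &conn_param);
 *	... wait for RDMA_CM_EVENT_ESTABLISHED ...
 *
 * which matches the CMA_ROUTE_RESOLVED -> CMA_CONNECT transition enforced
 * above.
 */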

static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
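
/*
 * Typical passive-side call sequence (a sketch of assumed consumer usage,
 * not code from this file):
 *
 *	rdma_bind_addr(listen_id, src_addr);
 *	rdma_listen(listen_id, backlog);
 *	... RDMA_CM_EVENT_CONNECT_REQUEST delivers a new child id ...
 *	rdma_create_qp(new_id, pd, &qp_init_attr);
 *	rdma_accept(new_id, &conn_param);
 *
 * For non-UD IB ids, calling rdma_accept() with a NULL conn_param takes the
 * cma_rep_recv() branch above instead of sending a full REP.
 */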

int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
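
/*
 * Reading of the IB branch above (an interpretation, hedged): the QP is
 * moved to the error state first, then a DREQ is attempted; if sending the
 * DREQ fails (for instance because the peer already initiated the
 * disconnect), a DREP is sent instead, so a single rdma_disconnect() call
 * serves both to initiate and to respond to a disconnect.
 */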

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6)) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
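
/*
 * Illustrative restatement of the IPv4 branch above (an interpretation, not
 * additional code): ip_ib_mc_map() produces the IPoIB multicast hardware
 * address, byte 7 is stamped with the RDMA CM signature for RDMA_PS_UDP so
 * these joins do not collide with plain IPoIB groups, and the MGID is the
 * 16 bytes that follow the 4-byte header:
 *
 *	ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
 *	mc_map[7] = 0x01;
 *	*mgid = *(union ib_gid *) (mc_map + 4);
 */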

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
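
/*
 * Typical UD multicast usage (a sketch of assumed consumer code, not part of
 * this file; mc_addr and my_context are hypothetical caller names): the id
 * must at least have a bound or resolved source address before joining, and
 * the join event reports the AH attributes and qkey to use when posting
 * sends to the group:
 *
 *	rdma_resolve_addr(id, NULL, (struct sockaddr *) &mc_addr, timeout_ms);
 *	... wait for RDMA_CM_EVENT_ADDR_RESOLVED ...
 *	rdma_join_multicast(id, (struct sockaddr *) &mc_addr, my_context);
 *	... RDMA_CM_EVENT_MULTICAST_JOIN carries param.ud.ah_attr and qkey ...
 */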

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *)ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

static int __init cma_init(void)
{
	int ret, low, high, remaining;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
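
/*
 * Reading of the port seeding above (an interpretation, hedged): next_port
 * is filled with random bytes and folded into the kernel's local port range
 * so dynamically bound rdma_cm ids start from an unpredictable ephemeral
 * port, much as the IP stack does for sockets.  For example, with the common
 * 32768-61000 range, remaining is 28233 and any random value reduces to a
 * port in [32768, 61000].
 */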

static void __exit cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);