/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
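/*
 * The CM response timeout and the MRA service timeout above use the IB CM's
 * exponential encoding of roughly 4.096 us * 2^value: a value of 20 gives the
 * remote CM about 4.3 seconds to respond, and the MRA setting of 24 asks the
 * peer to wait roughly a minute while a request is still being processed.
 */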
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};
static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};
struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum cma_state		state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};
struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	struct sockaddr_storage	addr;
};
struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};
struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};
struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};
struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__be16 port;
	__be16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};
#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
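/*
 * The three helpers above implement the id_priv state machine: cma_comp()
 * tests the current state, cma_comp_exch() moves from one expected state to
 * another only if the id is actually in that state, and cma_exch() forces a
 * new state while returning the old one.  All transitions are serialized by
 * id_priv->lock.
 */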
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}
static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}
static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}
static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}
static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}
static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}
static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}
static int cma_set_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey)
		return 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_addr->dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}
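/*
 * cma_acquire_dev() walks the global device list, so callers hold the global
 * 'lock' mutex around it (see cma_req_handler(), addr_handler() and
 * rdma_bind_addr() below).
 */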
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}
static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum cma_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}
static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
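/*
 * A new id starts with a single reference.  rdma_destroy_id() drops that
 * reference and then waits on id_priv->comp until any references taken by
 * in-flight callbacks have been released via cma_deref_id().
 */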
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}
static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}
static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}
static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}
static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}
static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}
static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
				&& !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}
*id_priv
, void *data
)
902 if (id_priv
->id
.ps
== RDMA_PS_SDP
&&
903 sdp_get_majv(((struct sdp_hah
*) data
)->sdp_version
) !=
static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
		cma_disable_callback(id_priv, CMA_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
		cma_disable_callback(id_priv, CMA_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto destroy_id;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}
*cma_new_udp_id(struct rdma_cm_id
*listen_id
,
1052 struct ib_cm_event
*ib_event
)
1054 struct rdma_id_private
*id_priv
;
1055 struct rdma_cm_id
*id
;
1056 union cma_ip_addr
*src
, *dst
;
1061 id
= rdma_create_id(listen_id
->event_handler
, listen_id
->context
,
1067 if (cma_get_net_info(ib_event
->private_data
, listen_id
->ps
,
1068 &ip_ver
, &port
, &src
, &dst
))
1071 cma_save_net_info(&id
->route
.addr
, &listen_id
->route
.addr
,
1072 ip_ver
, port
, src
, dst
);
1074 ret
= rdma_translate_ip((struct sockaddr
*) &id
->route
.addr
.src_addr
,
1075 &id
->route
.addr
.dev_addr
);
1079 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1080 id_priv
->state
= CMA_CONNECT
;
1083 rdma_destroy_id(id
);
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret) {
		/*
		 * Acquire mutex to prevent user executing rdma_destroy_id()
		 * while we're accessing the cm_id.
		 */
		mutex_lock(&lock);
		if (cma_comp(conn_id, CMA_CONNECT) &&
		    !cma_is_ud_ps(conn_id->id.ps))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		mutex_unlock(&lock);
		mutex_unlock(&conn_id->handler_mutex);
		goto out;
	}

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
	rdma_destroy_id(&conn_id->id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}
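/*
 * The IB service ID built above carries the RDMA port space in its upper
 * bytes and the 16-bit port number in the low 16 bits, so a listener and an
 * active side using the same port space and port compute the same service ID.
 */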
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
	event.param.conn.responder_resources = attr.max_qp_rd_atom;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}
*id_priv
, int backlog
)
1407 struct sockaddr_in
*sin
;
1409 id_priv
->cm_id
.iw
= iw_create_cm_id(id_priv
->id
.device
,
1410 iw_conn_req_handler
,
1412 if (IS_ERR(id_priv
->cm_id
.iw
))
1413 return PTR_ERR(id_priv
->cm_id
.iw
);
1415 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
1416 id_priv
->cm_id
.iw
->local_addr
= *sin
;
1418 ret
= iw_cm_listen(id_priv
->cm_id
.iw
, backlog
);
1421 iw_destroy_cm_id(id_priv
->cm_id
.iw
);
1422 id_priv
->cm_id
.iw
= NULL
;
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_storage addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.ss_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
						 (struct sockaddr *) &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.ss_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == CMA_DESTROYING ||
	    id_priv->state == CMA_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret, low, high;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	/* FIXME: add proper port randomization per like inet_csk_get_port */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	inet_get_local_port_range(&low, &high);
	if (port > high) {
		if (next_port != low) {
			idr_remove(ps, port);
			next_port = low;
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == high)
		next_port = low;
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	if (route->addr.src_addr.ss_family == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) &route->addr.src_addr;
		dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 4);
			sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			sdp_hdr->port = src4->sin_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 4);
			cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			cma_hdr->port = src4->sin_port;
			break;
		}
	} else {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
		dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 6);
			sdp_hdr->src_addr.ip6 = src6->sin6_addr;
			sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
			sdp_hdr->port = src6->sin6_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 6);
			cma_hdr->src_addr.ip6 = src6->sin6_addr;
			cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
			cma_hdr->port = src6->sin6_port;
			break;
		}
	}
	return 0;
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = -EINVAL;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	memset(&req, 0, sizeof req);
	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
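
/*
 * Initiate an active connection.  The id must have a resolved route; the
 * transport-specific connect path is selected from the bound device, and the
 * id is returned to the route-resolved state if the connect attempt fails.
 */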
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
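
/*
 * Accept an IB connection request: transition the QP through RTR and RTS,
 * then send a CM REP built from the listener's connection parameters.
 */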
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
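
/*
 * Accept a connection on a passively created id.  UD port spaces answer with
 * a SIDR REP; otherwise the transport-specific accept path is taken.  On
 * failure the request is rejected and the QP is moved to the error state.
 */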
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
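
/*
 * Reject a connection request, passing any private data back to the
 * initiator.  UD port spaces reject via a SIDR REP with IB_SIDR_REJECT.
 */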
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
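
/*
 * Tear down an established connection.  For IB the QP is moved to the error
 * state and a DREQ is sent, or a DREP if a disconnect is already in progress.
 */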
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
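
/*
 * Completion handler for an SA multicast join.  On success the QP is attached
 * to the group and a MULTICAST_JOIN event is reported; otherwise a
 * MULTICAST_ERROR event carries the failure status back to the user.
 */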
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
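
/*
 * Derive the multicast GID for the given address: wildcard addresses map to
 * the zero MGID, SA-assigned IPv6 MGIDs are used directly, and IPv4 groups
 * are mapped through the IPoIB broadcast group mapping.
 */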
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
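
/*
 * Build the MCMember record for the requested group and ask the SA to join.
 * The component mask is widened for the IPoIB port space so that the rate
 * fields are honored.
 */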
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
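
/*
 * Join a multicast group on behalf of the user.  The id must be bound to a
 * device; the multicast entry is unlinked and freed again if the transport
 * specific join fails.
 */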
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
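
/*
 * Leave a previously joined multicast group, detaching the QP (if any) and
 * releasing the SA multicast entry.
 */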
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
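
/*
 * A bonding failover changed the hardware address under this id; queue work
 * to report RDMA_CM_EVENT_ADDR_CHANGE to the user.
 */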
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->src_dev == ndev) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *)ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}
= {
2867 .notifier_call
= cma_netdev_callback
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
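
/*
 * Module init: seed the ephemeral port selection, create the work queue, and
 * register with the SA, address resolution, netdev notifier, and IB client
 * layers, unwinding the registrations if the IB client registration fails.
 */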
static int __init cma_init(void)
{
	int ret, low, high, remaining;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void __exit cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);